diff --git a/.bazelrc b/.bazelrc index 95d9adb1060c..b21b7289af6a 100644 --- a/.bazelrc +++ b/.bazelrc @@ -8,73 +8,12 @@ build --local_test_jobs=1 build --flag_alias=erlang_home=@rules_erlang//:erlang_home build --flag_alias=erlang_version=@rules_erlang//:erlang_version -build --flag_alias=elixir_home=//:elixir_home +build --flag_alias=elixir_home=@rules_elixir//:elixir_home build --flag_alias=test_build=//:enable_test_build build --test_timeout=7200 -build:buildbuddy --bes_results_url=https://app.buildbuddy.io/invocation/ -build:buildbuddy --bes_backend=grpcs://remote.buildbuddy.io -build:buildbuddy --remote_cache=grpcs://remote.buildbuddy.io -build:buildbuddy --remote_timeout=1200 -build:buildbuddy --grpc_keepalive_time=360s -build:buildbuddy --grpc_keepalive_timeout=360s -build:buildbuddy --remote_download_minimal -build:buildbuddy --build_metadata=REPO_URL=https://github.com/rabbitmq/rabbitmq-server.git -build:buildbuddy --experimental_remote_cache_compression -build:buildbuddy --experimental_remote_cache_async -build:buildbuddy --noslim_profile -build:buildbuddy --experimental_profile_include_target_label -build:buildbuddy --experimental_profile_include_primary_output -build:buildbuddy --experimental_remote_build_event_upload=minimal -build:buildbuddy --nolegacy_important_outputs - -# buildbuddy implies remote cache, so ct_logdir is restored to its default for reproducibility -build:buildbuddy --@rules_erlang//:ct_logdir= - -build:rbe --config=buildbuddy - -build:rbe --remote_executor=grpcs://remote.buildbuddy.io - -build:rbe --spawn_strategy=remote,local -build:rbe --test_strategy="" -build:rbe --jobs=50 - -build:rbe --crosstool_top=@rbe//cc:toolchain -build:rbe --extra_toolchains=@rbe//config:cc-toolchain - -build:rbe --host_platform=//bazel/platforms:erlang_internal_platform - -build:rbe --host_cpu=k8 -build:rbe --cpu=k8 - -build:rbe-24 --config=rbe -build:rbe-24 --platforms=//bazel/platforms:erlang_linux_24_platform - -build:rbe-25 --config=rbe -build:rbe-25 --platforms=//bazel/platforms:erlang_linux_25_3_platform - -build:rbe-25_0 --config=rbe -build:rbe-25_0 --platforms=//bazel/platforms:erlang_linux_25_0_platform - -build:rbe-25_1 --config=rbe -build:rbe-25_1 --platforms=//bazel/platforms:erlang_linux_25_1_platform - -build:rbe-25_2 --config=rbe -build:rbe-25_2 --platforms=//bazel/platforms:erlang_linux_25_2_platform - -build:rbe-25_3 --config=rbe -build:rbe-25_3 --platforms=//bazel/platforms:erlang_linux_25_3_platform - -build:rbe-26 --config=rbe -build:rbe-26 --platforms=//bazel/platforms:erlang_linux_26_platform - -# no-op config so that --config=local does not error -build:local --color=auto - -# having bzlmod enabled seems to interfere with docker toolchain resolution, -# so we set this flag -build --@io_bazel_rules_docker//transitions:enable=false +build --combined_report=lcov # Try importing a user specific .bazelrc # You can create your own by copying and editing the template-user.bazelrc template: diff --git a/.elp.toml b/.elp.toml new file mode 100644 index 000000000000..ffdddf132669 --- /dev/null +++ b/.elp.toml @@ -0,0 +1,3 @@ +[build_info] +apps = "deps/*" +deps = "" \ No newline at end of file diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index fb86a8deb832..c2f0a39259c1 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -2,3 +2,5 @@ 209f23fa2f58e0240116b3e8e5be9cd54d34b569 # Format MQTT code with erlfmt 1de9fcf582def91d1cee6bea457dd24e8a53a431 +# Remove unsed imports +cfa3de4b2b1a07e9f4ef90d0aa6b22252238bc47 diff --git 
a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index eb314d2b5a7e..05c1c54749a7 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,21 +1,24 @@ Thank you for using RabbitMQ. -**STOP NOW AND READ THIS** BEFORE OPENING A NEW ISSUE ON GITHUB +**STOP NOW AND READ THIS** before proceeding, or your issue will be moved to Discussions +and deleted. -Unless you are CERTAIN you have found a reproducible problem in RabbitMQ or -have a **specific, actionable** suggestion for our team, you must first ask -your question or discuss your suspected issue on the mailing list: +Please read https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md +and https://github.com/rabbitmq/rabbitmq-server/blob/main/CONTRIBUTING.md#github-issues +first. -https://groups.google.com/forum/#!forum/rabbitmq-users +Unless you are **certain** that you are eligible for community support from the core team, please direct your +questions to https://github.com/rabbitmq/rabbitmq-server/discussions and https://rabbitmq.com/discord/. -Team RabbitMQ does not use GitHub issues for discussions, investigations, root -cause analysis and so on. +Issues that lack key relevant information, such as -Please take the time to read the CONTRIBUTING.md document for instructions on -how to effectively ask a question or report a suspected issue: + * RabbitMQ and Erlang versions used + * The operating system used and its version (distribution) + * Client library (and any "higher level" libraries or frameworks) used and their versions + * **Specific and detailed** steps to set up an environment where the behavior can be reproduced -https://github.com/rabbitmq/rabbitmq-server/blob/main/CONTRIBUTING.md#github-issues +will be moved to Discussions and then deleted. -Following these rules **will save time** for both you and RabbitMQ's maintainers. - -Thank you. +RabbitMQ Core Team does not use GitHub issues for questions and discussions. Root cause analysis +and technical operations guidance will ONLY be provided to those eligible per +https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c8fd6629e611..67caa216bf2b 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,9 +5,6 @@ If it fixes a bug or resolves a feature request, be sure to link to that issue. A pull request that doesn't explain **why** the change was made has a much lower chance of being accepted. -If English isn't your first language, don't worry about it and try to communicate the problem you are trying to solve to the best of your abilities. -As long as we can understand the intent, it's all good. - ## Types of Changes What types of changes does your code introduce to this project? @@ -37,4 +34,5 @@ This is simply a reminder of what we are going to look for before merging your c ## Further Comments -If this is a relatively large or complex change, kick off the discussion by explaining why you chose the solution you did and what alternatives you considered, etc. +If this is a relatively large or complex change, kick off the discussion by explaining why you chose the solution +you did and what alternatives you considered, etc.
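As a rough illustration of the .bazelrc changes above (a minimal sketch; targets and toolchain paths will vary per machine), the flag aliases let the Erlang/Elixir locations be passed directly on the command line, and --combined_report=lcov makes bazel coverage merge per-target results into a single LCOV file under bazel-out/_coverage/:

    # aliases expand to --@rules_erlang//:erlang_home and --@rules_elixir//:elixir_home
    bazel build //deps/rabbit/... --erlang_home="$(dirname "$(dirname "$(which erl)")")" --elixir_home="$(dirname "$(dirname "$(which iex)")")"
    # with --combined_report=lcov, the merged report is written to bazel-out/_coverage/_coverage_report.dat
    bazel coverage //deps/rabbit/...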
diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index c3389c007716..168f3a19ce32 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -10,22 +10,12 @@ updates: directory: "/" schedule: interval: "daily" - target-branch: "v3.11.x" + target-branch: "v4.0.x" - package-ecosystem: "github-actions" directory: "/" schedule: interval: "daily" - target-branch: "v3.10.x" - - package-ecosystem: "github-actions" - directory: "/" - schedule: - interval: "daily" - target-branch: "v3.9.x" - - package-ecosystem: "github-actions" - directory: "/" - schedule: - interval: "daily" - target-branch: "v3.8.x" + target-branch: "v3.13.x" # Maintain dependencies for Java test projects - package-ecosystem: "maven" directory: "/deps/rabbitmq_mqtt/test/java_SUITE_data" diff --git a/.github/mergify.yml b/.github/mergify.yml index edeb794b8975..3d1aedb22fa8 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -13,139 +13,49 @@ pull_request_rules: label: add: - make - - name: Automatically backport to v3.12.x based on label + - name: Automatically backport to v4.0.x based on label conditions: - base=main - - label=backport-v3.12.x - - label!=backport-v3.11.x - - label!=backport-v3.10.x - - label!=backport-v3.9.x + - label=backport-v4.0.x + - label!=backport-v3.13.x + - label!=backport-v3.12.x actions: backport: branches: - - v3.12.x + - v4.0.x assignees: - "{{ author }}" - - name: Automatically backport to v3.12.x & v3.11.x based on label + - name: Automatically backport to v4.0.x & v3.13.x based on label conditions: - base=main - - label=backport-v3.12.x - - label=backport-v3.11.x - - label!=backport-v3.10.x - - label!=backport-v3.9.x + - label=backport-v4.0.x + - label=backport-v3.13.x actions: backport: branches: - - v3.12.x + - v4.0.x labels: - - backport-v3.11.x + - backport-v3.13.x assignees: - "{{ author }}" - - name: Automatically backport to v3.12.x, v3.11.x & v3.10.x based on labels + - name: Automatically backport to v3.13.x based on label conditions: - - base=main - - label=backport-v3.12.x - - label=backport-v3.11.x - - label=backport-v3.10.x - - label!=backport-v3.9.x + - base=v4.0.x + - label=backport-v3.13.x + - label!=backport-v3.12.x actions: backport: branches: - - v3.12.x - labels: - - backport-v3.11.x - - backport-v3.10.x + - v3.13.x assignees: - "{{ author }}" - - name: Automatically backport to v3.12.x, v3.11.x, v3.10.x & v3.9.x based on labels + - name: Automatically backport to v3.12.x based on label conditions: - - base=main + - base=v3.13.x - label=backport-v3.12.x - - label=backport-v3.11.x - - label=backport-v3.10.x - - label=backport-v3.9.x actions: backport: branches: - v3.12.x - labels: - - backport-v3.11.x - - backport-v3.10.x - - backport-v3.9.x - assignees: - - "{{ author }}" - - name: Automatically backport to v3.11.x based on label - conditions: - - base=v3.12.x - - label=backport-v3.11.x - - label!=backport-v3.10.x - - label!=backport-v3.9.x - actions: - backport: - branches: - - v3.11.x - assignees: - - "{{ author }}" - - name: Automatically backport to v3.11.x & v3.10.x based on labels - conditions: - - base=v3.12.x - - label=backport-v3.11.x - - label=backport-v3.10.x - - label!=backport-v3.9.x - actions: - backport: - branches: - - v3.11.x - labels: - - backport-v3.10.x - assignees: - - "{{ author }}" - - name: Automatically backport to v3.11.x, v3.10.x & v3.9.x based on labels - conditions: - - base=v3.12.x - - label=backport-v3.11.x - - label=backport-v3.10.x - - label=backport-v3.9.x - actions: - backport: - branches: - - v3.11.x - 
labels: - - backport-v3.10.x - - backport-v3.9.x - assignees: - - "{{ author }}" - - name: Automatically backport to v3.10.x based on label - conditions: - - base=v3.11.x - - label=backport-v3.10.x - - label!=backport-v3.9.x - actions: - backport: - branches: - - v3.10.x - assignees: - - "{{ author }}" - - name: Automatically backport to v3.10.x & v3.9.x based on labels - conditions: - - base=v3.11.x - - label=backport-v3.10.x - - label=backport-v3.9.x - actions: - backport: - branches: - - v3.10.x - labels: - - backport-v3.9.x - assignees: - - "{{ author }}" - - name: Automatically backport to v3.9.x based on label - conditions: - - base=v3.10.x - - label=backport-v3.9.x - actions: - backport: - branches: - - v3.9.x assignees: - "{{ author }}" diff --git a/.github/workflows/check-build-system-equivalence-release-branches.yaml b/.github/workflows/check-build-system-equivalence-release-branches.yaml index bc29ce9f1d73..5b3d5f15a197 100644 --- a/.github/workflows/check-build-system-equivalence-release-branches.yaml +++ b/.github/workflows/check-build-system-equivalence-release-branches.yaml @@ -1,28 +1,37 @@ -name: Check Bazel/Erlang.mk Equivalence (Matrix) +name: Check Bazel/Erlang.mk Equivalence on Release Branches on: schedule: - cron: '0 2 * * *' + workflow_dispatch: jobs: check-main: uses: ./.github/workflows/check-build-system-equivalence.yaml with: - ref: main - erlang_version: 26.0 + ref: refs/heads/main + erlang_version: 26.2 elixir_version: 1.15 - project_version: 3.13.0 + project_version: 4.0.0 - check-v3_12_x: + check-v4_0_x: uses: ./.github/workflows/check-build-system-equivalence.yaml with: - ref: v3.12.x - erlang_version: 26.0 + ref: refs/heads/main + erlang_version: 26.2 elixir_version: 1.15 - project_version: 3.12.0 + project_version: 4.0.0 + + check-v3_13_x: + uses: ./.github/workflows/check-build-system-equivalence.yaml + with: + ref: refs/heads/v3.13.x + erlang_version: 26.2 + elixir_version: 1.15 + project_version: 3.13.0 - check-v3_11_x: + check-v3_12_x: uses: ./.github/workflows/check-build-system-equivalence.yaml with: - ref: v3.11.x - erlang_version: 26.0 + ref: refs/heads/v3.12.x + erlang_version: 26.1 elixir_version: 1.15 - project_version: 3.11.0 + project_version: 3.12.0 diff --git a/.github/workflows/check-build-system-equivalence.yaml b/.github/workflows/check-build-system-equivalence.yaml index ef3d883cf247..bcc4c16ac800 100644 --- a/.github/workflows/check-build-system-equivalence.yaml +++ b/.github/workflows/check-build-system-equivalence.yaml @@ -19,7 +19,7 @@ on: erlang_version: description: 'OTP version to build with' required: true - default: "26.0" + default: "26.2" elixir_version: description: 'Elixir version to build with' required: true @@ -27,7 +27,7 @@ on: project_version: description: 'PROJECT_VERSION used for make' required: true - default: "3.13.0" + default: "4.0.0" env: erlang_version: ${{ inputs.erlang_version || github.event.inputs.erlang_version }} elixir_version: ${{ inputs.elixir_version || github.event.inputs.elixir_version }} @@ -43,9 +43,9 @@ jobs: - name: CHECKOUT REPOSITORY uses: actions/checkout@v4 with: - ref: ${{ github.event.inputs.ref || github.ref }} + ref: ${{ inputs.ref || github.ref }} - name: CONFIGURE ERLANG - uses: erlef/setup-beam@v1.16 + uses: erlef/setup-beam@v1.17 with: otp-version: ${{ env.erlang_version }} elixir-version: ${{ env.elixir_version }} @@ -62,9 +62,9 @@ jobs: run: | echo "archives_dir=$(readlink -f bazel-bin)" >> $GITHUB_ENV - name: UPLOAD package-generic-unix.tar.xz - uses: 
actions/upload-artifact@v3.1.3 + uses: actions/upload-artifact@v4.3.2 with: - name: bazel-package-generic-unix.tar.xz + name: bazel-package-generic-unix-${{ env.VERSION }}.tar.xz path: ${{ env.archives_dir }}/package-generic-unix.tar.xz if-no-files-found: error @@ -77,9 +77,9 @@ jobs: uses: actions/checkout@v4 with: path: rabbitmq - ref: ${{ github.event.inputs.ref || github.ref }} + ref: ${{ inputs.ref || github.ref }} - name: CONFIGURE ERLANG - uses: erlef/setup-beam@v1.16 + uses: erlef/setup-beam@v1.17 with: otp-version: ${{ env.erlang_version }} elixir-version: ${{ env.elixir_version }} @@ -97,9 +97,9 @@ jobs: PACKAGES_DIR="$PWD/PACKAGES" \ VERSION="$VERSION" - name: UPLOAD package-generic-unix.tar.xz - uses: actions/upload-artifact@v3.1.3 + uses: actions/upload-artifact@v4.3.2 with: - name: make-package-generic-unix.tar.xz + name: make-package-generic-unix-${{ env.VERSION }}.tar.xz path: PACKAGES/rabbitmq-server-generic-unix-*.tar.xz if-no-files-found: error @@ -115,20 +115,20 @@ jobs: uses: actions/checkout@v4 with: path: rabbitmq-server - ref: ${{ github.event.inputs.ref || github.ref }} + ref: ${{ inputs.ref || github.ref }} - name: CONFIGURE ERLANG - uses: erlef/setup-beam@v1.16 + uses: erlef/setup-beam@v1.17 with: otp-version: ${{ env.erlang_version }} elixir-version: ${{ env.elixir_version }} - name: DOWNLOAD bazel-package-generic-unix.tar.xz - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: - name: bazel-package-generic-unix.tar.xz + name: bazel-package-generic-unix-${{ env.VERSION }}.tar.xz - name: DOWNLOAD make-package-generic-unix.tar.xz - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: - name: make-package-generic-unix.tar.xz + name: make-package-generic-unix-${{ env.VERSION }}.tar.xz - name: EXPAND & COMPARE run: | mkdir bazel @@ -145,8 +145,8 @@ jobs: find . 
| sort > ${{ github.workspace }}/make.manifest popd - tree -L 2 bazel - tree -L 2 make + tree -L 3 bazel + tree -L 3 make sleep 1 diff --git a/.github/workflows/gazelle-scheduled.yaml b/.github/workflows/gazelle-scheduled.yaml index ae1a83ec86b6..122a120eadf1 100644 --- a/.github/workflows/gazelle-scheduled.yaml +++ b/.github/workflows/gazelle-scheduled.yaml @@ -5,27 +5,32 @@ on: jobs: bazel-run-gazelle: name: bazel run gazelle - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest strategy: max-parallel: 1 fail-fast: false matrix: target_branch: - main + - v4.0.x + - v3.13.x - v3.12.x - - v3.11.x - - v3.10.x timeout-minutes: 10 steps: - name: CHECKOUT REPOSITORY uses: actions/checkout@v4 with: ref: ${{ matrix.target_branch }} + - name: Configure Erlang + uses: erlef/setup-beam@v1 + with: + otp-version: 26.2 + elixir-version: 1.15 - name: BAZEL RUN GAZELLE run: | bazel run gazelle - name: CREATE PULL REQUEST - uses: peter-evans/create-pull-request@v5.0.2 + uses: peter-evans/create-pull-request@v6.1.0 with: token: ${{ secrets.REPO_SCOPED_TOKEN }} committer: GitHub diff --git a/.github/workflows/gazelle.yaml b/.github/workflows/gazelle.yaml index d58394fb12e2..5bb10f3ee206 100644 --- a/.github/workflows/gazelle.yaml +++ b/.github/workflows/gazelle.yaml @@ -9,18 +9,23 @@ on: jobs: bazel-run-gazelle: name: bazel run gazelle - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest timeout-minutes: 10 steps: - name: CHECKOUT REPOSITORY uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.target_branch }} + - name: Configure Erlang + uses: erlef/setup-beam@v1 + with: + otp-version: 26.2 + elixir-version: 1.15 - name: BAZEL RUN GAZELLE run: | bazel run gazelle - name: CREATE PULL REQUEST - uses: peter-evans/create-pull-request@v5.0.2 + uses: peter-evans/create-pull-request@v6.1.0 with: token: ${{ secrets.REPO_SCOPED_TOKEN }} committer: GitHub diff --git a/.github/workflows/ibm-mq-make.yaml b/.github/workflows/ibm-mq-make.yaml new file mode 100644 index 000000000000..2663a61f3232 --- /dev/null +++ b/.github/workflows/ibm-mq-make.yaml @@ -0,0 +1,67 @@ +name: IBM MQ docker image (make) + +on: + push: + branches: + - 'main' + paths: + - '.github/workflows/ibm-mq-make.yaml' + pull_request: + paths: + - '.github/workflows/ibm-mq-make.yaml' + +env: + REGISTRY_IMAGE: pivotalrabbitmq/ibm-mqadvanced-server-dev + IBM_MQ_REPOSITORY: ibm-messaging/mq-container + IBM_MQ_BRANCH_NAME: 9.3.5 + IMAGE_TAG: 9.3.5.1-amd64 +jobs: + docker: + runs-on: ubuntu-latest + steps: + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY_IMAGE }} + + - + name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Checkout ibm-mqadvanced-server-dev + uses: actions/checkout@v4 + with: + repository: ${{ env.IBM_MQ_REPOSITORY }} + ref: ${{ env.IBM_MQ_BRANCH_NAME }} + + - name: Prepare image + run: | + ls + echo "Enabling AMQP capability" + sed -i -e 's/genmqpkg_incamqp=0/genmqpkg_incamqp=1/g' Dockerfile-server + echo "AMQP Bootstrap instructions" + cat << EOF >> incubating/mqadvanced-server-dev/10-dev.mqsc.tpl + SET AUTHREC PRINCIPAL('app') OBJTYPE(QMGR) AUTHADD(CONNECT,INQ,ALTUSR) + SET CHLAUTH('SYSTEM.DEF.AMQP') TYPE(ADDRESSMAP) ADDRESS('*') USERSRC(CHANNEL) CHCKCLNT({{ .ChckClnt }}) DESCR('Allows connection via APP channel') ACTION(REPLACE) + SET AUTHREC PROFILE('SYSTEM.BASE.TOPIC') PRINCIPAL('app') OBJTYPE(TOPIC) AUTHADD(PUB,SUB) + SET AUTHREC PROFILE('SYSTEM.DEFAULT.MODEL.QUEUE') PRINCIPAL('app') OBJTYPE(QUEUE) 
AUTHADD(PUT,DSP) + ALTER CHANNEL(SYSTEM.DEF.AMQP) CHLTYPE(AMQP) MCAUSER('app') + START SERVICE(SYSTEM.AMQP.SERVICE) + START CHANNEL(SYSTEM.DEF.AMQP) + EOF + make build-devserver + docker tag ibm-mqadvanced-server-dev:${{ env.IMAGE_TAG }} ${{ env.REGISTRY_IMAGE }}:${{ env.IMAGE_TAG }} + - + name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Push + run: | + docker push ${{ env.REGISTRY_IMAGE }}:${{ env.IMAGE_TAG }} \ No newline at end of file diff --git a/.github/workflows/oci-arm64-make.yaml b/.github/workflows/oci-arm64-make.yaml new file mode 100644 index 000000000000..8af0a78ed110 --- /dev/null +++ b/.github/workflows/oci-arm64-make.yaml @@ -0,0 +1,177 @@ +# This file should be identical to oci-make, except it should build the ARM64 +# image and only for the main branch. It's a separate workflow due to the performance +# of building the ARM64 image. This way we only build it on main, where it should +# take advantage of the cache. +# +# https://github.com/marketplace/actions/build-and-push-docker-images +name: OCI ARM64 (make) +on: + push: + branches: + - main + paths-ignore: + - '.github/workflows/secondary-umbrella.yaml' + - '.github/workflows/update-elixir-patches.yaml' + - '.github/workflows/update-otp-patches.yaml' + workflow_dispatch: +env: + REGISTRY_IMAGE: pivotalrabbitmq/rabbitmq-arm64 +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +jobs: + build-package-generic-unix: + runs-on: ubuntu-latest + outputs: + authorized: ${{ steps.authorized.outputs.authorized }} + steps: + - name: CHECK IF IMAGE WILL PUSH + id: authorized + run: | + if [ -n "${{ secrets.DOCKERHUB_PASSWORD }}" ]; then + echo "authorized=true" | tee -a $GITHUB_OUTPUT + else + echo "authorized=false" | tee -a $GITHUB_OUTPUT + fi + - name: Checkout + if: steps.authorized.outputs.authorized == 'true' + uses: actions/checkout@v4 + - name: Configure Erlang + if: steps.authorized.outputs.authorized == 'true' + uses: erlef/setup-beam@v1 + with: + otp-version: 26.2 + elixir-version: 1.15 + - name: make package-generic-unix + if: steps.authorized.outputs.authorized == 'true' + run: | + make package-generic-unix PROJECT_VERSION=4.0.0 + - name: Upload package-generic-unix + if: steps.authorized.outputs.authorized == 'true' + uses: actions/upload-artifact@v4.3.1 + with: + name: package-generic-unix + path: PACKAGES/rabbitmq-server-*.tar.xz + + build: + needs: build-package-generic-unix + runs-on: ubuntu-latest + if: ${{ needs.build-package-generic-unix.outputs.authorized }} == 'true' + strategy: + fail-fast: false + matrix: + platform: + - linux/arm64 + steps: + - name: Prepare + run: | + platform=${{ matrix.platform }} + echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV + - name: Checkout + uses: actions/checkout@v4 + - name: Download package-generic-unix + uses: actions/download-artifact@v4 + with: + name: package-generic-unix + path: PACKAGES + - name: Rename package-generic-unix + run: | + cp \ + PACKAGES/rabbitmq-server-generic-unix-*.tar.xz \ + packaging/docker-image/package-generic-unix.tar.xz + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY_IMAGE }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha,format=long + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to
GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Build and push by digest + id: build + uses: docker/build-push-action@v6 + with: + context: packaging/docker-image + platforms: ${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + cache-to: type=gha + cache-from: type=gha + outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ env.PLATFORM_PAIR }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + merge: + needs: + - build + runs-on: ubuntu-latest + if: ${{ needs.build-package-generic-unix.outputs.authorized }} == 'true' + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY_IMAGE }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha,format=long + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Create manifest list and push + working-directory: /tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) + - name: Inspect image + run: | + docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} + + summary-oci: + needs: + - build-package-generic-unix + - build + - merge + runs-on: ubuntu-latest + steps: + - name: SUMMARY + run: | + cat << 'EOF' | jq -e 'map(.result == "success") | all(.)' + ${{ toJson(needs) }} + EOF diff --git a/.github/workflows/oci-base.yaml b/.github/workflows/oci-base.yaml deleted file mode 100644 index 6b26e99d1b3c..000000000000 --- a/.github/workflows/oci-base.yaml +++ /dev/null @@ -1,69 +0,0 @@ -# https://github.com/marketplace/actions/build-and-push-docker-images -name: OCI Base Image -on: - schedule: - - cron: '0 3 * * *' - workflow_dispatch: -jobs: - build-publish: - runs-on: ubuntu-20.04 - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Set up QEMU - id: qemu - uses: docker/setup-qemu-action@v3 - with: - image: tonistiigi/binfmt:latest - platforms: linux/amd64,linux/arm64 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Available platforms - run: echo ${{ steps.buildx.outputs.platforms }} - - - name: Cache Docker layers - uses: actions/cache@v3.3.2 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-${{ matrix.image_tag_suffix }}-buildx-${{ github.event.pull_request.head.sha || github.sha }} - restore-keys: | - ${{ runner.os }}-${{ matrix.image_tag_suffix }}-buildx- - - - name: Check for Push Credentials - id: authorized - run: | - if [ -n "${{ secrets.DOCKERHUB_USERNAME }}" ]; then - echo "PUSH=true" >> $GITHUB_OUTPUT - else - echo "PUSH=false" >> $GITHUB_OUTPUT - fi - - - name: Login to DockerHub - if: steps.authorized.outputs.PUSH == 'true' - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Build and push - uses: docker/build-push-action@v5 - with: - context: packaging/base-image - platforms: linux/amd64,linux/arm64 - pull: true - push: ${{ steps.authorized.outputs.PUSH }} - tags: | - pivotalrabbitmq/ubuntu:20.04 - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new - - # Temp fix - # https://github.com/docker/build-push-action/issues/252 - # https://github.com/moby/buildkit/issues/1896 - - name: Move cache - run: | - rm -rf /tmp/.buildx-cache - mv /tmp/.buildx-cache-new /tmp/.buildx-cache diff --git a/.github/workflows/oci-make.yaml b/.github/workflows/oci-make.yaml new file mode 100644 index 000000000000..3d631d7f7aec --- /dev/null +++ b/.github/workflows/oci-make.yaml @@ -0,0 +1,174 @@ +# https://github.com/marketplace/actions/build-and-push-docker-images +name: OCI (make) +on: + push: + paths-ignore: + - '.github/workflows/secondary-umbrella.yaml' + - '.github/workflows/update-elixir-patches.yaml' + - '.github/workflows/update-otp-patches.yaml' + workflow_dispatch: +env: + REGISTRY_IMAGE: pivotalrabbitmq/rabbitmq +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +jobs: + build-package-generic-unix: + runs-on: ubuntu-latest + outputs: + authorized: ${{ steps.authorized.outputs.authorized }} + steps: + - name: CHECK IF IMAGE WILL PUSH + id: authorized + run: | + if [ -n "${{ secrets.DOCKERHUB_PASSWORD }}" ]; then + echo "authorized=true" | tee -a $GITHUB_OUTPUT + else + echo "authorized=false" | tee -a $GITHUB_OUTPUT + fi + - name: Checkout + if: 
steps.authorized.outputs.authorized == 'true' + uses: actions/checkout@v4 + - name: Configure Erlang + if: steps.authorized.outputs.authorized == 'true' + uses: erlef/setup-beam@v1 + with: + otp-version: 26.2 + elixir-version: 1.15 + - name: make package-generic-unix + if: steps.authorized.outputs.authorized == 'true' + run: | + make package-generic-unix PROJECT_VERSION=4.0.0 + - name: Upload package-generic-unix + if: steps.authorized.outputs.authorized == 'true' + uses: actions/upload-artifact@v4.3.1 + with: + name: package-generic-unix + path: PACKAGES/rabbitmq-server-*.tar.xz + + build: + needs: build-package-generic-unix + runs-on: ubuntu-latest + if: ${{ needs.build-package-generic-unix.outputs.authorized }} == 'true' + strategy: + fail-fast: false + matrix: + platform: + - linux/amd64 + # Unfortunately even with type=gha cache, OpenSSL and OTP + # are rebuilt often and it takes ~90 minutes to do that + # in the emulated ARM mode. Disabling until we have a better solution. + #- linux/arm64 + steps: + - name: Prepare + run: | + platform=${{ matrix.platform }} + echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV + - name: Checkout + uses: actions/checkout@v4 + - name: Download package-generic-unix + uses: actions/download-artifact@v4 + with: + name: package-generic-unix + path: PACKAGES + - name: Rename package-generic-unix + run: | + cp \ + PACKAGES/rabbitmq-server-generic-unix-*.tar.xz \ + packaging/docker-image/package-generic-unix.tar.xz + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY_IMAGE }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha,format=long + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Build and push by digest + id: build + uses: docker/build-push-action@v6 + with: + context: packaging/docker-image + platforms: ${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + cache-to: type=gha + cache-from: type=gha + outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ env.PLATFORM_PAIR }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + merge: + needs: + - build + runs-on: ubuntu-latest + if: ${{ needs.build-package-generic-unix.outputs.authorized }} == 'true' + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY_IMAGE }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha,format=long + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Create manifest list and push + working-directory: 
/tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) + - name: Inspect image + run: | + docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} + + summary-oci: + needs: + - build-package-generic-unix + - build + - merge + runs-on: ubuntu-latest + steps: + - name: SUMMARY + run: | + cat << 'EOF' | jq -e 'map(.result == "success") | all(.)' + ${{ toJson(needs) }} + EOF diff --git a/.github/workflows/oci.yaml b/.github/workflows/oci.yaml deleted file mode 100644 index 10430fabdfb4..000000000000 --- a/.github/workflows/oci.yaml +++ /dev/null @@ -1,144 +0,0 @@ -# https://github.com/marketplace/actions/build-and-push-docker-images -name: OCI -on: - push: - paths-ignore: - - '.github/workflows/secondary-umbrella.yaml' - - '.github/workflows/update-elixir-patches.yaml' - - '.github/workflows/update-otp-patches.yaml' - workflow_dispatch: -env: - GENERIC_UNIX_ARCHIVE: ${{ github.workspace }}/bazel-bin/package-generic-unix.tar.xz - RABBITMQ_VERSION: ${{ github.event.pull_request.head.sha || github.sha }} - VERSION: ${{ github.event.pull_request.head.sha || github.sha }} -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true -jobs: - - # This job will build one docker image per supported Erlang major version. - # Each image will have two tags (one containing the Git commit SHA, one containing the branch name). - # - # For example, for Git commit SHA '111aaa' and branch name 'main' and maximum supported Erlang major version '26', - # the following tags will be pushed to Dockerhub: - # - # * 111aaa-otp-min (image OTP 25) - # * main-otp-min (image OTP 25) - # * 111aaa-otp-max (image OTP 26) - # * main-otp-max (image OTP 26) - - build-publish-dev-bazel: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - include: - - image_tag_suffix: otp-min-bazel - otp_version_id: 25_3 - - image_tag_suffix: otp-max-bazel - otp_version_id: 26 - steps: - - name: Free Disk Space (Ubuntu) - uses: jlumbroso/free-disk-space@main - with: - # this might remove tools that are actually needed, - # if set to "true" but frees about 6 GB - tool-cache: false - # all of these default to true, but feel free to set to - # "false" if necessary for your workflow - android: true - dotnet: true - haskell: true - large-packages: true - docker-images: false - swap-storage: true - - - name: Checkout - uses: actions/checkout@v4 - - - name: Mount Bazel Cache - uses: actions/cache@v3.3.2 - with: - path: "/home/runner/repo-cache/" - key: ${{ runner.os }}-repo-cache-${{ hashFiles('MODULE.bazel','WORKSPACE','bazel/bzlmod/secondary_umbrella.bzl') }} - restore-keys: | - ${{ runner.os }}-repo-cache- - - - name: Configure Bazel - run: | - if [ -n "${{ secrets.BUILDBUDDY_API_KEY }}" ]; then - cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} - EOF - fi - cat << EOF >> user.bazelrc - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --build_metadata=VISIBILITY=PRIVATE - build:buildbuddy --repository_cache=/home/runner/repo-cache/ - build:buildbuddy --color=yes - build:buildbuddy --disk_cache= - - build:buildbuddy --remote_download_toplevel - EOF - - - name: Configure otp for the OCI image - run: | - sudo npm install --global --silent @bazel/buildozer - - buildozer 'set tars ["@otp_src_${{ matrix.otp_version_id 
}}//file"]' \ - //packaging/docker-image:otp_source - - - name: Build - run: | - bazelisk build //packaging/docker-image:rabbitmq \ - --config=rbe-${{ matrix.otp_version_id }} - - - name: Test - run: | - OCI_TESTS=$(bazel query 'tests(//packaging/docker-image/...)') - bazelisk test ${OCI_TESTS} \ - --config=rbe-${{ matrix.otp_version_id }} - - - name: Load - run: | - bazelisk run //packaging/docker-image:rabbitmq \ - --config=rbe-${{ matrix.otp_version_id }} - - - name: Check for Push Credentials - id: authorized - run: | - if [ -n "${{ secrets.DOCKERHUB_USERNAME }}" ]; then - echo "PUSH=true" >> $GITHUB_OUTPUT - else - echo "PUSH=false" >> $GITHUB_OUTPUT - fi - - - name: Login to DockerHub - if: steps.authorized.outputs.PUSH == 'true' - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Tag and Push - if: steps.authorized.outputs.PUSH == 'true' - run: | - TAG_1="${{ github.event.pull_request.head.sha || github.sha }}-${{ matrix.image_tag_suffix }}" - TAG_2="${GITHUB_REF##*/}-${{ matrix.image_tag_suffix }}" - - docker tag bazel/packaging/docker-image:rabbitmq \ - pivotalrabbitmq/rabbitmq:${TAG_1} - docker tag bazel/packaging/docker-image:rabbitmq \ - pivotalrabbitmq/rabbitmq:${TAG_2} - - docker push pivotalrabbitmq/rabbitmq:${TAG_1} - docker push pivotalrabbitmq/rabbitmq:${TAG_2} - - summary-oci: - needs: - - build-publish-dev-bazel - runs-on: ubuntu-latest - steps: - - name: SUMMARY - run: | - echo "SUCCESS" diff --git a/.github/workflows/perform-bazel-execution-comparison.yaml b/.github/workflows/perform-bazel-execution-comparison.yaml deleted file mode 100644 index f9c1f3fc35ad..000000000000 --- a/.github/workflows/perform-bazel-execution-comparison.yaml +++ /dev/null @@ -1,111 +0,0 @@ -name: Gather Bazel Execution Logs -on: - workflow_dispatch: - inputs: - target: - description: 'A bazel label representing the test target' - required: true - default: '//deps/rabbit:rabbit_stream_queue_SUITE' -jobs: - run-a: - name: Run A - runs-on: ubuntu-20.04 - strategy: - matrix: - erlang_version: - - "25" - include: - - erlang_version: "25" - cache_name: ci-bazel-cache-analysis - timeout-minutes: 120 - steps: - - name: CHECKOUT REPOSITORY - uses: actions/checkout@v4 - - name: CONFIGURE BAZEL - run: | - cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} - - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --remote_instance_name=buildbuddy-io/buildbuddy/ci-${{ matrix.cache_name }} - EOF - - name: RUN TESTS - run: | - sudo sysctl -w net.ipv4.tcp_keepalive_time=60 - sudo ethtool -K eth0 tso off gso off gro off tx off rx off lro off - bazelisk test ${{ github.event.inputs.target }} \ - --config=rbe-${{ matrix.erlang_version }} \ - --execution_log_binary_file=/tmp/exec.log - - name: SAVE EXECUTION LOG BINARY - uses: actions/upload-artifact@v3.1.3 - with: - name: execution-log-binary-A - path: /tmp/exec.log - run-b: - name: Run B - needs: run-a - runs-on: ubuntu-20.04 - strategy: - matrix: - erlang_version: - - "25" - include: - - erlang_version: "25" - cache_name: ci-bazel-cache-analysis - timeout-minutes: 120 - steps: - - name: CHECKOUT REPOSITORY - uses: actions/checkout@v4 - - name: CONFIGURE BAZEL - run: | - cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} - - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy 
--remote_instance_name=buildbuddy-io/buildbuddy/ci-${{ matrix.cache_name }} - EOF - - name: RUN TESTS - run: | - sudo sysctl -w net.ipv4.tcp_keepalive_time=60 - sudo ethtool -K eth0 tso off gso off gro off tx off rx off lro off - bazelisk test ${{ github.event.inputs.target }} \ - --config=rbe-${{ matrix.erlang_version }} \ - --execution_log_binary_file=/tmp/exec.log - - name: SAVE EXECUTION LOG BINARY - uses: actions/upload-artifact@v3.1.3 - with: - name: execution-log-binary-B - path: /tmp/exec.log - parse-logs: - name: Parse Logs - needs: [run-a, run-b] - runs-on: ubuntu-20.04 - steps: - - name: CHECKOUT BAZEL - uses: actions/checkout@v4 - with: - repository: bazelbuild/bazel - path: bazel - - name: MOUNT BAZEL CACHE - uses: actions/cache@v3.3.2 - with: - path: "/home/runner/.cache/bazel" - key: bazel - - name: BUILD EXECLOG PARSER - working-directory: bazel - run: | - bazelisk build src/tools/execlog:parser - - name: FETCH LOGS - uses: actions/download-artifact@v3 - - name: PARSE LOGS - run: | - bazel/bazel-bin/src/tools/execlog/parser \ - --log_path=./execution-log-binary-A/exec.log \ - --log_path=./execution-log-binary-B/exec.log \ - --output_path=/tmp/execution-log-binary-A.log.txt \ - --output_path=/tmp/execution-log-binary-B.log.txt - - name: SAVE PARSED LOGS - uses: actions/upload-artifact@v3.1.3 - with: - name: parsed-logs - path: /tmp/execution-log-binary-*.log.txt diff --git a/.github/workflows/rabbitmq_peer_discovery_aws.yaml b/.github/workflows/rabbitmq_peer_discovery_aws.yaml index 02fbaebc3e6e..396edca21ae7 100644 --- a/.github/workflows/rabbitmq_peer_discovery_aws.yaml +++ b/.github/workflows/rabbitmq_peer_discovery_aws.yaml @@ -1,58 +1,103 @@ name: Peer Discovery AWS Integration Test on: + push: + paths-ignore: + - '.github/workflows/secondary-umbrella.yaml' + - '.github/workflows/update-elixir-patches.yaml' + - '.github/workflows/update-otp-patches.yaml' workflow_dispatch: +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true jobs: peer-discovery-aws-integration-test: name: Integration Test - runs-on: ubuntu-20.04 - strategy: - matrix: - include: - - image_tag_suffix: otp-max-bazel - otp_version_id: 26 + runs-on: ubuntu-22.04 timeout-minutes: 45 steps: + - name: CHECK IF IMAGE WILL PUSH + id: authorized + run: | + if [ -n "${{ secrets.DOCKERHUB_PASSWORD }}" ]; then + echo "authorized=true" | tee -a $GITHUB_OUTPUT + else + echo "authorized=false" | tee -a $GITHUB_OUTPUT + fi - name: CHECKOUT REPOSITORY + if: steps.authorized.outputs.authorized == 'true' uses: actions/checkout@v4 - - name: WAIT FOR OCI IMAGE WORKFLOW - uses: lewagon/wait-on-check-action@v1.3.1 + - uses: docker/metadata-action@v5 + if: steps.authorized.outputs.authorized == 'true' + id: metadata + with: + images: pivotalrabbitmq/rabbitmq + tags: | + type=sha,format=long + - uses: int128/wait-for-docker-image-action@v1 + if: steps.authorized.outputs.authorized == 'true' + with: + tags: ${{ steps.metadata.outputs.tags }} + timeout-seconds: 3600 + polling-seconds: 60 + - name: COMPUTE REPO CACHE KEY + if: steps.authorized.outputs.authorized == 'true' + id: repo-cache-key + run: | + echo "value=bazel-repo-cache-${{ hashFiles('MODULE.bazel') }}" | tee -a $GITHUB_OUTPUT + - name: LOAD REPO CACHE + if: steps.authorized.outputs.authorized == 'true' + uses: actions/cache/restore@v4 + with: + key: ${{ steps.repo-cache-key.outputs.value }} + path: /home/runner/repo-cache/ + - name: CONFIGURE OTP & ELIXIR + if: steps.authorized.outputs.authorized == 'true' + uses: 
erlef/setup-beam@v1.17 with: - ref: ${{ github.ref }} - check-name: build-publish-dev-bazel (${{ matrix.image_tag_suffix }}, ${{ matrix.otp_version_id }}) - repo-token: ${{ secrets.GITHUB_TOKEN }} - wait-interval: 30 # seconds - - name: MOUNT BAZEL CACHE - uses: actions/cache@v3.3.2 + otp-version: 26 + elixir-version: 1.15 + - name: SETUP ecs-cli + if: steps.authorized.outputs.authorized == 'true' + env: + ECS_CLI_VERSION: 1.21.0 + run: | + curl -Lo /usr/local/bin/ecs-cli https://amazon-ecs-cli.s3.amazonaws.com/ecs-cli-linux-amd64-v${ECS_CLI_VERSION} && \ + chmod +x /usr/local/bin/ecs-cli && \ + ecs-cli --version + - name: AUTHENTICATE TO GOOGLE CLOUD + if: steps.authorized.outputs.authorized == 'true' + uses: google-github-actions/auth@v2.1.4 with: - path: "/home/runner/repo-cache/" - key: ${{ runner.os }}-repo-cache-${{ hashFiles('MODULE.bazel','WORKSPACE','bazel/bzlmod/secondary_umbrella.bzl') }} - restore-keys: | - ${{ runner.os }}-repo-cache- + credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: CONFIGURE BAZEL + if: steps.authorized.outputs.authorized == 'true' run: | + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME }}" ]; then cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME }} + build --google_default_credentials - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --build_metadata=VISIBILITY=PRIVATE - build:buildbuddy --repository_cache=/home/runner/repo-cache/ - build:buildbuddy --color=yes - build:buildbuddy --disk_cache= + build --experimental_guard_against_concurrent_changes + EOF + fi + cat << EOF >> user.bazelrc + build --repository_cache=/home/runner/repo-cache/ + build --color=yes EOF + + bazelisk info release #! - name: Setup tmate session #! 
uses: mxschmitt/action-tmate@v3 - name: RUN INTEGRATION TESTS + if: steps.authorized.outputs.authorized == 'true' run: | - sudo sysctl -w net.ipv4.tcp_keepalive_time=60 - sudo ethtool -K eth0 tso off gso off gro off tx off rx off lro off - branch_or_tag="${GITHUB_REF##*/}" bazelisk test //deps/rabbitmq_peer_discovery_aws:integration_SUITE \ - --config=rbe-${{ matrix.otp_version_id }} \ --test_tag_filters=aws \ --build_tests_only \ --test_env AWS_ACCESS_KEY_ID=${{ secrets.CONCOURSE_AWS_ACCESS_KEY_ID }} \ --test_env AWS_SECRET_ACCESS_KEY=${{ secrets.CONCOURSE_AWS_SECRET_ACCESS_KEY }} \ - --test_env RABBITMQ_IMAGE="pivotalrabbitmq/rabbitmq:${{ github.sha }}-otp-max-bazel" \ - --test_env AWS_ECS_CLUSTER_NAME="rabbitmq-peer-discovery-aws-actions-${branch_or_tag//./-}" \ + --test_env RABBITMQ_IMAGE="pivotalrabbitmq/rabbitmq:sha-${{ github.sha }}" \ + --test_env AWS_ECS_CLUSTER_NAME="rabbitmq-peer-discovery-aws-actions-${branch_or_tag//[._]/-}" \ + --test_output=streamed \ --verbose_failures diff --git a/.github/workflows/templates/test-mixed-versions.template.yaml b/.github/workflows/templates/test-mixed-versions.template.yaml new file mode 100644 index 000000000000..5191c9fdbf64 --- /dev/null +++ b/.github/workflows/templates/test-mixed-versions.template.yaml @@ -0,0 +1,214 @@ +#@ load("@ytt:data", "data") +#@yaml/text-templated-strings + +#@ def job_names(plugins): +#@ names = [] +#@ for p in plugins: +#@ names.append("test-"+p+"-mixed") +#@ end +#@ return names +#@ end + +#@ def sharded_job_names(plugin, shard_count): +#@ names = [] +#@ for shard_index in range(0, shard_count): +#@ names.append("test-"+plugin+"-"+str(shard_index)+"-mixed") +#@ end +#@ return names +#@ end + +--- +name: Test Mixed Version Clusters +on: + push: + branches: + - main + - v4.0.x + - v3.13.x + - bump-otp-* + - bump-elixir-* + - bump-rbe-* + - bump-rules_erlang + paths: + - 'deps/**' + - 'scripts/**' + - Makefile + - plugins.mk + - rabbitmq-components.mk + - .bazelrc + - .bazelversion + - BUILD.* + - '*.bzl' + - '*.bazel' + - .github/workflows/test-mixed-versions.yaml + pull_request: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +jobs: + ensure-mixed-version-archive: + runs-on: ubuntu-22.04 + steps: + - name: CHECKOUT REPOSITORY + uses: actions/checkout@v4 + with: + path: primary-umbrella + #! - name: Setup tmate session + #! 
uses: mxschmitt/action-tmate@v3 + - name: CHECK FOR ARCHIVE ON S3 + id: check + working-directory: primary-umbrella + run: | + set -u + + ARCHIVE_URL="$(grep -Eo 'https://rabbitmq-github-actions.s3.eu-west-1.amazonaws.com.*.tar.xz' bazel/bzlmod/secondary_umbrella.bzl)" + echo "ARCHIVE_URL: ${ARCHIVE_URL}" + + curl -LO "${ARCHIVE_URL}" + + if xzcat --test package-generic-unix-for-mixed-version-testing-v*.tar.xz; then + exists=true + else + exists=false + fi + echo "exists=${exists}" | tee $GITHUB_ENV + + OTP_VERSION=${ARCHIVE_URL#*secondary-umbrellas/} + OTP_VERSION=${OTP_VERSION%*/package-generic-unix-for-mixed-version-testing-v*.tar.xz} + echo "otp_version=${OTP_VERSION}" | tee -a $GITHUB_OUTPUT + + VERSION=${ARCHIVE_URL#*package-generic-unix-for-mixed-version-testing-v} + VERSION=${VERSION%*.tar.xz} + echo "version=${VERSION}" | tee -a $GITHUB_OUTPUT + - name: CHECKOUT REPOSITORY (MIXED VERSION) + if: env.exists != 'true' + uses: actions/checkout@v4 + with: + ref: v${{ steps.check.outputs.version }} + path: secondary-umbrella + - name: CONFIGURE OTP & ELIXIR + if: env.exists != 'true' + uses: erlef/setup-beam@v1.17 + with: + otp-version: ${{ steps.check.outputs.otp_version }} + elixir-version: 1.15 + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + - name: AUTHENTICATE TO GOOGLE CLOUD + uses: google-github-actions/auth@v2.1.4 + with: + credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} + - name: BUILD SECONDARY UMBRELLA ARCHIVE + if: env.exists != 'true' + working-directory: secondary-umbrella + run: | + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME_MIXED }}" ]; then + cat << EOF >> user.bazelrc + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME_MIXED }} + build --google_default_credentials + + build --remote_download_toplevel + EOF + fi + + sed -i"_orig" -E "/APP_VERSION/ s/3\.[0-9]+\.[0-9]+/${{ steps.check.outputs.version }}/" rabbitmq.bzl + bazelisk build :package-generic-unix \ + --test_build \ + --verbose_failures + + OUTPUT_DIR=${{ github.workspace }}/output + mkdir -p ${OUTPUT_DIR}/${{ steps.check.outputs.otp_version }} + cp \ + bazel-bin/package-generic-unix.tar.xz \ + ${OUTPUT_DIR}/${{ steps.check.outputs.otp_version }}/package-generic-unix-for-mixed-version-testing-v${{ steps.check.outputs.version }}.tar.xz + - name: UPLOAD THE ARCHIVE TO S3 + if: env.exists != 'true' + uses: jakejarvis/s3-sync-action@v0.5.1 + with: + args: --acl public-read --follow-symlinks + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY}} + AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + AWS_REGION: ${{ secrets.AWS_REGION }} + SOURCE_DIR: output + DEST_DIR: secondary-umbrellas + + check-workflow: + needs: ensure-mixed-version-archive + runs-on: ubuntu-latest + outputs: + repo_cache_key: ${{ steps.repo-cache-key.outputs.value }} + steps: + - name: CHECKOUT REPOSITORY + uses: actions/checkout@v4 + - name: SETUP ERLANG/ELIXIR + uses: erlef/setup-beam@v1 + with: + otp-version: 26 + elixir-version: 1.15 + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + - name: ENSURE WORKFLOWS ARE UP TO DATE + run: | + mkdir local-bin/ + curl -L https://carvel.dev/install.sh | K14SIO_INSTALL_BIN_DIR=local-bin bash + make actions-workflows YTT=$PWD/local-bin/ytt + git diff --exit-code + - name: COMPUTE REPO CACHE KEY + id: repo-cache-key + run: | + echo "value=bazel-repo-cache-${{ hashFiles('MODULE.bazel') }}" | tee -a $GITHUB_OUTPUT + +#@ for plugin in 
data.values.internal_deps: + test-(@= plugin @)-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: #@ plugin + secrets: inherit +#@ end + +#@ rabbit_shard_count = 10 +#@ for shard_index in range(0, rabbit_shard_count): + test-rabbit-(@= str(shard_index) @)-mixed: + needs: #@ ["check-workflow"] + job_names(data.values.internal_deps) + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: #@ shard_index + shard_count: #@ rabbit_shard_count + secrets: inherit +#@ end + + test-rabbitmq_cli-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_cli + secrets: inherit + +#@ for plugin in data.values.tier1_plugins: + test-(@= plugin @)-mixed: + needs: #@ ["check-workflow"] + sharded_job_names("rabbit", rabbit_shard_count) + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: #@ plugin + secrets: inherit +#@ end + + summary-test: + needs: #@ job_names(data.values.internal_deps + data.values.tier1_plugins) + sharded_job_names("rabbit", rabbit_shard_count) + ["test-rabbitmq_cli-mixed"] + runs-on: ubuntu-latest + steps: + - name: SUMMARY + run: | + cat << 'EOF' | jq -e 'map(.result == "success") | all(.)' + ${{ toJson(needs) }} + EOF diff --git a/.github/workflows/templates/test.template.yaml b/.github/workflows/templates/test.template.yaml new file mode 100644 index 000000000000..b4ea6d53f979 --- /dev/null +++ b/.github/workflows/templates/test.template.yaml @@ -0,0 +1,151 @@ +#@ load("@ytt:data", "data") +#@yaml/text-templated-strings + +#@ def job_names(plugins): +#@ names = [] +#@ for p in plugins: +#@ names.append("test-"+p) +#@ end +#@ return names +#@ end + +#@ def sharded_job_names(plugin, shard_count): +#@ names = [] +#@ for shard_index in range(0, shard_count): +#@ names.append("test-"+plugin+"-"+str(shard_index)) +#@ end +#@ return names +#@ end + +--- +name: Test +on: + push: + branches: + - main + - v3.13.x + - v3.12.x + - v3.11.x + - bump-otp-for-oci + - bump-rbe-* + - bump-rules_erlang + paths: + - 'deps/**' + - 'scripts/**' + - Makefile + - plugins.mk + - rabbitmq-components.mk + - .bazelrc + - .bazelversion + - BUILD.* + - '*.bzl' + - '*.bazel' + - .github/workflows/test.yaml + pull_request: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +jobs: + check-workflow: + runs-on: ubuntu-latest + outputs: + repo_cache_key: ${{ steps.repo-cache-key.outputs.value }} + steps: + - name: CHECKOUT REPOSITORY + uses: actions/checkout@v4 + - name: SETUP ERLANG/ELIXIR + uses: erlef/setup-beam@v1 + with: + otp-version: 26 + elixir-version: 1.15 + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + - name: ENSURE WORKFLOWS ARE UP TO DATE + run: | + mkdir local-bin/ + curl -L https://carvel.dev/install.sh | K14SIO_INSTALL_BIN_DIR=local-bin bash + make actions-workflows YTT=$PWD/local-bin/ytt + git diff --exit-code + - name: COMPUTE REPO CACHE KEY + id: repo-cache-key + run: | + echo "value=bazel-repo-cache-${{ hashFiles('MODULE.bazel') }}" | tee -a $GITHUB_OUTPUT + - name: AUTHENTICATE TO GOOGLE CLOUD + uses: google-github-actions/auth@v2.1.4 + with: + credentials_json: ${{ 
secrets.REMOTE_CACHE_CREDENTIALS_JSON }} + - name: REPO CACHE + id: cache + uses: actions/cache@v4 + with: + key: ${{ steps.repo-cache-key.outputs.value }} + path: /home/runner/repo-cache/ + - name: PRIME CACHE + if: steps.cache.outputs.cache-hit != 'true' + run: | + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME }}" ]; then + cat << EOF >> user.bazelrc + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME }} + build --google_default_credentials + EOF + fi + cat << EOF >> user.bazelrc + build --repository_cache=/home/runner/repo-cache/ + build --color=yes + EOF + + bazelisk cquery \ + 'tests(//...) except attr("tags", "manual|mixed-version-cluster", //deps/...)' \ + --output=label + +#@ for plugin in data.values.internal_deps: + test-(@= plugin @): + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: #@ plugin + secrets: inherit +#@ end + +#@ rabbit_shard_count = 10 +#@ for shard_index in range(0, rabbit_shard_count): + test-rabbit-(@= str(shard_index) @): + needs: #@ ["check-workflow"] + job_names(data.values.internal_deps) + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: #@ shard_index + shard_count: #@ rabbit_shard_count + secrets: inherit +#@ end + + test-rabbitmq_cli: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_cli + secrets: inherit + +#@ for plugin in data.values.tier1_plugins: + test-(@= plugin @): + needs: #@ ["check-workflow"] + sharded_job_names("rabbit", rabbit_shard_count) + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: #@ plugin + secrets: inherit +#@ end + + summary-test: + needs: #@ job_names(data.values.internal_deps + data.values.tier1_plugins) + sharded_job_names("rabbit", rabbit_shard_count) + ["test-rabbitmq_cli"] + runs-on: ubuntu-latest + steps: + - name: SUMMARY + run: | + cat << 'EOF' | jq -e 'map(.result == "success") | all(.)' + ${{ toJson(needs) }} + EOF diff --git a/.github/workflows/test-authnz.yaml b/.github/workflows/test-authnz.yaml index c0e69ffdd6fa..d70b23662dcc 100644 --- a/.github/workflows/test-authnz.yaml +++ b/.github/workflows/test-authnz.yaml @@ -1,4 +1,4 @@ -name: Test Authentication/Authorization backends +name: Test Authentication/Authorization backends via multiple messaging protocols on: push: branches: @@ -20,23 +20,27 @@ on: paths: - 'deps/rabbit/**' - 'deps/rabbitmq_auth_/**' + - 'deps/rabbitmq_mqtt/**' + - 'deps/rabbitmq_management/selenium/full-suite-authnz-messaging' + - 'deps/rabbitmq_management/selenium/suites/authnz-messaging' + - 'deps/rabbitmq_management/selenium/test/authnz-msg-protocols' - .github/workflows/test-authnz.yaml concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: selenium: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: erlang_version: - - "26.0" + - "26.2" browser: - chrome include: - - erlang_version: "26.0" - elixir_version: 1.14.5 + - erlang_version: "26.2" + elixir_version: 1.15.7 env: SELENIUM_DIR: deps/rabbitmq_management/selenium DOCKER_NETWORK: rabbitmq_net @@ -45,46 +49,36 @@ jobs: uses: actions/checkout@v4 - name: Configure OTP & Elixir - uses:
erlef/setup-beam@v1.16 + uses: erlef/setup-beam@v1.17 with: otp-version: ${{ matrix.erlang_version }} elixir-version: ${{ matrix.elixir_version }} + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex - - name: Mount Bazel Cache - uses: actions/cache@v3.3.2 + - name: Authenticate To Google Cloud + uses: google-github-actions/auth@v2.1.4 with: - path: "/home/runner/repo-cache/" - key: ${{ runner.os }}-repo-cache-${{ hashFiles('MODULE.bazel','WORKSPACE','bazel/bzlmod/secondary_umbrella.bzl') }} - restore-keys: | - ${{ runner.os }}-repo-cache- + credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: Configure Bazel run: | - ERLANG_HOME="$(dirname $(dirname $(which erl)))" - ELIXIR_HOME="$(dirname $(dirname $(which iex)))" - if [ -n "${{ secrets.BUILDBUDDY_API_KEY }}" ]; then + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME }}" ]; then cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME }} + build --google_default_credentials + + build --remote_download_toplevel EOF fi cat << EOF >> user.bazelrc - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --build_metadata=VISIBILITY=PRIVATE - build:buildbuddy --repository_cache=/home/runner/repo-cache/ - build:buildbuddy --color=yes - build:buildbuddy --disk_cache= - - build:buildbuddy --remote_download_toplevel - - build --@rules_erlang//:erlang_version=${{ matrix.erlang_version }} - build --@rules_erlang//:erlang_home=${ERLANG_HOME} - build --//:elixir_home=${ELIXIR_HOME} + build --color=yes EOF - name: Build & Load RabbitMQ OCI run: | - bazelisk run packaging/docker-image:rabbitmq \ - --config=buildbuddy + bazelisk run packaging/docker-image:rabbitmq-amd64 - name: Configure Docker Network run: | @@ -97,11 +91,11 @@ jobs: - name: Run Suites run: | - RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq ${SELENIUM_DIR}/run-suites.sh full-suite-authnz + RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq-amd64 ${SELENIUM_DIR}/run-suites.sh full-suite-authnz-messaging - name: Upload Test Artifacts if: always() - uses: actions/upload-artifact@v3.1.3 + uses: actions/upload-artifact@v4.3.2 with: name: test-artifacts-${{ matrix.browser }}-${{ matrix.erlang_version }} path: | diff --git a/.github/workflows/test-erlang-git.yaml b/.github/workflows/test-erlang-git.yaml deleted file mode 100644 index ffdf4b1a8169..000000000000 --- a/.github/workflows/test-erlang-git.yaml +++ /dev/null @@ -1,49 +0,0 @@ -name: Test Erlang Git Master -on: - schedule: - - cron: '0 2 * * *' - workflow_dispatch: -jobs: - test-erlang-git: - name: Test (Erlang Git Master) - runs-on: ubuntu-20.04 - timeout-minutes: 120 - steps: - - name: CHECKOUT REPOSITORY - uses: actions/checkout@v4 - - name: MOUNT BAZEL CACHE - uses: actions/cache@v3.3.2 - with: - path: "/home/runner/repo-cache/" - key: ${{ runner.os }}-repo-cache-${{ hashFiles('MODULE.bazel','WORKSPACE','bazel/bzlmod/secondary_umbrella.bzl') }} - restore-keys: | - ${{ runner.os }}-repo-cache- - - name: CONFIGURE BAZEL - run: | - if [ -n "${{ secrets.BUILDBUDDY_API_KEY }}" ]; then - cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} - EOF - fi - cat << EOF >> user.bazelrc - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --build_metadata=VISIBILITY=PUBLIC - build:buildbuddy --repository_cache=/home/runner/repo-cache/ - build:buildbuddy 
--color=yes - build:buildbuddy --disk_cache= - - build:rbe --platforms=//bazel/platforms:erlang_linux_git_master_platform - EOF - - bazelisk info release - #! - name: Setup tmate session - #! uses: mxschmitt/action-tmate@v3 - - name: RUN TESTS - run: | - sudo sysctl -w net.ipv4.tcp_keepalive_time=60 - sudo ethtool -K eth0 tso off gso off gro off tx off rx off lro off - bazelisk test //... \ - --config=rbe \ - --test_tag_filters=-mixed-version-cluster,-aws,-docker \ - --build_tests_only \ - --verbose_failures diff --git a/.github/workflows/test-mixed-versions.yaml b/.github/workflows/test-mixed-versions.yaml index 52f6be1c0f49..4b03199c0cdf 100644 --- a/.github/workflows/test-mixed-versions.yaml +++ b/.github/workflows/test-mixed-versions.yaml @@ -2,43 +2,37 @@ name: Test Mixed Version Clusters on: push: branches: - - main - - v3.12.x - - v3.11.x - - v3.10.x - - v3.9.x - - v3.8.x - - bump-otp-* - - bump-elixir-* - - bump-rbe-* - - bump-rules_erlang + - main + - v4.0.x + - v3.13.x + - bump-otp-* + - bump-elixir-* + - bump-rbe-* + - bump-rules_erlang paths: - - 'deps/**' - - 'scripts/**' - - Makefile - - plugins.mk - - rabbitmq-components.mk - - .bazelrc - - .bazelversion - - BUILD.* - - '*.bzl' - - '*.bazel' - - .github/workflows/test-mixed-versions.yaml + - deps/** + - scripts/** + - Makefile + - plugins.mk + - rabbitmq-components.mk + - .bazelrc + - .bazelversion + - BUILD.* + - '*.bzl' + - '*.bazel' + - .github/workflows/test-mixed-versions.yaml pull_request: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: ensure-mixed-version-archive: - name: Prepare Artifacts - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: CHECKOUT REPOSITORY uses: actions/checkout@v4 with: path: primary-umbrella - #! - name: Setup tmate session - #! 
uses: mxschmitt/action-tmate@v3 - name: CHECK FOR ARCHIVE ON S3 id: check working-directory: primary-umbrella @@ -57,9 +51,9 @@ jobs: fi echo "exists=${exists}" | tee $GITHUB_ENV - OTP_VERSION_ID=${ARCHIVE_URL#*secondary-umbrellas/rbe-} - OTP_VERSION_ID=${OTP_VERSION_ID%*/package-generic-unix-for-mixed-version-testing-v*.tar.xz} - echo "otp_version_id=${OTP_VERSION_ID}" | tee -a $GITHUB_OUTPUT + OTP_VERSION=${ARCHIVE_URL#*secondary-umbrellas/} + OTP_VERSION=${OTP_VERSION%*/package-generic-unix-for-mixed-version-testing-v*.tar.xz} + echo "otp_version=${OTP_VERSION}" | tee -a $GITHUB_OUTPUT VERSION=${ARCHIVE_URL#*package-generic-unix-for-mixed-version-testing-v} VERSION=${VERSION%*.tar.xz} @@ -70,45 +64,42 @@ jobs: with: ref: v${{ steps.check.outputs.version }} path: secondary-umbrella - - name: MOUNT BAZEL CACHE + - name: CONFIGURE OTP & ELIXIR if: env.exists != 'true' - uses: actions/cache@v3.3.2 + uses: erlef/setup-beam@v1.17 with: - path: "/home/runner/repo-cache/" - key: repo-cache-secondary-umbrella-${{ hashFiles('primary-umbrella/MODULE.bazel','primary-umbrella/WORKSPACE','primary-umbrella/bazel/bzlmod/secondary_umbrella.bzl') }} - restore-keys: | - repo-cache-secondary-umbrella- + otp-version: ${{ steps.check.outputs.otp_version }} + elixir-version: 1.15 + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + - name: AUTHENTICATE TO GOOGLE CLOUD + uses: google-github-actions/auth@v2.1.4 + with: + credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: BUILD SECONDARY UMBRELLA ARCHIVE if: env.exists != 'true' working-directory: secondary-umbrella run: | - if [ -n "${{ secrets.BUILDBUDDY_API_KEY }}" ]; then + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME_MIXED }}" ]; then cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} - EOF - fi - cat << EOF >> user.bazelrc - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --build_metadata=VISIBILITY=PRIVATE - build:buildbuddy --remote_instance_name=buildbuddy-io/buildbuddy/ci-secondary-umbrella - build:buildbuddy --repository_cache=/home/runner/repo-cache/ - build:buildbuddy --color=yes - build:buildbuddy --disk_cache= + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME_MIXED }} + build --google_default_credentials - build:buildbuddy --remote_download_toplevel + build --remote_download_toplevel EOF + fi sed -i"_orig" -E "/APP_VERSION/ s/3\.[0-9]+\.[0-9]+/${{ steps.check.outputs.version }}/" rabbitmq.bzl bazelisk build :package-generic-unix \ - --config=rbe-${{ steps.check.outputs.otp_version_id }} \ --test_build \ --verbose_failures OUTPUT_DIR=${{ github.workspace }}/output - mkdir -p ${OUTPUT_DIR}/rbe-${{ steps.check.outputs.otp_version_id }} + mkdir -p ${OUTPUT_DIR}/${{ steps.check.outputs.otp_version }} cp \ bazel-bin/package-generic-unix.tar.xz \ - ${OUTPUT_DIR}/rbe-${{ steps.check.outputs.otp_version_id }}/package-generic-unix-for-mixed-version-testing-v${{ steps.check.outputs.version }}.tar.xz + ${OUTPUT_DIR}/${{ steps.check.outputs.otp_version }}/package-generic-unix-for-mixed-version-testing-v${{ steps.check.outputs.version }}.tar.xz - name: UPLOAD THE ARCHIVE TO S3 if: env.exists != 'true' uses: jakejarvis/s3-sync-action@v0.5.1 @@ -121,57 +112,1097 @@ jobs: AWS_REGION: ${{ secrets.AWS_REGION }} SOURCE_DIR: output DEST_DIR: secondary-umbrellas - - test-mixed-versions: - name: Test (Mixed Version Cluster) - runs-on: ubuntu-20.04 + check-workflow: needs: ensure-mixed-version-archive - 
strategy: - fail-fast: false - matrix: - otp_version_id: - - "25_3" - timeout-minutes: 120 + runs-on: ubuntu-latest + outputs: + repo_cache_key: ${{ steps.repo-cache-key.outputs.value }} steps: - name: CHECKOUT REPOSITORY uses: actions/checkout@v4 - - name: MOUNT BAZEL CACHE - uses: actions/cache@v3.3.2 + - name: SETUP ERLANG/ELIXIR + uses: erlef/setup-beam@v1 with: - path: "/home/runner/repo-cache/" - key: ${{ runner.os }}-repo-cache-${{ hashFiles('MODULE.bazel','WORKSPACE','bazel/bzlmod/secondary_umbrella.bzl') }} - restore-keys: | - ${{ runner.os }}-repo-cache- - - name: CONFIGURE BAZEL + otp-version: 26 + elixir-version: 1.15 + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + - name: ENSURE WORKFLOWS ARE UP TO DATE run: | - if [ -n "${{ secrets.BUILDBUDDY_API_KEY }}" ]; then - cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} - EOF - fi - cat << EOF >> user.bazelrc - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --build_metadata=VISIBILITY=PUBLIC - build:buildbuddy --repository_cache=/home/runner/repo-cache/ - build:buildbuddy --color=yes - build:buildbuddy --disk_cache= - EOF - #! - name: Setup tmate session - #! uses: mxschmitt/action-tmate@v3 - - name: RUN TESTS + mkdir local-bin/ + curl -L https://carvel.dev/install.sh | K14SIO_INSTALL_BIN_DIR=local-bin bash + make actions-workflows YTT=$PWD/local-bin/ytt + git diff --exit-code + - name: COMPUTE REPO CACHE KEY + id: repo-cache-key run: | - sudo sysctl -w net.ipv4.tcp_keepalive_time=60 - sudo ethtool -K eth0 tso off gso off gro off tx off rx off lro off - bazelisk test //... \ - --config=rbe-${{ matrix.otp_version_id }} \ - --test_tag_filters=mixed-version-cluster,-aws,-docker \ - --build_tests_only \ - --verbose_failures - summary-mixed-versions: + echo "value=bazel-repo-cache-${{ hashFiles('MODULE.bazel') }}" | tee -a $GITHUB_OUTPUT + test-amqp10_client-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: amqp10_client + secrets: inherit + test-amqp10_common-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: amqp10_common + secrets: inherit + test-amqp_client-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: amqp_client + secrets: inherit + test-oauth2_client-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: oauth2_client + secrets: inherit + test-rabbit_common-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit_common + secrets: inherit + test-rabbitmq_ct_client_helpers-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_ct_client_helpers + secrets: inherit + test-rabbitmq_ct_helpers-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_ct_helpers + secrets: inherit + 
test-rabbitmq_stream_common-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_stream_common + secrets: inherit + test-trust_store_http-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: trust_store_http + secrets: inherit + test-rabbit-0-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 0 + shard_count: 10 + secrets: inherit + test-rabbit-1-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 1 + shard_count: 10 + secrets: inherit + test-rabbit-2-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 2 + shard_count: 10 + secrets: inherit + test-rabbit-3-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 3 + shard_count: 10 + secrets: inherit + test-rabbit-4-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 4 + shard_count: 10 + secrets: inherit + test-rabbit-5-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: 
./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 5 + shard_count: 10 + secrets: inherit + test-rabbit-6-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 6 + shard_count: 10 + secrets: inherit + test-rabbit-7-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 7 + shard_count: 10 + secrets: inherit + test-rabbit-8-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 8 + shard_count: 10 + secrets: inherit + test-rabbit-9-mixed: + needs: + - check-workflow + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 9 + shard_count: 10 + secrets: inherit + test-rabbitmq_cli-mixed: + needs: check-workflow + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_cli + secrets: inherit + test-rabbitmq_amqp_client-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_amqp_client + secrets: inherit + test-rabbitmq_amqp1_0-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_amqp1_0 + secrets: inherit + 
test-rabbitmq_auth_backend_cache-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_backend_cache + secrets: inherit + test-rabbitmq_auth_backend_http-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_backend_http + secrets: inherit + test-rabbitmq_auth_backend_ldap-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_backend_ldap + secrets: inherit + test-rabbitmq_auth_backend_oauth2-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_backend_oauth2 + secrets: inherit + test-rabbitmq_auth_mechanism_ssl-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_mechanism_ssl + secrets: inherit + test-rabbitmq_aws-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_aws + secrets: inherit + test-rabbitmq_consistent_hash_exchange-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_consistent_hash_exchange + secrets: inherit + test-rabbitmq_event_exchange-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - 
test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_event_exchange + secrets: inherit + test-rabbitmq_federation-mixed: needs: - - test-mixed-versions + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_federation + secrets: inherit + test-rabbitmq_federation_management-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_federation_management + secrets: inherit + test-rabbitmq_federation_prometheus-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_federation_prometheus + secrets: inherit + test-rabbitmq_jms_topic_exchange-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_jms_topic_exchange + secrets: inherit + test-rabbitmq_management-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_management + secrets: inherit + test-rabbitmq_management_agent-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_management_agent + secrets: inherit + test-rabbitmq_mqtt-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - 
test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_mqtt + secrets: inherit + test-rabbitmq_peer_discovery_aws-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_aws + secrets: inherit + test-rabbitmq_peer_discovery_common-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_common + secrets: inherit + test-rabbitmq_peer_discovery_consul-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_consul + secrets: inherit + test-rabbitmq_peer_discovery_etcd-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_etcd + secrets: inherit + test-rabbitmq_peer_discovery_k8s-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_k8s + secrets: inherit + test-rabbitmq_prelaunch-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_prelaunch + secrets: inherit + test-rabbitmq_prometheus-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed 
+ uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_prometheus + secrets: inherit + test-rabbitmq_random_exchange-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_random_exchange + secrets: inherit + test-rabbitmq_recent_history_exchange-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_recent_history_exchange + secrets: inherit + test-rabbitmq_sharding-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_sharding + secrets: inherit + test-rabbitmq_shovel-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_shovel + secrets: inherit + test-rabbitmq_shovel_management-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_shovel_management + secrets: inherit + test-rabbitmq_shovel_prometheus-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_shovel_prometheus + secrets: inherit + test-rabbitmq_stomp-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_stomp + 
secrets: inherit + test-rabbitmq_stream-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_stream + secrets: inherit + test-rabbitmq_stream_management-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_stream_management + secrets: inherit + test-rabbitmq_top-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_top + secrets: inherit + test-rabbitmq_tracing-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_tracing + secrets: inherit + test-rabbitmq_trust_store-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_trust_store + secrets: inherit + test-rabbitmq_web_dispatch-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_dispatch + secrets: inherit + test-rabbitmq_web_mqtt-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_mqtt + secrets: inherit + test-rabbitmq_web_mqtt_examples-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - 
test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_mqtt_examples + secrets: inherit + test-rabbitmq_web_stomp-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_stomp + secrets: inherit + test-rabbitmq_web_stomp_examples-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_stomp_examples + secrets: inherit + summary-test: + needs: + - test-amqp10_client-mixed + - test-amqp10_common-mixed + - test-amqp_client-mixed + - test-oauth2_client-mixed + - test-rabbit_common-mixed + - test-rabbitmq_ct_client_helpers-mixed + - test-rabbitmq_ct_helpers-mixed + - test-rabbitmq_stream_common-mixed + - test-trust_store_http-mixed + - test-rabbitmq_amqp_client-mixed + - test-rabbitmq_amqp1_0-mixed + - test-rabbitmq_auth_backend_cache-mixed + - test-rabbitmq_auth_backend_http-mixed + - test-rabbitmq_auth_backend_ldap-mixed + - test-rabbitmq_auth_backend_oauth2-mixed + - test-rabbitmq_auth_mechanism_ssl-mixed + - test-rabbitmq_aws-mixed + - test-rabbitmq_consistent_hash_exchange-mixed + - test-rabbitmq_event_exchange-mixed + - test-rabbitmq_federation-mixed + - test-rabbitmq_federation_management-mixed + - test-rabbitmq_federation_prometheus-mixed + - test-rabbitmq_jms_topic_exchange-mixed + - test-rabbitmq_management-mixed + - test-rabbitmq_management_agent-mixed + - test-rabbitmq_mqtt-mixed + - test-rabbitmq_peer_discovery_aws-mixed + - test-rabbitmq_peer_discovery_common-mixed + - test-rabbitmq_peer_discovery_consul-mixed + - test-rabbitmq_peer_discovery_etcd-mixed + - test-rabbitmq_peer_discovery_k8s-mixed + - test-rabbitmq_prelaunch-mixed + - test-rabbitmq_prometheus-mixed + - test-rabbitmq_random_exchange-mixed + - test-rabbitmq_recent_history_exchange-mixed + - test-rabbitmq_sharding-mixed + - test-rabbitmq_shovel-mixed + - test-rabbitmq_shovel_management-mixed + - test-rabbitmq_shovel_prometheus-mixed + - test-rabbitmq_stomp-mixed + - test-rabbitmq_stream-mixed + - test-rabbitmq_stream_management-mixed + - test-rabbitmq_top-mixed + - test-rabbitmq_tracing-mixed + - test-rabbitmq_trust_store-mixed + - test-rabbitmq_web_dispatch-mixed + - test-rabbitmq_web_mqtt-mixed + - test-rabbitmq_web_mqtt_examples-mixed + - test-rabbitmq_web_stomp-mixed + - test-rabbitmq_web_stomp_examples-mixed + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + - test-rabbitmq_cli-mixed runs-on: ubuntu-latest steps: - name: SUMMARY run: | - echo "SUCCESS" + cat << 'EOF' | jq -e 'map(.result == 
"success") | all(.)' + ${{ toJson(needs) }} + EOF diff --git a/.github/workflows/test-plugin-mixed.yaml b/.github/workflows/test-plugin-mixed.yaml new file mode 100644 index 000000000000..74b483e98e99 --- /dev/null +++ b/.github/workflows/test-plugin-mixed.yaml @@ -0,0 +1,178 @@ +name: Test Plugin Mixed Version Clusters +on: + workflow_call: + inputs: + repo_cache_key: + required: true + type: string + plugin: + required: true + type: string + shard_index: + default: 0 + type: number + shard_count: + default: 1 + type: number + secrets: + REMOTE_CACHE_BUCKET_NAME_MIXED: + required: true + REMOTE_CACHE_CREDENTIALS_JSON: + required: true +jobs: + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + erlang_version: + - 26 + metadata_store: + - mnesia + # Khepri is currently skipped because Khepri is an unstable feature: we don't guarantee upgrability. + # Mixed-version tests currently fail with Khepri because of a new machine version introduced in + # Khepri v0.14.0. + # - khepri + include: + - erlang_version: 26 + elixir_version: 1.15 + timeout-minutes: 120 + steps: + - name: LOAD REPO CACHE + uses: actions/cache/restore@v4 + with: + key: ${{ inputs.repo_cache_key }} + path: /home/runner/repo-cache/ + - name: CHECKOUT REPOSITORY + uses: actions/checkout@v4 + - name: CONFIGURE OTP & ELIXIR + uses: erlef/setup-beam@v1.17 + with: + otp-version: ${{ matrix.erlang_version }} + elixir-version: ${{ matrix.elixir_version }} + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + - name: AUTHENTICATE TO GOOGLE CLOUD + uses: google-github-actions/auth@v2.1.4 + with: + credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} + - name: CONFIGURE BAZEL + run: | + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME_MIXED }}" ]; then + cat << EOF >> user.bazelrc + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME_MIXED }} + build --google_default_credentials + + build --experimental_guard_against_concurrent_changes + EOF + fi + cat << EOF >> user.bazelrc + build --repository_cache=/home/runner/repo-cache/ + build --color=yes + EOF + + bazelisk info release + #! - name: Setup tmate session + #! 
uses: mxschmitt/action-tmate@v3 + - uses: actions/setup-dotnet@v4 + if: inputs.plugin == 'rabbit' + with: + dotnet-version: '3.1.x' + - name: deps/amqp10_client SETUP + if: inputs.plugin == 'amqp10_client' + run: | + # reduce sandboxing so that activemq works + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbit SETUP + if: inputs.plugin == 'rabbit' + run: | + # reduce sandboxing so that maven works + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_auth_backend_ldap SETUP + if: inputs.plugin == 'rabbitmq_auth_backend_ldap' + run: | + sudo apt-get update && \ + sudo apt-get install -y \ + apparmor-utils \ + ldap-utils \ + slapd + + sudo aa-complain `which slapd` + + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_mqtt SETUP + if: inputs.plugin == 'rabbitmq_mqtt' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_peer_discovery_consul SETUP + if: inputs.plugin == 'rabbitmq_peer_discovery_consul' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_stream SETUP + if: inputs.plugin == 'rabbitmq_stream' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_stream_management SETUP + if: inputs.plugin == 'rabbitmq_stream_management' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_tracing SETUP + if: inputs.plugin == 'rabbitmq_tracing' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: COMPUTE TESTS IN SHARD + id: shard + run: | + bazelisk cquery \ + 'attr("tags", "mixed-version-cluster", tests(//deps/${{ inputs.plugin }}/...)) except attr("tags", "manual", //deps/${{ inputs.plugin }}/...)' \ + --output=label \ + | awk '{print $1;}' > tests.log + if [[ $(wc -l < tests.log) != "0" ]]; then + split -da 3 -l $((`wc -l < tests.log`/${{ inputs.shard_count }})) tests.log shard + printf -v padded_index "%03d" ${{ inputs.shard_index }} + echo "file=shard$padded_index" | tee -a $GITHUB_OUTPUT + else + echo "No tests in this shard" + echo "file=" | tee -a $GITHUB_OUTPUT + fi + - name: RUN TESTS + if: steps.shard.outputs.file != '' && inputs.plugin != 'rabbitmq_peer_discovery_aws' + run: | + echo "Tests in shard:" + cat ${{ steps.shard.outputs.file }} + echo "" + + ## WARNING: + ## secrets must not be set in --test_env or --action_env, + ## or otherwise logs must not be saved as artifacts. 
+ ## rabbit_ct_helpers or other code may log portions of the + ## env vars and leak them + + bazelisk test $(< ${{ steps.shard.outputs.file }}) \ + --test_env RABBITMQ_METADATA_STORE=${{ matrix.metadata_store }} \ + --build_tests_only \ + --verbose_failures + - name: UPLOAD TEST LOGS + if: always() + uses: actions/upload-artifact@v4 + with: + name: bazel-testlogs-${{ inputs.plugin }}-${{ inputs.shard_index }}-${{ matrix.erlang_version }}-${{ matrix.metadata_store }}-mixed + path: | + bazel-testlogs/deps/${{ inputs.plugin }}/* diff --git a/.github/workflows/test-plugin.yaml b/.github/workflows/test-plugin.yaml new file mode 100644 index 000000000000..afcbce286c49 --- /dev/null +++ b/.github/workflows/test-plugin.yaml @@ -0,0 +1,175 @@ +name: Test Plugin +on: + workflow_call: + inputs: + repo_cache_key: + required: true + type: string + plugin: + required: true + type: string + shard_index: + default: 0 + type: number + shard_count: + default: 1 + type: number + secrets: + REMOTE_CACHE_BUCKET_NAME: + required: true + REMOTE_CACHE_CREDENTIALS_JSON: + required: true +jobs: + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + erlang_version: + - 26 + metadata_store: + - mnesia + - khepri + include: + - erlang_version: 26 + elixir_version: 1.15 + timeout-minutes: 120 + steps: + - name: LOAD REPO CACHE + uses: actions/cache/restore@v4 + with: + key: ${{ inputs.repo_cache_key }} + path: /home/runner/repo-cache/ + - name: CHECKOUT REPOSITORY + uses: actions/checkout@v4 + - name: CONFIGURE OTP & ELIXIR + uses: erlef/setup-beam@v1.17 + with: + otp-version: ${{ matrix.erlang_version }} + elixir-version: ${{ matrix.elixir_version }} + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + - name: AUTHENTICATE TO GOOGLE CLOUD + uses: google-github-actions/auth@v2.1.4 + with: + credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} + - name: CONFIGURE BAZEL + run: | + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME }}" ]; then + cat << EOF >> user.bazelrc + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME }} + build --google_default_credentials + + build --experimental_guard_against_concurrent_changes + EOF + fi + cat << EOF >> user.bazelrc + build --repository_cache=/home/runner/repo-cache/ + build --color=yes + EOF + + bazelisk info release + #! - name: Setup tmate session + #! 
uses: mxschmitt/action-tmate@v3 + - uses: actions/setup-dotnet@v4 + if: inputs.plugin == 'rabbit' + with: + dotnet-version: '3.1.x' + - name: deps/amqp10_client SETUP + if: inputs.plugin == 'amqp10_client' + run: | + # reduce sandboxing so that activemq works + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbit SETUP + if: inputs.plugin == 'rabbit' + run: | + # reduce sandboxing so that maven works + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_auth_backend_ldap SETUP + if: inputs.plugin == 'rabbitmq_auth_backend_ldap' + run: | + sudo apt-get update && \ + sudo apt-get install -y \ + apparmor-utils \ + ldap-utils \ + slapd + + sudo aa-complain `which slapd` + + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_mqtt SETUP + if: inputs.plugin == 'rabbitmq_mqtt' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_peer_discovery_consul SETUP + if: inputs.plugin == 'rabbitmq_peer_discovery_consul' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_stream SETUP + if: inputs.plugin == 'rabbitmq_stream' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_stream_management SETUP + if: inputs.plugin == 'rabbitmq_stream_management' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: deps/rabbitmq_tracing SETUP + if: inputs.plugin == 'rabbitmq_tracing' + run: | + cat << EOF >> user.bazelrc + build --strategy=TestRunner=local + EOF + - name: CLI COMPILE WARNINGS AS ERRORS + if: inputs.plugin == 'rabbitmq_cli' + run: | + bazel build //deps/rabbitmq_cli:compile_warnings_as_errors \ + --verbose_failures + - name: COMPUTE TESTS IN SHARD + id: shard + run: | + bazelisk cquery \ + 'tests(//deps/${{ inputs.plugin }}/...) except attr("tags", "manual|mixed-version-cluster", //deps/...)' \ + --output=label \ + | awk '{print $1;}' > tests.log + split -da 3 -l $((`wc -l < tests.log`/${{ inputs.shard_count }})) tests.log shard + printf -v padded_index "%03d" ${{ inputs.shard_index }} + echo "file=shard$padded_index" | tee -a $GITHUB_OUTPUT + - name: RUN TESTS + if: inputs.plugin != 'rabbitmq_peer_discovery_aws' + run: | + echo "Tests in shard:" + cat ${{ steps.shard.outputs.file }} + echo "" + + ## WARNING: + ## secrets must not be set in --test_env or --action_env, + ## or otherwise logs must not be saved as artifacts. 
+ ## rabbit_ct_helpers or other code may log portions of the + ## env vars and leak them + + bazelisk test $(< ${{ steps.shard.outputs.file }}) \ + --test_env RABBITMQ_METADATA_STORE=${{ matrix.metadata_store }} \ + --build_tests_only \ + --verbose_failures + - name: UPLOAD TEST LOGS + if: always() + uses: actions/upload-artifact@v4 + with: + name: bazel-testlogs-${{ inputs.plugin }}-${{ inputs.shard_index }}-${{ matrix.erlang_version }}-${{ matrix.metadata_store }} + path: | + bazel-testlogs/deps/${{ inputs.plugin }}/* diff --git a/.github/workflows/test-selenium.yaml b/.github/workflows/test-selenium.yaml index a11d6310463e..c9955ca1d213 100644 --- a/.github/workflows/test-selenium.yaml +++ b/.github/workflows/test-selenium.yaml @@ -26,17 +26,17 @@ concurrency: cancel-in-progress: true jobs: selenium: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: erlang_version: - - "26.0" + - "26.2" browser: - chrome include: - - erlang_version: "26.0" - elixir_version: 1.14.5 + - erlang_version: "26.2" + elixir_version: 1.15.7 env: SELENIUM_DIR: deps/rabbitmq_management/selenium DOCKER_NETWORK: rabbitmq_net @@ -45,46 +45,36 @@ jobs: uses: actions/checkout@v4 - name: Configure OTP & Elixir - uses: erlef/setup-beam@v1.16 + uses: erlef/setup-beam@v1.17 with: otp-version: ${{ matrix.erlang_version }} elixir-version: ${{ matrix.elixir_version }} + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex - - name: Mount Bazel Cache - uses: actions/cache@v3.3.2 + - name: Authenticate To Google Cloud + uses: google-github-actions/auth@v2.1.4 with: - path: "/home/runner/repo-cache/" - key: ${{ runner.os }}-repo-cache-${{ hashFiles('MODULE.bazel','WORKSPACE','bazel/bzlmod/secondary_umbrella.bzl') }} - restore-keys: | - ${{ runner.os }}-repo-cache- + credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: Configure Bazel run: | - ERLANG_HOME="$(dirname $(dirname $(which erl)))" - ELIXIR_HOME="$(dirname $(dirname $(which iex)))" - if [ -n "${{ secrets.BUILDBUDDY_API_KEY }}" ]; then + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME }}" ]; then cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME }} + build --google_default_credentials + + build --remote_download_toplevel EOF fi cat << EOF >> user.bazelrc - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --build_metadata=VISIBILITY=PRIVATE - build:buildbuddy --repository_cache=/home/runner/repo-cache/ - build:buildbuddy --color=yes - build:buildbuddy --disk_cache= - - build:buildbuddy --remote_download_toplevel - - build --@rules_erlang//:erlang_version=${{ matrix.erlang_version }} - build --@rules_erlang//:erlang_home=${ERLANG_HOME} - build --//:elixir_home=${ELIXIR_HOME} + build --color=yes EOF - name: Build & Load RabbitMQ OCI run: | - bazelisk run packaging/docker-image:rabbitmq \ - --config=buildbuddy + bazelisk run packaging/docker-image:rabbitmq-amd64 - name: Configure Docker Network run: | @@ -97,11 +87,11 @@ jobs: - name: Run Suites run: | - RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq ${SELENIUM_DIR}/run-suites.sh + RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq-amd64 ${SELENIUM_DIR}/run-suites.sh - name: Upload Test Artifacts if: always() - uses: actions/upload-artifact@v3.1.3 + uses: actions/upload-artifact@v4.3.2 with: name: test-artifacts-${{ matrix.browser }}-${{ matrix.erlang_version }} path: | 
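Note on the shard computation used above: the COMPUTE TESTS IN SHARD steps in test-plugin.yaml and test-plugin-mixed.yaml write one Bazel test label per line to tests.log, split that file into fixed-size chunks named shard000, shard001, and so on, and each job then runs only the chunk matching its zero-padded shard_index. A minimal local sketch of that mapping follows; the label count of 105 and the shard_index of 3 are illustrative values, not taken from the workflows:

    # Sketch (bash) of the shard-file mapping, assuming tests.log already
    # holds 105 Bazel test labels, one per line.
    shard_count=10
    shard_index=3
    # floor(105/10) = 10 labels per chunk: shard000..shard009 get 10 labels
    # each, and the remaining 5 labels spill into an extra file, shard010.
    split -da 3 -l $(( $(wc -l < tests.log) / shard_count )) tests.log shard
    printf -v padded_index "%03d" "$shard_index"
    cat "shard$padded_index"   # the labels this job would pass to `bazelisk test`

Because split -l uses the floor of the division, any remainder lands in a chunk beyond shard009 that no job with shard_index 0..9 selects; that is only a consideration when the number of matching tests is not a multiple of shard_count.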
diff --git a/.github/workflows/test-windows.yaml b/.github/workflows/test-windows.yaml index 66a14a01c030..87e929ad8609 100644 --- a/.github/workflows/test-windows.yaml +++ b/.github/workflows/test-windows.yaml @@ -11,14 +11,17 @@ jobs: fail-fast: false matrix: include: - - erlang_version: "26.0" + - erlang_version: "26.1" elixir_version: "1.15.2" + metadata_store: + - mnesia + - khepri timeout-minutes: 120 steps: - name: CHECKOUT REPOSITORY uses: actions/checkout@v4 - name: CONFIGURE ERLANG - uses: erlef/setup-beam@v1.16 + uses: erlef/setup-beam@v1.17 with: otp-version: ${{ matrix.erlang_version }} elixir-version: ${{ matrix.elixir_version }} @@ -31,24 +34,17 @@ jobs: id: configure shell: bash run: | - if [ -n "${{ secrets.BUILDBUDDY_API_KEY }}" ]; then + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME }}" ]; then cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME }} + build --google_default_credentials EOF fi cat << EOF >> user.bazelrc startup --output_user_root=C:/tmp startup --windows_enable_symlinks build --enable_runfiles - - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --build_metadata=VISIBILITY=PUBLIC - build:buildbuddy --remote_instance_name=buildbuddy-io/buildbuddy/ci-windows - # build:buildbuddy --repository_cache=/home/runner/repo-cache/ - build:buildbuddy --color=yes - build:buildbuddy --disk_cache= - build:buildbuddy --noexperimental_remote_cache_compression - build:buildbuddy --noexperimental_remote_cache_async + build --color=yes EOF bazelisk info release @@ -57,6 +53,7 @@ jobs: run: | bazelisk test //... ^ --config=buildbuddy ^ + --test_env RABBITMQ_METADATA_STORE=${{ matrix.metadata_store }} ^ --test_tag_filters=-aws,-docker,-bats,-starts-background-broker,-dialyze ^ --build_tests_only ^ --verbose_failures diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 6f571d8bb157..c1a8dfa57b78 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -2,84 +2,1147 @@ name: Test on: push: branches: - - main - - v3.12.x - - v3.11.x - - v3.10.x - - v3.9.x - - v3.8.x - - bump-otp-for-oci - - bump-rbe-* - - bump-rules_erlang + - main + - v3.13.x + - v3.12.x + - v3.11.x + - bump-otp-for-oci + - bump-rbe-* + - bump-rules_erlang paths: - - 'deps/**' - - 'scripts/**' - - Makefile - - plugins.mk - - rabbitmq-components.mk - - .bazelrc - - .bazelversion - - BUILD.* - - '*.bzl' - - '*.bazel' - - .github/workflows/test.yaml + - deps/** + - scripts/** + - Makefile + - plugins.mk + - rabbitmq-components.mk + - .bazelrc + - .bazelversion + - BUILD.* + - '*.bzl' + - '*.bazel' + - .github/workflows/test.yaml pull_request: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: - test: - name: Test - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - otp_version_id: - - 25_3 - - 26 - timeout-minutes: 120 + check-workflow: + runs-on: ubuntu-latest + outputs: + repo_cache_key: ${{ steps.repo-cache-key.outputs.value }} steps: - name: CHECKOUT REPOSITORY uses: actions/checkout@v4 - - name: MOUNT BAZEL CACHE - uses: actions/cache@v3.3.2 + - name: SETUP ERLANG/ELIXIR + uses: erlef/setup-beam@v1 + with: + otp-version: 26 + elixir-version: 1.15 + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + - name: ENSURE WORKFLOWS ARE UP TO DATE + run: | + mkdir local-bin/ + curl -L 
https://carvel.dev/install.sh | K14SIO_INSTALL_BIN_DIR=local-bin bash + make actions-workflows YTT=$PWD/local-bin/ytt + git diff --exit-code + - name: COMPUTE REPO CACHE KEY + id: repo-cache-key + run: | + echo "value=bazel-repo-cache-${{ hashFiles('MODULE.bazel') }}" | tee -a $GITHUB_OUTPUT + - name: AUTHENTICATE TO GOOGLE CLOUD + uses: google-github-actions/auth@v2.1.4 with: - path: "/home/runner/repo-cache/" - key: ${{ runner.os }}-repo-cache-${{ hashFiles('MODULE.bazel','WORKSPACE','bazel/bzlmod/secondary_umbrella.bzl') }} - restore-keys: | - ${{ runner.os }}-repo-cache- - - name: CONFIGURE BAZEL + credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} + - name: REPO CACHE + id: cache + uses: actions/cache@v4 + with: + key: ${{ steps.repo-cache-key.outputs.value }} + path: /home/runner/repo-cache/ + - name: PRIME CACHE + if: steps.cache.outputs.cache-hit != 'true' run: | - if [ -n "${{ secrets.BUILDBUDDY_API_KEY }}" ]; then + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME }}" ]; then cat << EOF >> user.bazelrc - build:buildbuddy --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_API_KEY }} + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME }} + build --google_default_credentials EOF fi cat << EOF >> user.bazelrc - build:buildbuddy --build_metadata=ROLE=CI - build:buildbuddy --build_metadata=VISIBILITY=PUBLIC - build:buildbuddy --repository_cache=/home/runner/repo-cache/ - build:buildbuddy --color=yes - build:buildbuddy --disk_cache= + build --repository_cache=/home/runner/repo-cache/ + build --color=yes EOF - bazelisk info release - #! - name: Setup tmate session - #! uses: mxschmitt/action-tmate@v3 - - name: RUN TESTS - run: | - sudo sysctl -w net.ipv4.tcp_keepalive_time=60 - sudo ethtool -K eth0 tso off gso off gro off tx off rx off lro off - bazelisk test //... \ - --config=rbe-${{ matrix.otp_version_id }} \ - --test_tag_filters=-aws,-docker,-mixed-version-cluster \ - --build_tests_only \ - --verbose_failures + bazelisk cquery \ + 'tests(//...) 
except attr("tags", "manual|mixed-version-cluster", //deps/...)' \ + --output=label + test-amqp10_client: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: amqp10_client + secrets: inherit + test-amqp10_common: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: amqp10_common + secrets: inherit + test-amqp_client: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: amqp_client + secrets: inherit + test-oauth2_client: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: oauth2_client + secrets: inherit + test-rabbit_common: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit_common + secrets: inherit + test-rabbitmq_ct_client_helpers: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_ct_client_helpers + secrets: inherit + test-rabbitmq_ct_helpers: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_ct_helpers + secrets: inherit + test-rabbitmq_stream_common: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_stream_common + secrets: inherit + test-trust_store_http: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: trust_store_http + secrets: inherit + test-rabbit-0: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 0 + shard_count: 10 + secrets: inherit + test-rabbit-1: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 1 + shard_count: 10 + secrets: inherit + test-rabbit-2: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 2 + shard_count: 10 + secrets: inherit + test-rabbit-3: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common 
+ - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 3 + shard_count: 10 + secrets: inherit + test-rabbit-4: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 4 + shard_count: 10 + secrets: inherit + test-rabbit-5: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 5 + shard_count: 10 + secrets: inherit + test-rabbit-6: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 6 + shard_count: 10 + secrets: inherit + test-rabbit-7: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 7 + shard_count: 10 + secrets: inherit + test-rabbit-8: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 8 + shard_count: 10 + secrets: inherit + test-rabbit-9: + needs: + - check-workflow + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbit + shard_index: 9 + shard_count: 10 + secrets: inherit + test-rabbitmq_cli: + needs: check-workflow + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_cli + secrets: inherit + test-rabbitmq_amqp_client: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - 
test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_amqp_client + secrets: inherit + test-rabbitmq_amqp1_0: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_amqp1_0 + secrets: inherit + test-rabbitmq_auth_backend_cache: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_backend_cache + secrets: inherit + test-rabbitmq_auth_backend_http: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_backend_http + secrets: inherit + test-rabbitmq_auth_backend_ldap: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_backend_ldap + secrets: inherit + test-rabbitmq_auth_backend_oauth2: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_backend_oauth2 + secrets: inherit + test-rabbitmq_auth_mechanism_ssl: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_auth_mechanism_ssl + secrets: inherit + test-rabbitmq_aws: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_aws + secrets: inherit + test-rabbitmq_consistent_hash_exchange: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ 
needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_consistent_hash_exchange + secrets: inherit + test-rabbitmq_event_exchange: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_event_exchange + secrets: inherit + test-rabbitmq_federation: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_federation + secrets: inherit + test-rabbitmq_federation_management: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_federation_management + secrets: inherit + test-rabbitmq_federation_prometheus: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_federation_prometheus + secrets: inherit + test-rabbitmq_jms_topic_exchange: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_jms_topic_exchange + secrets: inherit + test-rabbitmq_management: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_management + secrets: inherit + test-rabbitmq_management_agent: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_management_agent + secrets: inherit + test-rabbitmq_mqtt: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_mqtt + secrets: inherit + test-rabbitmq_peer_discovery_aws: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + 
- test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_aws + secrets: inherit + test-rabbitmq_peer_discovery_common: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_common + secrets: inherit + test-rabbitmq_peer_discovery_consul: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_consul + secrets: inherit + test-rabbitmq_peer_discovery_etcd: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_etcd + secrets: inherit + test-rabbitmq_peer_discovery_k8s: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_peer_discovery_k8s + secrets: inherit + test-rabbitmq_prelaunch: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_prelaunch + secrets: inherit + test-rabbitmq_prometheus: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_prometheus + secrets: inherit + test-rabbitmq_random_exchange: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_random_exchange + secrets: inherit + test-rabbitmq_recent_history_exchange: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + 
repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_recent_history_exchange + secrets: inherit + test-rabbitmq_sharding: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_sharding + secrets: inherit + test-rabbitmq_shovel: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_shovel + secrets: inherit + test-rabbitmq_shovel_management: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_shovel_management + secrets: inherit + test-rabbitmq_shovel_prometheus: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_shovel_prometheus + secrets: inherit + test-rabbitmq_stomp: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_stomp + secrets: inherit + test-rabbitmq_stream: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_stream + secrets: inherit + test-rabbitmq_stream_management: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_stream_management + secrets: inherit + test-rabbitmq_top: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_top + secrets: inherit + test-rabbitmq_tracing: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - 
test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_tracing + secrets: inherit + test-rabbitmq_trust_store: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_trust_store + secrets: inherit + test-rabbitmq_web_dispatch: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_dispatch + secrets: inherit + test-rabbitmq_web_mqtt: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_mqtt + secrets: inherit + test-rabbitmq_web_mqtt_examples: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_mqtt_examples + secrets: inherit + test-rabbitmq_web_stomp: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_stomp + secrets: inherit + test-rabbitmq_web_stomp_examples: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_web_stomp_examples + secrets: inherit summary-test: needs: - - test + - test-amqp10_client + - test-amqp10_common + - test-amqp_client + - test-oauth2_client + - test-rabbit_common + - test-rabbitmq_ct_client_helpers + - test-rabbitmq_ct_helpers + - test-rabbitmq_stream_common + - test-trust_store_http + - test-rabbitmq_amqp_client + - test-rabbitmq_amqp1_0 + - test-rabbitmq_auth_backend_cache + - test-rabbitmq_auth_backend_http + - test-rabbitmq_auth_backend_ldap + - test-rabbitmq_auth_backend_oauth2 + - test-rabbitmq_auth_mechanism_ssl + - test-rabbitmq_aws + - test-rabbitmq_consistent_hash_exchange + - test-rabbitmq_event_exchange + - test-rabbitmq_federation + - test-rabbitmq_federation_management + - test-rabbitmq_federation_prometheus + - test-rabbitmq_jms_topic_exchange + - test-rabbitmq_management + - test-rabbitmq_management_agent + - test-rabbitmq_mqtt + - 
test-rabbitmq_peer_discovery_aws + - test-rabbitmq_peer_discovery_common + - test-rabbitmq_peer_discovery_consul + - test-rabbitmq_peer_discovery_etcd + - test-rabbitmq_peer_discovery_k8s + - test-rabbitmq_prelaunch + - test-rabbitmq_prometheus + - test-rabbitmq_random_exchange + - test-rabbitmq_recent_history_exchange + - test-rabbitmq_sharding + - test-rabbitmq_shovel + - test-rabbitmq_shovel_management + - test-rabbitmq_shovel_prometheus + - test-rabbitmq_stomp + - test-rabbitmq_stream + - test-rabbitmq_stream_management + - test-rabbitmq_top + - test-rabbitmq_tracing + - test-rabbitmq_trust_store + - test-rabbitmq_web_dispatch + - test-rabbitmq_web_mqtt + - test-rabbitmq_web_mqtt_examples + - test-rabbitmq_web_stomp + - test-rabbitmq_web_stomp_examples + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + - test-rabbitmq_cli runs-on: ubuntu-latest steps: - name: SUMMARY run: | - echo "SUCCESS" + cat << 'EOF' | jq -e 'map(.result == "success") | all(.)' + ${{ toJson(needs) }} + EOF diff --git a/.github/workflows/update-elixir-patches.yaml b/.github/workflows/update-elixir-patches.yaml deleted file mode 100644 index 16c6f3ff8516..000000000000 --- a/.github/workflows/update-elixir-patches.yaml +++ /dev/null @@ -1,91 +0,0 @@ -name: Update Elixir Patch Versions for Bazel Based Workflows -on: - schedule: - - cron: '0 3 * * *' - workflow_dispatch: -jobs: - update-toolchains: - name: Update Elixir Versions - runs-on: ubuntu-20.04 - strategy: - max-parallel: 1 - fail-fast: false - matrix: - include: - - elixir_version: "1.13" - name: '1_13' - - elixir_version: "1.14" - name: '1_14' - timeout-minutes: 10 - env: - branch: bump-elixir-${{ matrix.elixir_version }} - steps: - - name: CHECKOUT REPOSITORY - uses: actions/checkout@v4 - - name: FAIL IF THE PR ALREADY EXISTS - id: check-for-branch - run: | - set +e - if git ls-remote --exit-code --heads origin ${{ env.branch }}; then - echo "Branch ${{ env.branch }} already exits" - exit 1 - fi - - name: DETERMINE LATEST PATCH & SHA - id: fetch-version - run: | - TAG_NAME=$(curl -s GET https://api.github.com/repos/elixir-lang/elixir/tags?per_page=100 \ - | jq -r 'map(select(.name | contains("v${{ matrix.elixir_version }}"))) | first | .name') - - if [[ -z "${TAG_NAME}" ]]; then - echo "Failed to determine latest TAG_NAME for v${{ matrix.elixir_version }}" - exit 1 - fi - - ARCHIVE_URL="https://github.com/elixir-lang/elixir/archive/${TAG_NAME}.tar.gz" - wget --continue --quiet --output-document="/tmp/elixir.tar.gz" "${ARCHIVE_URL}" && \ - SHA="$(shasum -a 256 "/tmp/elixir.tar.gz" | awk '{print $1}')" - - if [[ -z "${SHA}" ]]; then - echo "Failed to determine SHA for ${TAG_NAME}" - exit 1 - fi - - echo "VERSION=${TAG_NAME#v}" >> $GITHUB_OUTPUT - echo "SHA=${SHA}" >> $GITHUB_OUTPUT - - name: MODIFY VERSION FILE - run: | - sudo npm install --global --silent @bazel/buildozer - - OLD_SHA="$(cat MODULE.bazel | buildozer 'print sha256' -:${{ matrix.name }})" - OLD_VERSION="$(cat MODULE.bazel | buildozer 'print version' -:${{ matrix.name }})" - - echo "OLD_SHA: $OLD_SHA" - echo "OLD_VERSION: $OLD_VERSION" - - echo "$(cat MODULE.bazel | buildozer 'set sha256 "${{ steps.fetch-version.outputs.SHA }}"' -:${{ matrix.name }})" > MODULE.bazel - echo "$(cat MODULE.bazel | buildozer 'set version "${{ steps.fetch-version.outputs.VERSION }}"' -:${{ matrix.name }})" > MODULE.bazel - - echo "MODULE.bazel updated" - - set -x - git diff - - name: 
CREATE PULL REQUEST - uses: peter-evans/create-pull-request@v5.0.2 - with: - token: ${{ secrets.REPO_SCOPED_TOKEN }} - committer: GitHub - author: GitHub - title: Adopt elixir ${{ steps.fetch-version.outputs.VERSION }} - body: > - Automated changes created by - ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - using the [create-pull-request](https://github.com/peter-evans/create-pull-request) - GitHub action in the ${{ github.workflow }} workflow. - commit-message: | - Adopt elixir ${{ steps.fetch-version.outputs.VERSION }} - labels: | - backport-v3.12.x - backport-v3.11.x - backport-v3.10.x - branch: ${{ env.branch }} - delete-branch: true diff --git a/.github/workflows/update-otp-patches.yaml b/.github/workflows/update-otp-patches.yaml deleted file mode 100644 index 1dea087d7a24..000000000000 --- a/.github/workflows/update-otp-patches.yaml +++ /dev/null @@ -1,146 +0,0 @@ -name: Update OTP Patch Versions for Bazel Based Workflows -on: - schedule: - - cron: '0 3 * * *' - workflow_dispatch: -jobs: - update-toolchains: - name: Update OTP Versions - runs-on: ubuntu-20.04 - strategy: - max-parallel: 1 - fail-fast: false - matrix: - erlang_version: - - "24.3" - - "25.0" - - "25.1" - - "25.2" - - "25.3" - - "26.0" - include: - - erlang_version: "24.3" - name: '24' - branch: v3.10.x - labels: "" - - erlang_version: "25.0" - name: '25_0' - branch: main - labels: | - backport-v3.12.x - backport-v3.11.x - - erlang_version: "25.1" - name: '25_1' - branch: main - labels: | - backport-v3.12.x - backport-v3.11.x - backport-v3.10.x - - erlang_version: "25.2" - name: '25_2' - branch: main - labels: | - backport-v3.12.x - backport-v3.11.x - backport-v3.10.x - - erlang_version: "25.3" - name: '25_3' - branch: main - labels: | - backport-v3.12.x - backport-v3.11.x - backport-v3.10.x - - erlang_version: "26.0" - name: '26' - branch: main - labels: | - backport-v3.12.x - backport-v3.11.x - timeout-minutes: 10 - env: - branch: bump-otp-${{ matrix.erlang_version }} - steps: - - name: CHECKOUT REPOSITORY - uses: actions/checkout@v4 - with: - ref: ${{ matrix.branch }} - - name: FAIL IF THE PR ALREADY EXISTS - id: check-for-branch - run: | - set +e - if git ls-remote --exit-code --heads origin ${{ env.branch }}; then - echo "Branch ${{ env.branch }} already exits" - exit 1 - fi - - name: DETERMINE LATEST PATCH & SHA - id: fetch-version - run: | - TAG_NAME=$(curl -s GET https://api.github.com/repos/erlang/otp/tags?per_page=100 \ - | jq -r 'map(select(.name | contains("OTP-${{ matrix.erlang_version }}"))) | first | .name') - VERSION=${TAG_NAME#OTP-} - - if [[ -z "${VERSION}" ]]; then - echo "Failed to determine latest VERSION for OTP-${{ matrix.erlang_version }}" - exit 1 - fi - - ARCHIVE_RBE_URL="https://github.com/erlang/otp/releases/download/${TAG_NAME}/otp_src_${VERSION}.tar.gz" - wget --continue --quiet --output-document="/tmp/otp_src_${VERSION}.tar.gz" "${ARCHIVE_RBE_URL}" - SHA="$(shasum -a 256 "/tmp/otp_src_${VERSION}.tar.gz" | awk '{print $1}')" - - if [[ -z "${SHA}" ]]; then - echo "Failed to determine SHA for ${TAG_NAME}" - exit 1 - fi - - ARCHIVE_OCI_URL="https://github.com/erlang/otp/archive/OTP-${VERSION}.tar.gz" - wget --continue --quiet --output-document="/tmp/OTP-${VERSION}.tar.gz" "${ARCHIVE_OCI_URL}" - SHA2="$(shasum -a 256 "/tmp/OTP-${VERSION}.tar.gz" | awk '{print $1}')" - - if [[ -z "${SHA2}" ]]; then - echo "Failed to determine SHA2 for ${TAG_NAME}" - exit 1 - fi - - echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT - echo "SHA=${SHA}" >> $GITHUB_OUTPUT - echo 
"SHA2=${SHA2}" >> $GITHUB_OUTPUT - - name: MODIFY VERSION FILE - run: | - sudo npm install --global --silent @bazel/buildozer - - OLD_SHA="$(cat MODULE.bazel | buildozer 'print sha256' -:${{ matrix.name }})" - OLD_VERSION="$(cat MODULE.bazel | buildozer 'print version' -:${{ matrix.name }})" - - echo "OLD_SHA: $OLD_SHA" - echo "OLD_VERSION: $OLD_VERSION" - - echo "$(cat MODULE.bazel | buildozer 'set sha256 "${{ steps.fetch-version.outputs.SHA }}"' -:${{ matrix.name }})" > MODULE.bazel - echo "$(cat MODULE.bazel | buildozer 'set version "${{ steps.fetch-version.outputs.VERSION }}"' -:${{ matrix.name }})" > MODULE.bazel - echo "MODULE.bazel updated" - - echo "$(cat WORKSPACE | buildozer 'set downloaded_file_path "OTP-${{ steps.fetch-version.outputs.VERSION }}.tar.gz"' -:otp_src_${{ matrix.name }})" > WORKSPACE - echo "$(cat WORKSPACE | buildozer 'set urls ["https://github.com/erlang/otp/archive/OTP-${{ steps.fetch-version.outputs.VERSION }}.tar.gz"]' -:otp_src_${{ matrix.name }})" > WORKSPACE - echo "$(cat WORKSPACE | buildozer 'set sha256 "${{ steps.fetch-version.outputs.SHA2 }}"' -:otp_src_${{ matrix.name }})" > WORKSPACE - - echo "WORKSPACE updated" - - set -x - git diff - - name: CREATE PULL REQUEST - uses: peter-evans/create-pull-request@v5.0.2 - with: - token: ${{ secrets.REPO_SCOPED_TOKEN }} - committer: GitHub - author: GitHub - title: Adopt otp ${{ steps.fetch-version.outputs.VERSION }} - body: > - Automated changes created by - ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - using the [create-pull-request](https://github.com/peter-evans/create-pull-request) - GitHub action in the ${{ github.workflow }} workflow. - commit-message: | - Adopt otp ${{ steps.fetch-version.outputs.VERSION }} - labels: ${{ matrix.labels }} - branch: ${{ env.branch }} - delete-branch: true diff --git a/.gitignore b/.gitignore index 234e48aa8500..1bc1578cb1d2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,18 +1,39 @@ +# Editor/OS temporary and unwanted files. *~ +\#* +.#* .sw? -*.tmp .*.sw? -/.erlang.mk/ -/_build/ -/ebin/ -/bin/ +*.orig +*.tmp +.idea/ +.tool-versions +.vscode/ +.DS_Store + +# Erlang.mk temporary files and outputs. +*.plt +*.d +.erlang.mk/ +cover/ +docs/*.html +ebin/ +logs/ +**/test/*.beam +**/test/ct.cover.spec + +elvis + +# Dependencies excluding RabbitMQ applications. 
/deps/* !/deps/rabbit/ !/deps/rabbit_common/ !/deps/amqp_client/ !/deps/amqp10_client/ !/deps/amqp10_common/ +!/deps/oauth2_client/ !/deps/rabbitmq_amqp1_0/ +!/deps/rabbitmq_amqp_client/ !/deps/rabbitmq_auth_backend_cache/ !/deps/rabbitmq_auth_backend_http/ !/deps/rabbitmq_auth_backend_ldap/ @@ -27,6 +48,7 @@ !/deps/rabbitmq_event_exchange/ !/deps/rabbitmq_federation/ !/deps/rabbitmq_federation_management/ +!/deps/rabbitmq_federation_prometheus/ !/deps/rabbitmq_jms_topic_exchange/ !/deps/rabbitmq_management/ !/deps/rabbitmq_management_agent/ @@ -36,12 +58,14 @@ !/deps/rabbitmq_peer_discovery_consul/ !/deps/rabbitmq_peer_discovery_etcd/ !/deps/rabbitmq_peer_discovery_k8s/ +!/deps/rabbitmq_prelaunch/ !/deps/rabbitmq_prometheus/ !/deps/rabbitmq_random_exchange/ !/deps/rabbitmq_recent_history_exchange/ !/deps/rabbitmq_sharding/ !/deps/rabbitmq_shovel/ !/deps/rabbitmq_shovel_management/ +!/deps/rabbitmq_shovel_prometheus/ !/deps/rabbitmq_stomp/ !/deps/rabbitmq_stream/ !/deps/rabbitmq_stream_common/ @@ -56,42 +80,57 @@ !/deps/rabbitmq_web_stomp/ !/deps/rabbitmq_web_stomp_examples/ !/deps/trust_store_http/ -/escript/ -/escript.lock -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -ct.cover.spec -erl_crash.dump + +# Rebar3/Elixir temporary files and outputs. +.rebar3/ +_build/ +_checkouts/ rebar3.crashdump -.envrc -.exrc -.ignore -*.plt -*.lock -/logs/ +# Automatically generated rebar.config files. +rebar.config +!/deps/amqp10_common/rebar.config +!/rebar.config -/topic-branch-scratch/ +# Bazel. +.bazelrc +user.bazelrc +bazel-* +extra_deps/ -PACKAGES/ -packaging/docker-image/rabbitmq_server-*/ +# Erlang/OTP unwanted files. +.erlang.cookie +erl_crash.dump +MnesiaCore.* -# Source distribution. +# RabbitMQ Makefiles temporary files and outputs. +*.lock +escript/ +plugins/ +sbin/ +git-revisions.txt + +PACKAGES/ rabbitmq-server-*/ rabbitmq-server-*.tar.gz rabbitmq-server-*.tar.bz2 rabbitmq-server-*.tar.xz rabbitmq-server-*.zip +# Trace tools output. +*-ttb +*.ti +*.lz4* traces* -deps/rabbitmq_stomp/test/python_SUITE_data/src/deps -callgrand* +callgrind.out* +callgraph.dot* -/user.bazelrc -/bazel-* -/extra_deps/ +# Unknown. If you know in which category this belongs to, +# please move it there. Thank you. +.envrc +.exrc +.ignore +/genhtml/ -.vscode -.idea +# @todo Should be moved to rabbitmq_stomp directly. +deps/rabbitmq_stomp/test/python_SUITE_data/src/deps diff --git a/.mailmap b/.mailmap new file mode 100644 index 000000000000..b0eccf41cc4f --- /dev/null +++ b/.mailmap @@ -0,0 +1 @@ +Rin Kuryloski diff --git a/BAZEL.md b/BAZEL.md index 868b02be7a49..856f0453c448 100644 --- a/BAZEL.md +++ b/BAZEL.md @@ -75,20 +75,22 @@ Note: This takes quite some time on a single machine. ### from hex.pm -1. `bazel run gazelle-update-repos -- -args hex.pm/accept@0.3.5` to generate/update `bazel/BUILD.accept` -1. `git checkout WORKSPACE` to reset the workspace file +1. `bazel run gazelle-update-repos -- hex.pm/accept@0.3.5` to generate/update `bazel/BUILD.accept` 1. Add/update the entry in MODULE.bazel ### from github -1. `bazel run gazelle-update-repos -- -args --testonly github.com/extend/ct_helper@master` -1. `git checkout WORKSPACE` to reset the workspace file +1. `bazel run gazelle-update-repos -- --testonly github.com/extend/ct_helper@master` 1. 
Add/update the entry in MODULE.bazel ## Update BUILD files `bazel run gazelle` +## Regenerate moduleindex.yaml + +`bazel run :moduleindex > moduleindex.yaml` + ## Additional Useful Commands - Format all bazel files consistently (requires [buildifier](https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md)): diff --git a/BUILD.bazel b/BUILD.bazel index a0952d2ee5fe..5572770617a0 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -1,7 +1,6 @@ load( "@bazel_skylib//rules:common_settings.bzl", "bool_flag", - "string_flag", ) load("@rules_pkg//pkg:mappings.bzl", "pkg_files") load("@bazel_gazelle//:def.bzl", "gazelle") @@ -11,7 +10,7 @@ load("@rules_erlang//:dialyze.bzl", "DEFAULT_PLT_APPS", "plt") load("@rules_erlang//:shell.bzl", "shell") load("@rules_erlang//:erl_eval.bzl", "erl_eval") load("@rules_erlang//gazelle:moduleindex.bzl", "moduleindex") -load("//bazel/elixir:iex_eval.bzl", "iex_eval") +load("@rules_elixir//:iex_eval.bzl", "iex_eval") load(":rabbitmq_home.bzl", "rabbitmq_home") load(":rabbitmq_run.bzl", "rabbitmq_run", "rabbitmq_run_command") load(":rabbitmqctl.bzl", "rabbitmqctl") @@ -124,12 +123,6 @@ config_setting( }, ) -string_flag( - name = "elixir_home", - build_setting_default = "", - visibility = ["//visibility:public"], -) - plt( name = "base_plt", apps = DEFAULT_PLT_APPS + [ @@ -141,10 +134,7 @@ plt( PLUGINS = all_plugins( rabbitmq_workspace = "", -) + select({ - "@rules_erlang//:debug_build": ["@looking_glass//:erlang_app"], - "//conditions:default": [], -}) +) rabbitmq_home( name = "broker-home", @@ -157,7 +147,7 @@ rabbitmq_run( visibility = ["//visibility:public"], ) -# Allow us to `bazel run broker` +# Allows us to `bazel run broker` # for the equivalent of `make run-broker` rabbitmq_run_command( name = "broker", @@ -165,7 +155,23 @@ rabbitmq_run_command( subcommand = "run-broker", ) -# Allow us to `bazel run start-cluster` +# Allows us to `bazel run background-broker` +# to start a broker in the background +rabbitmq_run_command( + name = "background-broker", + rabbitmq_run = ":rabbitmq-run", + subcommand = "start-background-broker", +) + +# Allows us to `bazel run stop-broker` +# Useful is broker started in the background +rabbitmq_run_command( + name = "stop-broker", + rabbitmq_run = ":rabbitmq-run", + subcommand = "stop-node", +) + +# Allows us to `bazel run start-cluster` # for the equivalent of `make start-cluster` rabbitmq_run_command( name = "start-cluster", @@ -173,7 +179,7 @@ rabbitmq_run_command( subcommand = "start-cluster", ) -# Allow us to `bazel run stop-cluster` +# Allows us to `bazel run stop-cluster` # for the equivalent of `make stop-cluster` rabbitmq_run_command( name = "stop-cluster", @@ -222,12 +228,14 @@ erl_eval( name = "otp_version", outs = ["otp_version.txt"], expression = """{ok, Version} = file:read_file(filename:join([code:root_dir(), "releases", erlang:system_info(otp_release), "OTP_VERSION"])), file:write_file(os:getenv("OUTS"), Version), halt().""", + visibility = ["//visibility:public"], ) iex_eval( name = "elixir_version", outs = ["elixir_version.txt"], expression = """File.write!(System.get_env("OUTS"), System.version()); System.halt()""", + visibility = ["//visibility:public"], ) filegroup( diff --git a/COMMUNITY_SUPPORT.md b/COMMUNITY_SUPPORT.md new file mode 100644 index 000000000000..31e530cb5a75 --- /dev/null +++ b/COMMUNITY_SUPPORT.md @@ -0,0 +1,70 @@ + +# RabbitMQ Community Support Eligibility + +This document explains who is eligible for community support for open source RabbitMQ. + +### What is Community Support? 
+ +Community support is defined as all questions, root cause analysis requests, issue reports, and other interactions the RabbitMQ core team has with open source RabbitMQ users on GitHub +and our community forums. + +### What is Broadcom's Obligation to Reply to Messages or Issues Reported? + +The RabbitMQ Core team at Broadcom has no obligation to reply to any message or issue posted by the community of open source RabbitMQ users. + +### Who is Eligible for community support + +The following groups of users are eligible for community support: + + * Users who regularly contribute to RabbitMQ development (a definition of "contribution" is provided at the end of this document) + * Users who use [the most recent release series](https://www.rabbitmq.com/release-information) and provide detailed and well researched issue reports, including responsibly disclosed security vulnerabilities + +All other users are not eligible for community support from the RabbitMQ Core Team. + +Users with a [commercial support license](https://tanzu.vmware.com/rabbitmq/oss) or a [commercial edition license](https://tanzu.vmware.com/rabbitmq) should +use commercial support channels. + +### Exceptions: Reports that Will Always Be Investigated + +The RabbitMQ core team will always investigate the following issues, even if they are reported by an ineligible user: + + * Responsibly disclosed security vulnerabilities + * Detailed issues with a proof that data safety may be at risk + * Detailed issues with a proof that a node may fail to start, join the cluster, or rejoin the cluster + +### Exceptions: Question that Will Be Ignored + +Unless overwhelming evidence of a systemic problem in RabbitMQ is demonstrated, the following issues will get minimum or no attention at all from the core team: + +* Questions related to [OAuth2 configuration](https://www.rabbitmq.com/docs/oauth2), [OAuth 2 configuration examples](https://www.rabbitmq.com/docs/oauth2-examples) and [troubleshooting of OAuth 2](https://www.rabbitmq.com/docs/troubleshooting-oauth2) +* Questions related to [TLS configuration](https://www.rabbitmq.com/docs/ssl) and [troubleshooting of TLS connections](https://www.rabbitmq.com/docs/troubleshooting-ssl) +* Questions related to [troubleshooting of network connectivity](https://www.rabbitmq.com/docs/troubleshooting-networking) +* Questions related to [LDAP configuration](https://www.rabbitmq.com/docs/ldap) and [troubleshooting](https://www.rabbitmq.com/docs/ldap#troubleshooting) + +These topics represent some of the most time consuming questions to investigate and respond to thoroughly. Guidance and investigations related to these features will only be available to customers with VMware Tanzu RabbitMQ commercial licenses. 
+ +## Definition of "contribution" + +For the purpose of this policy, the RabbitMQ team defines a "contribution" as any of the following: + +* A pull request that fixes any bug, introduces a new feature, clarifies example documentation, or introduces any other behavior change that may not be easy to categorize but the team is willing to accept +* An issue report that includes RabbitMQ and Erlang versions used, a reasonably detailed problem definition, a detailed set of specific steps that can be followed in order to quickly reproduce the behavior, and all the necessary evidence: log snippets from all nodes with relevant information, metrics dashboards over a relevant period of time, + code snippets that demonstrate application behavior, and any other information necessary to quickly and efficiently reproduce the reported behavior at least some of the time +* Executable benchmarks (for example, using PerfTest) that demonstrate regressions +* Donated infrastructure or services (this can be IaaS provider credits, credits for services, and anything else that the RabbitMQ core team can use to build and distribute open source RabbitMQ packages, tools, libraries) +* Meaningful contributions to RabbitMQ documentation, not including typo fixes, grammar corrections, re-wording. Contributions must include new original content, produced by a human, that makes it easier to install, operate, upgrade, and communicate with RabbitMQ from applications +* A detailed, RFC-style feature request where the status quo, the end goal, the pros and the cons of the proposed feature are well defined +* Meaningful build system updates previously pre-approved by the RabbitMQ core team + +The above rules equally apply to contributions to RabbitMQ, officially supported RabbitMQ client libraries, key RabbitMQ dependencies (Erlang/OTP, Ra, Osiris, Khepri, Cuttlefish, Horus), and the Kubernetes cluster Operators maintained by the RabbitMQ core team. + +## Release Series Eligible for Community Support + +Only releases in the latest minor series of the latest major version are eligible for community support. Currently this is RabbitMQ 3.13.x in the 3.x major series. + +All patches (bug fixes and improvements alike) will only be available for the latest minor series in the latest major series. This applies to all changes contributed by the community. + +For example, if the latest supported series (minor) is 3.13.x, all core and community contributions will ship in a 3.13.x release until a newer minor or major comes out (say, 3.14.x or 4.0.x). + +The RabbitMQ team will not backport patches to older release series (such as 3.12.x) of open source RabbitMQ, including cases where a patch was contributed by the community. +Patch releases for older release series are exclusively available to users with VMware Tanzu RabbitMQ commercial licenses. diff --git a/LICENSE-APACHE2 b/LICENSE-APACHE2 index 62589edd12a3..012c24c00992 100644 --- a/LICENSE-APACHE2 +++ b/LICENSE-APACHE2 @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/MODULE.bazel b/MODULE.bazel index c2a2ea73bed3..ea992c06c105 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -1,158 +1,87 @@ module( name = "rabbitmq-server", - version = "3.13.0", + version = "4.0.0", ) bazel_dep( name = "rules_pkg", - version = "0.9.1", + version = "0.10.1", ) bazel_dep( name = "bazel_skylib", - version = "1.4.1", + version = "1.7.1", +) + +bazel_dep( + name = "aspect_bazel_lib", + version = "2.5.3", ) bazel_dep( name = "platforms", - version = "0.0.6", + version = "0.0.8", ) bazel_dep( name = "rules_cc", - version = "0.0.2", + version = "0.0.9", +) + +bazel_dep( + name = "rules_oci", + version = "1.7.4", +) + +bazel_dep( + name = "container_structure_test", + version = "1.16.0", ) bazel_dep( name = "gazelle", - version = "0.29.0", + version = "0.33.0", repo_name = "bazel_gazelle", ) bazel_dep( name = "rules_erlang", - version = "3.11.4", + version = "3.16.0", ) bazel_dep( - name = "com_github_rabbitmq_looking_glass", - version = "0.2.1", - dev_dependency = True, - repo_name = "looking_glass", + name = "rules_elixir", + version = "1.1.0", ) bazel_dep( name = "rabbitmq_osiris", - version = "1.6.4", + version = "1.8.2", repo_name = "osiris", ) -bazel_dep( - name = "rabbitmq_ra", - version = "2.6.3", - repo_name = "ra", -) - erlang_config = use_extension( "@rules_erlang//bzlmod:extensions.bzl", "erlang_config", ) -erlang_config.internal_erlang_from_github_release( - name = "24", - sha256 = "8444ff9abe23aea268adbb95463561fc222c965052d35d7c950b17be01c3ad82", - version = "24.3.4.6", -) - -erlang_config.internal_erlang_from_github_release( - name = "25_0", - sha256 = "8fc707f92a124b2aeb0f65dcf9ac8e27b2a305e7bcc4cc1b2fdf770eec0165bf", - version = "25.0.4", -) - -erlang_config.internal_erlang_from_github_release( - name = "25_1", - sha256 = "1cd2fbe225a412009cda9b1fd9f3fff0293e75e3020daa48abf68721471e91eb", - version = "25.1.2.1", -) - -erlang_config.internal_erlang_from_github_release( - name = "25_2", - sha256 = "f4d9f11d67ba478a053d72e635a44722a975603fe1284063fdf38276366bc61c", - version = "25.2.3", -) - -erlang_config.internal_erlang_from_github_release( - name = "25_3", - sha256 = "14f519bb63f9cc8d1db62ef7c58abc56fa94f8f76d918d23acad374f38434088", - version = "25.3.2.6", -) - -erlang_config.internal_erlang_from_github_release( - name = "26", - sha256 = "47853ea9230643a0a31004433f07a71c1b92d6e0094534f629e3b75dbc62f193", - version = "26.0.2", -) - -erlang_config.internal_erlang_from_http_archive( - name = "git_master", - strip_prefix = "otp-master", - url = "https://github.com/erlang/otp/archive/refs/heads/master.tar.gz", - version = "27", -) - use_repo( erlang_config, "erlang_config", ) elixir_config = use_extension( - "//bazel/bzlmod:extensions.bzl", + "@rules_elixir//bzlmod:extensions.bzl", "elixir_config", ) -elixir_config.internal_elixir_from_github_release( - name = "1_13", - sha256 = "95daf2dd3052e6ca7d4d849457eaaba09de52d65ca38d6933c65bc1cdf6b8579", - version = "1.13.4", -) - -elixir_config.internal_elixir_from_github_release( - name = "1_14", - sha256 = "2ea249566c67e57f8365ecdcd0efd9b6c375f57609b3ac2de326488ac37c8ebd", - version = "1.14.5", -) - -elixir_config.internal_elixir_from_github_release( - name = "1_15", - sha256 = "3cfadca57c3092ccbd3ec3f17e5eab529bbd2946f50e4941a903c55c39e3c5f5", - version = "1.15.2", -) - use_repo( elixir_config, "elixir_config", ) register_toolchains( - "@erlang_config//24:toolchain_major", - "@erlang_config//24:toolchain_major_minor", - "@erlang_config//25_0:toolchain_major", - "@erlang_config//25_0:toolchain_major_minor", 
- "@erlang_config//25_1:toolchain_major", - "@erlang_config//25_1:toolchain_major_minor", - "@erlang_config//25_2:toolchain_major", - "@erlang_config//25_2:toolchain_major_minor", - "@erlang_config//25_3:toolchain_major", - "@erlang_config//25_3:toolchain_major_minor", - "@erlang_config//26:toolchain_major", - "@erlang_config//26:toolchain_major_minor", - "@erlang_config//git_master:toolchain_major", - "@erlang_config//git_master:toolchain_major_minor", "@elixir_config//external:toolchain", - "@elixir_config//1_13:toolchain", - "@elixir_config//1_14:toolchain", - "@elixir_config//1_15:toolchain", ) erlang_package = use_extension( @@ -170,8 +99,8 @@ erlang_package.hex_package( erlang_package.hex_package( name = "aten", build_file = "@rabbitmq-server//bazel:BUILD.aten", - sha256 = "64d40a8cf0ddfea4e13af00b7327f0925147f83612d0627d9506cbffe90c13ef", - version = "0.5.8", + sha256 = "5f39a164206ae3f211ef5880b1f7819415686436e3229d30b6a058564fbaa168", + version = "0.6.0", ) erlang_package.hex_package( @@ -187,8 +116,8 @@ erlang_package.hex_package( patch_cmds = [ "rm ebin/cowboy.app", ], - sha256 = "3afdccb7183cc6f143cb14d3cf51fa00e53db9ec80cdcd525482f5e99bc41d6b", - version = "2.10.0", + sha256 = "8a7abe6d183372ceb21caa2709bec928ab2b72e18a3911aa1771639bef82651e", + version = "2.12.0", ) erlang_package.hex_package( @@ -197,8 +126,8 @@ erlang_package.hex_package( patch_cmds = [ "rm ebin/cowlib.app", ], - sha256 = "163b73f6367a7341b33c794c4e88e7dbfe6498ac42dcd69ef44c5bc5507c8db0", - version = "2.12.1", + sha256 = "e1e1284dc3fc030a64b1ad0d8382ae7e99da46c3246b815318a4b848873800a4", + version = "2.13.0", ) erlang_package.hex_package( @@ -211,15 +140,15 @@ erlang_package.hex_package( erlang_package.hex_package( name = "csv", build_file = "@rabbitmq-server//bazel:BUILD.csv", - sha256 = "cbbe5455c93df5f3f2943e995e28b7a8808361ba34cf3e44267d77a01eaf1609", - version = "3.0.5", + sha256 = "f5ee7299a55ff84fbe623d9aea7218b800d19ecccb2b3eac2bcb327d644365ea", + version = "3.2.0", ) erlang_package.hex_package( name = "cuttlefish", build_file = "@rabbitmq-server//bazel:BUILD.cuttlefish", - sha256 = "d3ef90bd2f5923477ab772fbda5cd5ad088438e4fd56801b455b87ada9f46fa3", - version = "3.1.0", + sha256 = "43cadd7f34b3dbbab52a7f4110d1df276a13cff5e11afe0f5a774f69f012b76b", + version = "3.4.0", ) erlang_package.hex_package( @@ -257,11 +186,18 @@ erlang_package.hex_package( version = "1.3.3", ) -erlang_package.git_package( +erlang_package.hex_package( + name = "horus", + build_file = "@rabbitmq-server//bazel:BUILD.horus", + sha256 = "4ebcb0ce86c8ee411d24b289c504b14431ee004d9f2c48e6f88d4128ded33a2e", + version = "0.2.6", +) + +erlang_package.hex_package( name = "jose", build_file = "@rabbitmq-server//bazel:BUILD.jose", - commit = "d63c1c5c8f9c1a4f1438e234b886de8607a0034e", - repository = "michaelklishin/erlang-jose", + sha256 = "0d6cd36ff8ba174db29148fc112b5842186b68a90ce9fc2b3ec3afe76593e614", + version = "1.11.10", ) erlang_package.hex_package( @@ -271,6 +207,20 @@ erlang_package.hex_package( version = "1.4.1", ) +erlang_package.hex_package( + name = "khepri", + build_file = "@rabbitmq-server//bazel:BUILD.khepri", + sha256 = "dccfaeb3583a04722e2258911f7f906ce67f8efac80504be4923aaafae6d4e21", + version = "0.14.0", +) + +erlang_package.hex_package( + name = "khepri_mnesia_migration", + build_file = "@rabbitmq-server//bazel:BUILD.khepri_mnesia_migration", + sha256 = "f56d277ca7876371615cef9c5674c78854f31cf9f26ce97fd3f4b5a65573ccc4", + version = "0.5.0", +) + erlang_package.hex_package( name = "thoas", build_file = 
"@rabbitmq-server//bazel:BUILD.thoas", @@ -281,15 +231,15 @@ erlang_package.hex_package( erlang_package.hex_package( name = "observer_cli", build_file = "@rabbitmq-server//bazel:BUILD.observer_cli", - sha256 = "a41b6d3e11a3444e063e09cc225f7f3e631ce14019e5fbcaebfda89b1bd788ea", - version = "1.7.3", + sha256 = "872cf8e833a3a71ebd05420692678ec8aaede8fd96c805a4687398f6b23a3014", + version = "1.7.5", ) erlang_package.hex_package( name = "prometheus", build_file = "@rabbitmq-server//bazel:BUILD.prometheus", - sha256 = "2a99bb6dce85e238c7236fde6b0064f9834dc420ddbd962aac4ea2a3c3d59384", - version = "4.10.0", + sha256 = "719862351aabf4df7079b05dc085d2bbcbe3ac0ac3009e956671b1d5ab88247d", + version = "4.11.0", ) erlang_package.hex_package( @@ -299,6 +249,21 @@ erlang_package.hex_package( version = "0.2.1", ) +erlang_package.hex_package( + name = "ra", + build_file = "@rabbitmq-server//bazel:BUILD.ra", + pkg = "ra", + sha256 = "0be7645dce4a76edd4c4642d0fa69639518c72b6b60a34fc86590d1909166aeb", + version = "2.13.6", +) + +erlang_package.git_package( + name = "seshat", + build_file = "@rabbitmq-server//bazel:BUILD.seshat", + repository = "rabbitmq/seshat", + tag = "v0.6.1", +) + erlang_package.hex_package( name = "ranch", build_file = "@rabbitmq-server//bazel:BUILD.ranch", @@ -323,13 +288,6 @@ erlang_package.hex_package( version = "2.0.7", ) -erlang_package.hex_package( - name = "seshat", - build_file = "@rabbitmq-server//bazel:BUILD.seshat", - sha256 = "2c3deec7ff86e0d0c05edebd3455c8363123c227be292ffffc1a05eec08bff63", - version = "0.4.0", -) - erlang_package.hex_package( name = "stdout_formatter", build_file = "@rabbitmq-server//bazel:BUILD.stdout_formatter", @@ -371,10 +329,14 @@ use_repo( "gen_batch_server", "getopt", "gun", + "horus", "jose", "json", + "khepri", + "khepri_mnesia_migration", "observer_cli", "prometheus", + "ra", "ranch", "recon", "redbug", @@ -396,8 +358,8 @@ erlang_dev_package.hex_package( build_file = "@rabbitmq-server//bazel:BUILD.amqp", patch_args = ["-p1"], patches = ["@rabbitmq-server//bazel:amqp.patch"], - sha256 = "b6d926770e4508e30e3e9e476c57b6c8aeda44f7715663bdc38935620ce5be6f", - version = "2.1.1", + sha256 = "8d3ae139d2646c630d674a1b8d68c7f85134f9e8b2a1c3dd5621616994b10a8b", + version = "3.3.0", ) erlang_dev_package.git_package( @@ -406,18 +368,11 @@ erlang_dev_package.git_package( repository = "ninenines/ct_helper", ) -erlang_dev_package.hex_package( - name = "dialyxir", - build_file = "@rabbitmq-server//bazel:BUILD.dialyxir", - sha256 = "6c32a70ed5d452c6650916555b1f96c79af5fc4bf286997f8b15f213de786f73", - version = "0.5.1", -) - erlang_dev_package.git_package( name = "emqtt", - branch = "master", + tag = "1.11.0", build_file = "@rabbitmq-server//bazel:BUILD.emqtt", - repository = "rabbitmq/emqtt", + repository = "emqx/emqtt", ) erlang_dev_package.git_package( @@ -449,15 +404,14 @@ erlang_dev_package.hex_package( erlang_dev_package.hex_package( name = "x509", build_file = "@rabbitmq-server//bazel:BUILD.x509", - sha256 = "3604125d6a0171da6e8a935810b58c999fccab0e3d20b2ed28d97fa2d9e2f6b4", - version = "0.8.7", + sha256 = "ccc3bff61406e5bb6a63f06d549f3dba3a1bbb456d84517efaaa210d8a33750f", + version = "0.8.8", ) use_repo( erlang_dev_package, "amqp", "ct_helper", - "dialyxir", "emqtt", "inet_tcp_proxy_dist", "meck", @@ -466,21 +420,6 @@ use_repo( "x509", ) -rbe = use_extension( - "//bazel/bzlmod:extensions.bzl", - "rbe", -) - -rbe.git_repository( - branch = "linux-rbe", - remote = "https://github.com/rabbitmq/rbe-erlang-platform.git", -) - -use_repo( - rbe, - "rbe", -) - 
secondary_umbrella = use_extension( "//bazel/bzlmod:extensions.bzl", "secondary_umbrella", @@ -489,7 +428,7 @@ secondary_umbrella = use_extension( use_repo( secondary_umbrella, - "rabbitmq-server-generic-unix-3.11", + "rabbitmq-server-generic-unix-3.13", ) hex = use_extension( diff --git a/Makefile b/Makefile index 3b1af65e8a67..ffa5da854e24 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ PROJECT_DESCRIPTION = RabbitMQ Server # other components. If PROJECT_VERSION is unset, then an empty variable # is propagated and the default version will fallback to the default # value from rabbitmq-components.mk. -export RABBITMQ_VERSION = $(PROJECT_VERSION) +export RABBITMQ_VERSION := $(PROJECT_VERSION) # Release artifacts are put in $(PACKAGES_DIR). PACKAGES_DIR ?= $(abspath PACKAGES) @@ -14,11 +14,11 @@ PACKAGES_DIR ?= $(abspath PACKAGES) include plugins.mk # An additional list of plugins to include in a RabbitMQ release, -# on top of the standard plugins. For example, looking_glass. +# on top of the standard plugins. # # Note: When including NIFs in a release make sure to build # them on the appropriate platform for the target environment. -# For example build looking_glass on Linux when targeting Docker. +# For example build on Linux when targeting Docker. ADDITIONAL_PLUGINS ?= DEPS = rabbit_common rabbit $(PLUGINS) $(ADDITIONAL_PLUGINS) @@ -29,11 +29,12 @@ DEP_PLUGINS = rabbit_common/mk/rabbitmq-dist.mk \ DISABLE_DISTCLEAN = 1 +ifeq ($(filter-out xref,$(MAKECMDGOALS)),) XREF_SCOPE = app deps # We add all the applications that are in non-standard paths # so they are included in the analyses as well. -XREF_EXTRA_APP_DIRS = $(filter-out deps/rabbitmq_cli/_build/dev/lib/rabbit_common/,$(wildcard deps/rabbitmq_cli/_build/dev/lib/*/)) deps/rabbit/apps/rabbitmq_prelaunch/ +XREF_EXTRA_APP_DIRS = $(filter-out deps/rabbitmq_cli/_build/dev/lib/rabbit_common/,$(wildcard deps/rabbitmq_cli/_build/dev/lib/*/)) deps/rabbitmq_prelaunch/ # For Elixir protocols the right fix is to include the consolidated/ # folders in addition to ebin/. However this creates conflicts because @@ -46,17 +47,30 @@ XREF_IGNORE = [ \ {'Elixir.RabbitMQ.CLI.Core.DataCoercion',impl_for,1}] # Include Elixir libraries in the Xref checks. -xref: ERL_LIBS := $(ERL_LIBS):$(CURDIR)/apps:$(CURDIR)/deps:$(dir $(shell elixir --eval ":io.format '~s~n', [:code.lib_dir :elixir ]")) +xref: ERL_LIBS := $(ERL_LIBS):$(CURDIR)/apps:$(CURDIR)/deps:$(dir $(shell elixir --eval ':io.format "~s~n", [:code.lib_dir :elixir ]')) +endif ifneq ($(wildcard deps/.hex/cache.erl),) deps:: restore-hex-cache-ets-file endif include rabbitmq-components.mk + +# Set PROJECT_VERSION, calculated in rabbitmq-components.mk, +# in stone now, because in this Makefile we will be using it +# multiple times (including for release file names and whatnot). +PROJECT_VERSION := $(PROJECT_VERSION) + include erlang.mk include mk/github-actions.mk include mk/bazel.mk -include mk/topic-branches.mk + +# If PLUGINS was set when we use run-broker we want to +# fill in the enabled plugins list. PLUGINS is a more +# natural space-separated list. +ifdef PLUGINS +RABBITMQ_ENABLED_PLUGINS ?= $(call comma_list,$(PLUGINS)) +endif # -------------------------------------------------------------------- # Mix Hex cache management. 
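The `PLUGINS` handling added above lets a space-separated plugin list be passed straight to the development broker; the Makefile turns it into the comma-separated `RABBITMQ_ENABLED_PLUGINS` value. A minimal usage sketch, assuming `make run-broker` is invoked from the repository root with Erlang and Elixir available:

```
# Start a development broker with an explicit set of plugins enabled.
# PLUGINS is space separated; the Makefile converts it into the
# comma-separated RABBITMQ_ENABLED_PLUGINS list used by the broker.
make run-broker PLUGINS="rabbitmq_management rabbitmq_prometheus"
```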
@@ -137,7 +151,6 @@ RSYNC_FLAGS += -a $(RSYNC_V) \ --exclude '*.pyc' \ --exclude '.git*' \ --exclude '.hg*' \ - --exclude '.travis.yml*' \ --exclude '.*.plt' \ --exclude '*.bzl' \ --exclude '*.bazel' \ @@ -168,7 +181,6 @@ RSYNC_FLAGS += -a $(RSYNC_V) \ --include 'cli/plugins' \ --exclude '$(notdir $(DIST_DIR))/' \ --exclude 'test' \ - --exclude 'xrefr' \ --exclude '/$(notdir $(PACKAGES_DIR))/' \ --exclude '/PACKAGES/' \ --exclude '/amqp_client/doc/' \ @@ -176,8 +188,6 @@ RSYNC_FLAGS += -a $(RSYNC_V) \ --exclude '/cowboy/doc/' \ --exclude '/cowboy/examples/' \ --exclude '/rabbit/escript/' \ - --exclude '/rabbitmq_amqp1_0/test/swiftmq/build/'\ - --exclude '/rabbitmq_amqp1_0/test/swiftmq/swiftmq*'\ --exclude '/rabbitmq_cli/escript/' \ --exclude '/rabbitmq_mqtt/test/build/' \ --exclude '/rabbitmq_mqtt/test/test_client/'\ @@ -559,3 +569,79 @@ install-windows-docs: install-windows-erlapp *) mv "$$file" "$$file.txt" ;; \ esac; \ done + +INTERNAL_DEPS := \ + amqp10_client \ + amqp10_common \ + amqp_client \ + oauth2_client \ + rabbit_common \ + rabbitmq_ct_client_helpers \ + rabbitmq_ct_helpers \ + rabbitmq_stream_common \ + trust_store_http + +TIER1_PLUGINS := \ + rabbitmq_amqp_client \ + rabbitmq_amqp1_0 \ + rabbitmq_auth_backend_cache \ + rabbitmq_auth_backend_http \ + rabbitmq_auth_backend_ldap \ + rabbitmq_auth_backend_oauth2 \ + rabbitmq_auth_mechanism_ssl \ + rabbitmq_aws \ + rabbitmq_consistent_hash_exchange \ + rabbitmq_event_exchange \ + rabbitmq_federation \ + rabbitmq_federation_management \ + rabbitmq_federation_prometheus \ + rabbitmq_jms_topic_exchange \ + rabbitmq_management \ + rabbitmq_management_agent \ + rabbitmq_mqtt \ + rabbitmq_peer_discovery_aws \ + rabbitmq_peer_discovery_common \ + rabbitmq_peer_discovery_consul \ + rabbitmq_peer_discovery_etcd \ + rabbitmq_peer_discovery_k8s \ + rabbitmq_prelaunch \ + rabbitmq_prometheus \ + rabbitmq_random_exchange \ + rabbitmq_recent_history_exchange \ + rabbitmq_sharding \ + rabbitmq_shovel \ + rabbitmq_shovel_management \ + rabbitmq_shovel_prometheus \ + rabbitmq_stomp \ + rabbitmq_stream \ + rabbitmq_stream_management \ + rabbitmq_top \ + rabbitmq_tracing \ + rabbitmq_trust_store \ + rabbitmq_web_dispatch \ + rabbitmq_web_mqtt \ + rabbitmq_web_mqtt_examples \ + rabbitmq_web_stomp \ + rabbitmq_web_stomp_examples + +YTT ?= ytt + +actions-workflows: .github/workflows/test.yaml .github/workflows/test-mixed-versions.yaml + +.PHONY: .github/workflows/test.yaml .github/workflows/test-mixed-versions.yaml + +.github/workflows/test.yaml: .github/workflows/templates/test.template.yaml + $(gen_verbose) $(YTT) \ + --file $< \ + --data-value-yaml internal_deps=[$(subst $(space),$(comma),$(foreach s,$(INTERNAL_DEPS),"$s"))] \ + --data-value-yaml tier1_plugins=[$(subst $(space),$(comma),$(foreach s,$(TIER1_PLUGINS),"$s"))] \ + | sed 's/^true:/on:/' \ + | sed 's/pull_request: null/pull_request:/'> $@ + +.github/workflows/test-mixed-versions.yaml: .github/workflows/templates/test-mixed-versions.template.yaml + $(gen_verbose) $(YTT) \ + --file $< \ + --data-value-yaml internal_deps=[$(subst $(space),$(comma),$(foreach s,$(INTERNAL_DEPS),"$s"))] \ + --data-value-yaml tier1_plugins=[$(subst $(space),$(comma),$(foreach s,$(TIER1_PLUGINS),"$s"))] \ + | sed 's/^true:/on:/' \ + | sed 's/pull_request: null/pull_request:/'> $@ diff --git a/README.md b/README.md index a916a75450fb..884e5175bfd1 100644 --- a/README.md +++ b/README.md @@ -2,81 +2,93 @@ # RabbitMQ Server -[RabbitMQ](https://rabbitmq.com) is a [feature 
rich](https://rabbitmq.com/documentation.html), +[RabbitMQ](https://rabbitmq.com) is a [feature rich](https://www.rabbitmq.com/docs), multi-protocol messaging and streaming broker. It supports: * AMQP 0-9-1 * AMQP 1.0 - * [RabbitMQ Stream Protocol](https://rabbitmq.com/streams.html) - * MQTT 3.1.1 + * [RabbitMQ Stream Protocol](https://www.rabbitmq.com/docs/streams) + * MQTT 3.1, 3.1.1, and 5.0 * STOMP 1.0 through 1.2 + * [MQTT over WebSockets](https://www.rabbitmq.com/docs/web-mqtt) + * [STOMP over WebSockets](https://www.rabbitmq.com/docs/web-stomp) ## Installation - * [Installation guides](https://rabbitmq.com/download.html) for various platforms - * [Kubernetes Cluster Operator](https://rabbitmq.com/kubernetes/operator/operator-overview.html) - * [Changelog](https://www.rabbitmq.com/changelog.html) + * [Currently supported](https://www.rabbitmq.com/release-information) released series + * [Installation guides](https://www.rabbitmq.com/docs/download) for various platforms + * [Kubernetes Cluster Operator](https://www.rabbitmq.com/kubernetes/operator/operator-overview) + * [Changelog](https://www.rabbitmq.com/release-information) * [Releases](https://github.com/rabbitmq/rabbitmq-server/releases) on GitHub - * [Currently supported released series](https://www.rabbitmq.com/versions.html) - * [Supported Erlang versions](https://www.rabbitmq.com/which-erlang.html) + * [Community Support Eligibility Policy](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) + * [Supported Erlang versions](https://www.rabbitmq.com/docs/which-erlang) ## Tutorials and Documentation - * [RabbitMQ tutorials](https://rabbitmq.com/getstarted.html) - * [All documentation guides](https://rabbitmq.com/documentation.html) + * [RabbitMQ tutorials](https://www.rabbitmq.com/tutorials) and their [executable versions on GitHub](https://github.com/rabbitmq/rabbitmq-tutorials) + * [Documentation guides](https://rabbitmq.com/docs/) * [RabbitMQ blog](https://blog.rabbitmq.com/) Some key doc guides include - * [CLI tools guide](https://rabbitmq.com/cli.html) - * [Clustering](https://www.rabbitmq.com/clustering.html) and [Cluster Formation](https://www.rabbitmq.com/cluster-formation.html) guides - * [Configuration guide](https://rabbitmq.com/configure.html) - * [Client libraries and tools](https://rabbitmq.com/devtools.html) - * [Monitoring](https://rabbitmq.com/monitoring.html) and [Prometheus/Grafana](https://www.rabbitmq.com/prometheus.html) guides - * [Kubernetes Cluster Operator](https://rabbitmq.com/kubernetes/operator/operator-overview.html) - * [Production checklist](https://rabbitmq.com/production-checklist.html) - * [Quorum queues](https://rabbitmq.com/quorum-queues.html): a replicated, data safety- and consistency-oriented queue type - * [Streams](https://rabbitmq.com/streams.html): a persistent and replicated append-only log with non-destructive consumer semantics + * [CLI tools guide](https://www.rabbitmq.com/docs/cli) + * [Clustering](https://www.rabbitmq.com/docs/clustering) and [Cluster Formation](https://www.rabbitmq.com/docs/cluster-formation) + * [Configuration guide](https://www.rabbitmq.com/docs/configure) + * [Client libraries and tools](https://www.rabbitmq.com/client-libraries/devtools) + * [Monitoring](https://www.rabbitmq.com/docs/monitoring) and [Prometheus/Grafana](https://www.rabbitmq.com/docs/prometheus) + * [Upgrading](https://www.rabbitmq.com/docs/upgrade) + * [Kubernetes Cluster Operator](https://www.rabbitmq.com/kubernetes/operator/operator-overview) + * [Production 
checklist](https://www.rabbitmq.com/docs/production-checklist) + * [Quorum queues](https://www.rabbitmq.com/docs/quorum-queues): a replicated, data safety- and consistency-oriented queue type + * [Streams](https://www.rabbitmq.com/docs/streams): a persistent and replicated append-only log with non-destructive consumer semantics + * [Runtime Parameters and Policies](https://www.rabbitmq.com/docs/parameters) * [Runnable tutorials](https://github.com/rabbitmq/rabbitmq-tutorials/) RabbitMQ documentation is also [developed on GitHub](https://github.com/rabbitmq/rabbitmq-website/). ## Commercial Features and Support - * [Commercial edition of RabbitMQ](https://www.vmware.com/products/rabbitmq.html) - * [Commercial edition for Kubernetes](https://rabbitmq.com/kubernetes/tanzu/installation.html) - * [Commercial support](https://rabbitmq.com/services.html) from [VMware](https://vmware.com) for open source RabbitMQ + * [Commercial editions of RabbitMQ](https://tanzu.vmware.com/rabbitmq) + * [Commercial edition for Kubernetes](https://docs.vmware.com/en/VMware-RabbitMQ-for-Kubernetes/1/rmq/installation.html) + * [Commercial support](https://tanzu.vmware.com/rabbitmq/oss) from [Broadcom](https://vmware.com) for open source RabbitMQ ## Getting Help from the Community - * [Community Discord server](https://rabbitmq.com/discord/) - * [Community Slack](https://rabbitmq.com/slack/) +Please read the [Community Support Eligibility Policy](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) document +first. + +The recommended community forums are + * [GitHub Discussions](https://github.com/rabbitmq/rabbitmq-server/discussions/) - * [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users) + * [Community Discord server](https://rabbitmq.com/discord/) * `#rabbitmq` on [Libera Chat](https://libera.chat/) ## Contributing -See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://rabbitmq.com/github.html). +See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://www.rabbitmq.com/github). -Questions about contributing, internals and so on are very welcome on the [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users). +Questions about contributing, internals and so on are very welcome in [GitHub Discussions](https://github.com/rabbitmq/rabbitmq-server/discussions) +or [community Discord server](https://www.rabbitmq.com/discord/) in the `core-and-plugin-dev` channel. ## Licensing RabbitMQ server is [licensed under the MPL 2.0](LICENSE-MPL-RabbitMQ). +[Community Support Eligibility Policy](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) +document explains the open source RabbitMQ support policy adopted by the RabbitMQ Core Team. + ## Building From Source and Packaging * [Contributor resources](https://github.com/rabbitmq/contribute) - * [Building RabbitMQ from Source](https://rabbitmq.com/build-server.html) - * [Building RabbitMQ Distribution Packages](https://rabbitmq.com/build-server.html) + * [Building RabbitMQ from Source](https://www.rabbitmq.com/docs/build-server) + * [Building RabbitMQ Distribution Packages](https://www.rabbitmq.com/docs/build-server) ## Copyright -(c) 2007-2023 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
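For readers following the build-from-source links above, a minimal sketch of the usual development loop, assuming a supported Erlang/OTP and Elixir are already installed and on `PATH`:

```
# Clone the server repository and start a local development broker.
git clone https://github.com/rabbitmq/rabbitmq-server.git
cd rabbitmq-server
make run-broker
```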
diff --git a/SERVER_RELEASES.md b/SERVER_RELEASES.md index 41cfec6285ae..b01cb7dbf049 100644 --- a/SERVER_RELEASES.md +++ b/SERVER_RELEASES.md @@ -17,13 +17,13 @@ server releases. It is organized in the following way: * To create a source archive and all supported packages, with a given version: ``` - make packages PROJECT_VERSION=3.8.1-rc.1 + make packages PROJECT_VERSION=3.13.0-rc.3 ``` * To create all suported packages from an existing source archive: ``` - make -C packaging SOURCE_DIST_FILE=/path/to/rabbitmq-server-3.8.1-rc.1.tar.xz + make -C packaging SOURCE_DIST_FILE=/path/to/rabbitmq-server-3.13.0-rc.3.tar.xz ``` The standalone package is different because it embeds the build @@ -35,7 +35,7 @@ build host: ``` make package-standalone-macosx # or -make -C packaging package-standalone-macosx SOURCE_DIST_FILE=/path/to/rabbitmq-server-3.8.1-rc.1.tar.xz +make -C packaging package-standalone-macosx SOURCE_DIST_FILE=/path/to/rabbitmq-server-3.13.0-rc.3.tar.xz ``` The instructions in the [`PKG_LINUX.md`](PKG_LINUX.md) document include a @@ -57,7 +57,7 @@ based on the last tag and the current HEAD. Here is an example with an explicit version: ``` -make source-dist PROJECT_VERSION=3.8.1-rc.1 +make source-dist PROJECT_VERSION=3.13.0-rc.3 ``` The version is automatically propagated to the broker and plugins so @@ -66,7 +66,7 @@ they all advertise the same version. The result is then available in the `PACKAGES` subdirectory. You can override the output directory with the `PACKAGES_DIR` variable: ``` -make source-dist PROJDCT_VERSION=3.8.1-rc.1 \ +make source-dist PROJDCT_VERSION=3.13.0-rc.3 \ PACKAGES_DIR=/tmp ``` @@ -77,7 +77,7 @@ By default, two archives are produced: You can ask for more/different types by specifying the `SOURCE_DIST_SUFFIXES` variable: ``` -make source-dist PROJECT_VERSION=3.8.1-rc.1 \ +make source-dist PROJECT_VERSION=3.13.0-rc.3 \ SOURCE_DIST_SUFFIXES='tar.xz tar.gz' ``` @@ -95,7 +95,7 @@ list of plugins is in the `plugins.mk` file. You can override this list by setting the `PLUGINS` variable to the list you want: ``` -make source-dist PROJECT_VERSION=3.8.1-rc.1 \ +make source-dist PROJECT_VERSION=3.13.0-rc.3 \ PLUGINS='rabbitmq_shovel rabbitmq_rabbitmq_shovel_management' ``` @@ -122,7 +122,7 @@ This has the following rules: If you want the source archive to be created automatically, use the top-level `Makefile`: ``` -make package-$type PROJECT_VERSION=3.8.1-rc.1 ... +make package-$type PROJECT_VERSION=3.13.0-rc.3 ... ``` Packages are written to `PACKAGES_DIR`, like the source archive. @@ -146,7 +146,7 @@ with the `VERSION` variable: ``` make -C packaging package-generic-unix \ SOURCE_DIST_FILE=rabbitmq-server.tar.xz \ - VERSION=3.8.1-rc.1 + VERSION=3.13.0-rc.3 ``` ### Debian package @@ -166,18 +166,18 @@ with the `VERSION` variable: ``` make -C packaging package-deb \ SOURCE_DIST_FILE=rabbitmq-server.tar.xz \ - VERSION=3.8.1-rc.1 + VERSION=3.13.0-rc.3 ``` By default, the package version is converted from `VERSION` with -all `-` characters replaced by `~` (eg. `3.8.1~rc.1` in the example +all `-` characters replaced by `~` (eg. `3.13.0~rc.1` in the example above). 
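To make the default conversion concrete, a small illustrative sketch of deriving the Debian version string from `VERSION` with a shell substitution (this is not part of the build system, just the `-` to `~` rule described above applied by hand):

```
# Replace every "-" with "~" to get the default Debian package version.
VERSION=3.13.0-rc.3
DEBIAN_VERSION=${VERSION//-/~}
echo "$DEBIAN_VERSION"   # prints 3.13.0~rc.3
```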
If you want to override that conversion, you can specify the `DEBIAN_VERSION` variable: ``` make -C packaging package-deb \ SOURCE_DIST_FILE=rabbitmq-server.tar.xz \ - VERSION=3.8.1-rc.1 - DEBIAN_VERSION=3.8.1~rc.1 + VERSION=3.13.0-rc.3 + DEBIAN_VERSION=3.13.0~rc.1 ``` ### RPM package @@ -242,45 +242,17 @@ make -C packaging/windows-exe ZIP=/path/to/rabbitmq-server-windows.zip By default, the *product version* is the project version where everything following the third integer was replaced by `.0`. Thus it's -only fine if the version is a semver-based version (eg. 3.8.1-pre.3 or +only fine if the version is a semver-based version (eg. 3.13.0-pre.3 or 3.8.2). If the version doesn't conform to that, you need to set the `PRODUCT_VERSION` variable: ``` -make package-windows PROJECT_VERSION=3.8.1-rc.1 PRODUCT_VERSION=3.8.1.0 +make package-windows PROJECT_VERSION=3.13.0-rc.3 PRODUCT_VERSION=3.13.0.0 ``` To build the Windows package using a Windows machine, follow the instructions in [`PKG_WINDOWS.md`](PKG_WINDOWS.md). -### Standalone package - -This is the equivalent of the `generic-unix` package with Erlang -embbeded. - -To create it: -``` -make -C packaging/standalone SOURCE_DIST_FILE=... VERSION=... -``` - -There is no package revision, only the project version and no -restriction on it. - -Unlike other packages, the top-level `Makefile` and `packaging/Makefile` -provide targets to build the standalone package for specific platforms: -``` -make package-standalone-macosx -make package-standalone-linux-x86_64 -make package-standalone-freebsd-x86_64 -``` - -Cross-build isn't supported so using those targets on incompatible -platforms is a no-op. - -If you want to build a standalone package for your platform, you can use -`packaging/standalone/Makefile` as described at the beginning of this -section. - ### Building all packages in one go If you want to build all packages in one command, you can use the @@ -297,8 +269,8 @@ However, be careful with the versioning! 
Because all package have incompatible requirements, you can only use a version with 3 integers (like a final semver-based version): ``` -make packages PROJECT_VERSION=3.8.1 -make -C packaging packages SOURCE_DIST_FILE=rabbitmq-server-3.8.1.tar.xz +make packages PROJECT_VERSION=3.13.0 +make -C packaging packages SOURCE_DIST_FILE=rabbitmq-server-3.13.0.tar.xz ``` If you do not follow that rule, the build will fail one way or another; @@ -309,5 +281,5 @@ Another possibility is to specify the Windows *product version* and rely on automatic conversion for Debian and RPM packages (or use the `DEBIAN_VERSION` and `RPM_VERSION` variables), but this is untested: ``` -make packages PROJECT_VERSION=3.8.1-rc.1 PRODUCT_VERSION=3.8.1.0 +make packages PROJECT_VERSION=3.13.0-rc.3 PRODUCT_VERSION=3.13.0.0 ``` diff --git a/WORKSPACE b/WORKSPACE index 338b044d6a14..3bbed84e3656 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,14 +1,14 @@ workspace(name = "rabbitmq-server") -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file") load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository", "new_git_repository") +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file") http_archive( name = "rules_pkg", - sha256 = "8f9ee2dc10c1ae514ee599a8b42ed99fa262b757058f65ad3c384289ff70c4b8", + sha256 = "d250924a2ecc5176808fc4c25d5cf5e9e79e6346d79d5ab1c493e289e722d1d0", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.9.1/rules_pkg-0.9.1.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.10.1/rules_pkg-0.10.1.tar.gz", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.10.1/rules_pkg-0.10.1.tar.gz", ], ) @@ -19,7 +19,7 @@ rules_pkg_dependencies() git_repository( name = "rules_erlang", remote = "https://github.com/rabbitmq/rules_erlang.git", - tag = "3.11.4", + tag = "3.15.1", ) load("@rules_erlang//:internal_deps.bzl", "rules_erlang_internal_deps") @@ -34,84 +34,6 @@ load("@rules_erlang//gazelle:deps.bzl", "gazelle_deps") gazelle_deps() -http_archive( - name = "io_bazel_rules_docker", - sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf", - urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"], -) - -load( - "@io_bazel_rules_docker//repositories:repositories.bzl", - container_repositories = "repositories", -) - -container_repositories() - -load("@io_bazel_rules_docker//repositories:deps.bzl", container_deps = "deps") - -container_deps() - -load( - "@io_bazel_rules_docker//container:container.bzl", - "container_pull", -) - -container_pull( - name = "ubuntu2004", - registry = "index.docker.io", - repository = "pivotalrabbitmq/ubuntu", - tag = "20.04", -) - -http_file( - name = "openssl-3.1.1", - downloaded_file_path = "openssl-3.1.1.tar.gz", - sha256 = "b3aa61334233b852b63ddb048df181177c2c659eb9d4376008118f9c08d07674", - urls = ["https://github.com/openssl/openssl/releases/download/openssl-3.1.1/openssl-3.1.1.tar.gz"], -) - -http_file( - name = "otp_src_24", - downloaded_file_path = "OTP-24.3.4.6.tar.gz", - sha256 = "dc3d2c54eeb093e0dc9a0fe493bc69d6dfac0affbe77c9e3c935aa86c0f63cd5", - urls = ["https://github.com/erlang/otp/archive/OTP-24.3.4.6.tar.gz"], -) - -http_file( - name = "otp_src_25_0", - downloaded_file_path = "OTP-25.0.4.tar.gz", - sha256 = 
"05878cb51a64b33c86836b12a21903075c300409b609ad5e941ddb0feb8c2120", - urls = ["https://github.com/erlang/otp/archive/OTP-25.0.4.tar.gz"], -) - -http_file( - name = "otp_src_25_1", - downloaded_file_path = "OTP-25.1.2.1.tar.gz", - sha256 = "79f8e31bb9ff7d43a920f207ef104d1106b2332fdbadf11241d714eacb6d8d1a", - urls = ["https://github.com/erlang/otp/archive/OTP-25.1.2.1.tar.gz"], -) - -http_file( - name = "otp_src_25_2", - downloaded_file_path = "OTP-25.2.3.tar.gz", - sha256 = "637bc5cf68dd229fd3c3fe889a6f84dd32c4a827488550a0a98123b00c2d78b5", - urls = ["https://github.com/erlang/otp/archive/OTP-25.2.3.tar.gz"], -) - -http_file( - name = "otp_src_25_3", - downloaded_file_path = "OTP-25.3.2.6.tar.gz", - sha256 = "67e0f5c209a335cfc216a57b1f016072a69eb9683d36d6d101bf2f60a2e45926", - urls = ["https://github.com/erlang/otp/archive/OTP-25.3.2.6.tar.gz"], -) - -http_file( - name = "otp_src_26", - downloaded_file_path = "OTP-26.0.2.tar.gz", - sha256 = "4def5ed5e49815fb02fceae8a66e94abc1049f5de30f97d9ad12fdf3293a2470", - urls = ["https://github.com/erlang/otp/archive/OTP-26.0.2.tar.gz"], -) - new_git_repository( name = "bats", build_file = "@//:BUILD.bats", @@ -126,9 +48,3 @@ activemq_archive() load("//bazel/bzlmod:secondary_umbrella.bzl", "secondary_umbrella") secondary_umbrella() - -git_repository( - name = "rbe", - branch = "linux-rbe", - remote = "https://github.com/rabbitmq/rbe-erlang-platform.git", -) diff --git a/bazel/BUILD.amqp b/bazel/BUILD.amqp index ad2e97bb0bd2..db8b68607714 100644 --- a/bazel/BUILD.amqp +++ b/bazel/BUILD.amqp @@ -1,3 +1,5 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlc_opts") + filegroup( name = "sources", srcs = [ @@ -8,3 +10,17 @@ filegroup( ]), visibility = ["//visibility:public"], ) + +erlc_opts( + name = "erlc_opts", + values = select({ + "@rules_erlang//:debug_build": [ + "+debug_info", + ], + "//conditions:default": [ + "+debug_info", + "+deterministic", + ], + }), + visibility = [":__subpackages__"], +) diff --git a/bazel/BUILD.cowboy b/bazel/BUILD.cowboy index 247f07fd425f..bd5ec4fb0c85 100644 --- a/bazel/BUILD.cowboy +++ b/bazel/BUILD.cowboy @@ -49,6 +49,7 @@ erlang_bytecode( "src/cowboy_clock.erl", "src/cowboy_compress_h.erl", "src/cowboy_constraints.erl", + "src/cowboy_decompress_h.erl", "src/cowboy_handler.erl", "src/cowboy_http.erl", "src/cowboy_http2.erl", @@ -94,6 +95,7 @@ filegroup( "src/cowboy_clock.erl", "src/cowboy_compress_h.erl", "src/cowboy_constraints.erl", + "src/cowboy_decompress_h.erl", "src/cowboy_handler.erl", "src/cowboy_http.erl", "src/cowboy_http2.erl", @@ -145,10 +147,10 @@ erlang_app( name = "erlang_app", srcs = [":all_srcs"], hdrs = [":public_hdrs"], - app_name = "cowboy", app_description = "Small, fast, modern HTTP server.", - app_version = "2.10.0", + app_name = "cowboy", app_registered = ["cowboy_clock"], + app_version = "2.12.0", beam_files = [":beam_files"], extra_apps = ["crypto"], license_files = [":license_files"], diff --git a/bazel/BUILD.cowlib b/bazel/BUILD.cowlib index f56aea23955b..130cb5b98bc0 100644 --- a/bazel/BUILD.cowlib +++ b/bazel/BUILD.cowlib @@ -39,35 +39,9 @@ erlang_bytecode( "src/cow_uri_template.erl", "src/cow_ws.erl", ], - outs = [ - "ebin/cow_base64url.beam", - "ebin/cow_cookie.beam", - "ebin/cow_date.beam", - "ebin/cow_hpack.beam", - "ebin/cow_http.beam", - "ebin/cow_http2.beam", - "ebin/cow_http2_machine.beam", - "ebin/cow_http_hd.beam", - "ebin/cow_http_struct_hd.beam", - "ebin/cow_http_te.beam", - "ebin/cow_iolists.beam", - "ebin/cow_link.beam", - "ebin/cow_mimetypes.beam", - 
"ebin/cow_multipart.beam", - "ebin/cow_qs.beam", - "ebin/cow_spdy.beam", - "ebin/cow_sse.beam", - "ebin/cow_uri.beam", - "ebin/cow_uri_template.beam", - "ebin/cow_ws.beam", - ], - hdrs = [ - "include/cow_inline.hrl", - "include/cow_parse.hrl", - "src/cow_hpack_dec_huffman_lookup.hrl", - "src/cow_spdy.hrl", - ], + hdrs = [":public_and_private_hdrs"], app_name = "cowlib", + dest = "ebin", erlc_opts = "//:erlc_opts", ) @@ -147,11 +121,13 @@ erlang_app( name = "erlang_app", srcs = [":all_srcs"], hdrs = [":public_hdrs"], - app_name = "cowlib", app_description = "Support library for manipulating Web protocols.", - app_version = "2.12.1", + app_name = "cowlib", + app_version = "2.13.0", beam_files = [":beam_files"], extra_apps = ["crypto"], + license_files = [":license_files"], + priv = [":priv"], ) alias( @@ -159,3 +135,10 @@ alias( actual = ":erlang_app", visibility = ["//visibility:public"], ) + +filegroup( + name = "license_files", + srcs = [ + "LICENSE", + ], +) diff --git a/bazel/BUILD.dialyxir b/bazel/BUILD.dialyxir deleted file mode 100644 index ad2e97bb0bd2..000000000000 --- a/bazel/BUILD.dialyxir +++ /dev/null @@ -1,10 +0,0 @@ -filegroup( - name = "sources", - srcs = [ - "mix.exs", - ] + glob([ - "LICENSE*", - "lib/**/*", - ]), - visibility = ["//visibility:public"], -) diff --git a/bazel/BUILD.horus b/bazel/BUILD.horus new file mode 100644 index 000000000000..e3d75efe9921 --- /dev/null +++ b/bazel/BUILD.horus @@ -0,0 +1,114 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlang_bytecode", "erlc_opts") +load("@rules_erlang//:erlang_app.bzl", "erlang_app") + +erlc_opts( + name = "erlc_opts", + values = select({ + "@rules_erlang//:debug_build": [ + "+debug_info", + "+warn_export_vars", + "+warnings_as_errors", + ], + "//conditions:default": [ + "+debug_info", + "+deterministic", + "+warn_export_vars", + "+warnings_as_errors", + ], + }), + visibility = [":__subpackages__"], +) + +erlang_bytecode( + name = "other_beam", + srcs = [ + "src/horus.erl", + "src/horus_cover.erl", + "src/horus_utils.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "horus", + dest = "ebin", + erlc_opts = "//:erlc_opts", +) + +filegroup( + name = "beam_files", + srcs = [":other_beam"], +) + +filegroup( + name = "srcs", + srcs = [ + "src/horus.app.src", + "src/horus.erl", + "src/horus_cover.erl", + "src/horus_utils.erl", + ], +) + +filegroup( + name = "private_hdrs", + srcs = [ + "src/horus_error.hrl", + "src/horus_fun.hrl", + ], +) + +filegroup( + name = "public_hdrs", + srcs = [ + "include/horus.hrl", + ], +) + +filegroup( + name = "priv", + srcs = [ + "priv/horus_cover_helper.erl", + ], +) + +filegroup( + name = "license_files", + srcs = [ + "LICENSE-Apache-2.0", + "LICENSE-MPL-2.0", + ], +) + +filegroup( + name = "public_and_private_hdrs", + srcs = [ + ":private_hdrs", + ":public_hdrs", + ], +) + +filegroup( + name = "all_srcs", + srcs = [ + ":public_and_private_hdrs", + ":srcs", + ], +) + +erlang_app( + name = "erlang_app", + srcs = [":all_srcs"], + hdrs = [":public_hdrs"], + app_name = "horus", + beam_files = [":beam_files"], + extra_apps = [ + "compiler", + "tools", + ], + license_files = [":license_files"], + priv = [":priv"], +) + +alias( + name = "horus", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) diff --git a/bazel/BUILD.jose b/bazel/BUILD.jose index 8d12eecb3fb2..50bca8223f68 100644 --- a/bazel/BUILD.jose +++ b/bazel/BUILD.jose @@ -6,12 +6,10 @@ erlc_opts( values = select({ "@rules_erlang//:debug_build": [ "+debug_info", - "+warnings_as_errors", ], 
"//conditions:default": [ "+debug_info", "+deterministic", - "+warnings_as_errors", ], }), visibility = [":__subpackages__"], @@ -39,12 +37,12 @@ erlang_bytecode( name = "behaviours", srcs = [ "src/jose_block_encryptor.erl", - "src/jose_chacha20_poly1305.erl", - "src/jose_curve25519.erl", - "src/jose_curve448.erl", - "src/jose_sha3.erl", - "src/jose_xchacha20_poly1305.erl", "src/json/jose_json.erl", + "src/jwa/chacha20_poly1305/jose_chacha20_poly1305.erl", + "src/jwa/curve25519/jose_curve25519.erl", + "src/jwa/curve448/jose_curve448.erl", + "src/jwa/sha3/jose_sha3.erl", + "src/jwa/xchacha20_poly1305/jose_xchacha20_poly1305.erl", "src/jwe/jose_jwe.erl", "src/jwe/jose_jwe_alg.erl", "src/jwe/jose_jwe_enc.erl", @@ -56,37 +54,9 @@ erlang_bytecode( "src/jws/jose_jws.erl", "src/jws/jose_jws_alg.erl", ], - outs = [ - "ebin/jose_block_encryptor.beam", - "ebin/jose_chacha20_poly1305.beam", - "ebin/jose_curve25519.beam", - "ebin/jose_curve448.beam", - "ebin/jose_json.beam", - "ebin/jose_jwe.beam", - "ebin/jose_jwe_alg.beam", - "ebin/jose_jwe_enc.beam", - "ebin/jose_jwk.beam", - "ebin/jose_jwk_kty.beam", - "ebin/jose_jwk_oct.beam", - "ebin/jose_jwk_use_enc.beam", - "ebin/jose_jwk_use_sig.beam", - "ebin/jose_jws.beam", - "ebin/jose_jws_alg.beam", - "ebin/jose_sha3.beam", - "ebin/jose_xchacha20_poly1305.beam", - ], - hdrs = [ - "include/jose.hrl", - "include/jose_base.hrl", - "include/jose_compat.hrl", - "include/jose_jwe.hrl", - "include/jose_jwk.hrl", - "include/jose_jws.hrl", - "include/jose_jwt.hrl", - "include/jose_public_key.hrl", - ], + hdrs = [":public_and_private_hdrs"], app_name = "jose", - beam = [":parse_transforms"], + dest = "ebin", erlc_opts = "//:erlc_opts", ) @@ -97,24 +67,10 @@ erlang_bytecode( "src/base/jose_base64url.erl", "src/jose.erl", "src/jose_app.erl", - "src/jose_chacha20_poly1305_crypto.erl", - "src/jose_chacha20_poly1305_libsodium.erl", - "src/jose_chacha20_poly1305_unsupported.erl", "src/jose_crypto_compat.erl", - "src/jose_curve25519_libdecaf.erl", - "src/jose_curve25519_libsodium.erl", - "src/jose_curve25519_unsupported.erl", - "src/jose_curve448_libdecaf.erl", - "src/jose_curve448_unsupported.erl", "src/jose_public_key.erl", "src/jose_server.erl", - "src/jose_sha3_keccakf1600_driver.erl", - "src/jose_sha3_keccakf1600_nif.erl", - "src/jose_sha3_libdecaf.erl", - "src/jose_sha3_unsupported.erl", "src/jose_sup.erl", - "src/jose_xchacha20_poly1305_crypto.erl", - "src/jose_xchacha20_poly1305_unsupported.erl", "src/json/jose_json_jason.erl", "src/json/jose_json_jiffy.erl", "src/json/jose_json_jsone.erl", @@ -125,6 +81,18 @@ erlang_bytecode( "src/json/jose_json_poison_lexical_encoder.erl", "src/json/jose_json_thoas.erl", "src/json/jose_json_unsupported.erl", + "src/jwa/chacha20_poly1305/jose_chacha20_poly1305_crypto.erl", + "src/jwa/chacha20_poly1305/jose_chacha20_poly1305_libsodium.erl", + "src/jwa/chacha20_poly1305/jose_chacha20_poly1305_unsupported.erl", + "src/jwa/curve25519/jose_curve25519_crypto.erl", + "src/jwa/curve25519/jose_curve25519_fallback.erl", + "src/jwa/curve25519/jose_curve25519_libdecaf.erl", + "src/jwa/curve25519/jose_curve25519_libsodium.erl", + "src/jwa/curve25519/jose_curve25519_unsupported.erl", + "src/jwa/curve448/jose_curve448_crypto.erl", + "src/jwa/curve448/jose_curve448_fallback.erl", + "src/jwa/curve448/jose_curve448_libdecaf.erl", + "src/jwa/curve448/jose_curve448_unsupported.erl", "src/jwa/jose_jwa.erl", "src/jwa/jose_jwa_aes.erl", "src/jwa/jose_jwa_aes_kw.erl", @@ -149,11 +117,19 @@ erlang_bytecode( "src/jwa/jose_jwa_x448.erl", 
"src/jwa/jose_jwa_xchacha20.erl", "src/jwa/jose_jwa_xchacha20_poly1305.erl", + "src/jwa/sha3/jose_sha3_keccakf1600_driver.erl", + "src/jwa/sha3/jose_sha3_keccakf1600_nif.erl", + "src/jwa/sha3/jose_sha3_libdecaf.erl", + "src/jwa/sha3/jose_sha3_unsupported.erl", + "src/jwa/xchacha20_poly1305/jose_xchacha20_poly1305_crypto.erl", + "src/jwa/xchacha20_poly1305/jose_xchacha20_poly1305_libsodium.erl", + "src/jwa/xchacha20_poly1305/jose_xchacha20_poly1305_unsupported.erl", "src/jwe/jose_jwe_alg_aes_kw.erl", "src/jwe/jose_jwe_alg_c20p_kw.erl", "src/jwe/jose_jwe_alg_dir.erl", "src/jwe/jose_jwe_alg_ecdh_1pu.erl", "src/jwe/jose_jwe_alg_ecdh_es.erl", + "src/jwe/jose_jwe_alg_ecdh_ss.erl", "src/jwe/jose_jwe_alg_pbes2.erl", "src/jwe/jose_jwe_alg_rsa.erl", "src/jwe/jose_jwe_alg_xc20p_kw.erl", @@ -183,112 +159,10 @@ erlang_bytecode( "src/jws/jose_jws_alg_rsa_pss.erl", "src/jwt/jose_jwt.erl", ], - outs = [ - "ebin/jose.beam", - "ebin/jose_app.beam", - "ebin/jose_base64.beam", - "ebin/jose_base64url.beam", - "ebin/jose_chacha20_poly1305_crypto.beam", - "ebin/jose_chacha20_poly1305_libsodium.beam", - "ebin/jose_chacha20_poly1305_unsupported.beam", - "ebin/jose_crypto_compat.beam", - "ebin/jose_curve25519_libdecaf.beam", - "ebin/jose_curve25519_libsodium.beam", - "ebin/jose_curve25519_unsupported.beam", - "ebin/jose_curve448_libdecaf.beam", - "ebin/jose_curve448_unsupported.beam", - "ebin/jose_json_jason.beam", - "ebin/jose_json_jiffy.beam", - "ebin/jose_json_jsone.beam", - "ebin/jose_json_jsx.beam", - "ebin/jose_json_ojson.beam", - "ebin/jose_json_poison.beam", - "ebin/jose_json_poison_compat_encoder.beam", - "ebin/jose_json_poison_lexical_encoder.beam", - "ebin/jose_json_thoas.beam", - "ebin/jose_json_unsupported.beam", - "ebin/jose_jwa.beam", - "ebin/jose_jwa_aes.beam", - "ebin/jose_jwa_aes_kw.beam", - "ebin/jose_jwa_base64url.beam", - "ebin/jose_jwa_bench.beam", - "ebin/jose_jwa_chacha20.beam", - "ebin/jose_jwa_chacha20_poly1305.beam", - "ebin/jose_jwa_concat_kdf.beam", - "ebin/jose_jwa_curve25519.beam", - "ebin/jose_jwa_curve448.beam", - "ebin/jose_jwa_ed25519.beam", - "ebin/jose_jwa_ed448.beam", - "ebin/jose_jwa_hchacha20.beam", - "ebin/jose_jwa_math.beam", - "ebin/jose_jwa_pkcs1.beam", - "ebin/jose_jwa_pkcs5.beam", - "ebin/jose_jwa_pkcs7.beam", - "ebin/jose_jwa_poly1305.beam", - "ebin/jose_jwa_sha3.beam", - "ebin/jose_jwa_unsupported.beam", - "ebin/jose_jwa_x25519.beam", - "ebin/jose_jwa_x448.beam", - "ebin/jose_jwa_xchacha20.beam", - "ebin/jose_jwa_xchacha20_poly1305.beam", - "ebin/jose_jwe_alg_aes_kw.beam", - "ebin/jose_jwe_alg_c20p_kw.beam", - "ebin/jose_jwe_alg_dir.beam", - "ebin/jose_jwe_alg_ecdh_1pu.beam", - "ebin/jose_jwe_alg_ecdh_es.beam", - "ebin/jose_jwe_alg_pbes2.beam", - "ebin/jose_jwe_alg_rsa.beam", - "ebin/jose_jwe_alg_xc20p_kw.beam", - "ebin/jose_jwe_enc_aes.beam", - "ebin/jose_jwe_enc_c20p.beam", - "ebin/jose_jwe_enc_xc20p.beam", - "ebin/jose_jwe_zip.beam", - "ebin/jose_jwk_der.beam", - "ebin/jose_jwk_kty_ec.beam", - "ebin/jose_jwk_kty_oct.beam", - "ebin/jose_jwk_kty_okp_ed25519.beam", - "ebin/jose_jwk_kty_okp_ed25519ph.beam", - "ebin/jose_jwk_kty_okp_ed448.beam", - "ebin/jose_jwk_kty_okp_ed448ph.beam", - "ebin/jose_jwk_kty_okp_x25519.beam", - "ebin/jose_jwk_kty_okp_x448.beam", - "ebin/jose_jwk_kty_rsa.beam", - "ebin/jose_jwk_openssh_key.beam", - "ebin/jose_jwk_pem.beam", - "ebin/jose_jwk_set.beam", - "ebin/jose_jws_alg_ecdsa.beam", - "ebin/jose_jws_alg_eddsa.beam", - "ebin/jose_jws_alg_hmac.beam", - "ebin/jose_jws_alg_none.beam", - "ebin/jose_jws_alg_poly1305.beam", - 
"ebin/jose_jws_alg_rsa_pkcs1_v1_5.beam", - "ebin/jose_jws_alg_rsa_pss.beam", - "ebin/jose_jwt.beam", - "ebin/jose_public_key.beam", - "ebin/jose_server.beam", - "ebin/jose_sha3_keccakf1600_driver.beam", - "ebin/jose_sha3_keccakf1600_nif.beam", - "ebin/jose_sha3_libdecaf.beam", - "ebin/jose_sha3_unsupported.beam", - "ebin/jose_sup.beam", - "ebin/jose_xchacha20_poly1305_crypto.beam", - "ebin/jose_xchacha20_poly1305_unsupported.beam", - ], - hdrs = [ - "include/jose.hrl", - "include/jose_base.hrl", - "include/jose_compat.hrl", - "include/jose_jwe.hrl", - "include/jose_jwk.hrl", - "include/jose_jws.hrl", - "include/jose_jwt.hrl", - "include/jose_public_key.hrl", - ], + hdrs = [":public_and_private_hdrs"], app_name = "jose", - beam = [ - ":parse_transforms", - ":behaviours", - ], + beam = [":behaviours"], + dest = "ebin", erlc_opts = "//:erlc_opts", ) @@ -297,43 +171,22 @@ filegroup( srcs = [ ":behaviours", ":other_beam", - ":parse_transforms", ], ) filegroup( name = "srcs", srcs = [ - "src/base/jose_base.erl", "src/base/jose_base64.erl", "src/base/jose_base64url.erl", "src/jose.app.src", "src/jose.erl", "src/jose_app.erl", "src/jose_block_encryptor.erl", - "src/jose_chacha20_poly1305.erl", - "src/jose_chacha20_poly1305_crypto.erl", - "src/jose_chacha20_poly1305_libsodium.erl", - "src/jose_chacha20_poly1305_unsupported.erl", "src/jose_crypto_compat.erl", - "src/jose_curve25519.erl", - "src/jose_curve25519_libdecaf.erl", - "src/jose_curve25519_libsodium.erl", - "src/jose_curve25519_unsupported.erl", - "src/jose_curve448.erl", - "src/jose_curve448_libdecaf.erl", - "src/jose_curve448_unsupported.erl", "src/jose_public_key.erl", "src/jose_server.erl", - "src/jose_sha3.erl", - "src/jose_sha3_keccakf1600_driver.erl", - "src/jose_sha3_keccakf1600_nif.erl", - "src/jose_sha3_libdecaf.erl", - "src/jose_sha3_unsupported.erl", "src/jose_sup.erl", - "src/jose_xchacha20_poly1305.erl", - "src/jose_xchacha20_poly1305_crypto.erl", - "src/jose_xchacha20_poly1305_unsupported.erl", "src/json/jose_json.erl", "src/json/jose_json_jason.erl", "src/json/jose_json_jiffy.erl", @@ -345,6 +198,21 @@ filegroup( "src/json/jose_json_poison_lexical_encoder.erl", "src/json/jose_json_thoas.erl", "src/json/jose_json_unsupported.erl", + "src/jwa/chacha20_poly1305/jose_chacha20_poly1305.erl", + "src/jwa/chacha20_poly1305/jose_chacha20_poly1305_crypto.erl", + "src/jwa/chacha20_poly1305/jose_chacha20_poly1305_libsodium.erl", + "src/jwa/chacha20_poly1305/jose_chacha20_poly1305_unsupported.erl", + "src/jwa/curve25519/jose_curve25519.erl", + "src/jwa/curve25519/jose_curve25519_crypto.erl", + "src/jwa/curve25519/jose_curve25519_fallback.erl", + "src/jwa/curve25519/jose_curve25519_libdecaf.erl", + "src/jwa/curve25519/jose_curve25519_libsodium.erl", + "src/jwa/curve25519/jose_curve25519_unsupported.erl", + "src/jwa/curve448/jose_curve448.erl", + "src/jwa/curve448/jose_curve448_crypto.erl", + "src/jwa/curve448/jose_curve448_fallback.erl", + "src/jwa/curve448/jose_curve448_libdecaf.erl", + "src/jwa/curve448/jose_curve448_unsupported.erl", "src/jwa/jose_jwa.erl", "src/jwa/jose_jwa_aes.erl", "src/jwa/jose_jwa_aes_kw.erl", @@ -369,6 +237,15 @@ filegroup( "src/jwa/jose_jwa_x448.erl", "src/jwa/jose_jwa_xchacha20.erl", "src/jwa/jose_jwa_xchacha20_poly1305.erl", + "src/jwa/sha3/jose_sha3.erl", + "src/jwa/sha3/jose_sha3_keccakf1600_driver.erl", + "src/jwa/sha3/jose_sha3_keccakf1600_nif.erl", + "src/jwa/sha3/jose_sha3_libdecaf.erl", + "src/jwa/sha3/jose_sha3_unsupported.erl", + "src/jwa/xchacha20_poly1305/jose_xchacha20_poly1305.erl", + 
"src/jwa/xchacha20_poly1305/jose_xchacha20_poly1305_crypto.erl", + "src/jwa/xchacha20_poly1305/jose_xchacha20_poly1305_libsodium.erl", + "src/jwa/xchacha20_poly1305/jose_xchacha20_poly1305_unsupported.erl", "src/jwe/jose_jwe.erl", "src/jwe/jose_jwe_alg.erl", "src/jwe/jose_jwe_alg_aes_kw.erl", @@ -376,6 +253,7 @@ filegroup( "src/jwe/jose_jwe_alg_dir.erl", "src/jwe/jose_jwe_alg_ecdh_1pu.erl", "src/jwe/jose_jwe_alg_ecdh_es.erl", + "src/jwe/jose_jwe_alg_ecdh_ss.erl", "src/jwe/jose_jwe_alg_pbes2.erl", "src/jwe/jose_jwe_alg_rsa.erl", "src/jwe/jose_jwe_alg_xc20p_kw.erl", @@ -415,10 +293,7 @@ filegroup( ], ) -filegroup( - name = "private_hdrs", - srcs = [], -) +filegroup(name = "private_hdrs") filegroup( name = "public_hdrs", @@ -436,7 +311,9 @@ filegroup( filegroup( name = "priv", - srcs = ["priv/Dockerfile"], + srcs = [ + "priv/.keep", + ], ) filegroup( @@ -471,6 +348,8 @@ erlang_app( "crypto", "public_key", ], + license_files = [":license_files"], + priv = [":priv"], deps = ["@thoas//:erlang_app"], ) @@ -479,3 +358,10 @@ alias( actual = ":erlang_app", visibility = ["//visibility:public"], ) + +filegroup( + name = "license_files", + srcs = [ + "LICENSE.md", + ], +) diff --git a/bazel/BUILD.khepri b/bazel/BUILD.khepri new file mode 100644 index 000000000000..1e4c6a294d8b --- /dev/null +++ b/bazel/BUILD.khepri @@ -0,0 +1,182 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlang_bytecode", "erlc_opts") +load("@rules_erlang//:erlang_app.bzl", "erlang_app") + +erlc_opts( + name = "erlc_opts", + values = select({ + "@rules_erlang//:debug_build": [ + "+debug_info", + "+warn_export_vars", + "+warnings_as_errors", + ], + "//conditions:default": [ + "+debug_info", + "+deterministic", + "+warn_export_vars", + "+warnings_as_errors", + ], + }), + visibility = [":__subpackages__"], +) + +erlang_bytecode( + name = "behaviours", + srcs = [ + "src/khepri_import_export.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "khepri", + dest = "ebin", + erlc_opts = "//:erlc_opts", +) + +filegroup( + name = "beam_files", + srcs = [ + ":behaviours", + ":other_beam", + ], +) + +filegroup( + name = "srcs", + srcs = [ + "src/khepri.app.src", + "src/khepri.erl", + "src/khepri_adv.erl", + "src/khepri_app.erl", + "src/khepri_cluster.erl", + "src/khepri_condition.erl", + "src/khepri_event_handler.erl", + "src/khepri_evf.erl", + "src/khepri_export_erlang.erl", + "src/khepri_import_export.erl", + "src/khepri_machine.erl", + "src/khepri_machine_v0.erl", + "src/khepri_path.erl", + "src/khepri_pattern_tree.erl", + "src/khepri_payload.erl", + "src/khepri_projection.erl", + "src/khepri_sproc.erl", + "src/khepri_sup.erl", + "src/khepri_tree.erl", + "src/khepri_tx.erl", + "src/khepri_tx_adv.erl", + "src/khepri_utils.erl", + ], +) + +filegroup( + name = "private_hdrs", + srcs = [ + "src/khepri_bang.hrl", + "src/khepri_cluster.hrl", + "src/khepri_error.hrl", + "src/khepri_evf.hrl", + "src/khepri_machine.hrl", + "src/khepri_payload.hrl", + "src/khepri_projection.hrl", + "src/khepri_ret.hrl", + "src/khepri_tree.hrl", + "src/khepri_tx.hrl", + ], +) + +filegroup( + name = "public_hdrs", + srcs = [ + "include/khepri.hrl", + ], +) + +filegroup(name = "priv") + +filegroup( + name = "licenses", + srcs = [ + "LICENSE-Apache-2.0", + "LICENSE-MPL-2.0", + ], +) + +filegroup( + name = "public_and_private_hdrs", + srcs = [ + ":private_hdrs", + ":public_hdrs", + ], +) + +filegroup( + name = "all_srcs", + srcs = [ + ":public_and_private_hdrs", + ":srcs", + ], +) + +erlang_app( + name = "erlang_app", + srcs = [":all_srcs"], + hdrs = 
[":public_hdrs"], + app_name = "khepri", + beam_files = [":beam_files"], + extra_apps = ["compiler"], + license_files = [":license_files"], + priv = [":priv"], + deps = [ + "@horus//:erlang_app", + "@ra//:erlang_app", + "@seshat//:erlang_app", + ], +) + +alias( + name = "khepri", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) + +erlang_bytecode( + name = "other_beam", + srcs = [ + "src/khepri.erl", + "src/khepri_adv.erl", + "src/khepri_app.erl", + "src/khepri_cluster.erl", + "src/khepri_condition.erl", + "src/khepri_event_handler.erl", + "src/khepri_evf.erl", + "src/khepri_export_erlang.erl", + "src/khepri_machine.erl", + "src/khepri_machine_v0.erl", + "src/khepri_path.erl", + "src/khepri_pattern_tree.erl", + "src/khepri_payload.erl", + "src/khepri_projection.erl", + "src/khepri_sproc.erl", + "src/khepri_sup.erl", + "src/khepri_tree.erl", + "src/khepri_tx.erl", + "src/khepri_tx_adv.erl", + "src/khepri_utils.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "khepri", + beam = [":behaviours"], + dest = "ebin", + erlc_opts = "//:erlc_opts", + deps = [ + "@horus//:erlang_app", + "@ra//:erlang_app", + "@seshat//:erlang_app", + ], +) + +filegroup( + name = "license_files", + srcs = [ + "LICENSE-Apache-2.0", + "LICENSE-MPL-2.0", + ], +) diff --git a/bazel/BUILD.khepri_mnesia_migration b/bazel/BUILD.khepri_mnesia_migration new file mode 100644 index 000000000000..b01afc3951c6 --- /dev/null +++ b/bazel/BUILD.khepri_mnesia_migration @@ -0,0 +1,146 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlang_bytecode", "erlc_opts") +load("@rules_erlang//:erlang_app.bzl", "erlang_app") + +erlc_opts( + name = "erlc_opts", + values = select({ + "@rules_erlang//:debug_build": [ + "+debug_info", + "+warn_export_vars", + "+warnings_as_errors", + ], + "//conditions:default": [ + "+debug_info", + "+deterministic", + "+warn_export_vars", + "+warnings_as_errors", + ], + }), + visibility = [":__subpackages__"], +) + +erlang_bytecode( + name = "other_beam", + srcs = [ + "src/khepri_mnesia_migration_app.erl", + "src/khepri_mnesia_migration_sup.erl", + "src/kmm_utils.erl", + "src/m2k_cluster_sync.erl", + "src/m2k_cluster_sync_sup.erl", + "src/m2k_export.erl", + "src/m2k_subscriber.erl", + "src/m2k_table_copy.erl", + "src/m2k_table_copy_sup.erl", + "src/m2k_table_copy_sup_sup.erl", + "src/mnesia_to_khepri.erl", + "src/mnesia_to_khepri_example_converter.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "khepri_mnesia_migration", + beam = [":behaviours"], + dest = "ebin", + erlc_opts = "//:erlc_opts", + deps = ["@khepri//:erlang_app"], +) + +filegroup( + name = "beam_files", + srcs = [ + ":behaviours", + ":other_beam", + ], +) + +filegroup( + name = "srcs", + srcs = [ + "src/khepri_mnesia_migration.app.src", + "src/khepri_mnesia_migration_app.erl", + "src/khepri_mnesia_migration_sup.erl", + "src/kmm_utils.erl", + "src/m2k_cluster_sync.erl", + "src/m2k_cluster_sync_sup.erl", + "src/m2k_export.erl", + "src/m2k_subscriber.erl", + "src/m2k_table_copy.erl", + "src/m2k_table_copy_sup.erl", + "src/m2k_table_copy_sup_sup.erl", + "src/mnesia_to_khepri.erl", + "src/mnesia_to_khepri_example_converter.erl", + ], +) + +filegroup( + name = "private_hdrs", + srcs = [ + "src/kmm_error.hrl", + # "src/kmm_logging.hrl", # keep + ], +) + +filegroup( + name = "public_hdrs", + srcs = ["src/kmm_logging.hrl"] + glob(["include/**/*.hrl"]), # keep +) + +filegroup( + name = "priv", + srcs = glob(["priv/**/*"]), +) + +filegroup( + name = "licenses", + srcs = [ + "LICENSE-Apache-2.0", + 
"LICENSE-MPL-2.0", + ], +) + +filegroup( + name = "public_and_private_hdrs", + srcs = [ + ":private_hdrs", + ":public_hdrs", + ], +) + +filegroup( + name = "all_srcs", + srcs = [ + ":public_and_private_hdrs", + ":srcs", + ], +) + +erlang_app( + name = "erlang_app", + srcs = [":all_srcs"], + hdrs = [":public_hdrs"], + app_name = "khepri_mnesia_migration", + beam_files = [":beam_files"], + license_files = [":license_files"], + priv = [":priv"], + deps = ["@khepri//:erlang_app"], +) + +alias( + name = "khepri_mnesia_migration", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) + +erlang_bytecode( + name = "behaviours", + srcs = [ + "src/mnesia_to_khepri_converter.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "khepri_mnesia_migration", + dest = "ebin", + erlc_opts = "//:erlc_opts", +) + +filegroup( + name = "license_files", + srcs = glob(["LICENSE*"]), +) diff --git a/bazel/BUILD.ra b/bazel/BUILD.ra index 517a03476319..374f6065706d 100644 --- a/bazel/BUILD.ra +++ b/bazel/BUILD.ra @@ -38,12 +38,14 @@ erlang_bytecode( srcs = [ "src/ra.erl", "src/ra_app.erl", + "src/ra_aux.erl", "src/ra_bench.erl", "src/ra_counters.erl", "src/ra_dbg.erl", "src/ra_directory.erl", "src/ra_env.erl", "src/ra_ets_queue.erl", + "src/ra_file.erl", "src/ra_file_handle.erl", "src/ra_flru.erl", "src/ra_leaderboard.erl", @@ -70,6 +72,7 @@ erlang_bytecode( "src/ra_server_sup_sup.erl", "src/ra_sup.erl", "src/ra_system.erl", + "src/ra_system_recover.erl", "src/ra_system_sup.erl", "src/ra_systems_sup.erl", ], @@ -97,12 +100,14 @@ filegroup( "src/ra.app.src", "src/ra.erl", "src/ra_app.erl", + "src/ra_aux.erl", "src/ra_bench.erl", "src/ra_counters.erl", "src/ra_dbg.erl", "src/ra_directory.erl", "src/ra_env.erl", "src/ra_ets_queue.erl", + "src/ra_file.erl", "src/ra_file_handle.erl", "src/ra_flru.erl", "src/ra_leaderboard.erl", @@ -131,6 +136,7 @@ filegroup( "src/ra_snapshot.erl", "src/ra_sup.erl", "src/ra_system.erl", + "src/ra_system_recover.erl", "src/ra_system_sup.erl", "src/ra_systems_sup.erl", ], diff --git a/bazel/BUILD.ranch b/bazel/BUILD.ranch index 89555ce604aa..09bf62408b5f 100644 --- a/bazel/BUILD.ranch +++ b/bazel/BUILD.ranch @@ -88,12 +88,7 @@ filegroup(name = "private_hdrs") filegroup(name = "public_hdrs") -filegroup( - name = "priv", - srcs = [ - "ebin/ranch.appup", # keep - ], -) +filegroup(name = "priv") filegroup( name = "licenses", diff --git a/bazel/BUILD.x509 b/bazel/BUILD.x509 index ad2e97bb0bd2..db8b68607714 100644 --- a/bazel/BUILD.x509 +++ b/bazel/BUILD.x509 @@ -1,3 +1,5 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlc_opts") + filegroup( name = "sources", srcs = [ @@ -8,3 +10,17 @@ filegroup( ]), visibility = ["//visibility:public"], ) + +erlc_opts( + name = "erlc_opts", + values = select({ + "@rules_erlang//:debug_build": [ + "+debug_info", + ], + "//conditions:default": [ + "+debug_info", + "+deterministic", + ], + }), + visibility = [":__subpackages__"], +) diff --git a/bazel/bzlmod/extensions.bzl b/bazel/bzlmod/extensions.bzl index 121466979fc3..f721bf37d449 100644 --- a/bazel/bzlmod/extensions.bzl +++ b/bazel/bzlmod/extensions.bzl @@ -1,158 +1,18 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load( ":secondary_umbrella.bzl", fetch_secondary_umbrella = "secondary_umbrella", ) -load( - "//bazel/repositories:elixir_config.bzl", - "INSTALLATION_TYPE_EXTERNAL", - "INSTALLATION_TYPE_INTERNAL", - 
_elixir_config_rule = "elixir_config", -) -load( - "//bazel/elixir:elixir.bzl", - "DEFAULT_ELIXIR_SHA256", - "DEFAULT_ELIXIR_VERSION", -) - -def _elixir_config(ctx): - types = {} - versions = {} - urls = {} - strip_prefixs = {} - sha256s = {} - elixir_homes = {} - - for mod in ctx.modules: - for elixir in mod.tags.external_elixir_from_path: - types[elixir.name] = INSTALLATION_TYPE_EXTERNAL - versions[elixir.name] = elixir.version - elixir_homes[elixir.name] = elixir.elixir_home - - for elixir in mod.tags.internal_elixir_from_http_archive: - types[elixir.name] = INSTALLATION_TYPE_INTERNAL - versions[elixir.name] = elixir.version - urls[elixir.name] = elixir.url - strip_prefixs[elixir.name] = elixir.strip_prefix - sha256s[elixir.name] = elixir.sha256 - - for elixir in mod.tags.internal_elixir_from_github_release: - url = "https://github.com/elixir-lang/elixir/archive/refs/tags/v{}.tar.gz".format( - elixir.version, - ) - strip_prefix = "elixir-{}".format(elixir.version) - - types[elixir.name] = INSTALLATION_TYPE_INTERNAL - versions[elixir.name] = elixir.version - urls[elixir.name] = url - strip_prefixs[elixir.name] = strip_prefix - sha256s[elixir.name] = elixir.sha256 - - _elixir_config_rule( - name = "elixir_config", - rabbitmq_server_workspace = "@rabbitmq-server", - types = types, - versions = versions, - urls = urls, - strip_prefixs = strip_prefixs, - sha256s = sha256s, - elixir_homes = elixir_homes, - ) - -external_elixir_from_path = tag_class(attrs = { - "name": attr.string(), - "version": attr.string(), - "elixir_home": attr.string(), -}) - -internal_elixir_from_http_archive = tag_class(attrs = { - "name": attr.string(), - "version": attr.string(), - "url": attr.string(), - "strip_prefix": attr.string(), - "sha256": attr.string(), -}) - -internal_elixir_from_github_release = tag_class(attrs = { - "name": attr.string( - default = "internal", - ), - "version": attr.string( - default = DEFAULT_ELIXIR_VERSION, - ), - "sha256": attr.string( - default = DEFAULT_ELIXIR_SHA256, - ), -}) - -elixir_config = module_extension( - implementation = _elixir_config, - tag_classes = { - "external_elixir_from_path": external_elixir_from_path, - "internal_elixir_from_http_archive": internal_elixir_from_http_archive, - "internal_elixir_from_github_release": internal_elixir_from_github_release, - }, -) - -def _rbe(ctx): - root_rbe_repo_props = [] - rbe_repo_props = [] - for mod in ctx.modules: - for repo in mod.tags.git_repository: - props = {"remote": repo.remote} - if repo.commit != "": - props["commit"] = repo.commit - if repo.tag != "": - props["tag"] = repo.tag - if repo.branch != "": - props["branch"] = repo.branch - if mod.is_root: - if not props in root_rbe_repo_props: - root_rbe_repo_props.append(props) - elif not props in rbe_repo_props: - rbe_repo_props.append(props) - - if len(root_rbe_repo_props) > 1: - fail("Multiple definitions for @rbe exist in root module: {}".format(rbe_repo_props)) - - if len(root_rbe_repo_props) > 0: - git_repository( - name = "rbe", - **root_rbe_repo_props[0] - ) - else: - if len(rbe_repo_props) > 1: - fail("Multiple definitions for @rbe exist: {}".format(rbe_repo_props)) - - if len(rbe_repo_props) > 0: - git_repository( - name = "rbe", - **rbe_repo_props[0] - ) - -git_repository_tag = tag_class(attrs = { - "remote": attr.string(), - "branch": attr.string(), - "tag": attr.string(), - "commit": attr.string(), -}) - -rbe = module_extension( - implementation = _rbe, - tag_classes = { - "git_repository": git_repository_tag, - }, -) -def _secondary_umbrella(ctx): +def 
_secondary_umbrella(_ctx): fetch_secondary_umbrella() secondary_umbrella = module_extension( implementation = _secondary_umbrella, ) -def _hex(ctx): +def _hex(_ctx): http_archive( name = "hex", sha256 = "0e3e3290d0fcbdc6bb0526b73ca174d68dcff4d53ee86015c49ad0493e39ee65", diff --git a/bazel/bzlmod/secondary_umbrella.bzl b/bazel/bzlmod/secondary_umbrella.bzl index 2ca4b36626c8..adfb76cff4e2 100644 --- a/bazel/bzlmod/secondary_umbrella.bzl +++ b/bazel/bzlmod/secondary_umbrella.bzl @@ -25,12 +25,12 @@ EOF def secondary_umbrella(): http_archive( - name = "rabbitmq-server-generic-unix-3.11", + name = "rabbitmq-server-generic-unix-3.13", build_file = "@//:BUILD.package_generic_unix", patch_cmds = [ADD_PLUGINS_DIR_BUILD_FILE], - strip_prefix = "rabbitmq_server-3.11.18", + strip_prefix = "rabbitmq_server-3.13.1", # This file is produced just in time by the test-mixed-versions.yaml GitHub Actions workflow. urls = [ - "https://rabbitmq-github-actions.s3.eu-west-1.amazonaws.com/secondary-umbrellas/rbe-25_3/package-generic-unix-for-mixed-version-testing-v3.11.18.tar.xz", + "https://rabbitmq-github-actions.s3.eu-west-1.amazonaws.com/secondary-umbrellas/26.1/package-generic-unix-for-mixed-version-testing-v3.13.1.tar.xz", ], ) diff --git a/bazel/elixir/BUILD.bazel b/bazel/elixir/BUILD.bazel index f21e9181d40f..e6ca258ecc6e 100644 --- a/bazel/elixir/BUILD.bazel +++ b/bazel/elixir/BUILD.bazel @@ -1,26 +1 @@ -load( - ":elixir_as_app.bzl", - "elixir_as_app", -) - -toolchain_type( - name = "toolchain_type", - visibility = ["//visibility:public"], -) - -elixir_as_app( - name = "erlang_app", - visibility = ["//visibility:public"], -) - -elixir_as_app( - name = "logger", - app = "logger", - visibility = ["//visibility:public"], -) - -elixir_as_app( - name = "iex", - app = "iex", - visibility = ["//visibility:public"], -) +exports_files(["elixir_escript_main.exs"]) diff --git a/bazel/elixir/elixir.bzl b/bazel/elixir/elixir.bzl deleted file mode 100644 index d36240a9a862..000000000000 --- a/bazel/elixir/elixir.bzl +++ /dev/null @@ -1,147 +0,0 @@ -load( - ":elixir_build.bzl", - "elixir_build", - "elixir_external", -) -load( - ":elixir_toolchain.bzl", - "elixir_toolchain", -) -load( - "//bazel/repositories:elixir_config.bzl", - "INSTALLATION_TYPE_INTERNAL", - _elixir_config = "elixir_config", -) - -def elixir_toolchain_external(): - """DEPRECATED""" - - elixir_external( - name = "external_elixir_installation_ref", - target_compatible_with = [ - Label("//bazel/platforms:elixir_external"), - ], - ) - - elixir_toolchain( - name = "elixir_external", - elixir = ":external_elixir_installation_ref", - ) - - native.toolchain( - name = "elixir_toolchain_external", - exec_compatible_with = [ - Label("@erlang_config//:erlang_external"), - ], - target_compatible_with = [ - Label("//bazel/platforms:elixir_external"), - ], - toolchain = ":elixir_external", - toolchain_type = Label("//bazel/elixir:toolchain_type"), - visibility = ["//visibility:public"], - ) - -def elixir_toolchain_from_http_archive( - name_suffix = "", - url = None, - strip_prefix = None, - sha256 = None, - elixir_constraints = None): - """DEPRECATED""" - - elixir_build( - name = "elixir_build{}".format(name_suffix), - url = url, - strip_prefix = strip_prefix, - sha256 = sha256, - target_compatible_with = elixir_constraints, - ) - - elixir_toolchain( - name = "elixir{}".format(name_suffix), - elixir = ":elixir_build{}".format(name_suffix), - ) - - native.toolchain( - name = "elixir_toolchain{}".format(name_suffix), - exec_compatible_with = [ - 
Label("@erlang_config//:erlang_internal"), - ], - target_compatible_with = elixir_constraints, - toolchain = ":elixir{}".format(name_suffix), - toolchain_type = Label("//bazel/elixir:toolchain_type"), - visibility = ["//visibility:public"], - ) - -def elixir_toolchain_from_github_release( - name_suffix = "_default", - version = None, - sha256 = None): - """DEPRECATED""" - - [major, minor, patch] = version.split(".") - elixir_constraints = [ - Label("//bazel/platforms:elixir_{}_{}".format(major, minor)), - ] - url = "https://github.com/elixir-lang/elixir/archive/refs/tags/v{}.tar.gz".format(version) - elixir_toolchain_from_http_archive( - name_suffix = name_suffix, - url = url, - strip_prefix = "elixir-{}".format(version), - sha256 = sha256, - elixir_constraints = elixir_constraints, - ) - -DEFAULT_ELIXIR_VERSION = "1.13.4" -DEFAULT_ELIXIR_SHA256 = "95daf2dd3052e6ca7d4d849457eaaba09de52d65ca38d6933c65bc1cdf6b8579" - -# Generates the @elixir_config repository, which contains erlang -# toolchains and platform defintions -def elixir_config( - rabbitmq_server_workspace = "@rabbitmq-server", - internal_elixir_configs = []): - types = {c.name: INSTALLATION_TYPE_INTERNAL for c in internal_elixir_configs} - versions = {c.name: c.version for c in internal_elixir_configs} - urls = {c.name: c.url for c in internal_elixir_configs} - strip_prefixs = {c.name: c.strip_prefix for c in internal_elixir_configs if c.strip_prefix} - sha256s = {c.name: c.sha256 for c in internal_elixir_configs if c.sha256} - - _elixir_config( - name = "elixir_config", - rabbitmq_server_workspace = rabbitmq_server_workspace, - types = types, - versions = versions, - urls = urls, - strip_prefixs = strip_prefixs, - sha256s = sha256s, - ) - -def internal_elixir_from_http_archive( - name = None, - version = None, - url = None, - strip_prefix = None, - sha256 = None): - return struct( - name = name, - version = version, - url = url, - strip_prefix = strip_prefix, - sha256 = sha256, - ) - -def internal_elixir_from_github_release( - name = "internal", - version = DEFAULT_ELIXIR_VERSION, - sha256 = DEFAULT_ELIXIR_SHA256): - url = "https://github.com/elixir-lang/elixir/archive/refs/tags/v{}.tar.gz".format( - version, - ) - - return internal_elixir_from_http_archive( - name = name, - version = version, - url = url, - strip_prefix = "elixir-{}".format(version), - sha256 = sha256, - ) diff --git a/bazel/elixir/elixir_as_app.bzl b/bazel/elixir/elixir_as_app.bzl deleted file mode 100644 index eb615722c989..000000000000 --- a/bazel/elixir/elixir_as_app.bzl +++ /dev/null @@ -1,52 +0,0 @@ -load( - "@rules_erlang//:erlang_app_info.bzl", - "ErlangAppInfo", -) -load( - "@rules_erlang//:util.bzl", - "path_join", -) -load( - ":elixir_toolchain.bzl", - "elixir_dirs", -) - -def _impl(ctx): - ebin = ctx.actions.declare_directory(path_join(ctx.label.name, "ebin")) - - (elixir_home, elixir_runfiles) = elixir_dirs(ctx) - - ctx.actions.run_shell( - inputs = elixir_runfiles.files, - outputs = [ebin], - command = """set -euo pipefail - -cp -r "{elixir_home}"/lib/{app}/ebin/* {ebin} -""".format( - elixir_home = elixir_home, - app = ctx.attr.app, - ebin = ebin.path, - ), - ) - - return [ - DefaultInfo(files = depset([ebin])), - ErlangAppInfo( - app_name = ctx.attr.app, - include = [], - beam = [ebin], - priv = [], - license_files = [], - srcs = [], - deps = [], - ), - ] - -elixir_as_app = rule( - implementation = _impl, - attrs = { - "app": attr.string(default = "elixir"), - }, - toolchains = [":toolchain_type"], - provides = [ErlangAppInfo], -) diff --git 
a/bazel/elixir/elixir_build.bzl b/bazel/elixir/elixir_build.bzl deleted file mode 100644 index 3ccc2c152e7d..000000000000 --- a/bazel/elixir/elixir_build.bzl +++ /dev/null @@ -1,172 +0,0 @@ -load( - "@bazel_skylib//rules:common_settings.bzl", - "BuildSettingInfo", -) -load( - "@rules_erlang//tools:erlang_toolchain.bzl", - "erlang_dirs", - "maybe_install_erlang", -) - -ElixirInfo = provider( - doc = "A Home directory of a built Elixir", - fields = [ - "release_dir", - "elixir_home", - "version_file", - ], -) - -def _impl(ctx): - (_, _, filename) = ctx.attr.url.rpartition("/") - downloaded_archive = ctx.actions.declare_file(filename) - - release_dir = ctx.actions.declare_directory(ctx.label.name + "_release") - build_dir = ctx.actions.declare_directory(ctx.label.name + "_build") - - version_file = ctx.actions.declare_file(ctx.label.name + "_version") - - ctx.actions.run_shell( - inputs = [], - outputs = [downloaded_archive], - command = """set -euo pipefail - -curl -L "{archive_url}" -o {archive_path} - -if [ -n "{sha256}" ]; then - echo "{sha256} {archive_path}" | sha256sum --check --strict - -fi -""".format( - archive_url = ctx.attr.url, - archive_path = downloaded_archive.path, - sha256 = ctx.attr.sha256, - ), - mnemonic = "CURL", - progress_message = "Downloading {}".format(ctx.attr.url), - ) - - (erlang_home, _, runfiles) = erlang_dirs(ctx) - - inputs = depset( - direct = [downloaded_archive], - transitive = [runfiles.files], - ) - - strip_prefix = ctx.attr.strip_prefix - if strip_prefix != "": - strip_prefix += "\\/" - - ctx.actions.run_shell( - inputs = inputs, - outputs = [release_dir, build_dir, version_file], - command = """set -euo pipefail - -{maybe_install_erlang} - -export PATH="{erlang_home}"/bin:${{PATH}} -export HOME="$(mktemp -d)" - -ABS_BUILD_DIR=$PWD/{build_path} -ABS_RELEASE_DIR=$PWD/{release_path} -ABS_VERSION_FILE=$PWD/{version_file} - -tar --extract \\ - --transform 's/{strip_prefix}//' \\ - --file {archive_path} \\ - --directory $ABS_BUILD_DIR - -cd $ABS_BUILD_DIR - -make - -cp -r bin $ABS_RELEASE_DIR/ -cp -r lib $ABS_RELEASE_DIR/ - -$ABS_RELEASE_DIR/bin/iex --version > $ABS_VERSION_FILE -""".format( - maybe_install_erlang = maybe_install_erlang(ctx), - erlang_home = erlang_home, - archive_path = downloaded_archive.path, - strip_prefix = strip_prefix, - build_path = build_dir.path, - release_path = release_dir.path, - version_file = version_file.path, - ), - mnemonic = "ELIXIR", - progress_message = "Compiling elixir from source", - ) - - return [ - DefaultInfo( - files = depset([ - release_dir, - version_file, - ]), - ), - ctx.toolchains["@rules_erlang//tools:toolchain_type"].otpinfo, - ElixirInfo( - release_dir = release_dir, - elixir_home = None, - version_file = version_file, - ), - ] - -elixir_build = rule( - implementation = _impl, - attrs = { - "url": attr.string(mandatory = True), - "strip_prefix": attr.string(), - "sha256": attr.string(), - }, - toolchains = ["@rules_erlang//tools:toolchain_type"], -) - -def _elixir_external_impl(ctx): - elixir_home = ctx.attr.elixir_home - if elixir_home == "": - elixir_home = ctx.attr._elixir_home[BuildSettingInfo].value - - version_file = ctx.actions.declare_file(ctx.label.name + "_version") - - (erlang_home, _, runfiles) = erlang_dirs(ctx) - - ctx.actions.run_shell( - inputs = runfiles.files, - outputs = [version_file], - command = """set -euo pipefail - -{maybe_install_erlang} - -export PATH="{erlang_home}"/bin:${{PATH}} - -"{elixir_home}"/bin/iex --version > {version_file} -""".format( - maybe_install_erlang = 
maybe_install_erlang(ctx), - erlang_home = erlang_home, - elixir_home = elixir_home, - version_file = version_file.path, - ), - mnemonic = "ELIXIR", - progress_message = "Validating elixir at {}".format(elixir_home), - ) - - return [ - DefaultInfo( - files = depset([version_file]), - ), - ctx.toolchains["@rules_erlang//tools:toolchain_type"].otpinfo, - ElixirInfo( - release_dir = None, - elixir_home = elixir_home, - version_file = version_file, - ), - ] - -elixir_external = rule( - implementation = _elixir_external_impl, - attrs = { - "_elixir_home": attr.label(default = Label("//:elixir_home")), - "elixir_home": attr.string(), - }, - toolchains = ["@rules_erlang//tools:toolchain_type"], -) diff --git a/bazel/elixir/elixir_escript_main.bzl b/bazel/elixir/elixir_escript_main.bzl new file mode 100644 index 000000000000..e65780c50d12 --- /dev/null +++ b/bazel/elixir/elixir_escript_main.bzl @@ -0,0 +1,94 @@ +load( + "@rules_elixir//private:elixir_toolchain.bzl", + "elixir_dirs", + "erlang_dirs", + "maybe_install_erlang", +) +load( + "@rules_erlang//:erlang_app_info.bzl", + "ErlangAppInfo", +) + +def _impl(ctx): + (erlang_home, _, erlang_runfiles) = erlang_dirs(ctx) + (elixir_home, elixir_runfiles) = elixir_dirs(ctx) + + app_info = ctx.attr.app[ErlangAppInfo] + + env = "\n".join([ + "export {}={}".format(k, v) + for k, v in ctx.attr.env.items() + ]) + + config_path = "" + if ctx.file.mix_config != None: + config_path = ctx.file.mix_config.path + + command = """set -euo pipefail + +{maybe_install_erlang} + +if [[ "{elixir_home}" == /* ]]; then + ABS_ELIXIR_HOME="{elixir_home}" +else + ABS_ELIXIR_HOME=$PWD/{elixir_home} +fi + +export OUT="{out}" +export CONFIG_PATH="{config_path}" +export APP="{app}" +export MAIN_MODULE="Elixir.{main_module}" + +{env} + +export PATH="{erlang_home}/bin:$PATH" +set -x +"{elixir_home}"/bin/elixir {script} +""".format( + maybe_install_erlang = maybe_install_erlang(ctx), + erlang_home = erlang_home, + elixir_home = elixir_home, + env = env, + script = ctx.file._script.path, + out = ctx.outputs.out.path, + config_path = config_path, + app = app_info.app_name, + main_module = ctx.attr.main_module, + ) + + inputs = depset( + direct = ctx.files._script + ctx.files.mix_config, + transitive = [ + erlang_runfiles.files, + elixir_runfiles.files, + ], + ) + + ctx.actions.run_shell( + inputs = inputs, + outputs = [ctx.outputs.out], + command = command, + mnemonic = "ELIXIR", + ) + +elixir_escript_main = rule( + implementation = _impl, + attrs = { + "_script": attr.label( + allow_single_file = True, + default = Label(":elixir_escript_main.exs"), + ), + "app": attr.label( + providers = [ErlangAppInfo], + ), + "env": attr.string_dict(), + "main_module": attr.string(), + "mix_config": attr.label( + allow_single_file = [".exs"], + ), + "out": attr.output(), + }, + toolchains = [ + "@rules_elixir//:toolchain_type", + ], +) diff --git a/bazel/elixir/elixir_escript_main.exs b/bazel/elixir/elixir_escript_main.exs new file mode 100644 index 000000000000..0b8511e12a04 --- /dev/null +++ b/bazel/elixir/elixir_escript_main.exs @@ -0,0 +1,130 @@ +defmodule ElixirEscriptMain do + # https://github.com/elixir-lang/elixir/blob/99785cc16be096d02012ad889ca51b5045b599a4/lib/mix/lib/mix/tasks/escript.build.ex#L327 + def gen_main(project, name, module, app, language) do + config_path = project[:config_path] + + compile_config = + if File.regular?(config_path) do + config = Config.Reader.read!(config_path, env: Mix.env(), target: Mix.target()) + Macro.escape(config) + else + [] + end + + runtime_path = 
config_path |> Path.dirname() |> Path.join("runtime.exs") + + runtime_config = + if File.regular?(runtime_path) do + File.read!(runtime_path) + end + + module_body = + quote do + @spec main(OptionParser.argv()) :: any + def main(args) do + unquote(main_body_for(language, module, app, compile_config, runtime_config)) + end + + defp load_config(config) do + each_fun = fn {app, kw} -> + set_env_fun = fn {k, v} -> :application.set_env(app, k, v, persistent: true) end + :lists.foreach(set_env_fun, kw) + end + + :lists.foreach(each_fun, config) + :ok + end + + defp start_app(nil) do + :ok + end + + defp start_app(app) do + case :application.ensure_all_started(app) do + {:ok, _} -> + :ok + + {:error, {app, reason}} -> + formatted_error = + case :code.ensure_loaded(Application) do + {:module, Application} -> Application.format_error(reason) + {:error, _} -> :io_lib.format(~c"~p", [reason]) + end + + error_message = [ + "ERROR! Could not start application ", + :erlang.atom_to_binary(app, :utf8), + ": ", + formatted_error, + ?\n + ] + + io_error(error_message) + :erlang.halt(1) + end + end + + defp io_error(message) do + :io.put_chars(:standard_error, message) + end + end + + {:module, ^name, binary, _} = Module.create(name, module_body, Macro.Env.location(__ENV__)) + [{~c"#{name}.beam", binary}] + end + + defp main_body_for(:elixir, module, app, compile_config, runtime_config) do + config = + if runtime_config do + quote do + runtime_config = + Config.Reader.eval!( + "config/runtime.exs", + unquote(runtime_config), + env: unquote(Mix.env()), + target: unquote(Mix.target()), + imports: :disabled + ) + + Config.Reader.merge(unquote(compile_config), runtime_config) + end + else + compile_config + end + + quote do + case :application.ensure_all_started(:elixir) do + {:ok, _} -> + args = Enum.map(args, &List.to_string(&1)) + System.argv(args) + load_config(unquote(config)) + start_app(unquote(app)) + Kernel.CLI.run(fn _ -> unquote(module).main(args) end) + + error -> + io_error(["ERROR! 
Failed to start Elixir.\n", :io_lib.format(~c"error: ~p~n", [error])]) + :erlang.halt(1) + end + end + end +end + +output = System.get_env("OUT") +IO.puts("Will write to " <> output) + +project = [ + config_path: System.get_env("CONFIG_PATH", "config/config.exs"), +] +app = String.to_atom(System.get_env("APP")) +name = String.to_atom(Atom.to_string(app) <> "_escript") +module = String.to_atom(System.get_env("MAIN_MODULE")) + +:application.ensure_all_started(:mix) +Mix.State.start_link(:none) +[{_, bytecode}] = ElixirEscriptMain.gen_main(project, name, module, app, :elixir) + +{:ok, file} = File.open(output, [:write]) +IO.binwrite(file, bytecode) +File.close(file) + +IO.puts("done.") diff --git a/bazel/elixir/elixir_toolchain.bzl b/bazel/elixir/elixir_toolchain.bzl deleted file mode 100644 index e96e56c5f78b..000000000000 --- a/bazel/elixir/elixir_toolchain.bzl +++ /dev/null @@ -1,64 +0,0 @@ -load( - "@rules_erlang//private:erlang_build.bzl", - "OtpInfo", -) -load( - ":elixir_build.bzl", - "ElixirInfo", -) - -def _impl(ctx): - toolchain_info = platform_common.ToolchainInfo( - otpinfo = ctx.attr.elixir[OtpInfo], - elixirinfo = ctx.attr.elixir[ElixirInfo], - ) - return [toolchain_info] - -elixir_toolchain = rule( - implementation = _impl, - attrs = { - "elixir": attr.label( - mandatory = True, - providers = [OtpInfo, ElixirInfo], - ), - }, - provides = [platform_common.ToolchainInfo], -) - -def _build_info(ctx): - return ctx.toolchains[":toolchain_type"].otpinfo - -def erlang_dirs(ctx): - info = _build_info(ctx) - if info.release_dir_tar != None: - runfiles = ctx.runfiles([ - info.release_dir_tar, - info.version_file, - ]) - else: - runfiles = ctx.runfiles([ - info.version_file, - ]) - return (info.erlang_home, info.release_dir_tar, runfiles) - -def elixir_dirs(ctx, short_path = False): - info = ctx.toolchains[":toolchain_type"].elixirinfo - if info.elixir_home != None: - return (info.elixir_home, ctx.runfiles([info.version_file])) - else: - p = info.release_dir.short_path if short_path else info.release_dir.path - return (p, ctx.runfiles([info.release_dir, info.version_file])) - -def maybe_install_erlang(ctx, short_path = False): - info = _build_info(ctx) - release_dir_tar = info.release_dir_tar - if release_dir_tar == None: - return "" - else: - return """\ -tar --extract \\ - --directory / \\ - --file {release_tar}""".format( - release_tar = release_dir_tar.short_path if short_path else release_dir_tar.path, - erlang_home = info.erlang_home, - ) diff --git a/bazel/elixir/iex_eval.bzl b/bazel/elixir/iex_eval.bzl deleted file mode 100644 index 956fd751194c..000000000000 --- a/bazel/elixir/iex_eval.bzl +++ /dev/null @@ -1,70 +0,0 @@ -load( - ":elixir_toolchain.bzl", - "elixir_dirs", - "erlang_dirs", - "maybe_install_erlang", -) - -def _impl(ctx): - outs = [ - ctx.actions.declare_file(f) - for f in ctx.attr.outs - ] - - (erlang_home, _, erlang_runfiles) = erlang_dirs(ctx) - (elixir_home, elixir_runfiles) = elixir_dirs(ctx) - - script = """set -euo pipefail - -{maybe_install_erlang} - -if [[ "{elixir_home}" == /* ]]; then - ABS_ELIXIR_HOME="{elixir_home}" -else - ABS_ELIXIR_HOME=$PWD/{elixir_home} -fi - -export PATH="$ABS_ELIXIR_HOME"/bin:"{erlang_home}"/bin:${{PATH}} - -export SRCS="{srcs}" -export OUTS="{outs}" - -${{ABS_ELIXIR_HOME}}/bin/iex --eval "$1" -""".format( - maybe_install_erlang = maybe_install_erlang(ctx), - erlang_home = erlang_home, - elixir_home = elixir_home, - srcs = ctx.configuration.host_path_separator.join([src.path for src in ctx.files.srcs]), - outs = 
ctx.configuration.host_path_separator.join([out.path for out in outs]), - ) - - inputs = depset( - direct = ctx.files.srcs, - transitive = [ - erlang_runfiles.files, - elixir_runfiles.files, - ], - ) - - ctx.actions.run_shell( - inputs = inputs, - outputs = outs, - command = script, - arguments = [ctx.attr.expression], - ) - - return [ - DefaultInfo(files = depset(outs)), - ] - -iex_eval = rule( - implementation = _impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - "outs": attr.string_list(), - "expression": attr.string( - mandatory = True, - ), - }, - toolchains = [":toolchain_type"], -) diff --git a/bazel/elixir/mix_archive_build.bzl b/bazel/elixir/mix_archive_build.bzl index adc3b15de608..621a43748fa8 100644 --- a/bazel/elixir/mix_archive_build.bzl +++ b/bazel/elixir/mix_archive_build.bzl @@ -1,14 +1,24 @@ load("@bazel_skylib//lib:shell.bzl", "shell") load( - "@rules_erlang//private:util.bzl", - "additional_file_dest_relative_path", -) -load( - "//bazel/elixir:elixir_toolchain.bzl", + "@rules_elixir//private:elixir_toolchain.bzl", "elixir_dirs", "erlang_dirs", "maybe_install_erlang", ) +load( + "@rules_erlang//:erlang_app_info.bzl", + "ErlangAppInfo", + "flat_deps", +) +load( + "@rules_erlang//:util.bzl", + "path_join", +) +load( + "@rules_erlang//private:util.bzl", + "additional_file_dest_relative_path", + "erl_libs_contents", +) def _impl(ctx): (erlang_home, _, erlang_runfiles) = erlang_dirs(ctx) @@ -17,6 +27,27 @@ def _impl(ctx): out = ctx.actions.declare_file(ctx.attr.out.name) mix_invocation_dir = ctx.actions.declare_directory("{}_mix".format(ctx.label.name)) + erl_libs_dir = ctx.label.name + "_deps" + + erl_libs_files = erl_libs_contents( + ctx, + target_info = None, + headers = True, + dir = erl_libs_dir, + deps = flat_deps(ctx.attr.deps), + ez_deps = ctx.files.ez_deps, + expand_ezs = True, + ) + + erl_libs_path = "" + if len(erl_libs_files) > 0: + erl_libs_path = path_join( + ctx.bin_dir.path, + ctx.label.workspace_root, + ctx.label.package, + erl_libs_dir, + ) + copy_srcs_commands = [] for src in ctx.attr.srcs: for src_file in src[DefaultInfo].files.to_list(): @@ -36,6 +67,10 @@ def _impl(ctx): {maybe_install_erlang} +if [ -n "{erl_libs_path}" ]; then + export ERL_LIBS=$PWD/{erl_libs_path} +fi + if [[ "{elixir_home}" == /* ]]; then ABS_ELIXIR_HOME="{elixir_home}" else @@ -61,12 +96,15 @@ export ERL_COMPILER_OPTIONS=deterministic for archive in {archives}; do "${{ABS_ELIXIR_HOME}}"/bin/mix archive.install --force $ORIGINAL_DIR/$archive done -if [[ -n "{ez_deps}" ]]; then +if [[ -n "{erl_libs_path}" ]]; then mkdir -p _build/${{MIX_ENV}}/lib - for ez_dep in {ez_deps}; do - unzip -q $ORIGINAL_DIR/$ez_dep -d _build/${{MIX_ENV}}/lib + for dep in "$ERL_LIBS"/*; do + ln -s $dep _build/${{MIX_ENV}}/lib done fi + +{setup} + "${{ABS_ELIXIR_HOME}}"/bin/mix archive.build \\ --no-deps-check \\ -o "${{ABS_OUT_PATH}}" @@ -76,12 +114,13 @@ fi find . -type l -delete """.format( maybe_install_erlang = maybe_install_erlang(ctx), + erl_libs_path = erl_libs_path, erlang_home = erlang_home, elixir_home = elixir_home, mix_invocation_dir = mix_invocation_dir.path, copy_srcs_commands = "\n".join(copy_srcs_commands), archives = " ".join([shell.quote(a.path) for a in ctx.files.archives]), - ez_deps = " ".join([shell.quote(a.path) for a in ctx.files.ez_deps]), + setup = ctx.attr.setup, out = out.path, ) @@ -91,7 +130,7 @@ find . 
-type l -delete erlang_runfiles.files, elixir_runfiles.files, depset(ctx.files.archives), - depset(ctx.files.ez_deps), + depset(erl_libs_files), ], ) @@ -121,12 +160,16 @@ mix_archive_build = rule( "archives": attr.label_list( allow_files = [".ez"], ), + "setup": attr.string(), "ez_deps": attr.label_list( allow_files = [".ez"], ), + "deps": attr.label_list( + providers = [ErlangAppInfo], + ), "out": attr.output(), }, toolchains = [ - ":toolchain_type", + "@rules_elixir//:toolchain_type", ], ) diff --git a/bazel/elixir/mix_archive_extract.bzl b/bazel/elixir/mix_archive_extract.bzl new file mode 100644 index 000000000000..8683da3c6e46 --- /dev/null +++ b/bazel/elixir/mix_archive_extract.bzl @@ -0,0 +1,67 @@ +load( + "@rules_erlang//:erlang_app_info.bzl", + "ErlangAppInfo", + "flat_deps", +) +load( + "@rules_erlang//:util.bzl", + "path_join", +) + +def _impl(ctx): + ebin = ctx.actions.declare_directory(path_join(ctx.attr.app_name, "ebin")) + + script = """set -euo pipefail + +DEST="$(mktemp -d)" +unzip -q -d "$DEST" {archive} +cp "$DEST"/{app_name}/ebin/* {ebin} +""".format( + archive = ctx.file.archive.path, + app_name = ctx.attr.app_name, + ebin = ebin.path, +) + + ctx.actions.run_shell( + inputs = ctx.files.archive, + outputs = [ebin], + command = script, + mnemonic = "MixArchiveExtract", + ) + + deps = flat_deps(ctx.attr.deps) + + runfiles = ctx.runfiles([ebin]) + for dep in ctx.attr.deps: + runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles) + + return [ + DefaultInfo( + files = depset([ebin]), + runfiles = runfiles, + ), + ErlangAppInfo( + app_name = ctx.attr.app_name, + extra_apps = ctx.attr.extra_apps, + include = [], + beam = [ebin], + priv = [], + license_files = [], + srcs = ctx.files.srcs, + deps = deps, + ) + ] + +mix_archive_extract = rule( + implementation = _impl, + attrs = { + "app_name": attr.string(mandatory = True), + "extra_apps": attr.string_list(), + "deps": attr.label_list(providers = [ErlangAppInfo]), + "archive": attr.label( + allow_single_file = [".ez"], + ), + "srcs": attr.label_list(), + }, + provides = [ErlangAppInfo], +) diff --git a/bazel/platforms/BUILD.bazel b/bazel/platforms/BUILD.bazel deleted file mode 100644 index d76ca5cc7c55..000000000000 --- a/bazel/platforms/BUILD.bazel +++ /dev/null @@ -1,75 +0,0 @@ -package( - default_visibility = ["//visibility:public"], -) - -platform( - name = "erlang_internal_platform", - constraint_values = [ - "@erlang_config//:erlang_internal", - "@elixir_config//:elixir_internal", - ], - parents = ["@rbe//config:platform"], -) - -platform( - name = "erlang_linux_24_platform", - constraint_values = [ - "@erlang_config//:erlang_24", - "@elixir_config//:elixir_1_13", - ], - parents = ["@rbe//config:platform"], -) - -platform( - name = "erlang_linux_25_0_platform", - constraint_values = [ - "@erlang_config//:erlang_25_0", - "@elixir_config//:elixir_1_14", - ], - parents = ["@rbe//config:platform"], -) - -platform( - name = "erlang_linux_25_1_platform", - constraint_values = [ - "@erlang_config//:erlang_25_1", - "@elixir_config//:elixir_1_14", - ], - parents = ["@rbe//config:platform"], -) - -platform( - name = "erlang_linux_25_2_platform", - constraint_values = [ - "@erlang_config//:erlang_25_2", - "@elixir_config//:elixir_1_14", - ], - parents = ["@rbe//config:platform"], -) - -platform( - name = "erlang_linux_25_3_platform", - constraint_values = [ - "@erlang_config//:erlang_25_3", - "@elixir_config//:elixir_1_14", - ], - parents = ["@rbe//config:platform"], -) - -platform( - name = "erlang_linux_26_platform", - 
constraint_values = [ - "@erlang_config//:erlang_26", - "@elixir_config//:elixir_1_15", - ], - parents = ["@rbe//config:platform"], -) - -platform( - name = "erlang_linux_git_master_platform", - constraint_values = [ - "@erlang_config//:erlang_27_unknown", - "@elixir_config//:elixir_1_15", - ], - parents = ["@rbe//config:platform"], -) diff --git a/bazel/repositories/BUILD.bazel b/bazel/repositories/BUILD.bazel deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/bazel/repositories/BUILD_external.tpl b/bazel/repositories/BUILD_external.tpl deleted file mode 100644 index d8649ce15b47..000000000000 --- a/bazel/repositories/BUILD_external.tpl +++ /dev/null @@ -1,34 +0,0 @@ -# This file is generated by rabbitmq-server via the elixir_config macro - -load( - "%{RABBITMQ_SERVER_WORKSPACE}//bazel/elixir:elixir_build.bzl", - "elixir_external", -) -load( - "%{RABBITMQ_SERVER_WORKSPACE}//bazel/elixir:elixir_toolchain.bzl", - "elixir_toolchain", -) - -elixir_external( - name = "elixir_external", - elixir_home = "%{ELIXIR_HOME}", -) - -elixir_toolchain( - name = "elixir", - elixir = ":elixir_external", - visibility = ["//visibility:public"], -) - -toolchain( - name = "toolchain", - exec_compatible_with = [ - "//:elixir_external", - ], - target_compatible_with = [ - "//:elixir_%{ELIXIR_VERSION_ID}", - ], - toolchain = ":elixir", - toolchain_type = "%{RABBITMQ_SERVER_WORKSPACE}//bazel/elixir:toolchain_type", - visibility = ["//visibility:public"], -) diff --git a/bazel/repositories/BUILD_internal.tpl b/bazel/repositories/BUILD_internal.tpl deleted file mode 100644 index 6592e8e7a704..000000000000 --- a/bazel/repositories/BUILD_internal.tpl +++ /dev/null @@ -1,36 +0,0 @@ -# This file is generated by rabbitmq-server via the elixir_config macro - -load( - "%{RABBITMQ_SERVER_WORKSPACE}//bazel/elixir:elixir_build.bzl", - "elixir_build", -) -load( - "%{RABBITMQ_SERVER_WORKSPACE}//bazel/elixir:elixir_toolchain.bzl", - "elixir_toolchain", -) - -elixir_build( - name = "elixir_build", - url = "%{URL}", - strip_prefix = "%{STRIP_PREFIX}", - sha256 = "%{SHA_256}", -) - -elixir_toolchain( - name = "elixir", - elixir = ":elixir_build", - visibility = ["//visibility:public"], -) - -toolchain( - name = "toolchain", - exec_compatible_with = [ - "//:elixir_internal", - ], - target_compatible_with = [ - "//:elixir_%{ELIXIR_VERSION_ID}", - ], - toolchain = ":elixir", - toolchain_type = "%{RABBITMQ_SERVER_WORKSPACE}//bazel/elixir:toolchain_type", - visibility = ["//visibility:public"], -) diff --git a/bazel/repositories/defaults.bzl.tpl b/bazel/repositories/defaults.bzl.tpl deleted file mode 100644 index f4822fd9d078..000000000000 --- a/bazel/repositories/defaults.bzl.tpl +++ /dev/null @@ -1,6 +0,0 @@ -# This file is generated by rabbitmq-server via the elixir_config macro - -def register_defaults(): - native.register_toolchains( -%{TOOLCHAINS} - ) diff --git a/bazel/repositories/elixir_config.bzl b/bazel/repositories/elixir_config.bzl deleted file mode 100644 index 1d59cbf86307..000000000000 --- a/bazel/repositories/elixir_config.bzl +++ /dev/null @@ -1,233 +0,0 @@ -load( - "@rules_erlang//:util.bzl", - "msys2_path", - "path_join", -) - -ELIXIR_HOME_ENV_VAR = "ELIXIR_HOME" - -_DEFAULT_EXTERNAL_ELIXIR_PACKAGE_NAME = "external" -_ELIXIR_VERSION_UNKNOWN = "UNKNOWN" - -INSTALLATION_TYPE_EXTERNAL = "external" -INSTALLATION_TYPE_INTERNAL = "internal" - -def _version_identifier(version_string): - parts = version_string.split(".", 2) - if len(parts) > 1: - return "{}_{}".format(parts[0], parts[1]) - else: - return 
parts[0] - -def _impl(repository_ctx): - rabbitmq_server_workspace = repository_ctx.attr.rabbitmq_server_workspace - - elixir_installations = _default_elixir_dict(repository_ctx) - for name in repository_ctx.attr.types.keys(): - if name == _DEFAULT_EXTERNAL_ELIXIR_PACKAGE_NAME: - fail("'{}' is reserved as an elixir name".format( - _DEFAULT_EXTERNAL_ELIXIR_PACKAGE_NAME, - )) - version = repository_ctx.attr.versions[name] - identifier = _version_identifier(version) - elixir_installations[name] = struct( - type = repository_ctx.attr.types[name], - version = version, - identifier = identifier, - url = repository_ctx.attr.urls.get(name, None), - strip_prefix = repository_ctx.attr.strip_prefixs.get(name, None), - sha256 = repository_ctx.attr.sha256s.get(name, None), - elixir_home = repository_ctx.attr.elixir_homes.get(name, None), - ) - - for (name, props) in elixir_installations.items(): - if props.type == INSTALLATION_TYPE_EXTERNAL: - repository_ctx.template( - "{}/BUILD.bazel".format(name), - Label("//bazel/repositories:BUILD_external.tpl"), - { - "%{ELIXIR_HOME}": props.elixir_home, - "%{ELIXIR_VERSION_ID}": props.identifier, - "%{RABBITMQ_SERVER_WORKSPACE}": rabbitmq_server_workspace, - }, - False, - ) - else: - repository_ctx.template( - "{}/BUILD.bazel".format(name), - Label("//bazel/repositories:BUILD_internal.tpl"), - { - "%{URL}": props.url, - "%{STRIP_PREFIX}": props.strip_prefix or "", - "%{SHA_256}": props.sha256 or "", - "%{ELIXIR_VERSION_ID}": props.identifier, - "%{RABBITMQ_SERVER_WORKSPACE}": rabbitmq_server_workspace, - }, - False, - ) - - if len(elixir_installations) == 0: - fail("No elixir installations configured") - - repository_ctx.file( - "BUILD.bazel", - _build_file_content(elixir_installations), - False, - ) - - toolchains = [ - "@{}//{}:toolchain".format(repository_ctx.name, name) - for name in elixir_installations.keys() - ] - - repository_ctx.template( - "defaults.bzl", - Label("//bazel/repositories:defaults.bzl.tpl"), - { - "%{TOOLCHAINS}": "\n".join([ - ' "%s",' % t - for t in toolchains - ]), - }, - False, - ) - -elixir_config = repository_rule( - implementation = _impl, - attrs = { - "rabbitmq_server_workspace": attr.string(), - "types": attr.string_dict(), - "versions": attr.string_dict(), - "urls": attr.string_dict(), - "strip_prefixs": attr.string_dict(), - "sha256s": attr.string_dict(), - "elixir_homes": attr.string_dict(), - }, - environ = [ - ELIXIR_HOME_ENV_VAR, - "PATH", - ], - local = True, -) - -def _elixir_home_from_elixir_path(repository_ctx, elixir_path): - ehr = repository_ctx.execute( - [ - elixir_path, - "-e", - "IO.puts Path.dirname(Path.dirname(Path.dirname(Path.dirname(Path.expand(:code.which(System))))))", - ], - ) - if ehr.return_code == 0: - elixir_home = ehr.stdout.strip("\n") - else: - elixir_home = str(elixir_path.dirname.dirname) - return elixir_home - -def _is_windows(repository_ctx): - return repository_ctx.os.name.lower().find("windows") != -1 - -def _default_elixir_dict(repository_ctx): - if _is_windows(repository_ctx): - if ELIXIR_HOME_ENV_VAR in repository_ctx.os.environ: - elixir_home = repository_ctx.os.environ[ELIXIR_HOME_ENV_VAR] - elixir_path = elixir_home + "\\bin\\elixir" - else: - elixir_path = repository_ctx.which("elixir") - if elixir_path == None: - elixir_path = repository_ctx.path("C:/Program Files (x86)/Elixir/bin/elixir") - elixir_home = _elixir_home_from_elixir_path(repository_ctx, elixir_path) - elixir_home = msys2_path(elixir_home) - elif ELIXIR_HOME_ENV_VAR in repository_ctx.os.environ: - elixir_home = 
repository_ctx.os.environ[ELIXIR_HOME_ENV_VAR] - elixir_path = path_join(elixir_home, "bin", "elixir") - else: - elixir_path = repository_ctx.which("elixir") - if elixir_path == None: - elixir_path = repository_ctx.path("/usr/local/bin/elixir") - elixir_home = _elixir_home_from_elixir_path(repository_ctx, elixir_path) - - version = repository_ctx.execute( - [ - path_join(elixir_home, "bin", "elixir"), - "-e", - "IO.puts System.version()", - ], - timeout = 10, - ) - if version.return_code == 0: - version = version.stdout.strip("\n") - identifier = _version_identifier(version) - return { - _DEFAULT_EXTERNAL_ELIXIR_PACKAGE_NAME: struct( - type = INSTALLATION_TYPE_EXTERNAL, - version = version, - identifier = identifier, - elixir_home = elixir_home, - ), - } - else: - return { - _DEFAULT_EXTERNAL_ELIXIR_PACKAGE_NAME: struct( - type = INSTALLATION_TYPE_EXTERNAL, - version = _ELIXIR_VERSION_UNKNOWN, - identifier = _ELIXIR_VERSION_UNKNOWN.lower(), - elixir_home = elixir_home, - ), - } - -def _build_file_content(elixir_installations): - build_file_content = """\ -package( - default_visibility = ["//visibility:public"], -) - -constraint_setting( - name = "elixir_internal_external", - default_constraint_value = ":elixir_external", -) - -constraint_value( - name = "elixir_external", - constraint_setting = ":elixir_internal_external", -) - -constraint_value( - name = "elixir_internal", - constraint_setting = ":elixir_internal_external", -) - -""" - - default_installation = elixir_installations[_DEFAULT_EXTERNAL_ELIXIR_PACKAGE_NAME] - - build_file_content += """\ -constraint_setting( - name = "elixir_version", - default_constraint_value = ":elixir_{}", -) - -""".format(default_installation.identifier) - - unique_identifiers = { - props.identifier: name - for (name, props) in elixir_installations.items() - }.keys() - - for identifier in unique_identifiers: - build_file_content += """\ -constraint_value( - name = "elixir_{identifier}", - constraint_setting = ":elixir_version", -) - -platform( - name = "elixir_{identifier}_platform", - constraint_values = [ - ":elixir_{identifier}", - ], -) - -""".format(identifier = identifier) - - return build_file_content diff --git a/deps/amqp10_client/.gitignore b/deps/amqp10_client/.gitignore index aca89b85199f..ac3616494721 100644 --- a/deps/amqp10_client/.gitignore +++ b/deps/amqp10_client/.gitignore @@ -1,25 +1,3 @@ -.sw? -.*.sw? -*.beam -*.plt -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr -elvis - -amqp10_client.d -/*.coverdata - # Generated source files. 
/include/rabbit_amqp1_0_framing.hrl /src/rabbit_amqp1_0_framing0.erl diff --git a/deps/amqp10_client/BUILD.bazel b/deps/amqp10_client/BUILD.bazel index 31e961fff069..df8b879adae1 100644 --- a/deps/amqp10_client/BUILD.bazel +++ b/deps/amqp10_client/BUILD.bazel @@ -20,7 +20,7 @@ load( APP_NAME = "amqp10_client" -APP_DESCRIPTION = "AMQP 1.0 client from the RabbitMQ Project" +APP_DESCRIPTION = "AMQP 1.0 client" APP_MODULE = "amqp10_client_app" @@ -74,7 +74,10 @@ rabbitmq_app( ], license_files = [":license_files"], priv = [":priv"], - deps = ["//deps/amqp10_common:erlang_app"], + deps = [ + "//deps/amqp10_common:erlang_app", + "@credentials_obfuscation//:erlang_app", + ], ) xref( @@ -97,7 +100,6 @@ dialyze( ) broker_for_integration_suites( - extra_plugins = ["//deps/rabbitmq_amqp1_0:erlang_app"], ) TEST_DEPS = [ diff --git a/deps/amqp10_client/Makefile b/deps/amqp10_client/Makefile index 694e6e076b78..36c117c78ea1 100644 --- a/deps/amqp10_client/Makefile +++ b/deps/amqp10_client/Makefile @@ -1,5 +1,5 @@ PROJECT = amqp10_client -PROJECT_DESCRIPTION = AMQP 1.0 client from the RabbitMQ Project +PROJECT_DESCRIPTION = AMQP 1.0 client PROJECT_MOD = amqp10_client_app define PROJECT_APP_EXTRA_KEYS @@ -29,13 +29,12 @@ endef PACKAGES_DIR ?= $(abspath PACKAGES) BUILD_DEPS = rabbit_common elvis_mk -DEPS = amqp10_common -TEST_DEPS = rabbit rabbitmq_amqp1_0 rabbitmq_ct_helpers +DEPS = amqp10_common credentials_obfuscation +TEST_DEPS = rabbit rabbitmq_ct_helpers LOCAL_DEPS = ssl inets crypto public_key DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-macros.mk \ - rabbit_common/mk/rabbitmq-build.mk \ +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ rabbit_common/mk/rabbitmq-hexpm.mk \ rabbit_common/mk/rabbitmq-dist.mk \ rabbit_common/mk/rabbitmq-run.mk \ @@ -51,28 +50,11 @@ include erlang.mk HEX_TARBALL_FILES += rabbitmq-components.mk \ git-revisions.txt -# -------------------------------------------------------------------- -# Compiler flags. -# -------------------------------------------------------------------- - -# gen_fsm is deprecated starting from Erlang 20, but we want to support -# Erlang 19 as well. - -ERTS_VER := $(shell erl -version 2>&1 | sed -E 's/.* version //') -ERLANG_20_ERTS_VER := 9.0 - -ifeq ($(call compare_version,$(ERTS_VER),$(ERLANG_20_ERTS_VER),>=),true) -ERLC_OPTS += -Dnowarn_deprecated_gen_fsm -endif - -# Dialyze the tests. -DIALYZER_OPTS += --src -r test - # -------------------------------------------------------------------- # ActiveMQ for the testsuite. # -------------------------------------------------------------------- -ACTIVEMQ_VERSION := 5.14.4 +ACTIVEMQ_VERSION := 5.18.3 ACTIVEMQ_URL := 'https://archive.apache.org/dist/activemq/$(ACTIVEMQ_VERSION)/apache-activemq-$(ACTIVEMQ_VERSION)-bin.tar.gz' ACTIVEMQ := $(abspath test/system_SUITE_data/apache-activemq-$(ACTIVEMQ_VERSION)/bin/activemq) diff --git a/deps/amqp10_client/README.md b/deps/amqp10_client/README.md index 9ad1e803483d..8e7337169eae 100644 --- a/deps/amqp10_client/README.md +++ b/deps/amqp10_client/README.md @@ -2,16 +2,16 @@ This is an [Erlang client for the AMQP 1.0](https://www.amqp.org/resources/specifications) protocol. -It's primary purpose is to be used in RabbitMQ related projects but it is a -generic client that was tested with at least 4 implementations of AMQP 1.0. +Its primary purpose is to be used in RabbitMQ related projects but it is a +generic client that was tested with at least 3 implementations of AMQP 1.0. 
If you are looking for an Erlang client for [AMQP 0-9-1](https://www.rabbitmq.com/tutorials/amqp-concepts.html) — a completely different -protocol despite the name — [consider this one](https://github.com/rabbitmq/rabbitmq-erlang-client). +protocol despite the name — [consider this one](../amqp_client). ## Project Maturity and Status This client is used in the cross-protocol version of the RabbitMQ Shovel plugin. It is not 100% -feature complete but moderately mature and was tested against at least three AMQP 1.0 servers: +feature complete but moderately mature and was tested against at least 3 AMQP 1.0 servers: RabbitMQ, Azure ServiceBus, ActiveMQ. This client library is not officially supported by VMware at this time. @@ -80,8 +80,8 @@ after 2000 -> exit(credited_timeout) end. -%% create a new message using a delivery-tag, body and indicate -%% it's settlement status (true meaning no disposition confirmation +%% Create a new message using a delivery-tag, body and indicate +%% its settlement status (true meaning no disposition confirmation %% will be sent by the receiver). OutMsg = amqp10_msg:new(<<"my-tag">>, <<"my-body">>, true), ok = amqp10_client:send_msg(Sender, OutMsg), @@ -112,7 +112,7 @@ after the `Open` frame has been successfully written to the socket rather than waiting until the remote end returns with their `Open` frame. The client will notify the caller of various internal/async events using `amqp10_event` messages. In the example above when the remote replies with their `Open` frame -a message is sent of the following forma: +a message is sent of the following form: ``` {amqp10_event, {connection, ConnectionPid, opened}} diff --git a/deps/amqp10_client/activemq.bzl b/deps/amqp10_client/activemq.bzl index 7ad2f5cb4d88..7cffe4dea891 100644 --- a/deps/amqp10_client/activemq.bzl +++ b/deps/amqp10_client/activemq.bzl @@ -1,8 +1,8 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -ACTIVEMQ_VERSION = "5.14.4" +ACTIVEMQ_VERSION = "5.18.3" ACTIVEMQ_URL = "https://archive.apache.org/dist/activemq/{version}/apache-activemq-{version}-bin.tar.gz".format(version = ACTIVEMQ_VERSION) -SHA_256 = "16ec52bece0a4759f9d70f4132d7d8da67d662e4af029081c492e65510a695c1" +SHA_256 = "943381aa6d340707de6c42eadbf7b41b7fdf93df604156d972d50c4da783544f" def activemq_archive(): http_archive( diff --git a/deps/amqp10_client/app.bzl b/deps/amqp10_client/app.bzl index 2278cb326178..8fcdad73cf9d 100644 --- a/deps/amqp10_client/app.bzl +++ b/deps/amqp10_client/app.bzl @@ -13,7 +13,6 @@ def all_beam_files(name = "all_beam_files"): "src/amqp10_client_app.erl", "src/amqp10_client_connection.erl", "src/amqp10_client_connection_sup.erl", - "src/amqp10_client_connections_sup.erl", "src/amqp10_client_frame_reader.erl", "src/amqp10_client_session.erl", "src/amqp10_client_sessions_sup.erl", @@ -42,7 +41,6 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/amqp10_client_app.erl", "src/amqp10_client_connection.erl", "src/amqp10_client_connection_sup.erl", - "src/amqp10_client_connections_sup.erl", "src/amqp10_client_frame_reader.erl", "src/amqp10_client_session.erl", "src/amqp10_client_sessions_sup.erl", @@ -77,7 +75,6 @@ def all_srcs(name = "all_srcs"): "src/amqp10_client_app.erl", "src/amqp10_client_connection.erl", "src/amqp10_client_connection_sup.erl", - "src/amqp10_client_connections_sup.erl", "src/amqp10_client_frame_reader.erl", "src/amqp10_client_session.erl", "src/amqp10_client_sessions_sup.erl", diff --git a/deps/amqp10_client/src/amqp10_client.erl 
b/deps/amqp10_client/src/amqp10_client.erl index d15c8df01b82..c5ebc7ba123f 100644 --- a/deps/amqp10_client/src/amqp10_client.erl +++ b/deps/amqp10_client/src/amqp10_client.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(amqp10_client). @@ -35,15 +35,13 @@ settle_msg/3, flow_link_credit/3, flow_link_credit/4, - echo/1, + stop_receiver_link/1, link_handle/1, get_msg/1, get_msg/2, parse_uri/1 ]). --define(DEFAULT_TIMEOUT, 5000). - -type snd_settle_mode() :: amqp10_client_session:snd_settle_mode(). -type rcv_settle_mode() :: amqp10_client_session:rcv_settle_mode(). @@ -55,7 +53,7 @@ -type attach_role() :: amqp10_client_session:attach_role(). -type attach_args() :: amqp10_client_session:attach_args(). -type filter() :: amqp10_client_session:filter(). --type properties() :: amqp10_client_session:properties(). +-type properties() :: amqp10_client_types:properties(). -type connection_config() :: amqp10_client_connection:connection_config(). @@ -87,6 +85,7 @@ -spec open_connection(inet:socket_address() | inet:hostname(), inet:port_number()) -> supervisor:startchild_ret(). open_connection(Addr, Port) -> + _ = ensure_started(), open_connection(#{address => Addr, port => Port, notify => self(), sasl => anon}). @@ -97,16 +96,20 @@ open_connection(Addr, Port) -> -spec open_connection(connection_config()) -> supervisor:startchild_ret(). open_connection(ConnectionConfig0) -> + _ = ensure_started(), + Notify = maps:get(notify, ConnectionConfig0, self()), NotifyWhenOpened = maps:get(notify_when_opened, ConnectionConfig0, self()), NotifyWhenClosed = maps:get(notify_when_closed, ConnectionConfig0, self()), - amqp10_client_connection:open(ConnectionConfig0#{ + ConnectionConfig1 = ConnectionConfig0#{ notify => Notify, notify_when_opened => NotifyWhenOpened, notify_when_closed => NotifyWhenClosed - }). + }, + ConnectionConfig = merge_default_tls_options(ConnectionConfig1), + amqp10_client_connection:open(ConnectionConfig). -%% @doc Opens a connection using a connection_config map +%% @doc Closes a connection. %% This is asynchronous and will notify completion to the caller using %% an amqp10_event of the following format: %% {amqp10_event, {connection, ConnectionPid, {closed, Why}}} @@ -128,7 +131,7 @@ begin_session(Connection) when is_pid(Connection) -> -spec begin_session_sync(pid()) -> supervisor:startchild_ret() | session_timeout. begin_session_sync(Connection) when is_pid(Connection) -> - begin_session_sync(Connection, ?DEFAULT_TIMEOUT). + begin_session_sync(Connection, ?TIMEOUT). %% @doc Synchronously begins an amqp10 session using 'Connection'. %% This is a convenience function that awaits the 'begun' event @@ -185,7 +188,7 @@ attach_sender_link_sync(Session, Name, Target, SettleMode, Durability) -> {ok, Ref}; {amqp10_event, {link, Ref, {detached, Err}}} -> {error, Err} - after ?DEFAULT_TIMEOUT -> link_timeout + after ?TIMEOUT -> link_timeout end. %% @doc Attaches a sender link to a target. 
@@ -265,9 +268,8 @@ attach_receiver_link(Session, Name, Source, SettleMode, Durability, Filter) -> %% This is asynchronous and will notify completion of the attach request to the %% caller using an amqp10_event of the following format: %% {amqp10_event, {link, LinkRef, attached | {detached, Why}}} --spec attach_receiver_link(pid(), binary(), binary(), - snd_settle_mode(), terminus_durability(), filter(), - properties()) -> +-spec attach_receiver_link(pid(), binary(), binary(), snd_settle_mode(), + terminus_durability(), filter(), properties()) -> {ok, link_ref()}. attach_receiver_link(Session, Name, Source, SettleMode, Durability, Filter, Properties) when is_pid(Session) andalso @@ -296,52 +298,63 @@ attach_link(Session, AttachArgs) -> %% This is asynchronous and will notify completion of the attach request to the %% caller using an amqp10_event of the following format: %% {amqp10_event, {link, LinkRef, {detached, Why}}} --spec detach_link(link_ref()) -> _. +-spec detach_link(link_ref()) -> ok | {error, term()}. detach_link(#link_ref{link_handle = Handle, session = Session}) -> amqp10_client_session:detach(Session, Handle). -%% @doc Grant credit to a sender. -%% The amqp10_client will automatically grant more credit to the sender when -%% the remaining link credit falls below the value of RenewWhenBelow. -%% If RenewWhenBelow is 'never' the client will never grant new credit. Instead -%% the caller will be notified when the link_credit reaches 0 with an -%% amqp10_event of the following format: +%% @doc Grant Credit to a sender. +%% +%% In addition, if RenewWhenBelow is an integer, the amqp10_client will automatically grant more +%% Credit to the sender when the sum of the remaining link credit and the number of unsettled +%% messages falls below the value of RenewWhenBelow. +%% `Credit + RenewWhenBelow - 1` is the maximum number of in-flight unsettled messages. +%% +%% If RenewWhenBelow is `never` the amqp10_client will never grant more credit. Instead the caller +%% will be notified when the link_credit reaches 0 with an amqp10_event of the following format: %% {amqp10_event, {link, LinkRef, credit_exhausted}} -spec flow_link_credit(link_ref(), Credit :: non_neg_integer(), - RenewWhenBelow :: never | non_neg_integer()) -> ok. + RenewWhenBelow :: never | pos_integer()) -> ok. flow_link_credit(Ref, Credit, RenewWhenBelow) -> flow_link_credit(Ref, Credit, RenewWhenBelow, false). -spec flow_link_credit(link_ref(), Credit :: non_neg_integer(), - RenewWhenBelow :: never | non_neg_integer(), + RenewWhenBelow :: never | pos_integer(), Drain :: boolean()) -> ok. flow_link_credit(#link_ref{role = receiver, session = Session, link_handle = Handle}, - Credit, RenewWhenBelow, Drain) -> + Credit, RenewWhenBelow, Drain) + when + %% Drain together with auto renewal doesn't make sense, so disallow it in the API. + ((Drain) andalso RenewWhenBelow =:= never + orelse not(Drain)) + andalso + %% Check that the RenewWhenBelow value make sense. + (RenewWhenBelow =:= never orelse + is_integer(RenewWhenBelow) andalso + RenewWhenBelow > 0 andalso + RenewWhenBelow =< Credit) -> Flow = #'v1_0.flow'{link_credit = {uint, Credit}, drain = Drain}, ok = amqp10_client_session:flow(Session, Handle, Flow, RenewWhenBelow). -%% @doc Request that the sender's flow state is echoed back -%% This may be used to determine when the Link has finally quiesced. -%% see §2.6.10 of the spec -echo(#link_ref{role = receiver, session = Session, - link_handle = Handle}) -> +%% @doc Stop a receiving link. +%% See AMQP 1.0 spec §2.6.10. 
+stop_receiver_link(#link_ref{role = receiver, + session = Session, + link_handle = Handle}) -> Flow = #'v1_0.flow'{link_credit = {uint, 0}, echo = true}, - ok = amqp10_client_session:flow(Session, Handle, Flow, 0). + ok = amqp10_client_session:flow(Session, Handle, Flow, never). %%% messages %% @doc Send a message on a the link referred to be the 'LinkRef'. -%% Returns ok for "async" transfers when messages are sent with settled=true -%% else it returns the delivery state from the disposition -spec send_msg(link_ref(), amqp10_msg:amqp10_msg()) -> - ok | {error, insufficient_credit | link_not_found | half_attached}. + ok | amqp10_client_session:transfer_error(). send_msg(#link_ref{role = sender, session = Session, link_handle = Handle}, Msg0) -> Msg = amqp10_msg:set_handle(Handle, Msg0), - amqp10_client_session:transfer(Session, Msg, ?DEFAULT_TIMEOUT). + amqp10_client_session:transfer(Session, Msg, ?TIMEOUT). %% @doc Accept a message on a the link referred to be the 'LinkRef'. -spec accept_msg(link_ref(), amqp10_msg:amqp10_msg()) -> ok. @@ -352,16 +365,15 @@ accept_msg(LinkRef, Msg) -> %% the chosen delivery state. -spec settle_msg(link_ref(), amqp10_msg:amqp10_msg(), amqp10_client_types:delivery_state()) -> ok. -settle_msg(#link_ref{role = receiver, - session = Session}, Msg, Settlement) -> +settle_msg(LinkRef, Msg, Settlement) -> DeliveryId = amqp10_msg:delivery_id(Msg), - amqp10_client_session:disposition(Session, receiver, DeliveryId, - DeliveryId, true, Settlement). + amqp10_client_session:disposition(LinkRef, DeliveryId, DeliveryId, true, Settlement). + %% @doc Get a single message from a link. %% Flows a single link credit then awaits delivery or timeout. -spec get_msg(link_ref()) -> {ok, amqp10_msg:amqp10_msg()} | {error, timeout}. get_msg(LinkRef) -> - get_msg(LinkRef, ?DEFAULT_TIMEOUT). + get_msg(LinkRef, ?TIMEOUT). %% @doc Get a single message from a link. %% Flows a single link credit then awaits delivery or timeout. @@ -417,8 +429,8 @@ parse_result(Map) -> throw(plain_sasl_missing_userinfo); _ -> case UserInfo of - [] -> none; - undefined -> none; + [] -> anon; + undefined -> anon; U -> parse_usertoken(U) end end, @@ -444,11 +456,6 @@ parse_result(Map) -> Ret0#{tls_opts => {secure_port, TlsOpts}} end. - -parse_usertoken(undefined) -> - none; -parse_usertoken("") -> - none; parse_usertoken(U) -> [User, Pass] = string:tokens(U, ":"), {plain, @@ -497,6 +504,21 @@ try_to_existing_atom(L) when is_list(L) -> throw({non_existent_atom, L}) end. +ensure_started() -> + _ = application:ensure_all_started(credentials_obfuscation). + + +-spec merge_default_tls_options(connection_config()) -> connection_config(). +merge_default_tls_options(#{tls_opts := {secure_port, TlsOpts0}} = Config) -> + GlobalTlsOpts = application:get_env(amqp10_client, ssl_options, []), + TlsOpts = + orddict:to_list( + orddict:merge(fun (_, _A, B) -> B end, + orddict:from_list(GlobalTlsOpts), + orddict:from_list(TlsOpts0))), + Config#{tls_opts => {secure_port, TlsOpts}}; +merge_default_tls_options(Config) -> + Config. -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). 
@@ -505,7 +527,7 @@ parse_uri_test_() -> [?_assertEqual({ok, #{address => "my_host", port => 9876, hostname => <<"my_host">>, - sasl => none}}, parse_uri("amqp://my_host:9876")), + sasl => anon}}, parse_uri("amqp://my_host:9876")), %% port defaults ?_assertMatch({ok, #{port := 5671}}, parse_uri("amqps://my_host")), ?_assertMatch({ok, #{port := 5672}}, parse_uri("amqp://my_host")), diff --git a/deps/amqp10_client/src/amqp10_client.hrl b/deps/amqp10_client/src/amqp10_client.hrl index bf00cffea3c8..99cad7578300 100644 --- a/deps/amqp10_client/src/amqp10_client.hrl +++ b/deps/amqp10_client/src/amqp10_client.hrl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(AMQP_PROTOCOL_HEADER, <<"AMQP", 0, 1, 0, 0>>). -define(SASL_PROTOCOL_HEADER, <<"AMQP", 3, 1, 0, 0>>). --define(MIN_MAX_FRAME_SIZE, 512). --define(MAX_MAX_FRAME_SIZE, 1024 * 1024). -define(FRAME_HEADER_SIZE, 8). -define(TIMEOUT, 5000). @@ -22,4 +20,5 @@ -record(link_ref, {role :: sender | receiver, session :: pid(), + %% locally chosen output handle link_handle :: non_neg_integer()}). diff --git a/deps/amqp10_client/src/amqp10_client_app.erl b/deps/amqp10_client/src/amqp10_client_app.erl index 74d0eb03328d..6edce5d199e5 100644 --- a/deps/amqp10_client/src/amqp10_client_app.erl +++ b/deps/amqp10_client/src/amqp10_client_app.erl @@ -2,37 +2,19 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(amqp10_client_app). -behaviour(application). -%% Application callbacks +%% application callbacks -export([start/2, stop/1]). --type start_type() :: ( - normal | - {takeover, Node :: node()} | - {failover, Node :: node()} - ). --type state() :: term(). - -%%==================================================================== -%% API -%%==================================================================== - --spec start(StartType :: start_type(), StartArgs :: term()) -> - {ok, Pid :: pid()} | {ok, Pid :: pid(), State :: state()} | {error, Reason :: term()}. start(_Type, _Args) -> amqp10_client_sup:start_link(). --spec stop(State :: state()) -> ok. stop(_State) -> ok. - -%%==================================================================== -%% Internal functions -%%==================================================================== diff --git a/deps/amqp10_client/src/amqp10_client_connection.erl b/deps/amqp10_client/src/amqp10_client_connection.erl index b9f609a80b48..80c75f986a66 100644 --- a/deps/amqp10_client/src/amqp10_client_connection.erl +++ b/deps/amqp10_client/src/amqp10_client_connection.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -module(amqp10_client_connection). @@ -11,34 +11,27 @@ -include("amqp10_client.hrl"). -include_lib("amqp10_common/include/amqp10_framing.hrl"). +-include_lib("amqp10_common/include/amqp10_types.hrl"). --ifdef(nowarn_deprecated_gen_fsm). --compile({nowarn_deprecated_function, - [{gen_fsm, reply, 2}, - {gen_fsm, send_all_state_event, 2}, - {gen_fsm, send_event, 2}, - {gen_fsm, start_link, 3}, - {gen_fsm, sync_send_all_state_event, 2}]}). --endif. - -%% Public API. +%% public API -export([open/1, close/2]). -%% Private API. +%% private API -export([start_link/2, socket_ready/2, protocol_header_received/5, begin_session/1, heartbeat/1]). -%% gen_fsm callbacks. +%% gen_statem callbacks -export([init/1, callback_mode/0, terminate/3, code_change/4]). -%% gen_fsm state callbacks. +%% gen_statem state callbacks +%% see figure 2.23 -export([expecting_socket/3, sasl_hdr_sent/3, sasl_hdr_rcvds/3, @@ -48,12 +41,18 @@ opened/3, close_sent/3]). +-export([format_status/1]). + -type amqp10_socket() :: {tcp, gen_tcp:socket()} | {ssl, ssl:sslsocket()}. -type milliseconds() :: non_neg_integer(). -type address() :: inet:socket_address() | inet:hostname(). +-type encrypted_sasl() :: {plaintext, binary()} | {encrypted, binary()}. +-type decrypted_sasl() :: none | anon | external | {plain, User :: binary(), Pwd :: binary()}. +-type sasl() :: encrypted_sasl() | decrypted_sasl(). + -type connection_config() :: #{container_id => binary(), % AMQP container id hostname => binary(), % the dns name of the target host @@ -64,16 +63,16 @@ notify => pid() | none, % the pid to send connection events to notify_when_opened => pid() | none, notify_when_closed => pid() | none, - max_frame_size => non_neg_integer(), % TODO: constrain to large than 512 - outgoing_max_frame_size => non_neg_integer() | undefined, + %% incoming maximum frame size set by our client application + max_frame_size => pos_integer(), % TODO: constrain to large than 512 + %% outgoing maximum frame size set by AMQP peer in OPEN performative + outgoing_max_frame_size => pos_integer() | undefined, idle_time_out => milliseconds(), % set to a negative value to allow a sender to "overshoot" the flow % control by this margin transfer_limit_margin => 0 | neg_integer(), - sasl => none | anon | {plain, User :: binary(), Pwd :: binary()}, - notify => pid(), - notify_when_opened => pid() | none, - notify_when_closed => pid() | none + sasl => sasl(), + properties => amqp10_client_types:properties() }. -record(state, @@ -97,7 +96,8 @@ %% ------------------------------------------------------------------- -spec open(connection_config()) -> supervisor:startchild_ret(). -open(Config) -> +open(Config0) -> + Config = maps:update_with(sasl, fun maybe_encrypt_sasl/1, Config0), %% Start the supervision tree dedicated to that connection. It %% starts at least a connection process (the PID we want to return) %% and a reader process (responsible for opening and reading the @@ -123,6 +123,24 @@ open(Config) -> close(Pid, Reason) -> gen_statem:cast(Pid, {close, Reason}). +-spec maybe_encrypt_sasl(decrypted_sasl()) -> sasl(). +maybe_encrypt_sasl(Sasl) + when Sasl =:= none orelse + Sasl =:= anon orelse + Sasl =:= external -> + Sasl; +maybe_encrypt_sasl(Plain = {plain, _User, _Passwd}) -> + credentials_obfuscation:encrypt(term_to_binary(Plain)). + +-spec maybe_decrypt_sasl(sasl()) -> decrypted_sasl(). 
+maybe_decrypt_sasl(Sasl) + when Sasl =:= none orelse + Sasl =:= anon orelse + Sasl =:= external -> + Sasl; +maybe_decrypt_sasl(Encrypted) -> + binary_to_term(credentials_obfuscation:decrypt(Encrypted)). + %% ------------------------------------------------------------------- %% Private API. %% ------------------------------------------------------------------- @@ -144,13 +162,13 @@ protocol_header_received(Pid, Protocol, Maj, Min, Rev) -> -spec begin_session(pid()) -> supervisor:startchild_ret(). begin_session(Pid) -> - gen_statem:call(Pid, begin_session, {dirty_timeout, ?TIMEOUT}). + gen_statem:call(Pid, begin_session, ?TIMEOUT). heartbeat(Pid) -> gen_statem:cast(Pid, heartbeat). %% ------------------------------------------------------------------- -%% gen_fsm callbacks. +%% gen_statem callbacks. %% ------------------------------------------------------------------- callback_mode() -> [state_functions]. @@ -164,8 +182,9 @@ init([Sup, Config0]) -> expecting_socket(_EvtType, {socket_ready, Socket}, State = #state{config = Cfg}) -> State1 = State#state{socket = Socket}, - case Cfg of - #{sasl := none} -> + Sasl = credentials_obfuscation:decrypt(maps:get(sasl, Cfg)), + case Sasl of + none -> ok = socket_send(Socket, ?AMQP_PROTOCOL_HEADER), {next_state, hdr_sent, State1}; _ -> @@ -190,17 +209,16 @@ sasl_hdr_sent({call, From}, begin_session, {keep_state, State1}. sasl_hdr_rcvds(_EvtType, #'v1_0.sasl_mechanisms'{ - sasl_server_mechanisms = {array, symbol, Mechs}}, + sasl_server_mechanisms = {array, symbol, AvailableMechs}}, State = #state{config = #{sasl := Sasl}}) -> - SaslBin = {symbol, sasl_to_bin(Sasl)}, - case lists:any(fun(S) when S =:= SaslBin -> true; - (_) -> false - end, Mechs) of + DecryptedSasl = maybe_decrypt_sasl(Sasl), + OurMech = {symbol, decrypted_sasl_to_mechanism(DecryptedSasl)}, + case lists:member(OurMech, AvailableMechs) of true -> - ok = send_sasl_init(State, Sasl), + ok = send_sasl_init(State, DecryptedSasl), {next_state, sasl_init_sent, State}; false -> - {stop, {sasl_not_supported, Sasl}, State} + {stop, {sasl_not_supported, DecryptedSasl}, State} end; sasl_hdr_rcvds({call, From}, begin_session, #state{pending_session_reqs = PendingSessionReqs} = State) -> @@ -234,7 +252,7 @@ hdr_sent({call, From}, begin_session, State1 = State#state{pending_session_reqs = [From | PendingSessionReqs]}, {keep_state, State1}. -open_sent(_EvtType, #'v1_0.open'{max_frame_size = MFSz, +open_sent(_EvtType, #'v1_0.open'{max_frame_size = MaybeMaxFrameSize, idle_time_out = Timeout}, #state{pending_session_reqs = PendingSessionReqs, config = Config} = State0) -> @@ -246,8 +264,14 @@ open_sent(_EvtType, #'v1_0.open'{max_frame_size = MFSz, heartbeat_timer = Tmr}; _ -> State0 end, - State1 = State#state{config = - Config#{outgoing_max_frame_size => unpack(MFSz)}}, + MaxFrameSize = case unpack(MaybeMaxFrameSize) of + undefined -> + %% default as per 2.7.1 + ?UINT_MAX; + Bytes when is_integer(Bytes) -> + Bytes + end, + State1 = State#state{config = Config#{outgoing_max_frame_size => MaxFrameSize}}, State2 = lists:foldr( fun(From, S0) -> {Ret, S2} = handle_begin_session(From, S0), @@ -259,7 +283,10 @@ open_sent(_EvtType, #'v1_0.open'{max_frame_size = MFSz, open_sent({call, From}, begin_session, #state{pending_session_reqs = PendingSessionReqs} = State) -> State1 = State#state{pending_session_reqs = [From | PendingSessionReqs]}, - {keep_state, State1}. + {keep_state, State1}; +open_sent(info, {'DOWN', MRef, _, _, _}, + #state{reader_m_ref = MRef}) -> + {stop, {shutdown, reader_down}}. 
opened(_EvtType, heartbeat, State = #state{idle_time_out = T}) -> ok = send_heartbeat(State), @@ -322,16 +349,50 @@ terminate(Reason, _StateName, #state{connection_sup = Sup, case Reason of normal -> sys:terminate(Sup, normal); _ -> ok - end, - ok. + end. code_change(_OldVsn, StateName, State, _Extra) -> {ok, StateName, State}. +format_status(Context = #{data := ProcState}) -> + %% Note: Context.state here refers to the gen_statem state name, + %% so we need to use Context.data to get #state{} + Obfuscated = obfuscate_state(ProcState), + Context#{data => Obfuscated}. + + %% ------------------------------------------------------------------- %% Internal functions. %% ------------------------------------------------------------------- +obfuscate_state(State = #state{config = Cfg0}) -> + Cfg1 = obfuscate_state_config_sasl(Cfg0), + Cfg2 = obfuscate_state_config_tls_opts(Cfg1), + State#state{config = Cfg2}. + +-spec obfuscate_state_config_sasl(connection_config()) -> connection_config(). +obfuscate_state_config_sasl(Cfg) -> + Sasl0 = maps:get(sasl, Cfg, none), + Sasl = case Sasl0 of + {plain, Username, _Password} -> + {plain, Username, <<"[redacted]">>}; + Other -> + Other + end, + Cfg#{sasl => Sasl}. + +-spec obfuscate_state_config_tls_opts(connection_config()) -> connection_config(). +obfuscate_state_config_tls_opts(Cfg) -> + TlsOpts0 = maps:get(tls_opts, Cfg, undefined), + TlsOpts = case TlsOpts0 of + {secure_port, PropL0} -> + Obfuscated = proplists:delete(password, PropL0), + {secure_port, Obfuscated}; + _ -> + TlsOpts0 + end, + Cfg#{tls_opts => TlsOpts}. + handle_begin_session({FromPid, _Ref}, #state{sessions_sup = Sup, reader = Reader, next_channel = Channel, @@ -343,32 +404,32 @@ handle_begin_session({FromPid, _Ref}, end, {Ret, State1}. -send_open(#state{socket = Socket, config = Config}) -> +send_open(#state{socket = Socket, config = Config0}) -> {ok, Product} = application:get_key(description), {ok, Version} = application:get_key(vsn), Platform = "Erlang/OTP " ++ erlang:system_info(otp_release), - Props = {map, [{{symbol, <<"product">>}, - {utf8, list_to_binary(Product)}}, - {{symbol, <<"version">>}, - {utf8, list_to_binary(Version)}}, - {{symbol, <<"platform">>}, - {utf8, list_to_binary(Platform)}} - ]}, + Props0 = #{<<"product">> => {utf8, list_to_binary(Product)}, + <<"version">> => {utf8, list_to_binary(Version)}, + <<"platform">> => {utf8, list_to_binary(Platform)}}, + Config = maps:update_with(properties, + fun(Val) -> maps:merge(Props0, Val) end, + Props0, + Config0), + Props = amqp10_client_types:make_properties(Config), ContainerId = maps:get(container_id, Config, generate_container_id()), IdleTimeOut = maps:get(idle_time_out, Config, 0), + IncomingMaxFrameSize = maps:get(max_frame_size, Config), Open0 = #'v1_0.open'{container_id = {utf8, ContainerId}, channel_max = {ushort, 100}, idle_time_out = {uint, IdleTimeOut}, - properties = Props}, - Open1 = case Config of - #{max_frame_size := MFSz} -> - Open0#'v1_0.open'{max_frame_size = {uint, MFSz}}; - _ -> Open0 - end, + properties = Props, + max_frame_size = {uint, IncomingMaxFrameSize} + }, Open = case Config of #{hostname := Hostname} -> - Open1#'v1_0.open'{hostname = {utf8, Hostname}}; - _ -> Open1 + Open0#'v1_0.open'{hostname = {utf8, Hostname}}; + _ -> + Open0 end, Encoded = amqp10_framing:encode_bin(Open), Frame = amqp10_binary_generator:build_frame(0, Encoded), @@ -393,6 +454,15 @@ send_close(#state{socket = Socket}, _Reason) -> send_sasl_init(State, anon) -> Frame = #'v1_0.sasl_init'{mechanism = {symbol, 
<<"ANONYMOUS">>}}, send(Frame, 1, State); +send_sasl_init(State, external) -> + Frame = #'v1_0.sasl_init'{ + mechanism = {symbol, <<"EXTERNAL">>}, + %% "This response is empty when the client is requesting to act + %% as the identity the server associated with its authentication + %% credentials." + %% https://datatracker.ietf.org/doc/html/rfc4422#appendix-A.1 + initial_response = {binary, <<>>}}, + send(Frame, 1, State); send_sasl_init(State, {plain, User, Pass}) -> Response = <<0:8, User/binary, 0:8, Pass/binary>>, Frame = #'v1_0.sasl_init'{mechanism = {symbol, <<"PLAIN">>}, @@ -448,7 +518,7 @@ unpack(V) -> amqp10_client_types:unpack(V). -spec generate_container_id() -> binary(). generate_container_id() -> - Pre = list_to_binary(atom_to_list(node())), + Pre = atom_to_binary(node()), Id = bin_to_hex(crypto:strong_rand_bytes(8)), <
<Pre/binary, Id/binary>>.
 
@@ -485,10 +555,15 @@ translate_err(#'v1_0.error'{condition = Cond, description = Desc}) ->
 amqp10_event(Evt) ->
     {amqp10_event, {connection, self(), Evt}}.
 
-sasl_to_bin({plain, _, _}) -> <<"PLAIN">>;
-sasl_to_bin(anon) -> <<"ANONYMOUS">>.
+decrypted_sasl_to_mechanism(anon) ->
+    <<"ANONYMOUS">>;
+decrypted_sasl_to_mechanism(external) ->
+    <<"EXTERNAL">>;
+decrypted_sasl_to_mechanism({plain, _, _}) ->
+    <<"PLAIN">>.
 
 config_defaults() ->
     #{sasl => none,
       transfer_limit_margin => 0,
-      max_frame_size => ?MAX_MAX_FRAME_SIZE}.
+      %% 1 MB
+      max_frame_size => 1_048_576}.
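For context, the SASL and properties handling added above is driven entirely by the connection configuration map. A minimal sketch of how a client application might exercise it (the address/port option names and all literal values are illustrative assumptions; only sasl, hostname, container_id and properties correspond to config keys shown in this hunk):

    %% sketch: open a connection with PLAIN SASL; maybe_encrypt_sasl/1 stores the
    %% credentials encrypted in #state.config, and send_open/1 merges the extra
    %% properties into the OPEN performative
    {ok, Connection} = amqp10_client:open_connection(
                         #{address => "localhost",
                           port => 5672,
                           hostname => <<"localhost">>,
                           container_id => <<"example-container">>,
                           sasl => {plain, <<"guest">>, <<"guest">>},
                           properties => #{<<"connection_name">> => {utf8, <<"example">>}}}),
    ok = amqp10_client:close_connection(Connection)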
diff --git a/deps/amqp10_client/src/amqp10_client_connection_sup.erl b/deps/amqp10_client/src/amqp10_client_connection_sup.erl
index 68252637da60..1049e08bac4f 100644
--- a/deps/amqp10_client/src/amqp10_client_connection_sup.erl
+++ b/deps/amqp10_client/src/amqp10_client_connection_sup.erl
@@ -2,41 +2,37 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 -module(amqp10_client_connection_sup).
 
 -behaviour(supervisor).
 
-%% Private API.
+%% API
 -export([start_link/1]).
 
-%% Supervisor callbacks.
+%% Supervisor callbacks
 -export([init/1]).
 
--define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
-                                     transient, 5000, Type, [Mod]}).
-
-%% -------------------------------------------------------------------
-%% Private API.
-%% -------------------------------------------------------------------
-
--spec start_link(amqp10_client_connection:connection_config()) ->
-    {ok, pid()} | ignore | {error, any()}.
 start_link(Config) ->
     supervisor:start_link(?MODULE, [Config]).
 
-%% -------------------------------------------------------------------
-%% Supervisor callbacks.
-%% -------------------------------------------------------------------
-
-init(Args) ->
-    ReaderSpec = ?CHILD(reader, amqp10_client_frame_reader,
-                        worker, [self() | Args]),
-    ConnectionSpec = ?CHILD(connection, amqp10_client_connection,
-                            worker, [self() | Args]),
-    SessionsSupSpec = ?CHILD(sessions, amqp10_client_sessions_sup,
-                             supervisor, []),
-    {ok, {{one_for_all, 0, 1}, [ConnectionSpec,
-                                ReaderSpec,
-                                SessionsSupSpec]}}.
+init(Args0) ->
+    SupFlags = #{strategy => one_for_all,
+                 intensity => 0,
+                 period => 1},
+    Fun = start_link,
+    Args = [self() | Args0],
+    ConnectionSpec = #{id => connection,
+                       start => {amqp10_client_connection, Fun, Args},
+                       restart => transient},
+    ReaderSpec = #{id => reader,
+                   start => {amqp10_client_frame_reader, Fun, Args},
+                   restart => transient},
+    SessionsSupSpec = #{id => sessions,
+                        start => {amqp10_client_sessions_sup, Fun, []},
+                        restart => transient,
+                        type => supervisor},
+    {ok, {SupFlags, [ConnectionSpec,
+                     ReaderSpec,
+                     SessionsSupSpec]}}.
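The rewritten supervisor uses map-based child specs, so the keys the old ?CHILD macro spelled out explicitly now come from OTP defaults. A minimal sketch of what the connection spec above amounts to once those defaults are applied (illustrative only; the values follow the documented supervisor defaults):

    #{id => connection,
      start => {amqp10_client_connection, start_link, Args},
      restart => transient,
      shutdown => 5000,                       %% default for workers
      type => worker,                         %% default when not given
      modules => [amqp10_client_connection]}  %% defaults to the start module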
diff --git a/deps/amqp10_client/src/amqp10_client_connections_sup.erl b/deps/amqp10_client/src/amqp10_client_connections_sup.erl
deleted file mode 100644
index fd15cc89a0d9..000000000000
--- a/deps/amqp10_client/src/amqp10_client_connections_sup.erl
+++ /dev/null
@@ -1,38 +0,0 @@
-%% This Source Code Form is subject to the terms of the Mozilla Public
-%% License, v. 2.0. If a copy of the MPL was not distributed with this
-%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
-%%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
-%%
--module(amqp10_client_connections_sup).
-
--behaviour(supervisor).
-
-%% Private API.
--export([start_link/0,
-         stop_child/1]).
-
-%% Supervisor callbacks.
--export([init/1]).
-
--define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
-                                     temporary, infinity, Type, [Mod]}).
-
-%% -------------------------------------------------------------------
-%% Private API.
-%% -------------------------------------------------------------------
-
-stop_child(Pid) ->
-    supervisor:terminate_child({local, ?MODULE}, Pid).
-
-start_link() ->
-    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% -------------------------------------------------------------------
-%% Supervisor callbacks.
-%% -------------------------------------------------------------------
-
-init([]) ->
-    Template = ?CHILD(connection_sup, amqp10_client_connection_sup,
-                      supervisor, []),
-    {ok, {{simple_one_for_one, 0, 1}, [Template]}}.
diff --git a/deps/amqp10_client/src/amqp10_client_frame_reader.erl b/deps/amqp10_client/src/amqp10_client_frame_reader.erl
index d2fc31161a04..05d8823999b1 100644
--- a/deps/amqp10_client/src/amqp10_client_frame_reader.erl
+++ b/deps/amqp10_client/src/amqp10_client_frame_reader.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 -module(amqp10_client_frame_reader).
 
@@ -262,11 +262,17 @@ handle_input(expecting_frame_body, Data,
         {<<_:BodyLength/binary, Rest/binary>>, 0} ->
             % heartbeat
             handle_input(expecting_frame_header, Rest, State);
-        {<<FrameBody:BodyLength/binary, Rest/binary>>, _} ->
+        {<<Body:BodyLength/binary, Rest/binary>>, _} ->
             State1 = State#state{frame_state = undefined},
-            {PerfDesc, Payload} = amqp10_binary_parser:parse(FrameBody),
-            Perf = amqp10_framing:decode(PerfDesc),
-            State2 = route_frame(Channel, FrameType, {Perf, Payload}, State1),
+            BytesBody = size(Body),
+            {DescribedPerformative, BytesParsed} = amqp10_binary_parser:parse(Body),
+            Performative = amqp10_framing:decode(DescribedPerformative),
+            Payload = if BytesParsed < BytesBody ->
+                             binary_part(Body, BytesParsed, BytesBody - BytesParsed);
+                         BytesParsed =:= BytesBody ->
+                             no_payload
+                      end,
+            State2 = route_frame(Channel, FrameType, {Performative, Payload}, State1),
             handle_input(expecting_frame_header, Rest, State2);
         _ ->
             {ok, expecting_frame_body, Data, State}
@@ -280,22 +286,26 @@ handle_input(StateName, Data, State) ->
 defer_heartbeat_timer(State =
                       #state{heartbeat_timer_ref = TRef,
                              connection_config = #{idle_time_out := T}})
-  when is_number(T) andalso T > 0 ->
+  when is_integer(T) andalso T > 0 ->
     _ = case TRef of
-            undefined -> ok;
-            _ -> _ = erlang:cancel_timer(TRef)
+            undefined ->
+                ok;
+            _ ->
+                erlang:cancel_timer(TRef, [{async, true},
+                                           {info, false}])
         end,
     NewTRef = erlang:send_after(T * 2, self(), heartbeat),
     State#state{heartbeat_timer_ref = NewTRef};
-defer_heartbeat_timer(State) -> State.
+defer_heartbeat_timer(State) ->
+    State.
 
 route_frame(Channel, FrameType, {Performative, Payload} = Frame, State0) ->
     {DestinationPid, State} = find_destination(Channel, FrameType, Performative,
                                                State0),
     ?DBG("FRAME -> ~tp ~tp~n ~tp", [Channel, DestinationPid, Performative]),
     case Payload of
-        <<>> -> ok = gen_statem:cast(DestinationPid, Performative);
-        _ -> ok = gen_statem:cast(DestinationPid, Frame)
+        no_payload -> gen_statem:cast(DestinationPid, Performative);
+        _ -> gen_statem:cast(DestinationPid, Frame)
     end,
     State.
 
diff --git a/deps/amqp10_client/src/amqp10_client_session.erl b/deps/amqp10_client/src/amqp10_client_session.erl
index d6eec4a76a18..b66308a826b2 100644
--- a/deps/amqp10_client/src/amqp10_client_session.erl
+++ b/deps/amqp10_client/src/amqp10_client_session.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 -module(amqp10_client_session).
 
@@ -10,6 +10,7 @@
 
 -include("amqp10_client.hrl").
 -include_lib("amqp10_common/include/amqp10_framing.hrl").
+-include_lib("amqp10_common/include/amqp10_types.hrl").
 
 %% Public API.
 -export(['begin'/1,
@@ -20,10 +21,10 @@
          detach/2,
          transfer/3,
          flow/4,
-         disposition/6
+         disposition/5
         ]).
 
-%% Private API.
+%% Private API
 -export([start_link/4,
          socket_ready/2
         ]).
@@ -33,10 +34,12 @@
          init/1,
          terminate/3,
          code_change/4,
-         callback_mode/0
+         callback_mode/0,
+         format_status/1
         ]).
 
-%% gen_statem state callbacks.
+%% gen_statem state callbacks
+%% see figure 2.30
 -export([
          unmapped/3,
          begin_sent/3,
@@ -44,21 +47,27 @@
          end_sent/3
         ]).
 
--define(MAX_SESSION_WINDOW_SIZE, 65535).
--define(DEFAULT_MAX_HANDLE, 16#ffffffff).
--define(DEFAULT_TIMEOUT, 5000).
--define(INITIAL_OUTGOING_ID, 0).
--define(INITIAL_DELIVERY_COUNT, 0).
+-import(serial_number,
+        [add/2,
+         diff/2]).
 
-% -type from() :: {pid(), term()}.
+-define(MAX_SESSION_WINDOW_SIZE, 65535).
+-define(UINT_OUTGOING_WINDOW, {uint, ?UINT_MAX}).
+-define(INITIAL_OUTGOING_DELIVERY_ID, ?UINT_MAX).
+%% "The next-outgoing-id MAY be initialized to an arbitrary value" [2.5.6]
+-define(INITIAL_OUTGOING_TRANSFER_ID, ?UINT_MAX - 1).
+%% "Note that, despite its name, the delivery-count is not a count but a
+%% sequence number initialized at an arbitrary point by the sender." [2.6.7]
+-define(INITIAL_DELIVERY_COUNT, ?UINT_MAX - 2).
 
--type transfer_id() :: non_neg_integer().
--type link_handle() :: non_neg_integer().
 -type link_name() :: binary().
 -type link_address() :: binary().
 -type link_role() :: sender | receiver.
--type link_source() :: link_address() | undefined.
 -type link_target() :: {pid, pid()} | binary() | undefined.
+%% "The locally chosen handle is referred to as the output handle." [2.6.2]
+-type output_handle() :: link_handle().
+%% "The remotely chosen handle is referred to as the input handle." [2.6.2]
+-type input_handle() :: link_handle().
 
 -type snd_settle_mode() :: unsettled | settled | mixed.
 -type rcv_settle_mode() :: first | second.
@@ -74,16 +83,27 @@
 
 % http://www.amqp.org/specification/1.0/filters
 -type filter() :: #{binary() => binary() | map() | list(binary())}.
--type properties() :: #{binary() => tuple()}.
+-type max_message_size() :: undefined | non_neg_integer().
+-type footer_opt() :: crc32 | adler32.
 
 -type attach_args() :: #{name => binary(),
                          role => attach_role(),
                          snd_settle_mode => snd_settle_mode(),
                          rcv_settle_mode => rcv_settle_mode(),
                          filter => filter(),
-                         properties => properties()
+                         properties => amqp10_client_types:properties(),
+                         max_message_size => max_message_size(),
+                         handle => output_handle(),
+                         footer_opt => footer_opt()
                         }.
 
+-type transfer_error() :: {error,
+                           insufficient_credit |
+                           remote_incoming_window_exceeded |
+                           message_size_exceeded |
+                           link_not_found |
+                           half_attached}.
+
 -type link_ref() :: #link_ref{}.
 
 -export_type([snd_settle_mode/0,
@@ -93,56 +113,54 @@
               attach_role/0,
               target_def/0,
               source_def/0,
-              properties/0,
-              filter/0]).
+              filter/0,
+              max_message_size/0,
+              transfer_error/0]).
 
 -record(link,
         {name :: link_name(),
          ref :: link_ref(),
          state = detached :: detached | attach_sent | attached | detach_sent,
          notify :: pid(),
-         output_handle :: link_handle(),
-         input_handle :: link_handle() | undefined,
+         output_handle :: output_handle(),
+         input_handle :: input_handle() | undefined,
          role :: link_role(),
-         source :: link_source(),
          target :: link_target(),
-         delivery_count = 0 :: non_neg_integer(),
+         max_message_size :: non_neg_integer() | Unlimited :: undefined,
+         delivery_count :: sequence_no() | undefined,
          link_credit = 0 :: non_neg_integer(),
-         link_credit_unsettled = 0 :: non_neg_integer(),
          available = 0 :: non_neg_integer(),
          drain = false :: boolean(),
          partial_transfers :: undefined | {#'v1_0.transfer'{}, [binary()]},
-         auto_flow :: never | {auto, non_neg_integer(), non_neg_integer()}
-         }).
+         auto_flow :: never | {auto, RenewWhenBelow :: pos_integer(), Credit :: pos_integer()},
+         incoming_unsettled = #{} :: #{delivery_number() => ok},
+         footer_opt :: footer_opt() | undefined
+        }).
 
 -record(state,
         {channel :: pos_integer(),
          remote_channel :: pos_integer() | undefined,
-         next_incoming_id = 0 :: transfer_id(),
+
+         %% session flow control, see section 2.5.6
+         next_incoming_id :: transfer_number() | undefined,
          incoming_window = ?MAX_SESSION_WINDOW_SIZE :: non_neg_integer(),
-         next_outgoing_id = ?INITIAL_OUTGOING_ID + 1 :: transfer_id(),
-         outgoing_window = ?MAX_SESSION_WINDOW_SIZE  :: non_neg_integer(),
+         next_outgoing_id = ?INITIAL_OUTGOING_TRANSFER_ID :: transfer_number(),
          remote_incoming_window = 0 :: non_neg_integer(),
          remote_outgoing_window = 0 :: non_neg_integer(),
+
          reader :: pid(),
          socket :: amqp10_client_connection:amqp10_socket() | undefined,
-         links = #{} :: #{link_handle() => #link{}},
-         % maps link name to outgoing link handle
-         link_index = #{} :: #{link_name() => link_handle()},
-         % maps incoming handle to outgoing
-         link_handle_index = #{} :: #{link_handle() => link_handle()},
-         next_link_handle = 0 :: link_handle(),
-         early_attach_requests = [] :: [term()],
-         connection_config = #{} :: amqp10_client_connection:connection_config(),
-         % the unsettled map needs to go in the session state as a disposition
-         % can reference transfers for many different links
-         unsettled = #{} :: #{transfer_id() => {amqp10_msg:delivery_tag(),
-                                                any()}}, %TODO: refine as FsmRef
-         incoming_unsettled = #{} :: #{transfer_id() => link_handle()},
+         links = #{} :: #{output_handle() => #link{}},
+         link_index = #{} :: #{{link_role(), link_name()} => output_handle()},
+         link_handle_index = #{} :: #{input_handle() => output_handle()},
+         next_link_handle = 0 :: output_handle(),
+         early_attach_requests :: [term()],
+         connection_config :: amqp10_client_connection:connection_config(),
+         outgoing_delivery_id = ?INITIAL_OUTGOING_DELIVERY_ID :: delivery_number(),
+         outgoing_unsettled = #{} :: #{delivery_number() => {amqp10_msg:delivery_tag(), Notify :: pid()}},
          notify :: pid()
         }).
 
-
 %% -------------------------------------------------------------------
 %% Public API.
 %% -------------------------------------------------------------------
@@ -156,7 +174,7 @@
 
 -spec begin_sync(pid()) -> supervisor:startchild_ret().
 begin_sync(Connection) ->
-    begin_sync(Connection, ?DEFAULT_TIMEOUT).
+    begin_sync(Connection, ?TIMEOUT).
 
 -spec begin_sync(pid(), non_neg_integer()) ->
     supervisor:startchild_ret() | session_timeout.
@@ -173,28 +191,31 @@ begin_sync(Connection, Timeout) ->
 
 -spec attach(pid(), attach_args()) -> {ok, link_ref()}.
 attach(Session, Args) ->
-    gen_statem:call(Session, {attach, Args}, {dirty_timeout, ?TIMEOUT}).
+    gen_statem:call(Session, {attach, Args}, ?TIMEOUT).
 
--spec detach(pid(), link_handle()) -> ok | {error, link_not_found | half_attached}.
+-spec detach(pid(), output_handle()) -> ok | {error, link_not_found | half_attached}.
 detach(Session, Handle) ->
-    gen_statem:call(Session, {detach, Handle}, {dirty_timeout, ?TIMEOUT}).
+    gen_statem:call(Session, {detach, Handle}, ?TIMEOUT).
 
 -spec transfer(pid(), amqp10_msg:amqp10_msg(), timeout()) ->
-    ok | {error, insufficient_credit | link_not_found | half_attached}.
+    ok | transfer_error().
 transfer(Session, Amqp10Msg, Timeout) ->
-    [Transfer | Records] = amqp10_msg:to_amqp_records(Amqp10Msg),
-    gen_statem:call(Session, {transfer, Transfer, Records},
-                    {dirty_timeout, Timeout}).
+    [Transfer | Sections] = amqp10_msg:to_amqp_records(Amqp10Msg),
+    gen_statem:call(Session, {transfer, Transfer, Sections}, Timeout).
 
-flow(Session, Handle, Flow, RenewAfter) ->
-    gen_statem:cast(Session, {flow, Handle, Flow, RenewAfter}).
+flow(Session, Handle, Flow, RenewWhenBelow) ->
+    gen_statem:cast(Session, {flow_link, Handle, Flow, RenewWhenBelow}).
 
--spec disposition(pid(), link_role(), transfer_id(), transfer_id(), boolean(),
+%% Sending a disposition on a sender link (with receiver-settle-mode = second)
+%% is currently unsupported.
+-spec disposition(link_ref(), delivery_number(), delivery_number(), boolean(),
                   amqp10_client_types:delivery_state()) -> ok.
-disposition(Session, Role, First, Last, Settled, DeliveryState) ->
-    gen_statem:call(Session, {disposition, Role, First, Last, Settled,
-                              DeliveryState}, {dirty_timeout, ?TIMEOUT}).
-
+disposition(#link_ref{role = receiver,
+                      session = Session,
+                      link_handle = Handle},
+            First, Last, Settled, DeliveryState) ->
+    gen_statem:call(Session, {disposition, Handle, First, Last, Settled,
+                              DeliveryState}, ?TIMEOUT).
 
 
 %% -------------------------------------------------------------------
@@ -217,8 +238,11 @@ callback_mode() -> [state_functions].
 init([FromPid, Channel, Reader, ConnConfig]) ->
     process_flag(trap_exit, true),
     amqp10_client_frame_reader:register_session(Reader, self(), Channel),
-    State = #state{notify = FromPid, channel = Channel, reader = Reader,
-                   connection_config = ConnConfig},
+    State = #state{notify = FromPid,
+                   channel = Channel,
+                   reader = Reader,
+                   connection_config = ConnConfig,
+                   early_attach_requests = []},
     {ok, unmapped, State}.
 
 unmapped(cast, {socket_ready, Socket}, State) ->
@@ -258,56 +282,75 @@ mapped(cast, 'end', State) ->
     %% We send the first end frame and wait for the reply.
     send_end(State),
     {next_state, end_sent, State};
-mapped(cast, {flow, OutHandle, Flow0, RenewAfter}, State0) ->
-    State = send_flow(fun send/2, OutHandle, Flow0, RenewAfter, State0),
-    {next_state, mapped, State};
+mapped(cast, {flow_link, OutHandle, Flow0, RenewWhenBelow}, State0) ->
+    State = send_flow_link(OutHandle, Flow0, RenewWhenBelow, State0),
+    {keep_state, State};
+mapped(cast, {flow_session, Flow0 = #'v1_0.flow'{incoming_window = {uint, IncomingWindow}}},
+       #state{next_incoming_id = NII,
+              next_outgoing_id = NOI} = State) ->
+    Flow = Flow0#'v1_0.flow'{
+                   next_incoming_id = maybe_uint(NII),
+                   next_outgoing_id = uint(NOI),
+                   outgoing_window = ?UINT_OUTGOING_WINDOW},
+    ok = send(Flow, State),
+    {keep_state, State#state{incoming_window = IncomingWindow}};
 mapped(cast, #'v1_0.end'{error = Err}, State) ->
     %% We receive the first end frame, reply and terminate.
     _ = send_end(State),
     % TODO: send notifications for links?
-    Reason = case Err of
-                 undefined -> normal;
-                 _ -> Err
-             end,
+    Reason = reason(Err),
     ok = notify_session_ended(State, Reason),
     {stop, normal, State};
 mapped(cast, #'v1_0.attach'{name = {utf8, Name},
                             initial_delivery_count = IDC,
-                            handle = {uint, InHandle}},
+                            handle = {uint, InHandle},
+                            role = PeerRoleBool,
+                            max_message_size = MaybeMaxMessageSize},
        #state{links = Links, link_index = LinkIndex,
               link_handle_index = LHI} = State0) ->
 
-    #{Name := OutHandle} = LinkIndex,
+    OurRoleBool = not PeerRoleBool,
+    OurRole = boolean_to_role(OurRoleBool),
+    LinkIndexKey = {OurRole, Name},
+    #{LinkIndexKey := OutHandle} = LinkIndex,
     #{OutHandle := Link0} = Links,
     ok = notify_link_attached(Link0),
 
-    DeliveryCount = case Link0 of
-                        #link{role = sender, delivery_count = DC} -> DC;
-                        _ -> unpack(IDC)
-                    end,
-    Link = Link0#link{input_handle = InHandle, state = attached,
-                      delivery_count = DeliveryCount},
-    State = State0#state{links = Links#{OutHandle => Link},
-                         link_index = maps:remove(Name, LinkIndex),
+    {DeliveryCount, MaxMessageSize} =
+    case Link0 of
+        #link{role = sender = OurRole,
+              delivery_count = DC} ->
+            MSS = case MaybeMaxMessageSize of
+                      {ulong, S} when S > 0 -> S;
+                      _ -> undefined
+                  end,
+            {DC, MSS};
+        #link{role = receiver = OurRole,
+              max_message_size = MSS} ->
+            {unpack(IDC), MSS}
+    end,
+    Link = Link0#link{state = attached,
+                      input_handle = InHandle,
+                      delivery_count = DeliveryCount,
+                      max_message_size = MaxMessageSize},
+    State = State0#state{links = Links#{OutHandle := Link},
+                         link_index = maps:remove(LinkIndexKey, LinkIndex),
                          link_handle_index = LHI#{InHandle => OutHandle}},
-    {next_state, mapped, State};
+    {keep_state, State};
 mapped(cast, #'v1_0.detach'{handle = {uint, InHandle},
                             error = Err},
-        #state{links = Links, link_handle_index = LHI} = State0) ->
+       #state{links = Links, link_handle_index = LHI} = State0) ->
     with_link(InHandle, State0,
               fun (#link{output_handle = OutHandle} = Link, State) ->
-                      Reason = case Err of
-                                   undefined -> normal;
-                                   Err -> Err
-                               end,
+                      Reason = reason(Err),
                       ok = notify_link_detached(Link, Reason),
-                      {next_state, mapped,
+                      {keep_state,
                        State#state{links = maps:remove(OutHandle, Links),
                                    link_handle_index = maps:remove(InHandle, LHI)}}
               end);
 mapped(cast, #'v1_0.flow'{handle = undefined} = Flow, State0) ->
     State = handle_session_flow(Flow, State0),
-    {next_state, mapped, State};
+    {keep_state, State};
 mapped(cast, #'v1_0.flow'{handle = {uint, InHandle}} = Flow,
        #state{links = Links} = State0) ->
 
@@ -319,12 +362,12 @@ mapped(cast, #'v1_0.flow'{handle = {uint, InHandle}} = Flow,
      % TODO: handle `send_flow` return tag
     {ok, Link} = handle_link_flow(Flow, Link0),
     ok = maybe_notify_link_credit(Link0, Link),
-    Links1 = Links#{OutHandle => Link},
+    Links1 = Links#{OutHandle := Link},
     State1 = State#state{links = Links1},
-    {next_state, mapped, State1};
+    {keep_state, State1};
 mapped(cast, {#'v1_0.transfer'{handle = {uint, InHandle},
-                         more = true} = Transfer, Payload},
-                         #state{links = Links} = State0) ->
+                               more = true} = Transfer, Payload},
+       #state{links = Links} = State0) ->
 
     {ok, #link{output_handle = OutHandle} = Link} =
         find_link_by_input_handle(InHandle, State0),
@@ -333,52 +376,64 @@ mapped(cast, {#'v1_0.transfer'{handle = {uint, InHandle},
 
     State = book_partial_transfer_received(
               State0#state{links = Links#{OutHandle => Link1}}),
-    {next_state, mapped, State};
-mapped(cast, {#'v1_0.transfer'{handle = {uint, InHandle},
-                         delivery_id = MaybeDeliveryId,
-                         settled = Settled} = Transfer0, Payload0},
-                         #state{incoming_unsettled = Unsettled0} = State0) ->
-
+    {keep_state, State};
+mapped(cast, {Transfer0 = #'v1_0.transfer'{handle = {uint, InHandle}},
+              Payload0}, State0) ->
     {ok, #link{target = {pid, TargetPid},
-               output_handle = OutHandle,
-               ref = LinkRef} = Link0} =
-        find_link_by_input_handle(InHandle, State0),
-
-    {Transfer, Payload, Link} = complete_partial_transfer(Transfer0, Payload0, Link0),
-    Msg = decode_as_msg(Transfer, Payload),
-
-    % stash the DeliveryId - not sure for what yet
-    Unsettled = case MaybeDeliveryId of
-                    {uint, DeliveryId} when Settled =/= true ->
-                        Unsettled0#{DeliveryId => OutHandle};
-                    _ ->
-                        Unsettled0
-                end,
-
-    % link bookkeeping
-    % notify when credit is exhausted (link_credit = 0)
-    % detach the Link with a transfer-limit-exceeded error code if further
-    % transfers are received
-    case book_transfer_received(Settled,
-                                State0#state{incoming_unsettled = Unsettled},
-                                Link) of
-        {ok, State} ->
-            % deliver
-            TargetPid ! {amqp10_msg, LinkRef, Msg},
-            State1 = auto_flow(Link, State),
-            {next_state, mapped, State1};
-        {credit_exhausted, State} ->
-            TargetPid ! {amqp10_msg, LinkRef, Msg},
-            ok = notify_link(Link, credit_exhausted),
-            {next_state, mapped, State};
-        {transfer_limit_exceeded, State} ->
-            logger:warning("transfer_limit_exceeded for link ~tp", [Link]),
-            Link1 = detach_with_error_cond(Link, State,
-                                           ?V_1_0_LINK_ERROR_TRANSFER_LIMIT_EXCEEDED),
-            {next_state, mapped, update_link(Link1, State)}
+               ref = LinkRef,
+               incoming_unsettled = Unsettled,
+               footer_opt = FooterOpt
+              } = Link0} = find_link_by_input_handle(InHandle, State0),
+
+    {Transfer = #'v1_0.transfer'{settled = Settled,
+                                 delivery_id = {uint, DeliveryId}},
+     Payload, Link1} = complete_partial_transfer(Transfer0, Payload0, Link0),
+
+    Link2 = case Settled of
+                true ->
+                    Link1;
+                _ ->
+                    %% "If not set on the first (or only) transfer for a (multi-transfer) delivery,
+                    %% then the settled flag MUST be interpreted as being false." [2.7.5]
+                    Link1#link{incoming_unsettled = Unsettled#{DeliveryId => ok}}
+            end,
+    case decode_as_msg(Transfer, Payload, FooterOpt) of
+        {ok, Msg} ->
+            % link bookkeeping
+            % notify when credit is exhausted (link_credit = 0)
+            % detach the Link with a transfer-limit-exceeded error code if further
+            % transfers are received
+            case book_transfer_received(State0, Link2) of
+                {ok, Link3, State1} ->
+                    % deliver
+                    TargetPid ! {amqp10_msg, LinkRef, Msg},
+                    State = auto_flow(Link3, State1),
+                    {keep_state, State};
+                {credit_exhausted, Link3, State} ->
+                    TargetPid ! {amqp10_msg, LinkRef, Msg},
+                    notify_credit_exhausted(Link3),
+                    {keep_state, State};
+                {transfer_limit_exceeded, Link3, State} ->
+                    logger:warning("transfer_limit_exceeded for link ~tp", [Link3]),
+                    Link = detach_with_error_cond(Link3,
+                                                  State,
+                                                  ?V_1_0_LINK_ERROR_TRANSFER_LIMIT_EXCEEDED,
+                                                  undefined),
+                    {keep_state, update_link(Link, State)}
+            end;
+        {checksum_error, Expected, Actual} ->
+            Description = lists:flatten(
+                            io_lib:format(
+                              "~s checksum error: expected ~b, actual ~b",
+                              [FooterOpt, Expected, Actual])),
+            logger:warning("deteaching link ~tp due to ~s", [Link2, Description]),
+            Link = detach_with_error_cond(Link2,
+                                          State0,
+                                          ?V_1_0_AMQP_ERROR_DECODE_ERROR,
+                                          {utf8, unicode:characters_to_binary(Description)}),
+            {keep_state, update_link(Link, State0)}
     end;
 
-
 % role=true indicates the disposition is from a `receiver`. i.e. from the
 % clients point of view these are dispositions relating to `sender`links
 mapped(cast, #'v1_0.disposition'{role = true,
@@ -386,126 +441,127 @@ mapped(cast, #'v1_0.disposition'{role = true,
                                  first = {uint, First},
                                  last = Last0,
                                  state = DeliveryState},
-       #state{unsettled = Unsettled0} = State) ->
+       #state{outgoing_unsettled = Unsettled0} = State) ->
     Last = case Last0 of
                undefined -> First;
                {uint, L} -> L
            end,
     % TODO: no good if the range becomes very large!! refactor
-    Unsettled =
-        lists:foldl(fun(Id, Acc) ->
-                            case Acc of
-                                #{Id := {DeliveryTag, Receiver}} ->
-                                    %% TODO: currently all modified delivery states
-                                    %% will be translated to the old, `modified` atom.
-                                    %% At some point we should translate into the
-                                    %% full {modified, bool, bool, map) tuple.
-                                    S = translate_delivery_state(DeliveryState),
-                                    ok = notify_disposition(Receiver,
-                                                            {S, DeliveryTag}),
-                                    maps:remove(Id, Acc);
-                                _ -> Acc
-                            end
-                    end, Unsettled0, lists:seq(First, Last)),
-
-    {next_state, mapped, State#state{unsettled = Unsettled}};
+    Unsettled = serial_number:foldl(
+                  fun(Id, Acc0) ->
+                          case maps:take(Id, Acc0) of
+                              {{DeliveryTag, Pid}, Acc} ->
+                                  %% TODO: currently all modified delivery states
+                                  %% will be translated to the old, `modified` atom.
+                                  %% At some point we should translate into the
+                                  %% full {modified, bool, bool, map} tuple.
+                                  S = translate_delivery_state(DeliveryState),
+                                  ok = notify_disposition(Pid, {S, DeliveryTag}),
+                                  Acc;
+                              error ->
+                                  Acc0
+                          end
+                  end, Unsettled0, First, Last),
+
+    {keep_state, State#state{outgoing_unsettled = Unsettled}};
 mapped(cast, Frame, State) ->
     logger:warning("Unhandled session frame ~tp in state ~tp",
                              [Frame, State]),
-    {next_state, mapped, State};
+    {keep_state, State};
 mapped({call, From},
+       {transfer, _Transfer, _Sections},
+       #state{remote_incoming_window = Window})
+  when Window =< 0 ->
+    {keep_state_and_data, {reply, From, {error, remote_incoming_window_exceeded}}};
+mapped({call, From = {Pid, _}},
        {transfer, #'v1_0.transfer'{handle = {uint, OutHandle},
                                    delivery_tag = {binary, DeliveryTag},
-                                   settled = false} = Transfer0, Parts},
-       #state{next_outgoing_id = NOI, links = Links,
-              unsettled = Unsettled} = State) ->
+                                   settled = false} = Transfer0, Sections},
+       #state{outgoing_delivery_id = DeliveryId, links = Links,
+              outgoing_unsettled = Unsettled} = State) ->
     case Links of
         #{OutHandle := #link{input_handle = undefined}} ->
-            {keep_state, State, [{reply, From, {error, half_attached}}]};
+            {keep_state_and_data, {reply, From, {error, half_attached}}};
         #{OutHandle := #link{link_credit = LC}} when LC =< 0 ->
-            {keep_state, State, [{reply, From, {error, insufficient_credit}}]};
-        #{OutHandle := Link} ->
-            Transfer = Transfer0#'v1_0.transfer'{delivery_id = uint(NOI),
-                                                 resume = false},
-            {ok, NumFrames} = send_transfer(Transfer, Parts, State),
-            State1 = State#state{unsettled = Unsettled#{NOI => {DeliveryTag, From}}},
-            {keep_state, book_transfer_send(NumFrames, Link, State1),
-             [{reply, From, ok}]};
+            {keep_state_and_data, {reply, From, {error, insufficient_credit}}};
+        #{OutHandle := Link = #link{max_message_size = MaxMessageSize,
+                                    footer_opt = FooterOpt}} ->
+            Transfer = Transfer0#'v1_0.transfer'{delivery_id = uint(DeliveryId)},
+            case send_transfer(Transfer, Sections, FooterOpt, MaxMessageSize, State) of
+                {ok, NumFrames} ->
+                    State1 = State#state{outgoing_unsettled = Unsettled#{DeliveryId => {DeliveryTag, Pid}}},
+                    {keep_state, book_transfer_send(NumFrames, Link, State1), {reply, From, ok}};
+                Error ->
+                    {keep_state_and_data, {reply, From, Error}}
+            end;
         _ ->
-            {keep_state, State, [{reply, From, {error, link_not_found}}]}
+            {keep_state_and_data, {reply, From, {error, link_not_found}}}
 
     end;
 mapped({call, From},
        {transfer, #'v1_0.transfer'{handle = {uint, OutHandle}} = Transfer0,
-        Parts}, #state{next_outgoing_id = NOI,
+        Sections}, #state{outgoing_delivery_id = DeliveryId,
                        links = Links} = State) ->
     case Links of
         #{OutHandle := #link{input_handle = undefined}} ->
-            {keep_state_and_data, [{reply, From, {error, half_attached}}]};
+            {keep_state_and_data, {reply, From, {error, half_attached}}};
         #{OutHandle := #link{link_credit = LC}} when LC =< 0 ->
-            {keep_state_and_data, [{reply, From, {error, insufficient_credit}}]};
-        #{OutHandle := Link} ->
-            Transfer = Transfer0#'v1_0.transfer'{delivery_id = uint(NOI)},
-            {ok, NumFrames} = send_transfer(Transfer, Parts, State),
-            % TODO look into if erlang will correctly wrap integers during
-            % binary conversion.
-            {keep_state, book_transfer_send(NumFrames, Link, State),
-             [{reply, From, ok}]};
+            {keep_state_and_data, {reply, From, {error, insufficient_credit}}};
+        #{OutHandle := Link = #link{max_message_size = MaxMessageSize,
+                                    footer_opt = FooterOpt}} ->
+            Transfer = Transfer0#'v1_0.transfer'{delivery_id = uint(DeliveryId)},
+            case send_transfer(Transfer, Sections, FooterOpt, MaxMessageSize, State) of
+                {ok, NumFrames} ->
+                    {keep_state, book_transfer_send(NumFrames, Link, State), {reply, From, ok}};
+                Error ->
+                    {keep_state_and_data, {reply, From, Error}}
+            end;
         _ ->
-            {keep_state, [{reply, From, {error, link_not_found}}]}
+            {keep_state_and_data, {reply, From, {error, link_not_found}}}
     end;
 
 mapped({call, From},
-       {disposition, Role, First, Last, Settled0, DeliveryState},
-       #state{incoming_unsettled = Unsettled0,
-              links = Links0} = State0) ->
-    Disposition =
-    begin
-        DS = translate_delivery_state(DeliveryState),
-        #'v1_0.disposition'{role = translate_role(Role),
-                            first = {uint, First},
-                            last = {uint, Last},
-                            settled = Settled0,
-                            state = DS}
-    end,
-
-    Ks = lists:seq(First, Last),
-    Settled = maps:values(maps:with(Ks, Unsettled0)),
-    Links = lists:foldl(fun (H, Acc) ->
-                                #{H := #link{link_credit_unsettled = LCU} = L} = Acc,
-                                Acc#{H => L#link{link_credit_unsettled = LCU-1}}
-                        end, Links0, Settled),
-    Unsettled = maps:without(Ks, Unsettled0),
-    State = lists:foldl(fun(H, S) ->
-                                #{H := L} = Links,
-                                auto_flow(L, S)
-                        end,
-                        State0#state{incoming_unsettled = Unsettled,
-                                     links = Links},
-                        lists:usort(Settled)),
-
+       {disposition, OutputHandle, First, Last, Settled0, DeliveryState},
+       #state{links = Links} = State0) ->
+    #{OutputHandle := Link0 = #link{incoming_unsettled = Unsettled0}} = Links,
+    Unsettled = serial_number:foldl(fun maps:remove/2, Unsettled0, First, Last),
+    Link = Link0#link{incoming_unsettled = Unsettled},
+    State1 = State0#state{links = Links#{OutputHandle := Link}},
+    State = auto_flow(Link, State1),
+    Disposition = #'v1_0.disposition'{
+                     role = translate_role(receiver),
+                     first = {uint, First},
+                     last = {uint, Last},
+                     settled = Settled0,
+                     state = translate_delivery_state(DeliveryState)},
     Res = send(Disposition, State),
-
-    {keep_state, State, [{reply, From, Res}]};
+    {keep_state, State, {reply, From, Res}};
 
 mapped({call, From}, {attach, Attach}, State) ->
     {State1, LinkRef} = send_attach(fun send/2, Attach, From, State),
-    {keep_state, State1, [{reply, From, {ok, LinkRef}}]};
+    {keep_state, State1, {reply, From, {ok, LinkRef}}};
 
 mapped({call, From}, Msg, State) ->
     {Reply, State1} = send_detach(fun send/2, Msg, From, State),
-    {keep_state, State1, [{reply, From, Reply}]};
+    {keep_state, State1, {reply, From, Reply}};
 
 mapped(_EvtType, Msg, _State) ->
     logger:warning("amqp10_session: unhandled msg in mapped state ~W",
-                          [Msg, 10]),
+                   [Msg, 10]),
     keep_state_and_data.
 
-end_sent(_EvtType, #'v1_0.end'{}, State) ->
+end_sent(_EvtType, #'v1_0.end'{error = Err}, State) ->
+    Reason = reason(Err),
+    ok = notify_session_ended(State, Reason),
     {stop, normal, State};
-end_sent(_EvtType, _Frame, State) ->
+end_sent(_EvtType, _Frame, _State) ->
     % just drop frames here
-    {next_state, end_sent, State}.
+    keep_state_and_data.
 
 terminate(Reason, _StateName, #state{channel = Channel,
                                      remote_channel = RemoteChannel,
@@ -526,11 +582,10 @@ code_change(_OldVsn, StateName, State, _Extra) ->
 
 send_begin(#state{socket = Socket,
                   next_outgoing_id = NextOutId,
-                  incoming_window = InWin,
-                  outgoing_window = OutWin} = State) ->
+                  incoming_window = InWin} = State) ->
     Begin = #'v1_0.begin'{next_outgoing_id = uint(NextOutId),
                           incoming_window = uint(InWin),
-                          outgoing_window = uint(OutWin) },
+                          outgoing_window = ?UINT_OUTGOING_WINDOW},
     Frame = encode_frame(Begin, State),
     socket_send(Socket, Frame).
 
@@ -551,57 +606,95 @@ send(Record, #state{socket = Socket} = State) ->
     Frame = encode_frame(Record, State),
     socket_send(Socket, Frame).
 
-send_transfer(Transfer0, Parts0, #state{socket = Socket, channel = Channel,
-                                        connection_config = Config}) ->
-    OutMaxFrameSize = case Config of
-                          #{outgoing_max_frame_size := undefined} ->
-                              ?MAX_MAX_FRAME_SIZE;
-                          #{outgoing_max_frame_size := Sz} -> Sz;
-                          _ -> ?MAX_MAX_FRAME_SIZE
-                      end,
+send_transfer(Transfer0, Sections0, FooterOpt, MaxMessageSize,
+              #state{socket = Socket,
+                     channel = Channel,
+                     connection_config = Config}) ->
+    OutMaxFrameSize = maps:get(outgoing_max_frame_size, Config),
     Transfer = amqp10_framing:encode_bin(Transfer0),
-    TSize = iolist_size(Transfer),
-    Parts = [amqp10_framing:encode_bin(P) || P <- Parts0],
-    PartsBin = iolist_to_binary(Parts),
-
-    % TODO: this does not take the extended header into account
-    % see: 2.3
-    MaxPayloadSize = OutMaxFrameSize - TSize - ?FRAME_HEADER_SIZE,
-
-    Frames = build_frames(Channel, Transfer0, PartsBin, MaxPayloadSize, []),
-    ok = socket_send(Socket, Frames),
-    {ok, length(Frames)}.
-
-send_flow(Send, OutHandle,
-          #'v1_0.flow'{link_credit = {uint, Credit}} = Flow0, RenewAfter,
-          #state{links = Links,
-                 next_incoming_id = NII,
-                 next_outgoing_id = NOI,
-                 outgoing_window = OutWin,
-                 incoming_window = InWin} = State) ->
-    AutoFlow = case RenewAfter of
+    TransferSize = iolist_size(Transfer),
+    Sections = encode_sections(Sections0, FooterOpt),
+    SectionsBin = iolist_to_binary(Sections),
+    if is_integer(MaxMessageSize) andalso
+       MaxMessageSize > 0 andalso
+       byte_size(SectionsBin) > MaxMessageSize ->
+           {error, message_size_exceeded};
+       true ->
+           % TODO: this does not take the extended header into account
+           % see: 2.3
+           MaxPayloadSize = OutMaxFrameSize - TransferSize - ?FRAME_HEADER_SIZE,
+           Frames = build_frames(Channel, Transfer0, SectionsBin, MaxPayloadSize, []),
+           ok = socket_send(Socket, Frames),
+           {ok, length(Frames)}
+    end.
+
+encode_sections(Sections, undefined) ->
+    [amqp10_framing:encode_bin(S) || S <- Sections];
+encode_sections(Sections, FooterOpt) ->
+    {Bare, NoBare} = lists:partition(fun is_bare_message_section/1, Sections),
+    {FooterL, PreBare} = lists:partition(fun(#'v1_0.footer'{}) ->
+                                                 true;
+                                            (_) ->
+                                                 false
+                                         end, NoBare),
+    PreBareEncoded = [amqp10_framing:encode_bin(S) || S <- PreBare],
+    BareEncoded = [amqp10_framing:encode_bin(S) || S <- Bare],
+    {Key, Checksum} = case FooterOpt of
+                          crc32 ->
+                              {<<"x-opt-crc-32">>, erlang:crc32(BareEncoded)};
+                          adler32 ->
+                              {<<"x-opt-adler-32">>, erlang:adler32(BareEncoded)}
+                      end,
+    Ann = {{symbol, Key}, {uint, Checksum}},
+    Footer = case FooterL of
+                 [] ->
+                     #'v1_0.footer'{content = [Ann]};
+                 [F = #'v1_0.footer'{content = Content}] ->
+                     F#'v1_0.footer'{content = [Ann | Content]}
+             end,
+    FooterEncoded = amqp10_framing:encode_bin(Footer),
+    [PreBareEncoded, BareEncoded, FooterEncoded].
+
+is_bare_message_section(#'v1_0.header'{}) ->
+    false;
+is_bare_message_section(#'v1_0.delivery_annotations'{}) ->
+    false;
+is_bare_message_section(#'v1_0.message_annotations'{}) ->
+    false;
+is_bare_message_section(#'v1_0.footer'{}) ->
+    false;
+is_bare_message_section(_Section) ->
+    true.
+
+send_flow_link(OutHandle,
+               #'v1_0.flow'{link_credit = {uint, Credit}} = Flow0, RenewWhenBelow,
+               #state{links = Links,
+                      next_incoming_id = NII,
+                      next_outgoing_id = NOI,
+                      incoming_window = InWin} = State) ->
+    AutoFlow = case RenewWhenBelow of
                    never -> never;
                    Limit -> {auto, Limit, Credit}
                end,
     #{OutHandle := #link{output_handle = H,
                          role = receiver,
                          delivery_count = DeliveryCount,
-                         available = Available,
-                         link_credit_unsettled = LCU} = Link} = Links,
-    Flow = Flow0#'v1_0.flow'{handle = uint(H),
-                             link_credit = uint(Credit),
-                             next_incoming_id = uint(NII),
-                             next_outgoing_id = uint(NOI),
-                             outgoing_window = uint(OutWin),
-                             incoming_window = uint(InWin),
-                             delivery_count = uint(DeliveryCount),
-                             available = uint(Available)},
-    ok = Send(Flow, State),
+                         available = Available} = Link} = Links,
+    Flow = Flow0#'v1_0.flow'{
+                   handle = uint(H),
+                   %% "This value MUST be set if the peer has received the begin
+                   %% frame for the session, and MUST NOT be set if it has not." [2.7.4]
+                   next_incoming_id = maybe_uint(NII),
+                   next_outgoing_id = uint(NOI),
+                   outgoing_window = ?UINT_OUTGOING_WINDOW,
+                   incoming_window = uint(InWin),
+                   %% "In the event that the receiving link endpoint has not yet seen the
+                   %% initial attach frame from the sender this field MUST NOT be set." [2.7.4]
+                   delivery_count = maybe_uint(DeliveryCount),
+                   available = uint(Available)},
+    ok = send(Flow, State),
     State#state{links = Links#{OutHandle =>
                                Link#link{link_credit = Credit,
-                                         % need to add on the current LCU
-                                         % to ensure we don't overcredit
-                                         link_credit_unsettled = LCU + Credit,
                                          auto_flow = AutoFlow}}}.
 
 build_frames(Channel, Trf, Bin, MaxPayloadSize, Acc)
@@ -617,8 +710,8 @@ build_frames(Channel, Trf, Payload, MaxPayloadSize, Acc) ->
 
 make_source(#{role := {sender, _}}) ->
     #'v1_0.source'{};
-make_source(#{role := {receiver, #{address := Address} = Target, _Pid}, filter := Filter}) ->
-    Durable = translate_terminus_durability(maps:get(durable, Target, none)),
+make_source(#{role := {receiver, #{address := Address} = Source, _Pid}, filter := Filter}) ->
+    Durable = translate_terminus_durability(maps:get(durable, Source, none)),
     TranslatedFilter = translate_filters(Filter),
     #'v1_0.source'{address = {utf8, Address},
                    durable = {uint, Durable},
@@ -628,22 +721,20 @@ make_target(#{role := {receiver, _Source, _Pid}}) ->
     #'v1_0.target'{};
 make_target(#{role := {sender, #{address := Address} = Target}}) ->
     Durable = translate_terminus_durability(maps:get(durable, Target, none)),
-    #'v1_0.target'{address = {utf8, Address},
+    TargetAddr = case is_binary(Address) of
+                     true -> {utf8, Address};
+                     false -> Address
+                 end,
+    #'v1_0.target'{address = TargetAddr,
                    durable = {uint, Durable}}.
 
-make_properties(#{properties := Properties}) ->
-    translate_properties(Properties);
-make_properties(_) ->
+max_message_size(#{max_message_size := Size})
+  when is_integer(Size) andalso
+       Size > 0 ->
+    {ulong, Size};
+max_message_size(_) ->
     undefined.
 
-translate_properties(Properties) when is_map(Properties) andalso map_size(Properties) =< 0 ->
-    undefined;
-translate_properties(Properties) when is_map(Properties) ->
-    {map, maps:fold(fun translate_property/3, [], Properties)}.
-
-translate_property(K, V, Acc) when is_tuple(V) ->
-    [{{symbol, K}, V} | Acc].
-
 translate_terminus_durability(none) -> 0;
 translate_terminus_durability(configuration) -> 1;
 translate_terminus_durability(unsettled_state) -> 2.
@@ -675,7 +766,7 @@ filter_value_type(V)
   when is_integer(V) andalso V >= 0 ->
     {uint, V};
 filter_value_type(VList) when is_list(VList) ->
-    [filter_value_type(V) || V <- VList];
+    {list, [filter_value_type(V) || V <- VList]};
 filter_value_type({T, _} = V) when is_atom(T) ->
     %% looks like an already tagged type, just pass it through
     V.
@@ -710,54 +801,69 @@ send_detach(Send, {detach, OutHandle}, _From, State = #state{links = Links}) ->
             {{error, link_not_found}, State}
     end.
 
-detach_with_error_cond(Link = #link{output_handle = OutHandle}, State, Cond) ->
-    Err = #'v1_0.error'{condition = Cond},
+detach_with_error_cond(Link = #link{output_handle = OutHandle}, State, Cond, Description) ->
+    Err = #'v1_0.error'{condition = Cond,
+                        description = Description},
     Detach = #'v1_0.detach'{handle = uint(OutHandle),
                             closed = true,
                             error = Err},
     ok = send(Detach, State),
     Link#link{state = detach_sent}.
 
-send_attach(Send, #{name := Name, role := Role} = Args, {FromPid, _},
-      #state{next_link_handle = OutHandle, links = Links,
+send_attach(Send, #{name := Name, role := RoleTuple} = Args, {FromPid, _},
+            #state{next_link_handle = OutHandle0, links = Links,
              link_index = LinkIndex} = State) ->
 
     Source = make_source(Args),
     Target = make_target(Args),
-    Properties = make_properties(Args),
-
-    {LinkTarget, RoleAsBool} = case Role of
-                                   {receiver, _, Pid} ->
-                                       {{pid, Pid}, true};
-                                   {sender, #{address := TargetAddr}} ->
-                                       {TargetAddr, false}
-                               end,
+    Properties = amqp10_client_types:make_properties(Args),
+
+    {LinkTarget, InitialDeliveryCount, MaxMessageSize} =
+    case RoleTuple of
+        {receiver, _, Pid} ->
+            {{pid, Pid}, undefined, max_message_size(Args)};
+        {sender, #{address := TargetAddr}} ->
+            {TargetAddr, uint(?INITIAL_DELIVERY_COUNT), undefined}
+    end,
 
+    {OutHandle, NextLinkHandle} = case Args of
+                                      #{handle := Handle} ->
+                                          %% Client app provided link handle.
+                                          %% Really only meant for integration tests.
+                                          {Handle, OutHandle0};
+                                      _ ->
+                                          {OutHandle0, OutHandle0 + 1}
+                                  end,
+    Role = element(1, RoleTuple),
     % create attach performative
     Attach = #'v1_0.attach'{name = {utf8, Name},
-                            role = RoleAsBool,
+                            role = role_to_boolean(Role),
                             handle = {uint, OutHandle},
                             source = Source,
                             properties = Properties,
-                            initial_delivery_count =
-                                {uint, ?INITIAL_DELIVERY_COUNT},
+                            initial_delivery_count = InitialDeliveryCount,
                             snd_settle_mode = snd_settle_mode(Args),
                             rcv_settle_mode = rcv_settle_mode(Args),
-                            target = Target},
+                            target = Target,
+                            max_message_size = MaxMessageSize},
     ok = Send(Attach, State),
 
+    Ref = make_link_ref(Role, self(), OutHandle),
     Link = #link{name = Name,
-                 ref = make_link_ref(element(1, Role), self(), OutHandle),
+                 ref = Ref,
                  output_handle = OutHandle,
                  state = attach_sent,
-                 role = element(1, Role),
+                 role = Role,
                  notify = FromPid,
                  auto_flow = never,
-                 target = LinkTarget},
+                 target = LinkTarget,
+                 delivery_count = unpack(InitialDeliveryCount),
+                 max_message_size = unpack(MaxMessageSize),
+                 footer_opt = maps:get(footer_opt, Args, undefined)},
 
     {State#state{links = Links#{OutHandle => Link},
-                 next_link_handle = OutHandle + 1,
-                 link_index = LinkIndex#{Name => OutHandle}}, Link#link.ref}.
+                 next_link_handle = NextLinkHandle,
+                 link_index = LinkIndex#{{Role, Name} => OutHandle}}, Ref}.
 
 -spec handle_session_flow(#'v1_0.flow'{}, #state{}) -> #state{}.
 handle_session_flow(#'v1_0.flow'{next_incoming_id = MaybeNII,
@@ -767,41 +873,53 @@ handle_session_flow(#'v1_0.flow'{next_incoming_id = MaybeNII,
        #state{next_outgoing_id = OurNOI} = State) ->
     NII = case MaybeNII of
               {uint, N} -> N;
-              undefined -> ?INITIAL_OUTGOING_ID + 1
+              undefined -> ?INITIAL_OUTGOING_TRANSFER_ID
           end,
+    RemoteIncomingWindow = diff(add(NII, InWin), OurNOI), % see: 2.5.6
     State#state{next_incoming_id = NOI,
-                remote_incoming_window = NII + InWin - OurNOI, % see: 2.5.6
+                remote_incoming_window = RemoteIncomingWindow,
                 remote_outgoing_window = OutWin}.
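
A minimal sketch (not part of the patch) of the session-window bookkeeping above, per section 2.5.6 of the AMQP 1.0 spec. It assumes add/2 and diff/2 wrap 32-bit serial-number arithmetic and that ?INITIAL_OUTGOING_TRANSFER_ID is 16#fffffffe, as implied by the test comment further down:

%% remote-incoming-window = next-incoming-id(peer) + incoming-window(peer)
%%                          - next-outgoing-id(local), modulo 2^32
remote_incoming_window(NIIPeer, InWinPeer, NOILocal) ->
    (NIIPeer + InWinPeer - NOILocal) band 16#FFFFFFFF.

%% 16#fffffffe + 1000 wraps to 998, so with 51 transfers already sent locally:
%% remote_incoming_window(16#fffffffe, 1000, 51) =:= 947.
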
 
 
 -spec handle_link_flow(#'v1_0.flow'{}, #link{}) -> {ok | send_flow, #link{}}.
-handle_link_flow(#'v1_0.flow'{drain = true, link_credit = {uint, TheirCredit}},
+handle_link_flow(#'v1_0.flow'{drain = true,
+                              link_credit = {uint, TheirCredit}},
                  Link = #link{role = sender,
                               delivery_count = OurDC,
                               available = 0}) ->
     {send_flow, Link#link{link_credit = 0,
-                          delivery_count = OurDC + TheirCredit}};
+                          delivery_count = add(OurDC, TheirCredit)}};
 handle_link_flow(#'v1_0.flow'{delivery_count = MaybeTheirDC,
                               link_credit = {uint, TheirCredit}},
                  Link = #link{role = sender,
                               delivery_count = OurDC}) ->
     TheirDC = case MaybeTheirDC of
-                  undefined -> ?INITIAL_DELIVERY_COUNT;
-                  {uint, DC} -> DC
+                  {uint, DC} -> DC;
+                  undefined -> ?INITIAL_DELIVERY_COUNT
               end,
-    LinkCredit = TheirDC + TheirCredit - OurDC,
-
+    LinkCredit = amqp10_util:link_credit_snd(TheirDC, TheirCredit, OurDC),
     {ok, Link#link{link_credit = LinkCredit}};
 handle_link_flow(#'v1_0.flow'{delivery_count = TheirDC,
+                              link_credit = {uint, TheirCredit},
                               available = Available,
-                              drain = Drain},
-                 Link = #link{role = receiver}) ->
-
-    {ok, Link#link{delivery_count = unpack(TheirDC),
-                   available = unpack(Available),
-                   drain = Drain}}.
+                              drain = Drain0},
+                 Link0 = #link{role = receiver}) ->
+    Drain = default(Drain0, false),
+    Link = case Drain andalso TheirCredit =:= 0 of
+               true ->
+                   notify_credit_exhausted(Link0),
+                   Link0#link{delivery_count = unpack(TheirDC),
+                              link_credit = 0,
+                              available = unpack(Available),
+                              drain = Drain};
+               false ->
+                   Link0#link{delivery_count = unpack(TheirDC),
+                              available = unpack(Available),
+                              drain = Drain}
+           end,
+    {ok, Link}.
 
--spec find_link_by_input_handle(link_handle(), #state{}) ->
+-spec find_link_by_input_handle(input_handle(), #state{}) ->
     {ok, #link{}} | not_found.
 find_link_by_input_handle(InHandle, #state{link_handle_index = LHI,
                                            links = Links}) ->
@@ -825,8 +943,11 @@ with_link(InHandle, State, Fun) ->
             {next_state, end_sent, State}
     end.
 
+maybe_uint(undefined) -> undefined;
+maybe_uint(Int) -> uint(Int).
 
 uint(Int) -> {uint, Int}.
+
 unpack(X) -> amqp10_client_types:unpack(X).
 
 snd_settle_mode(#{snd_settle_mode := unsettled}) -> {ubyte, 0};
@@ -861,13 +982,14 @@ translate_delivery_state({modified,
 translate_delivery_state(released) -> #'v1_0.released'{};
 translate_delivery_state(received) -> #'v1_0.received'{}.
 
-translate_role(sender) -> false;
 translate_role(receiver) -> true.
 
-maybe_notify_link_credit(#link{link_credit = 0, role = sender},
-                         #link{link_credit = Credit} = Link)
-  when Credit > 0 ->
-    notify_link(Link, credited);
+maybe_notify_link_credit(#link{role = sender,
+                               link_credit = 0},
+                         #link{role = sender,
+                               link_credit = NewCredit} = NewLink)
+  when NewCredit > 0 ->
+    notify_link(NewLink, credited);
 maybe_notify_link_credit(_Old, _New) ->
     ok.
 
@@ -890,72 +1012,72 @@ notify_session_ended(#state{notify = Pid}, Reason) ->
     Pid ! amqp10_session_event({ended, Reason}),
     ok.
 
-notify_disposition({Pid, _}, SessionDeliveryTag) ->
-    Pid ! {amqp10_disposition, SessionDeliveryTag},
+notify_disposition(Pid, DeliveryStateDeliveryTag) ->
+    Pid ! {amqp10_disposition, DeliveryStateDeliveryTag},
     ok.
 
 book_transfer_send(Num, #link{output_handle = Handle} = Link,
-                   #state{next_outgoing_id = NOI,
+                   #state{next_outgoing_id = NextOutgoingId,
+                          outgoing_delivery_id = DeliveryId,
                           remote_incoming_window = RIW,
                           links = Links} = State) ->
-    State#state{next_outgoing_id = NOI+Num,
+    State#state{next_outgoing_id = add(NextOutgoingId, Num),
+                outgoing_delivery_id = add(DeliveryId, 1),
                 remote_incoming_window = RIW-Num,
-                links = Links#{Handle => incr_link_counters(Link)}}.
+                links = Links#{Handle => book_link_transfer_send(Link)}}.
 
 book_partial_transfer_received(#state{next_incoming_id = NID,
                                       remote_outgoing_window = ROW} = State) ->
-    State#state{next_incoming_id = NID+1,
-                remote_outgoing_window = ROW-1}.
+    State#state{next_incoming_id = add(NID, 1),
+                remote_outgoing_window = ROW - 1}.
 
-book_transfer_received(_Settled,
-                       State = #state{connection_config =
+book_transfer_received(State = #state{connection_config =
                                       #{transfer_limit_margin := Margin}},
-                       #link{link_credit = Margin}) ->
-    {transfer_limit_exceeded, State};
-book_transfer_received(Settled,
-                       #state{next_incoming_id = NID,
+                       #link{link_credit = Margin} = Link) ->
+    {transfer_limit_exceeded, Link, State};
+book_transfer_received(#state{next_incoming_id = NID,
                               remote_outgoing_window = ROW,
                               links = Links} = State,
                        #link{output_handle = OutHandle,
                              delivery_count = DC,
                              link_credit = LC,
-                             link_credit_unsettled = LCU0} = Link) ->
-    LCU = case Settled of
-              true -> LCU0-1;
-              _ -> LCU0
-          end,
-
-    Link1 = Link#link{delivery_count = DC+1,
-                      link_credit = LC-1,
-                      link_credit_unsettled = LCU},
+                             available = Avail} = Link) ->
+    Link1 = Link#link{delivery_count = add(DC, 1),
+                      link_credit = LC - 1,
+                      %% "the receiver MUST maintain a floor of zero in its
+                      %% calculation of the value of available" [2.6.7]
+                      available = max(0, Avail - 1)},
     State1 = State#state{links = Links#{OutHandle => Link1},
-                         next_incoming_id = NID+1,
-                         remote_outgoing_window = ROW-1},
+                         next_incoming_id = add(NID, 1),
+                         remote_outgoing_window = ROW - 1},
     case Link1 of
         #link{link_credit = 0,
-              % only notify of credit exhaustion when
-              % not using auto flow.
               auto_flow = never} ->
-            {credit_exhausted, State1};
-        _ -> {ok, State1}
+            {credit_exhausted, Link1, State1};
+        _ ->
+            {ok, Link1, State1}
     end.
 
-auto_flow(#link{link_credit_unsettled = LCU,
-                auto_flow = {auto, Limit, Credit},
-                output_handle = OutHandle}, State)
-  when LCU =< Limit ->
-    send_flow(fun send/2, OutHandle,
-              #'v1_0.flow'{link_credit = {uint, Credit}},
-              Limit, State);
-auto_flow(_Link, State) ->
+auto_flow(#link{link_credit = LC,
+                auto_flow = {auto, RenewWhenBelow, Credit},
+                output_handle = OutHandle,
+                incoming_unsettled = Unsettled},
+          State)
+  when LC + map_size(Unsettled) < RenewWhenBelow ->
+    send_flow_link(OutHandle,
+                   #'v1_0.flow'{link_credit = {uint, Credit}},
+                   RenewWhenBelow, State);
+auto_flow(_, State) ->
     State.
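
A hedged usage note: the {auto, RenewWhenBelow, Credit} mode matched above is presumably what amqp10_client:flow_link_credit/3 sets up; with the new guard, credit is only topped up once unused credit plus received-but-unsettled deliveries drops below RenewWhenBelow. For example:

%% Grant 5 credits now, and another 5 whenever fewer than 2 messages remain
%% outstanding (unused credit + unsettled deliveries); a hypothetical call:
ok = amqp10_client:flow_link_credit(Receiver, 5, 2).

The subscribe_with_auto_flow_settled and subscribe_with_auto_flow_unsettled tests later in this patch exercise exactly this behaviour.
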
 
 update_link(Link = #link{output_handle = OutHandle},
             State = #state{links = Links}) ->
             State#state{links = Links#{OutHandle => Link}}.
 
-incr_link_counters(#link{link_credit = LC, delivery_count = DC} = Link) ->
-    Link#link{delivery_count = DC+1, link_credit = LC+1}.
+book_link_transfer_send(Link = #link{link_credit = LC,
+                                     delivery_count = DC}) ->
+    Link#link{link_credit = LC - 1,
+              delivery_count = add(DC, 1)}.
 
 append_partial_transfer(Transfer, Payload,
                         #link{partial_transfers = undefined} = Link) ->
@@ -972,9 +1094,54 @@ complete_partial_transfer(_Transfer, Payload,
     {T, iolist_to_binary(lists:reverse([Payload | Payloads])),
      Link#link{partial_transfers = undefined}}.
 
-decode_as_msg(Transfer, Payload) ->
-    Records = amqp10_framing:decode_bin(Payload),
-    amqp10_msg:from_amqp_records([Transfer | Records]).
+decode_as_msg(Transfer, Payload, undefined) ->
+    Sections = amqp10_framing:decode_bin(Payload),
+    {ok, amqp10_msg:from_amqp_records([Transfer | Sections])};
+decode_as_msg(Transfer, Payload, FooterOpt) ->
+    PosSections = decode_sections([], Payload, size(Payload), 0),
+    Sections = lists:map(fun({_Pos, S}) -> S end, PosSections),
+    Msg = amqp10_msg:from_amqp_records([Transfer | Sections]),
+    OkMsg = {ok, Msg},
+    case lists:last(PosSections) of
+        {PosFooter, #'v1_0.footer'{content = Content}}
+          when is_list(Content) ->
+            Key = case FooterOpt of
+                      crc32 -> <<"x-opt-crc-32">>;
+                      adler32 -> <<"x-opt-adler-32">>
+                  end,
+            case lists:search(fun({{symbol, K}, {uint, _Checksum}})
+                                    when K =:= Key ->
+                                      true;
+                                 (_) ->
+                                      false
+                              end, Content) of
+                {value, {{symbol, Key}, {uint, Expected}}} ->
+                    {value, {PosBare, _}} = lists:search(fun({_Pos, S}) ->
+                                                                 is_bare_message_section(S)
+                                                         end, PosSections),
+                    BareBin = binary_part(Payload, PosBare, PosFooter - PosBare),
+                    case erlang:FooterOpt(BareBin) of
+                        Expected -> OkMsg;
+                        Actual -> {checksum_error, Expected, Actual}
+                    end;
+                false ->
+                    OkMsg
+            end;
+        _ ->
+            OkMsg
+    end.
+
+decode_sections(PosSections, _Payload, PayloadSize, PayloadSize) ->
+    lists:reverse(PosSections);
+decode_sections(PosSections, Payload, PayloadSize, Parsed)
+  when PayloadSize > Parsed ->
+    Bin = binary_part(Payload, Parsed, PayloadSize - Parsed),
+    {Described, NumBytes} = amqp10_binary_parser:parse(Bin),
+    Section = amqp10_framing:decode(Described),
+    decode_sections([{Parsed, Section} | PosSections],
+                    Payload,
+                    PayloadSize,
+                    Parsed + NumBytes).
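
A hedged sketch (not part of the patch) of the checksum convention that decode_as_msg/3 and decode_sections/4 verify above: the checksum is expected to cover the bare message bytes (from the first properties/application-properties/body section up to the footer) and to travel as an x-opt-crc-32 or x-opt-adler-32 footer entry, computed with the erlang:crc32/1 or erlang:adler32/1 BIFs. A hypothetical sender-side helper might look like:

-include_lib("amqp10_common/include/amqp10_framing.hrl").

%% Build a CRC-32 footer over the already-encoded bare message bytes.
crc32_footer(BareMessageBin) when is_binary(BareMessageBin) ->
    Checksum = erlang:crc32(BareMessageBin),
    #'v1_0.footer'{content = [{{symbol, <<"x-opt-crc-32">>}, {uint, Checksum}}]}.
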
 
 amqp10_session_event(Evt) ->
     {amqp10_event, {session, self(), Evt}}.
@@ -986,13 +1153,20 @@ socket_send(Sock, Data) ->
             throw({stop, normal})
     end.
 
+%% Only notify of credit exhaustion when not using auto flow.
+notify_credit_exhausted(Link = #link{auto_flow = never}) ->
+    ok = notify_link(Link, credit_exhausted);
+notify_credit_exhausted(_Link) ->
+    ok.
+
 -dialyzer({no_fail_call, socket_send0/2}).
 socket_send0({tcp, Socket}, Data) ->
     gen_tcp:send(Socket, Data);
 socket_send0({ssl, Socket}, Data) ->
     ssl:send(Socket, Data).
 
--spec make_link_ref(_, _, _) -> link_ref().
+-spec make_link_ref(link_role(), pid(), output_handle()) ->
+    link_ref().
 make_link_ref(Role, Session, Handle) ->
     #link_ref{role = Role, session = Session, link_handle = Handle}.
 
@@ -1026,6 +1200,107 @@ sym(B) when is_binary(B) -> {symbol, B};
 sym(B) when is_list(B) -> {symbol, list_to_binary(B)};
 sym(B) when is_atom(B) -> {symbol, atom_to_binary(B, utf8)}.
 
+reason(undefined) -> normal;
+reason(Other) -> Other.
+
+role_to_boolean(sender) ->
+    ?AMQP_ROLE_SENDER;
+role_to_boolean(receiver) ->
+    ?AMQP_ROLE_RECEIVER.
+
+boolean_to_role(?AMQP_ROLE_SENDER) ->
+    sender;
+boolean_to_role(?AMQP_ROLE_RECEIVER) ->
+    receiver.
+
+default(undefined, Default) -> Default;
+default(Thing, _Default) -> Thing.
+
+format_status(Status = #{data := Data0}) ->
+    #state{channel = Channel,
+           remote_channel = RemoteChannel,
+           next_incoming_id = NextIncomingId,
+           incoming_window = IncomingWindow,
+           next_outgoing_id = NextOutgoingId,
+           remote_incoming_window = RemoteIncomingWindow,
+           remote_outgoing_window = RemoteOutgoingWindow,
+           reader = Reader,
+           socket = Socket,
+           links = Links0,
+           link_index = LinkIndex,
+           link_handle_index = LinkHandleIndex,
+           next_link_handle = NextLinkHandle,
+           early_attach_requests = EarlyAttachRequests,
+           connection_config = ConnectionConfig,
+           outgoing_delivery_id = OutgoingDeliveryId,
+           outgoing_unsettled = OutgoingUnsettled,
+           notify = Notify
+          } = Data0,
+    Links = maps:map(
+              fun(_OutputHandle,
+                  #link{name = Name,
+                        ref = Ref,
+                        state = State,
+                        notify = LinkNotify,
+                        output_handle = OutputHandle,
+                        input_handle = InputHandle,
+                        role = Role,
+                        target = Target,
+                        max_message_size = MaxMessageSize,
+                        delivery_count = DeliveryCount,
+                        link_credit = LinkCredit,
+                        available = Available,
+                        drain = Drain,
+                        partial_transfers = PartialTransfers0,
+                        auto_flow = AutoFlow,
+                        incoming_unsettled = IncomingUnsettled,
+                        footer_opt = FooterOpt
+                       }) ->
+                      PartialTransfers = case PartialTransfers0 of
+                                             undefined ->
+                                                 0;
+                                             {#'v1_0.transfer'{}, Binaries} ->
+                                                 length(Binaries)
+                                         end,
+                      #{name => Name,
+                        ref => Ref,
+                        state => State,
+                        notify => LinkNotify,
+                        output_handle => OutputHandle,
+                        input_handle => InputHandle,
+                        role => Role,
+                        target => Target,
+                        max_message_size => MaxMessageSize,
+                        delivery_count => DeliveryCount,
+                        link_credit => LinkCredit,
+                        available => Available,
+                        drain => Drain,
+                        partial_transfers => PartialTransfers,
+                        auto_flow => AutoFlow,
+                        incoming_unsettled => maps:size(IncomingUnsettled),
+                        footer_opt => FooterOpt
+                       }
+              end, Links0),
+    Data = #{channel => Channel,
+             remote_channel => RemoteChannel,
+             next_incoming_id => NextIncomingId,
+             incoming_window => IncomingWindow,
+             next_outgoing_id => NextOutgoingId,
+             remote_incoming_window => RemoteIncomingWindow,
+             remote_outgoing_window => RemoteOutgoingWindow,
+             reader => Reader,
+             socket => Socket,
+             links => Links,
+             link_index => LinkIndex,
+             link_handle_index => LinkHandleIndex,
+             next_link_handle => NextLinkHandle,
+             early_attach_requests => length(EarlyAttachRequests),
+             connection_config => maps:remove(sasl, ConnectionConfig),
+             outgoing_delivery_id => OutgoingDeliveryId,
+             outgoing_unsettled => maps:size(OutgoingUnsettled),
+             notify => Notify},
+    Status#{data := Data}.
+
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 
@@ -1051,27 +1326,34 @@ handle_session_flow_pre_begin_test() ->
     State = handle_session_flow(Flow, State0),
     42 = State#state.next_incoming_id,
     2000 = State#state.remote_outgoing_window,
-    ?INITIAL_OUTGOING_ID + 1 + 1000 - 51 = State#state.remote_incoming_window.
+    % using serial number arithmetic:
+    % ?INITIAL_OUTGOING_TRANSFER_ID + 1000 = 998
+    ?assertEqual(998 - 51, State#state.remote_incoming_window).
 
 handle_link_flow_sender_test() ->
-    Handle = 45,
-    DeliveryCount = 55,
-    Link = #link{role = sender, output_handle = 99,
-                 link_credit = 0, delivery_count = DeliveryCount + 2},
-    Flow = #'v1_0.flow'{handle = {uint, Handle},
-                        link_credit = {uint, 42},
-                        delivery_count = {uint, DeliveryCount}
+    DeliveryCountRcv = 55,
+    DeliveryCountSnd = DeliveryCountRcv + 2,
+    LinkCreditRcv = 42,
+    Link = #link{role = sender,
+                 output_handle = 99,
+                 link_credit = 0,
+                 delivery_count = DeliveryCountSnd},
+    Flow = #'v1_0.flow'{handle = {uint, 45},
+                        link_credit = {uint, LinkCreditRcv},
+                        delivery_count = {uint, DeliveryCountRcv}
                        },
     {ok, Outcome} = handle_link_flow(Flow, Link),
     % see section 2.6.7
-    Expected = DeliveryCount + 42 - (DeliveryCount + 2),
-    Expected = Outcome#link.link_credit,
+    ?assertEqual(DeliveryCountRcv + LinkCreditRcv - DeliveryCountSnd,
+                 Outcome#link.link_credit),
 
     % receiver does not yet know the delivery_count
     {ok, Outcome2} = handle_link_flow(Flow#'v1_0.flow'{delivery_count = undefined},
-                                Link),
-    Expected2 = ?INITIAL_DELIVERY_COUNT + 42 - (DeliveryCount + 2),
-    Expected2 = Outcome2#link.link_credit.
+                                      Link),
+    % using serial number arithmetic:
+    % ?INITIAL_DELIVERY_COUNT + LinkCreditRcv - DeliveryCountSnd = -18
+    % but we maintain a floor of zero
+    ?assertEqual(0, Outcome2#link.link_credit).
 
 handle_link_flow_sender_drain_test() ->
     Handle = 45,
@@ -1098,7 +1380,8 @@ handle_link_flow_receiver_test() ->
     Flow = #'v1_0.flow'{handle = {uint, Handle},
                         delivery_count = {uint, SenderDC},
                         available = 99,
-                        drain = true % what to do?
+                        drain = true, % what to do?
+                        link_credit = {uint, 0}
                        },
     {ok, Outcome} = handle_link_flow(Flow, Link),
     % see section 2.6.7
@@ -1148,7 +1431,8 @@ translate_filters_legacy_amqp_no_local_filter_test() ->
     {map,
         [{
             {symbol, <<"apache.org:no-local-filter:list">>},
-            {described, {symbol, <<"apache.org:no-local-filter:list">>}, [{utf8, <<"foo">>}, {utf8, <<"bar">>}]}
+            {described, {symbol, <<"apache.org:no-local-filter:list">>},
+             {list, [{utf8, <<"foo">>}, {utf8, <<"bar">>}]}}
         }]
     } = translate_filters(#{<<"apache.org:no-local-filter:list">> => [<<"foo">>, <<"bar">>]}).
 
diff --git a/deps/amqp10_client/src/amqp10_client_sessions_sup.erl b/deps/amqp10_client/src/amqp10_client_sessions_sup.erl
index 05e162ecc133..a3c4af737ba5 100644
--- a/deps/amqp10_client/src/amqp10_client_sessions_sup.erl
+++ b/deps/amqp10_client/src/amqp10_client_sessions_sup.erl
@@ -2,35 +2,26 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 -module(amqp10_client_sessions_sup).
 
 -behaviour(supervisor).
 
-%% Private API.
+%% API
 -export([start_link/0]).
 
-%% Supervisor callbacks.
+%% Supervisor callbacks
 -export([init/1]).
 
--define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
-                                     transient, 5000, Type, [Mod]}).
-
-%% -------------------------------------------------------------------
-%% Private API.
-%% -------------------------------------------------------------------
-
--spec start_link() ->
-    {ok, pid()} | ignore | {error, any()}.
-
 start_link() ->
     supervisor:start_link(?MODULE, []).
 
-%% -------------------------------------------------------------------
-%% Supervisor callbacks.
-%% -------------------------------------------------------------------
-
-init(Args) ->
-    Template = ?CHILD(session, amqp10_client_session, worker, Args),
-    {ok, {{simple_one_for_one, 0, 1}, [Template]}}.
+init([]) ->
+    SupFlags = #{strategy => simple_one_for_one,
+                 intensity => 0,
+                 period => 1},
+    ChildSpec = #{id => session,
+                  start => {amqp10_client_session, start_link, []},
+                  restart => transient},
+    {ok, {SupFlags, [ChildSpec]}}.
diff --git a/deps/amqp10_client/src/amqp10_client_sup.erl b/deps/amqp10_client/src/amqp10_client_sup.erl
index c6230854622d..423aaa150f73 100644
--- a/deps/amqp10_client/src/amqp10_client_sup.erl
+++ b/deps/amqp10_client/src/amqp10_client_sup.erl
@@ -2,33 +2,27 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 -module(amqp10_client_sup).
 
 -behaviour(supervisor).
 
-%% Private API.
+%% API
 -export([start_link/0]).
 
-%% Supervisor callbacks.
+%% Supervisor callbacks
 -export([init/1]).
 
--define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
-                                     temporary, infinity, Type, [Mod]}).
-
-%% -------------------------------------------------------------------
-%% Private API.
-%% -------------------------------------------------------------------
-
 start_link() ->
     supervisor:start_link({local, ?MODULE}, ?MODULE, []).
 
-%% -------------------------------------------------------------------
-%% Supervisor callbacks.
-%% -------------------------------------------------------------------
-
 init([]) ->
-    Template = ?CHILD(connection_sup, amqp10_client_connection_sup,
-                      supervisor, []),
-    {ok, {{simple_one_for_one, 0, 1}, [Template]}}.
+    SupFlags = #{strategy => simple_one_for_one,
+                 intensity => 0,
+                 period => 1},
+    ChildSpec = #{id => connection_sup,
+                  start => {amqp10_client_connection_sup, start_link, []},
+                  restart => temporary,
+                  type => supervisor},
+    {ok, {SupFlags, [ChildSpec]}}.
diff --git a/deps/amqp10_client/src/amqp10_client_types.erl b/deps/amqp10_client/src/amqp10_client_types.erl
index da7ee3391813..5758012e9335 100644
--- a/deps/amqp10_client/src/amqp10_client_types.erl
+++ b/deps/amqp10_client/src/amqp10_client_types.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 -module(amqp10_client_types).
 
@@ -10,7 +10,8 @@
 
 -export([unpack/1,
          utf8/1,
-         uint/1]).
+         uint/1,
+         make_properties/1]).
 
 -type amqp10_performative() :: #'v1_0.open'{} | #'v1_0.begin'{} | #'v1_0.attach'{} |
                                #'v1_0.flow'{} | #'v1_0.transfer'{} |
@@ -63,13 +64,15 @@
                                 link_event_detail()}.
 -type amqp10_event() :: {amqp10_event, amqp10_event_detail()}.
 
+-type properties() :: #{binary() => amqp10_binary_generator:amqp10_prim()}.
+
 -export_type([amqp10_performative/0, channel/0,
               source/0, target/0, amqp10_msg_record/0,
               delivery_state/0, amqp_error/0, connection_error/0,
-              amqp10_event_detail/0, amqp10_event/0]).
+              amqp10_event_detail/0, amqp10_event/0,
+              properties/0]).
 
 
-unpack(undefined) -> undefined;
 unpack({_, Value}) -> Value;
 unpack(Value) -> Value.
 
@@ -77,3 +80,11 @@ utf8(S) when is_list(S) -> {utf8, list_to_binary(S)};
 utf8(B) when is_binary(B) -> {utf8, B}.
 
 uint(N) -> {uint, N}.
+
+make_properties(#{properties := Props})
+  when map_size(Props) > 0 ->
+    {map, maps:fold(fun(K, V, L) ->
+                            [{{symbol, K}, V} | L]
+                    end, [], Props)};
+make_properties(_) ->
+    undefined.
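
For illustration, make_properties/1 above turns the properties map of an attach request into an AMQP map keyed by symbols; values are expected to be already-tagged AMQP primitives. A hypothetical call:

%% make_properties(#{properties => #{<<"paired">> => {boolean, true}}})
%% returns {map, [{{symbol, <<"paired">>}, {boolean, true}}]}.
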
diff --git a/deps/amqp10_client/src/amqp10_msg.erl b/deps/amqp10_client/src/amqp10_msg.erl
index c4eb1dce9aef..fa046cc60657 100644
--- a/deps/amqp10_client/src/amqp10_msg.erl
+++ b/deps/amqp10_client/src/amqp10_msg.erl
@@ -2,10 +2,12 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 -module(amqp10_msg).
 
+-include_lib("amqp10_common/include/amqp10_types.hrl").
+
 -export([from_amqp_records/1,
          to_amqp_records/1,
          % "read" api
@@ -38,7 +40,7 @@
 
 -include_lib("amqp10_common/include/amqp10_framing.hrl").
 
--type maybe(T) :: T | undefined.
+-type opt(T) :: T | undefined.
 
 -type delivery_tag() :: binary().
 -type content_type() :: term(). % TODO: refine
@@ -52,23 +54,23 @@
 
 -type amqp10_header() :: #{durable => boolean(), % false
                            priority => byte(), % 4
-                           ttl => maybe(non_neg_integer()),
+                           ttl => opt(non_neg_integer()),
                            first_acquirer => boolean(), % false
                            delivery_count => non_neg_integer()}. % 0
 
--type amqp10_properties() :: #{message_id => maybe(any()),
-                               user_id => maybe(binary()),
-                               to => maybe(any()),
-                               subject => maybe(binary()),
-                               reply_to => maybe(any()),
-                               correlation_id => maybe(any()),
-                               content_type => maybe(content_type()),
-                               content_encoding => maybe(content_encoding()),
-                               absolute_expiry_time => maybe(non_neg_integer()),
-                               creation_time => maybe(non_neg_integer()),
-                               group_id => maybe(binary()),
-                               group_sequence => maybe(non_neg_integer()),
-                               reply_to_group_id => maybe(binary())}.
+-type amqp10_properties() :: #{message_id => opt(any()),
+                               user_id => opt(binary()),
+                               to => opt(any()),
+                               subject => opt(binary()),
+                               reply_to => opt(any()),
+                               correlation_id => opt(any()),
+                               content_type => opt(content_type()),
+                               content_encoding => opt(content_encoding()),
+                               absolute_expiry_time => opt(non_neg_integer()),
+                               creation_time => opt(non_neg_integer()),
+                               group_id => opt(binary()),
+                               group_sequence => opt(non_neg_integer()),
+                               reply_to_group_id => opt(binary())}.
 
 -type amqp10_body() :: [#'v1_0.data'{}] |
                        [#'v1_0.amqp_sequence'{}] |
@@ -78,13 +80,13 @@
 
 -record(amqp10_msg,
         {transfer :: #'v1_0.transfer'{},
-         header :: maybe(#'v1_0.header'{}),
-         delivery_annotations :: maybe(#'v1_0.delivery_annotations'{}),
-         message_annotations :: maybe(#'v1_0.message_annotations'{}),
-         properties :: maybe(#'v1_0.properties'{}),
-         application_properties :: maybe(#'v1_0.application_properties'{}),
+         header :: opt(#'v1_0.header'{}),
+         delivery_annotations :: opt(#'v1_0.delivery_annotations'{}),
+         message_annotations :: opt(#'v1_0.message_annotations'{}),
+         properties :: opt(#'v1_0.properties'{}),
+         application_properties :: opt(#'v1_0.application_properties'{}),
          body :: amqp10_body() | unset,
-         footer :: maybe(#'v1_0.footer'{})
+         footer :: opt(#'v1_0.footer'{})
          }).
 
 -opaque amqp10_msg() :: #amqp10_msg{}.
@@ -106,8 +108,16 @@
 -spec from_amqp_records([amqp10_client_types:amqp10_msg_record()]) ->
     amqp10_msg().
 from_amqp_records([#'v1_0.transfer'{} = Transfer | Records]) ->
-    lists:foldl(fun parse_from_amqp/2, #amqp10_msg{transfer = Transfer,
-                                                   body = unset}, Records).
+    case lists:foldl(fun parse_from_amqp/2,
+                     #amqp10_msg{transfer = Transfer,
+                                 body = unset},
+                     Records) of
+        #amqp10_msg{body = Body} = Msg
+          when is_list(Body) ->
+            Msg#amqp10_msg{body = lists:reverse(Body)};
+        Msg ->
+            Msg
+    end.
 
 -spec to_amqp_records(amqp10_msg()) -> [amqp10_client_types:amqp10_msg_record()].
 to_amqp_records(#amqp10_msg{transfer = T,
@@ -142,7 +152,7 @@ settled(#amqp10_msg{transfer = #'v1_0.transfer'{settled = Settled}}) ->
 % the last 1 octet is the version
 % See 2.8.11 in the spec
 -spec message_format(amqp10_msg()) ->
-    maybe({non_neg_integer(), non_neg_integer()}).
+    opt({non_neg_integer(), non_neg_integer()}).
 message_format(#amqp10_msg{transfer =
                          #'v1_0.transfer'{message_format = undefined}}) ->
     undefined;
@@ -183,7 +193,8 @@ header(first_acquirer = K,
 header(delivery_count = K,
        #amqp10_msg{header = #'v1_0.header'{delivery_count = D}}) ->
     header_value(K, D);
-header(K, #amqp10_msg{header = undefined}) -> header_value(K, undefined).
+header(K, #amqp10_msg{header = undefined}) ->
+    header_value(K, undefined).
 
 -spec delivery_annotations(amqp10_msg()) -> #{annotations_key() => any()}.
 delivery_annotations(#amqp10_msg{delivery_annotations = undefined}) ->
@@ -256,12 +267,12 @@ body_bin(#amqp10_msg{body = #'v1_0.amqp_value'{} = Body}) ->
 new(DeliveryTag, Body, Settled) when is_binary(Body) ->
     #amqp10_msg{transfer = #'v1_0.transfer'{delivery_tag = {binary, DeliveryTag},
                                             settled = Settled,
-                                            message_format = {uint, 0}},
+                                            message_format = {uint, ?MESSAGE_FORMAT}},
                 body = [#'v1_0.data'{content = Body}]};
 new(DeliveryTag, Body, Settled) -> % TODO: constrain to amqp types
     #amqp10_msg{transfer = #'v1_0.transfer'{delivery_tag = {binary, DeliveryTag},
                                             settled = Settled,
-                                            message_format = {uint, 0}},
+                                            message_format = {uint, ?MESSAGE_FORMAT}},
                 body = Body}.
 
 %% @doc Create a new settled amqp10 message using the specified delivery tag
@@ -306,7 +317,7 @@ set_headers(Headers, #amqp10_msg{header = Current} = Msg) ->
     H = maps:fold(fun(durable, V, Acc) ->
                           Acc#'v1_0.header'{durable = V};
                      (priority, V, Acc) ->
-                          Acc#'v1_0.header'{priority = {uint, V}};
+                          Acc#'v1_0.header'{priority = {ubyte, V}};
                      (first_acquirer, V, Acc) ->
                           Acc#'v1_0.header'{first_acquirer = V};
                      (ttl, V, Acc) ->
@@ -322,11 +333,16 @@ set_properties(Props, #amqp10_msg{properties = undefined} = Msg) ->
     set_properties(Props, Msg#amqp10_msg{properties = #'v1_0.properties'{}});
 set_properties(Props, #amqp10_msg{properties = Current} = Msg) ->
     % TODO many fields are `any` types and we need to try to type tag them
-    P = maps:fold(fun(message_id, V, Acc) when is_binary(V) ->
-                          % message_id can be any type but we restrict it here
+    P = maps:fold(fun(message_id, {T, _V} = TypeVal, Acc) when T =:= ulong orelse
+                                                               T =:= uuid orelse
+                                                               T =:= binary orelse
+                                                               T =:= utf8 ->
+                          Acc#'v1_0.properties'{message_id = TypeVal};
+                     (message_id, V, Acc) when is_binary(V) ->
+                          %% backward compat clause
                           Acc#'v1_0.properties'{message_id = utf8(V)};
-                     (user_id, V, Acc) ->
-                          Acc#'v1_0.properties'{user_id = utf8(V)};
+                     (user_id, V, Acc) when is_binary(V) ->
+                          Acc#'v1_0.properties'{user_id = {binary, V}};
                      (to, V, Acc) ->
                           Acc#'v1_0.properties'{to = utf8(V)};
                      (subject, V, Acc) ->
@@ -407,15 +423,17 @@ wrap_ap_value(true) ->
     {boolean, true};
 wrap_ap_value(false) ->
     {boolean, false};
-wrap_ap_value(V) when is_integer(V) ->
-    {uint, V};
 wrap_ap_value(V) when is_binary(V) ->
     utf8(V);
 wrap_ap_value(V) when is_list(V) ->
     utf8(list_to_binary(V));
 wrap_ap_value(V) when is_atom(V) ->
-    utf8(atom_to_list(V)).
-
+    utf8(atom_to_binary(V));
+wrap_ap_value(V) when is_integer(V) ->
+    case V < 0 of
+        true -> {int, V};
+        false -> {uint, V}
+    end.
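
A hedged summary of the value typing above, assuming (as the round-trip test later in this patch suggests) that amqp10_msg:set_application_properties/2 passes each value through wrap_ap_value/1:

%% wrap_ap_value(true)       => {boolean, true}
%% wrap_ap_value(<<"x">>)    => {utf8, <<"x">>}
%% wrap_ap_value("a_value")  => {utf8, <<"a_value">>}
%% wrap_ap_value(some_atom)  => {utf8, <<"some_atom">>}
%% wrap_ap_value(3)          => {uint, 3}
%% wrap_ap_value(-1)         => {int, -1}  %% previously tagged as uint
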
 
 %% LOCAL
 header_value(durable, undefined) -> false;
@@ -437,10 +455,16 @@ parse_from_amqp(#'v1_0.application_properties'{} = APs, AmqpMsg) ->
     AmqpMsg#amqp10_msg{application_properties = APs};
 parse_from_amqp(#'v1_0.amqp_value'{} = Value, AmqpMsg) ->
     AmqpMsg#amqp10_msg{body = Value};
-parse_from_amqp(#'v1_0.amqp_sequence'{} = Seq, AmqpMsg) ->
-    AmqpMsg#amqp10_msg{body = [Seq]};
-parse_from_amqp(#'v1_0.data'{} = Data, AmqpMsg) ->
-    AmqpMsg#amqp10_msg{body = [Data]};
+parse_from_amqp(#'v1_0.amqp_sequence'{} = Seq, AmqpMsg = #amqp10_msg{body = Body0}) ->
+    Body = if Body0 =:= unset -> [Seq];
+              is_list(Body0) -> [Seq | Body0]
+           end,
+    AmqpMsg#amqp10_msg{body = Body};
+parse_from_amqp(#'v1_0.data'{} = Data, AmqpMsg = #amqp10_msg{body = Body0}) ->
+    Body = if Body0 =:= unset -> [Data];
+              is_list(Body0) -> [Data | Body0]
+           end,
+    AmqpMsg#amqp10_msg{body = Body};
 parse_from_amqp(#'v1_0.footer'{} = Header, AmqpMsg) ->
     AmqpMsg#amqp10_msg{footer = Header}.
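
For illustration, with the accumulation above and the body reversal added to from_amqp_records/1, a message delivered in several data sections keeps them in wire order (a hypothetical decode, not from this patch):

%% Msg = amqp10_msg:from_amqp_records(
%%         [#'v1_0.transfer'{},
%%          #'v1_0.data'{content = <<"part-1">>},
%%          #'v1_0.data'{content = <<"part-2">>}]),
%% [<<"part-1">>, <<"part-2">>] = amqp10_msg:body(Msg).
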
 
diff --git a/deps/amqp10_client/test/activemq_ct_helpers.erl b/deps/amqp10_client/test/activemq_ct_helpers.erl
index d5831b2606f6..ba1b7fe5721e 100644
--- a/deps/amqp10_client/test/activemq_ct_helpers.erl
+++ b/deps/amqp10_client/test/activemq_ct_helpers.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 -module(activemq_ct_helpers).
diff --git a/deps/amqp10_client/test/mock_server.erl b/deps/amqp10_client/test/mock_server.erl
index 661bf78e7b5b..b3cd6bec3250 100644
--- a/deps/amqp10_client/test/mock_server.erl
+++ b/deps/amqp10_client/test/mock_server.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 -module(mock_server).
 
@@ -52,14 +52,15 @@ recv(Sock) ->
     {ok, <>} = gen_tcp:recv(Sock, 8),
     {ok, Data} = gen_tcp:recv(Sock, Length - 8),
-    {PerfDesc, Payload} = amqp10_binary_parser:parse(Data),
+    {PerfDesc, BytesParsed} = amqp10_binary_parser:parse(Data),
     Perf = amqp10_framing:decode(PerfDesc),
+    Payload = binary_part(Data, BytesParsed, size(Data) - BytesParsed),
     {Ch, Perf, Payload}.
 
 amqp_step(Fun) ->
     fun (Sock) ->
             Recv = recv(Sock),
-            ct:pal("AMQP Step receieved ~tp~n", [Recv]),
+            ct:pal("AMQP Step received ~tp~n", [Recv]),
             case Fun(Recv) of
                 {_Ch, []} -> ok;
                 {Ch, {multi, Records}} ->
@@ -81,4 +82,4 @@ send_amqp_header_step(Sock) ->
 recv_amqp_header_step(Sock) ->
     ct:pal("Receiving AMQP protocol header"),
     {ok, R} = gen_tcp:recv(Sock, 8),
-    ct:pal("handshake Step receieved ~tp~n", [R]).
+    ct:pal("handshake Step received ~tp~n", [R]).
diff --git a/deps/amqp10_client/test/msg_SUITE.erl b/deps/amqp10_client/test/msg_SUITE.erl
index 261af2ee3fe6..02d851180a4a 100644
--- a/deps/amqp10_client/test/msg_SUITE.erl
+++ b/deps/amqp10_client/test/msg_SUITE.erl
@@ -2,12 +2,11 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 -module(msg_SUITE).
 
--include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
 -include_lib("amqp10_common/include/amqp10_framing.hrl").
diff --git a/deps/amqp10_client/test/system_SUITE.erl b/deps/amqp10_client/test/system_SUITE.erl
index 758074755780..9125222062eb 100644
--- a/deps/amqp10_client/test/system_SUITE.erl
+++ b/deps/amqp10_client/test/system_SUITE.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 -module(system_SUITE).
@@ -14,21 +14,10 @@
 
 -include("src/amqp10_client.hrl").
 
--compile(export_all).
+-compile([export_all, nowarn_export_all]).
 
--define(UNAUTHORIZED_USER, <<"test_user_no_perm">>).
-
-%% The latch constant defines how many processes are spawned in order
-%% to run certain functionality in parallel. It follows the standard
-%% countdown latch pattern.
--define(LATCH, 100).
-
-%% The wait constant defines how long a consumer waits before it
-%% unsubscribes
--define(WAIT, 200).
-
-%% How to long wait for a process to die after an expected failure
--define(PROCESS_EXIT_TIMEOUT, 5000).
+suite() ->
+    [{timetrap, {minutes, 4}}].
 
 all() ->
     [
@@ -45,6 +34,7 @@ groups() ->
      {activemq, [], shared()},
      {rabbitmq_strict, [], [
                             basic_roundtrip_tls,
+                            roundtrip_tls_global_config,
                             open_connection_plain_sasl,
                             open_connection_plain_sasl_failure,
                             open_connection_plain_sasl_parse_uri
@@ -75,9 +65,11 @@ shared() ->
      split_transfer,
      transfer_unsettled,
      subscribe,
-     subscribe_with_auto_flow,
+     subscribe_with_auto_flow_settled,
+     subscribe_with_auto_flow_unsettled,
      outgoing_heartbeat,
-     roundtrip_large_messages
+     roundtrip_large_messages,
+     transfer_id_vs_delivery_id
     ].
 
 %% -------------------------------------------------------------------
@@ -92,7 +84,8 @@ init_per_suite(Config) ->
       ]).
 
 end_per_suite(Config) ->
-    rabbit_ct_helpers:run_teardown_steps(Config,
+    rabbit_ct_helpers:run_teardown_steps(
+      Config,
       [
        fun stop_amqp10_client_app/1
       ]).
@@ -110,20 +103,21 @@ stop_amqp10_client_app(Config) ->
 %% -------------------------------------------------------------------
 
 init_per_group(rabbitmq, Config0) ->
-    Config = rabbit_ct_helpers:set_config(Config0,
-                                          {sasl, {plain, <<"guest">>, <<"guest">>}}),
+    Config = rabbit_ct_helpers:set_config(Config0, {sasl, anon}),
     Config1 = rabbit_ct_helpers:merge_app_env(Config,
-                                              [{rabbitmq_amqp1_0,
-                                                [{protocol_strict_mode, true}]}]),
+                                              [{rabbit,
+                                                [{max_message_size, 134217728}]}]),
     rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps());
+
 init_per_group(rabbitmq_strict, Config0) ->
     Config = rabbit_ct_helpers:set_config(Config0,
                                           {sasl, {plain, <<"guest">>, <<"guest">>}}),
     Config1 = rabbit_ct_helpers:merge_app_env(Config,
-                                              [{rabbitmq_amqp1_0,
-                                                [{default_user, none},
-                                                 {protocol_strict_mode, true}]}]),
+                                              [{rabbit,
+                                                [{anonymous_login_user, none},
+                                                 {max_message_size, 134217728}]}]),
     rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps());
+
 init_per_group(activemq, Config0) ->
     Config = rabbit_ct_helpers:set_config(Config0, {sasl, anon}),
     rabbit_ct_helpers:run_steps(Config,
@@ -281,6 +275,26 @@ basic_roundtrip_tls(Config) ->
                 sasl => ?config(sasl, Config)},
     roundtrip(OpnConf).
 
+%% ssl option validation fails if verify_peer is enabled without cacerts.
+%% Test that the cacertfile option is picked up from the application env.
+roundtrip_tls_global_config(Config) ->
+    Hostname = ?config(rmq_hostname, Config),
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls),
+    CACertFile = ?config(rmq_certsdir, Config) ++ "/testca/cacert.pem",
+    CertFile = ?config(rmq_certsdir, Config) ++ "/client/cert.pem",
+    KeyFile = ?config(rmq_certsdir, Config) ++ "/client/key.pem",
+    ok = application:set_env(amqp10_client, ssl_options, [{cacertfile, CACertFile},
+                                                          {certfile, CertFile},
+                                                          {keyfile, KeyFile}]),
+    OpnConf = #{address => Hostname,
+                port => Port,
+                tls_opts => {secure_port, [{verify, verify_peer}]},
+                notify => self(),
+                container_id => <<"open_connection_tls_container">>,
+                sasl => ?config(sasl, Config)},
+    roundtrip(OpnConf),
+    application:unset_env(amqp10_client, ssl_options).
+
 service_bus_config(Config, ContainerId) ->
     Hostname = ?config(sb_endpoint, Config),
     Port = ?config(sb_port, Config),
@@ -304,14 +318,15 @@ roundtrip_large_messages(Config) ->
     Hostname = ?config(rmq_hostname, Config),
     Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
     OpenConf = #{address => Hostname, port => Port, sasl => anon},
-    DataKb = crypto:strong_rand_bytes(1024),
-    roundtrip(OpenConf, DataKb),
-    Data1Mb = binary:copy(DataKb, 1024),
-    roundtrip(OpenConf, Data1Mb),
-    roundtrip(OpenConf, binary:copy(Data1Mb, 8)),
-    roundtrip(OpenConf, binary:copy(Data1Mb, 64)),
-    ok.
 
+    DataKb = rand:bytes(1024),
+    DataMb = rand:bytes(1024 * 1024),
+    Data8Mb = rand:bytes(8 * 1024 * 1024),
+    Data64Mb = rand:bytes(64 * 1024 * 1024),
+    ok = roundtrip(OpenConf, DataKb),
+    ok = roundtrip(OpenConf, DataMb),
+    ok = roundtrip(OpenConf, Data8Mb),
+    ok = roundtrip(OpenConf, Data64Mb).
 
 roundtrip(OpenConf) ->
     roundtrip(OpenConf, <<"banana">>).
@@ -319,39 +334,38 @@ roundtrip(OpenConf) ->
 roundtrip(OpenConf, Body) ->
     {ok, Connection} = amqp10_client:open_connection(OpenConf),
     {ok, Session} = amqp10_client:begin_session(Connection),
-    {ok, Sender} = amqp10_client:attach_sender_link(Session,
-                                                    <<"banana-sender">>,
-                                                    <<"test1">>,
-                                                    settled,
-                                                    unsettled_state),
+    {ok, Sender} = amqp10_client:attach_sender_link(
+                     Session, <<"banana-sender">>, <<"test1">>, settled, unsettled_state),
     await_link(Sender, credited, link_credit_timeout),
 
     Now = os:system_time(millisecond),
-    Props = #{creation_time => Now},
-    Msg0 =  amqp10_msg:set_properties(Props,
-                                      amqp10_msg:new(<<"my-tag">>, Body, true)),
+    Props = #{creation_time => Now,
+              message_id => <<"my message ID">>,
+              correlation_id => <<"my correlation ID">>,
+              content_type => <<"my content type">>,
+              content_encoding => <<"my content encoding">>,
+              group_id => <<"my group ID">>},
+    Msg0 = amqp10_msg:new(<<"my-tag">>, Body, true),
     Msg1 = amqp10_msg:set_application_properties(#{"a_key" => "a_value"}, Msg0),
-    Msg = amqp10_msg:set_message_annotations(#{<<"x_key">> => "x_value"}, Msg1),
-    % RabbitMQ AMQP 1.0 does not yet support delivery annotations
-    % Msg = amqp10_msg:set_delivery_annotations(#{<<"x_key">> => "x_value"}, Msg2),
+    Msg2 = amqp10_msg:set_properties(Props, Msg1),
+    Msg = amqp10_msg:set_message_annotations(#{<<"x-key">> => "x-value",
+                                               <<"x_key">> => "x_value"}, Msg2),
     ok = amqp10_client:send_msg(Sender, Msg),
     ok = amqp10_client:detach_link(Sender),
     await_link(Sender, {detached, normal}, link_detach_timeout),
 
     {error, link_not_found} = amqp10_client:detach_link(Sender),
-    {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
-                                                        <<"banana-receiver">>,
-                                                        <<"test1">>,
-                                                        settled,
-                                                        unsettled_state),
-    {ok, OutMsg} = amqp10_client:get_msg(Receiver, 60000 * 5),
+    {ok, Receiver} = amqp10_client:attach_receiver_link(
+                       Session, <<"banana-receiver">>, <<"test1">>, settled, unsettled_state),
+    {ok, OutMsg} = amqp10_client:get_msg(Receiver, 4 * 60_000),
     ok = amqp10_client:end_session(Session),
     ok = amqp10_client:close_connection(Connection),
+
     % ct:pal(?LOW_IMPORTANCE, "roundtrip message Out: ~tp~nIn: ~tp~n", [OutMsg, Msg]),
-    #{creation_time := Now} = amqp10_msg:properties(OutMsg),
-    #{<<"a_key">> := <<"a_value">>} = amqp10_msg:application_properties(OutMsg),
-    #{<<"x_key">> := <<"x_value">>} = amqp10_msg:message_annotations(OutMsg),
-    % #{<<"x_key">> := <<"x_value">>} = amqp10_msg:delivery_annotations(OutMsg),
+    ?assertMatch(Props, amqp10_msg:properties(OutMsg)),
+    ?assertEqual(#{<<"a_key">> => <<"a_value">>}, amqp10_msg:application_properties(OutMsg)),
+    ?assertMatch(#{<<"x-key">> := <<"x-value">>,
+                   <<"x_key">> := <<"x_value">>}, amqp10_msg:message_annotations(OutMsg)),
     ?assertEqual([Body], amqp10_msg:body(OutMsg)),
     ok.
 
@@ -379,7 +393,7 @@ filtered_roundtrip(OpenConf, Body) ->
                                                         settled,
                                                         unsettled_state),
     ok = amqp10_client:send_msg(Sender, Msg1),
-    {ok, OutMsg1} = amqp10_client:get_msg(DefaultReceiver, 60000 * 5),
+    {ok, OutMsg1} = amqp10_client:get_msg(DefaultReceiver, 60_000 * 4),
     ?assertEqual(<<"msg-1-tag">>, amqp10_msg:delivery_tag(OutMsg1)),
 
     timer:sleep(5 * 1000),
@@ -398,16 +412,52 @@ filtered_roundtrip(OpenConf, Body) ->
                                                         unsettled_state,
                                                         #{<<"apache.org:selector-filter:string">> => <<"amqp.annotation.x-opt-enqueuedtimeutc > ", Now2Binary/binary>>}),
 
-    {ok, OutMsg2} = amqp10_client:get_msg(DefaultReceiver, 60000 * 5),
+    {ok, OutMsg2} = amqp10_client:get_msg(DefaultReceiver, 60_000 * 4),
     ?assertEqual(<<"msg-2-tag">>, amqp10_msg:delivery_tag(OutMsg2)),
 
-    {ok, OutMsgFiltered} = amqp10_client:get_msg(FilteredReceiver, 60000 * 5),
+    {ok, OutMsgFiltered} = amqp10_client:get_msg(FilteredReceiver, 60_000 * 4),
     ?assertEqual(<<"msg-2-tag">>, amqp10_msg:delivery_tag(OutMsgFiltered)),
 
     ok = amqp10_client:end_session(Session),
     ok = amqp10_client:close_connection(Connection),
     ok.
 
+%% Assert that implementations respect the difference between transfer-id and delivery-id.
+transfer_id_vs_delivery_id(Config) ->
+    Hostname = ?config(rmq_hostname, Config),
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    OpenConf = #{address => Hostname, port => Port, sasl => anon},
+
+    {ok, Connection} = amqp10_client:open_connection(OpenConf),
+    {ok, Session} = amqp10_client:begin_session(Connection),
+    {ok, Sender} = amqp10_client:attach_sender_link(
+                     Session, <<"banana-sender">>, <<"test1">>, settled, unsettled_state),
+    await_link(Sender, credited, link_credit_timeout),
+
+    P0 = binary:copy(<<0>>, 8_000_000),
+    P1 = <>,
+    P2 = <>,
+    Msg1 = amqp10_msg:new(<<"tag 1">>, P1, true),
+    Msg2 = amqp10_msg:new(<<"tag 2">>, P2, true),
+    ok = amqp10_client:send_msg(Sender, Msg1),
+    ok = amqp10_client:send_msg(Sender, Msg2),
+    ok = amqp10_client:detach_link(Sender),
+    await_link(Sender, {detached, normal}, link_detach_timeout),
+
+    {ok, Receiver} = amqp10_client:attach_receiver_link(
+                       Session, <<"banana-receiver">>, <<"test1">>, settled, unsettled_state),
+    {ok, RcvMsg1} = amqp10_client:get_msg(Receiver, 60_000 * 4),
+    {ok, RcvMsg2} = amqp10_client:get_msg(Receiver, 60_000 * 4),
+    ok = amqp10_client:end_session(Session),
+    ok = amqp10_client:close_connection(Connection),
+
+    ?assertEqual([P1], amqp10_msg:body(RcvMsg1)),
+    ?assertEqual([P2], amqp10_msg:body(RcvMsg2)),
+    %% Despite many transfers, there were only 2 deliveries.
+    %% Therefore, delivery-id should have been increased by just 1.
+    ?assertEqual(serial_number:add(amqp10_msg:delivery_id(RcvMsg1), 1),
+                 amqp10_msg:delivery_id(RcvMsg2)).
+
 % a message is sent before the link attach is guaranteed to
 % have completed and link credit granted
 % also queue a link detached immediately after transfer
@@ -489,7 +539,7 @@ transfer_unsettled(Config) ->
 subscribe(Config) ->
     Hostname = ?config(rmq_hostname, Config),
     Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
-    QueueName = <<"test-sub">>,
+    QueueName = atom_to_binary(?FUNCTION_NAME),
     {ok, Connection} = amqp10_client:open_connection(Hostname, Port),
     {ok, Session} = amqp10_client:begin_session(Connection),
     {ok, Sender} = amqp10_client:attach_sender_link_sync(Session,
@@ -501,41 +551,165 @@ subscribe(Config) ->
                                                         <<"sub-receiver">>,
                                                         QueueName, unsettled),
     ok = amqp10_client:flow_link_credit(Receiver, 10, never),
-
-    _ = receive_messages(Receiver, 10),
-    % assert no further messages are delivered
-    timeout = receive_one(Receiver),
-    receive
-        {amqp10_event, {link, Receiver, credit_exhausted}} ->
-            ok
-    after 5000 ->
-              flush(),
-              exit(credit_exhausted_assert)
+    [begin
+         receive {amqp10_msg, Receiver, Msg} ->
+                     ok = amqp10_client:accept_msg(Receiver, Msg)
+         after 2000 -> ct:fail(timeout)
+         end
+     end || _ <- lists:seq(1, 10)],
+    ok = assert_no_message(Receiver),
+
+    receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok
+    after 5000 -> flush(),
+                  exit(credit_exhausted_assert)
     end,
 
     ok = amqp10_client:end_session(Session),
     ok = amqp10_client:close_connection(Connection).
 
-subscribe_with_auto_flow(Config) ->
+subscribe_with_auto_flow_settled(Config) ->
+    SenderSettleMode = settled,
     Hostname = ?config(rmq_hostname, Config),
     Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
-    QueueName = <<"test-sub">>,
+    QueueName = atom_to_binary(?FUNCTION_NAME),
     {ok, Connection} = amqp10_client:open_connection(Hostname, Port),
     {ok, Session} = amqp10_client:begin_session(Connection),
     {ok, Sender} = amqp10_client:attach_sender_link_sync(Session,
                                                          <<"sub-sender">>,
                                                          QueueName),
     await_link(Sender, credited, link_credit_timeout),
-    _ = publish_messages(Sender, <<"banana">>, 10),
-    {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
-                                                        <<"sub-receiver">>,
-                                                        QueueName, unsettled),
+
+    publish_messages(Sender, <<"banana">>, 20),
+    {ok, Receiver} = amqp10_client:attach_receiver_link(
+                       Session, <<"sub-receiver">>, QueueName, SenderSettleMode),
+    await_link(Receiver, attached, attached_timeout),
+
     ok = amqp10_client:flow_link_credit(Receiver, 5, 2),
+    ?assertEqual(20, count_received_messages(Receiver)),
 
-    _ = receive_messages(Receiver, 10),
+    ok = amqp10_client:detach_link(Receiver),
+    ok = amqp10_client:detach_link(Sender),
+    ok = amqp10_client:end_session(Session),
+    ok = amqp10_client:close_connection(Connection).
 
-    % assert no further messages are delivered
-    timeout = receive_one(Receiver),
+subscribe_with_auto_flow_unsettled(Config) ->
+    SenderSettleMode = unsettled,
+    Hostname = ?config(rmq_hostname, Config),
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    QueueName = atom_to_binary(?FUNCTION_NAME),
+    {ok, Connection} = amqp10_client:open_connection(Hostname, Port),
+    {ok, Session} = amqp10_client:begin_session(Connection),
+    {ok, Sender} = amqp10_client:attach_sender_link_sync(Session,
+                                                         <<"sub-sender">>,
+                                                         QueueName),
+    await_link(Sender, credited, link_credit_timeout),
+
+    _ = publish_messages(Sender, <<"1-">>, 30),
+    %% Use sender settle mode 'unsettled'.
+    %% This should require us to manually settle messages in order to receive more messages.
+    {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"sub-receiver-2">>, QueueName, SenderSettleMode),
+    await_link(Receiver, attached, attached_timeout),
+    ok = amqp10_client:flow_link_credit(Receiver, 5, 2),
+    %% We should receive exactly 5 messages.
+    [M1, _M2, M3, M4, M5] = receive_messages(Receiver, 5),
+    ok = assert_no_message(Receiver),
+
+    %% Even when we accept the first 3 messages, the number of unsettled messages has not yet fallen below 2.
+    %% Therefore, the client should not yet grant more credits to the sender.
+    ok = amqp10_client_session:disposition(
+           Receiver, amqp10_msg:delivery_id(M1), amqp10_msg:delivery_id(M3), true, accepted),
+    ok = assert_no_message(Receiver),
+
+    %% When we accept 1 more message (the order in which we accept shouldn't matter, here we accept M5 before M4),
+    %% the number of unsettled messages now falls below 2 (since only M4 is left unsettled).
+    %% Therefore, the client should grant 5 credits to the sender,
+    %% and we should receive 5 more messages.
+    ok = amqp10_client:accept_msg(Receiver, M5),
+    [_M6, _M7, _M8, _M9, M10] = receive_messages(Receiver, 5),
+    ok = assert_no_message(Receiver),
+
+    %% It shouldn't matter how we settle messages, therefore we use 'rejected' this time.
+    %% Settling all in flight messages should cause us to receive exactly 5 more messages.
+    ok = amqp10_client_session:disposition(
+           Receiver, amqp10_msg:delivery_id(M4), amqp10_msg:delivery_id(M10), true, rejected),
+    [M11, _M12, _M13, _M14, M15] = receive_messages(Receiver, 5),
+    ok = assert_no_message(Receiver),
+
+    %% Dynamically decrease link credit.
+    %% Since we explicitly tell the client to grant 3 new credits now, we expect to receive 3 more messages.
+    ok = amqp10_client:flow_link_credit(Receiver, 3, 3),
+    [M16, _M17, M18] = receive_messages(Receiver, 3),
+    ok = assert_no_message(Receiver),
+
+    ok = amqp10_client_session:disposition(
+           Receiver, amqp10_msg:delivery_id(M11), amqp10_msg:delivery_id(M15), true, accepted),
+    %% However, RenewWhenBelow=3 still applies to all unsettled messages.
+    %% Right now we have 3 messages (M16, M17, M18) unsettled.
+    ok = assert_no_message(Receiver),
+
+    %% Settling 1 out of these 3 messages causes the number of unsettled messages to fall below
+    %% RenewWhenBelow=3, resulting in 3 new messages being received.
+    ok = amqp10_client:accept_msg(Receiver, M18),
+    [_M19, _M20, _M21] = receive_messages(Receiver, 3),
+    ok = assert_no_message(Receiver),
+
+    ok = amqp10_client:flow_link_credit(Receiver, 3, never, true),
+    [_M22, _M23, M24] = receive_messages(Receiver, 3),
+    ok = assert_no_message(Receiver),
+
+    %% Since RenewWhenBelow = never, we expect to receive no new messages despite settling.
+    ok = amqp10_client_session:disposition(
+           Receiver, amqp10_msg:delivery_id(M16), amqp10_msg:delivery_id(M24), true, rejected),
+    ok = assert_no_message(Receiver),
+
+    ok = amqp10_client:flow_link_credit(Receiver, 2, never, false),
+    [M25, _M26] = receive_messages(Receiver, 2),
+    ok = assert_no_message(Receiver),
+
+    ok = amqp10_client:flow_link_credit(Receiver, 3, 3),
+    [_M27, _M28, M29] = receive_messages(Receiver, 3),
+    ok = assert_no_message(Receiver),
+
+    ok = amqp10_client_session:disposition(
+           Receiver, amqp10_msg:delivery_id(M25), amqp10_msg:delivery_id(M29), true, accepted),
+    [M30] = receive_messages(Receiver, 1),
+    ok = assert_no_message(Receiver),
+    ok = amqp10_client:accept_msg(Receiver, M30),
+    %% The sender queue is empty now.
+    ok = assert_no_message(Receiver),
+
+    ok = amqp10_client:flow_link_credit(Receiver, 3, 1),
+    _ = publish_messages(Sender, <<"2-">>, 1),
+    [M31] = receive_messages(Receiver, 1),
+    ok = amqp10_client:accept_msg(Receiver, M31),
+
+    %% Since the flow_link_credit/3 documentation states
+    %%     "if RenewWhenBelow is an integer, the amqp10_client will automatically grant more
+    %%     Credit to the sender when the sum of the remaining link credit and the number of
+    %%     unsettled messages falls below the value of RenewWhenBelow."
+    %% our expectation is that the amqp10_client has not renewed credit since the sum of
+    %% remaining link credit (2) and unsettled messages (0) is 2.
+    %%
+    %% Therefore, when we publish another 5 messages, we expect to receive only 2 of them!
+    _ = publish_messages(Sender, <<"3-">>, 5),
+    [M32, M33] = receive_messages(Receiver, 2),
+    ok = assert_no_message(Receiver),
+
+    %% When we accept both messages, the sum of the remaining link credit (0) and unsettled messages (0)
+    %% falls below RenewWhenBelow=1 causing the amqp10_client to grant 3 new credits.
+    ok = amqp10_client:accept_msg(Receiver, M32),
+    ok = assert_no_message(Receiver),
+    ok = amqp10_client:accept_msg(Receiver, M33),
+
+    [M35, M36, M37] = receive_messages(Receiver, 3),
+    ok = amqp10_client:accept_msg(Receiver, M35),
+    ok = amqp10_client:accept_msg(Receiver, M36),
+    ok = amqp10_client:accept_msg(Receiver, M37),
+    %% The sender queue is empty now.
+    ok = assert_no_message(Receiver),
+
+    ok = amqp10_client:detach_link(Receiver),
+    ok = amqp10_client:detach_link(Sender),
     ok = amqp10_client:end_session(Session),
     ok = amqp10_client:close_connection(Connection).
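The credit-renewal rule exercised by the test above can be summarised in a few lines. This is only a sketch of the behaviour documented for flow_link_credit/3, not the actual amqp10_client implementation; maybe_renew/4 is a hypothetical helper:

    %% Grant Credit new credits whenever the sum of the remaining link
    %% credit and the number of unsettled messages drops below RenewWhenBelow.
    maybe_renew(_LinkCredit, _Unsettled, never, _Credit) ->
        no_renewal;
    maybe_renew(LinkCredit, Unsettled, RenewWhenBelow, Credit)
      when is_integer(RenewWhenBelow),
           LinkCredit + Unsettled < RenewWhenBelow ->
        {grant, Credit};
    maybe_renew(_LinkCredit, _Unsettled, _RenewWhenBelow, _Credit) ->
        no_renewal.
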
 
@@ -676,11 +850,13 @@ incoming_heartbeat(Config) ->
               idle_time_out => 1000, notify => self()},
     {ok, Connection} = amqp10_client:open_connection(CConf),
     receive
-        {amqp10_event, {connection, Connection,
-         {closed, {resource_limit_exceeded, <<"remote idle-time-out">>}}}} ->
+        {amqp10_event,
+         {connection, Connection0,
+          {closed, {resource_limit_exceeded, <<"remote idle-time-out">>}}}}
+          when Connection0 =:= Connection ->
             ok
     after 5000 ->
-          exit(incoming_heartbeat_assert)
+              exit(incoming_heartbeat_assert)
     end,
     demonitor(MockRef).
 
@@ -688,45 +864,72 @@ incoming_heartbeat(Config) ->
 %%% HELPERS
 %%%
 
-receive_messages(Receiver, Num) ->
-    [begin
-         ct:pal("receive_messages ~tp", [T]),
-         ok = receive_one(Receiver)
-     end || T <- lists:seq(1, Num)].
+await_link(Who, What, Err) ->
+    receive
+        {amqp10_event, {link, Who0, What0}}
+          when Who0 =:= Who andalso
+               What0 =:= What ->
+            ok;
+        {amqp10_event, {link, Who0, {detached, Why}}}
+          when Who0 =:= Who ->
+            ct:fail(Why)
+    after 5000 ->
+              flush(),
+              ct:fail(Err)
+    end.
 
-publish_messages(Sender, Data, Num) ->
+publish_messages(Sender, BodyPrefix, Num) ->
     [begin
-        Tag = integer_to_binary(T),
-        Msg = amqp10_msg:new(Tag, Data, false),
-        ok = amqp10_client:send_msg(Sender, Msg),
-        ok = await_disposition(Tag)
+         Tag = integer_to_binary(T),
+         Msg = amqp10_msg:new(Tag, <>, false),
+         ok = amqp10_client:send_msg(Sender, Msg),
+         ok = await_disposition(Tag)
      end || T <- lists:seq(1, Num)].
 
-receive_one(Receiver) ->
-    receive
-        {amqp10_msg, Receiver, Msg} ->
-            amqp10_client:accept_msg(Receiver, Msg)
-    after 2000 ->
-          timeout
-    end.
-
 await_disposition(DeliveryTag) ->
     receive
-        {amqp10_disposition, {accepted, DeliveryTag}} -> ok
+        {amqp10_disposition, {accepted, DeliveryTag0}}
+          when DeliveryTag0 =:= DeliveryTag -> ok
     after 3000 ->
               flush(),
-              exit(dispostion_timeout)
+              ct:fail(dispostion_timeout)
     end.
 
-await_link(Who, What, Err) ->
+count_received_messages(Receiver) ->
+    count_received_messages0(Receiver, 0).
+
+count_received_messages0(Receiver, Count) ->
     receive
-        {amqp10_event, {link, Who, What}} ->
-            ok;
-        {amqp10_event, {link, Who, {detached, Why}}} ->
-            exit(Why)
-    after 5000 ->
-              flush(),
-              exit(Err)
+        {amqp10_msg, Receiver, _Msg} ->
+            count_received_messages0(Receiver, Count + 1)
+    after 500 ->
+              Count
+    end.
+
+receive_messages(Receiver, N) ->
+    receive_messages0(Receiver, N, []).
+
+receive_messages0(_Receiver, 0, Acc) ->
+    lists:reverse(Acc);
+receive_messages0(Receiver, N, Acc) ->
+    receive
+        {amqp10_msg, Receiver, Msg} ->
+            receive_messages0(Receiver, N - 1, [Msg | Acc])
+    after 5000  ->
+              LastReceivedMsg = case Acc of
+                                    [] -> none;
+                                    [M | _] -> M
+                                end,
+              ct:fail({timeout,
+                       {num_received, length(Acc)},
+                       {num_missing, N},
+                       {last_received_msg, LastReceivedMsg}
+                      })
+    end.
+
+assert_no_message(Receiver) ->
+    receive {amqp10_msg, Receiver, Msg} -> ct:fail({unexpected_message, Msg})
+    after 50 -> ok
     end.
 
 to_bin(X) when is_list(X) ->
diff --git a/deps/amqp10_client/test/system_SUITE_data/conf/activemq.xml b/deps/amqp10_client/test/system_SUITE_data/conf/activemq.xml
index 2b9d37ed1812..9e14c13e84d2 100644
--- a/deps/amqp10_client/test/system_SUITE_data/conf/activemq.xml
+++ b/deps/amqp10_client/test/system_SUITE_data/conf/activemq.xml
@@ -28,12 +28,6 @@
         
     
 
-   
-    
-    
-
     
diff --git a/deps/amqp10_client/test/system_SUITE_data/conf/activemq_no_anon.xml b/deps/amqp10_client/test/system_SUITE_data/conf/activemq_no_anon.xml
index 2e489fa2b9d2..fd2dbfc5bba3 100644
--- a/deps/amqp10_client/test/system_SUITE_data/conf/activemq_no_anon.xml
+++ b/deps/amqp10_client/test/system_SUITE_data/conf/activemq_no_anon.xml
@@ -28,12 +28,6 @@
         
     
 
-   
-    
-    
-
     
diff --git a/deps/amqp10_common/.gitignore b/deps/amqp10_common/.gitignore
index 22c3cc398034..92a78ef7f440 100644
--- a/deps/amqp10_common/.gitignore
+++ b/deps/amqp10_common/.gitignore
@@ -1,29 +1,3 @@
-*~
-.sw?
-.*.sw?
-*.beam
-*.coverdata
-/.erlang.mk/
-/cover/
-/deps/
-/doc/
-/ebin/
-/escript/
-/escript.lock
-/git-revisions.txt
-/logs/
-/plugins/
-/plugins.lock
-/rebar.config
-/rebar.lock
-/sbin/
-/sbin.lock
-/test/ct.cover.spec
-/xrefr
-
-/amqp10_common.d
-/*.plt
-
 # Generated source files.
 /include/amqp10_framing.hrl
 /src/amqp10_framing0.erl
diff --git a/deps/amqp10_common/BUILD.bazel b/deps/amqp10_common/BUILD.bazel
index 898c539c16e8..dfe65bc2d31b 100644
--- a/deps/amqp10_common/BUILD.bazel
+++ b/deps/amqp10_common/BUILD.bazel
@@ -110,10 +110,24 @@ dialyze(
 
 rabbitmq_suite(
     name = "binary_generator_SUITE",
+    size = "small",
 )
 
 rabbitmq_suite(
     name = "binary_parser_SUITE",
+    size = "small",
+)
+
+rabbitmq_suite(
+    name = "serial_number_SUITE",
+    size = "small",
+)
+
+rabbitmq_suite(
+    name = "prop_SUITE",
+    deps = [
+        "//deps/rabbitmq_ct_helpers:erlang_app",
+    ],
 )
 
 assert_suites()
diff --git a/deps/amqp10_common/Makefile b/deps/amqp10_common/Makefile
index ebf41ef29d23..6d1b124b817b 100644
--- a/deps/amqp10_common/Makefile
+++ b/deps/amqp10_common/Makefile
@@ -26,6 +26,7 @@ endef
 
 DIALYZER_OPTS += --src -r test -DTEST
 BUILD_DEPS = rabbit_common
+TEST_DEPS = rabbitmq_ct_helpers proper
 
 # Variables and recipes in development.*.mk are meant to be used from
 # any Git clone. They are excluded from the files published to Hex.pm.
@@ -44,6 +45,8 @@ DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \
 	      rabbit_common/mk/rabbitmq-test.mk \
 	      rabbit_common/mk/rabbitmq-tools.mk
 
+PLT_APPS = eunit
+
 include rabbitmq-components.mk
 include erlang.mk
 
diff --git a/deps/amqp10_common/app.bzl b/deps/amqp10_common/app.bzl
index a83665c5c086..a233c945cebe 100644
--- a/deps/amqp10_common/app.bzl
+++ b/deps/amqp10_common/app.bzl
@@ -13,6 +13,8 @@ def all_beam_files(name = "all_beam_files"):
             "src/amqp10_binary_parser.erl",
             "src/amqp10_framing.erl",
             "src/amqp10_framing0.erl",
+            "src/amqp10_util.erl",
+            "src/serial_number.erl",
         ],
         hdrs = [":public_and_private_hdrs"],
         app_name = "amqp10_common",
@@ -34,6 +36,8 @@ def all_test_beam_files(name = "all_test_beam_files"):
             "src/amqp10_binary_parser.erl",
             "src/amqp10_framing.erl",
             "src/amqp10_framing0.erl",
+            "src/amqp10_util.erl",
+            "src/serial_number.erl",
         ],
         hdrs = [":public_and_private_hdrs"],
         app_name = "amqp10_common",
@@ -62,11 +66,13 @@ def all_srcs(name = "all_srcs"):
             "src/amqp10_binary_parser.erl",
             "src/amqp10_framing.erl",
             "src/amqp10_framing0.erl",
+            "src/amqp10_util.erl",
+            "src/serial_number.erl",
         ],
     )
     filegroup(
         name = "public_hdrs",
-        srcs = ["include/amqp10_framing.hrl"],
+        srcs = ["include/amqp10_framing.hrl", "include/amqp10_types.hrl"],
     )
     filegroup(
         name = "private_hdrs",
@@ -96,3 +102,21 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
         app_name = "amqp10_common",
         erlc_opts = "//:test_erlc_opts",
     )
+    erlang_bytecode(
+        name = "serial_number_SUITE_beam_files",
+        testonly = True,
+        srcs = ["test/serial_number_SUITE.erl"],
+        outs = ["test/serial_number_SUITE.beam"],
+        app_name = "amqp10_common",
+        erlc_opts = "//:test_erlc_opts",
+    )
+    erlang_bytecode(
+        name = "prop_SUITE_beam_files",
+        testonly = True,
+        srcs = ["test/prop_SUITE.erl"],
+        outs = ["test/prop_SUITE.beam"],
+        hdrs = ["include/amqp10_framing.hrl"],
+        app_name = "amqp10_common",
+        erlc_opts = "//:test_erlc_opts",
+        deps = ["@proper//:erlang_app"],
+    )
diff --git a/deps/amqp10_common/codegen.py b/deps/amqp10_common/codegen.py
index dc4480a1819a..d58e4eeca7d7 100755
--- a/deps/amqp10_common/codegen.py
+++ b/deps/amqp10_common/codegen.py
@@ -87,8 +87,7 @@ def print_hrl(types, defines):
             for opt in d.options:
                 print_define(opt, d.source)
     print("""
--define(DESCRIBED, 0:8).
--define(DESCRIBED_BIN, <>).
+-define(DESCRIBED, 0).
 """)
 
 
diff --git a/deps/amqp10_common/include/amqp10_types.hrl b/deps/amqp10_common/include/amqp10_types.hrl
new file mode 100644
index 000000000000..3068f6efb4f5
--- /dev/null
+++ b/deps/amqp10_common/include/amqp10_types.hrl
@@ -0,0 +1,19 @@
+-define(UINT_MAX, 16#ff_ff_ff_ff).
+
+% [1.6.5]
+-type uint() :: 0..?UINT_MAX.
+% [2.8.4]
+-type link_handle() :: uint().
+% [2.8.8]
+-type delivery_number() :: sequence_no().
+% [2.8.9]
+-type transfer_number() :: sequence_no().
+% [2.8.10]
+-type sequence_no() :: uint().
+
+% [2.8.1]
+-define(AMQP_ROLE_SENDER, false).
+-define(AMQP_ROLE_RECEIVER, true).
+
+% [3.2.16]
+-define(MESSAGE_FORMAT, 0).
diff --git a/deps/amqp10_common/rebar.config b/deps/amqp10_common/rebar.config
new file mode 100644
index 000000000000..3680f34b3239
--- /dev/null
+++ b/deps/amqp10_common/rebar.config
@@ -0,0 +1,5 @@
+{profiles,
+ [{test, [{deps, [proper
+                 ]}]}
+ ]
+}.
diff --git a/deps/amqp10_common/src/amqp10_binary_generator.erl b/deps/amqp10_common/src/amqp10_binary_generator.erl
index 908c2e93ae84..b829d797f269 100644
--- a/deps/amqp10_common/src/amqp10_binary_generator.erl
+++ b/deps/amqp10_common/src/amqp10_binary_generator.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 -module(amqp10_binary_generator).
@@ -22,9 +22,8 @@
 
 -type amqp10_prim() ::
     null |
-    true |
-    false |
-    {boolean, true | false} |
+    boolean() | 
+    {boolean, boolean()} |
     {ubyte, byte()} |
     {ushort, non_neg_integer()} |
     {uint, non_neg_integer()} |
@@ -41,7 +40,7 @@
     {symbol, binary()} |
     {binary, binary()} |
     {list, [amqp10_type()]} |
-    {map, [{amqp10_prim(), amqp10_prim()}]} | %% TODO: make map a map
+    {map, [{amqp10_prim(), amqp10_prim()}]} |
     {array, amqp10_ctor(), [amqp10_type()]}.
 
 -type amqp10_described() ::
@@ -53,6 +52,7 @@
 -export_type([
               amqp10_ctor/0,
               amqp10_type/0,
+              amqp10_prim/0,
               amqp10_described/0
              ]).
 
@@ -61,153 +61,186 @@
 -define(DOFF, 2).
 -define(VAR_1_LIMIT, 16#FF).
 
--spec build_frame(integer(), iolist()) -> iolist().
-build_frame(Channel, Payload) ->
-    build_frame(Channel, ?AMQP_FRAME_TYPE, Payload).
+-spec build_frame(non_neg_integer(), iolist()) -> iolist().
+build_frame(Channel, Body) ->
+    build_frame(Channel, ?AMQP_FRAME_TYPE, Body).
 
-build_frame(Channel, FrameType, Payload) ->
-    Size = iolist_size(Payload) + 8, % frame header and no extension
-    [ <>, Payload ].
+-spec build_frame(non_neg_integer(), non_neg_integer(), iolist()) -> iolist().
+build_frame(Channel, FrameType, Body) ->
+    Size = iolist_size(Body) + 8, % frame header and no extension
+    [<>, Body].
 
 build_heartbeat_frame() ->
     %% length is inclusive
     <<8:32, ?DOFF:8, ?AMQP_FRAME_TYPE:8, 0:16>>.
 
--spec generate(amqp10_type()) -> iolist().
-generate({described, Descriptor, Value}) ->
-    DescBin = generate(Descriptor),
-    ValueBin = generate(Value),
-    [ ?DESCRIBED_BIN, DescBin, ValueBin ];
-
-generate(null)  -> <<16#40>>;
-generate(true)  -> <<16#41>>;
-generate(false) -> <<16#42>>;
-generate({boolean, true}) -> <<16#56, 16#01>>;
-generate({boolean, false}) -> <<16#56, 16#00>>;
+-spec generate(amqp10_type()) -> iodata().
+generate(Type) ->
+    case generate1(Type) of
+        Byte when is_integer(Byte) ->
+            [Byte];
+        IoData ->
+            IoData
+    end.
+
+generate1({described, Descriptor, Value}) ->
+    DescBin = generate1(Descriptor),
+    ValueBin = generate1(Value),
+    [?DESCRIBED, DescBin, ValueBin];
+
+generate1(null)  -> 16#40;
+generate1(true)  -> 16#41;
+generate1(false) -> 16#42;
+generate1({boolean, true}) -> [16#56, 16#01];
+generate1({boolean, false}) -> [16#56, 16#00];
 
 %% some integral types have a compact encoding as a byte; this is in
 %% particular for the descriptors of AMQP types, which have the domain
 %% bits set to zero and values < 256.
-generate({ubyte,    V})                           -> <<16#50,V:8/unsigned>>;
-generate({ushort,   V})                           -> <<16#60,V:16/unsigned>>;
-generate({uint,     V}) when V =:= 0              -> <<16#43>>;
-generate({uint,     V}) when V < 256              -> <<16#52,V:8/unsigned>>;
-generate({uint,     V})                           -> <<16#70,V:32/unsigned>>;
-generate({ulong,    V}) when V =:= 0              -> <<16#44>>;
-generate({ulong,    V}) when V < 256              -> <<16#53,V:8/unsigned>>;
-generate({ulong,    V})                           -> <<16#80,V:64/unsigned>>;
-generate({byte,     V})                           -> <<16#51,V:8/signed>>;
-generate({short,    V})                           -> <<16#61,V:16/signed>>;
-generate({int,      V}) when V<128 andalso V>-129 -> <<16#54,V:8/signed>>;
-generate({int,      V})                           -> <<16#71,V:32/signed>>;
-generate({long,     V}) when V<128 andalso V>-129 -> <<16#55,V:8/signed>>;
-generate({long,     V})                           -> <<16#81,V:64/signed>>;
-generate({float,    V})                           -> <<16#72,V:32/float>>;
-generate({double,   V})                           -> <<16#82,V:64/float>>;
-generate({char,     V})                           -> <<16#73,V:4/binary>>;
-generate({timestamp,V})                           -> <<16#83,V:64/signed>>;
-generate({uuid,     V})                           -> <<16#98,V:16/binary>>;
-
-generate({utf8, V}) when size(V) < ?VAR_1_LIMIT -> [<<16#a1,(size(V)):8>>,  V];
-generate({utf8, V})                             -> [<<16#b1,(size(V)):32>>, V];
-generate({symbol, V})                           -> [<<16#a3,(size(V)):8>>,  V];
-generate({binary, V}) ->
+generate1({ubyte,    V})                           -> [16#50, V];
+generate1({ushort,   V})                           -> <<16#60,V:16/unsigned>>;
+generate1({uint,     V}) when V =:= 0              -> 16#43;
+generate1({uint,     V}) when V < 256              -> [16#52, V];
+generate1({uint,     V})                           -> <<16#70,V:32/unsigned>>;
+generate1({ulong,    V}) when V =:= 0              -> 16#44;
+generate1({ulong,    V}) when V < 256              -> [16#53, V];
+generate1({ulong,    V})                           -> <<16#80,V:64/unsigned>>;
+generate1({byte,     V})                           -> <<16#51,V:8/signed>>;
+generate1({short,    V})                           -> <<16#61,V:16/signed>>;
+generate1({int,      V}) when V<128 andalso V>-129 -> <<16#54,V:8/signed>>;
+generate1({int,      V})                           -> <<16#71,V:32/signed>>;
+generate1({long,     V}) when V<128 andalso V>-129 -> <<16#55,V:8/signed>>;
+generate1({long,     V})                           -> <<16#81,V:64/signed>>;
+generate1({float,    V})                           -> <<16#72,V:32/float>>;
+generate1({double,   V})                           -> <<16#82,V:64/float>>;
+generate1({char,V}) when V>=0 andalso V=<16#10ffff -> <<16#73,V:32>>;
+%% AMQP timestamp is "64-bit two's-complement integer representing milliseconds since the unix epoch".
+%% For small integers (i.e. values that can be stored in a single word),
+%% Erlang uses two’s complement to represent signed integers.
+generate1({timestamp,V})                           -> <<16#83,V:64/signed>>;
+generate1({uuid,     V})                           -> <<16#98,V:16/binary>>;
+
+generate1({utf8, V}) when size(V) =< ?VAR_1_LIMIT   -> [16#a1, size(V), V];
+generate1({utf8, V})                                -> [<<16#b1, (size(V)):32>>, V];
+generate1({symbol, V}) when size(V) =< ?VAR_1_LIMIT -> [16#a3, size(V), V];
+generate1({symbol, V})                              -> [<<16#b3, (size(V)):32>>, V];
+generate1({binary, V}) ->
     Size = iolist_size(V),
-    if  Size < ?VAR_1_LIMIT -> [<<16#a0,Size:8>>,  V];
-        true                -> [<<16#b0,Size:32>>, V]
+    case Size =< ?VAR_1_LIMIT  of
+        true ->
+            [16#a0, Size, V];
+        false ->
+            [<<16#b0, Size:32>>, V]
     end;
 
-generate({list, []}) ->
-    <<16#45>>;
-generate({list, List}) ->
+generate1({list, []}) ->
+    16#45;
+generate1({list, List}) ->
     Count = length(List),
-    Compound = lists:map(fun generate/1, List),
+    Compound = lists:map(fun generate1/1, List),
     S = iolist_size(Compound),
     %% If the list contains less than (256 - 1) elements and if the
     %% encoded size (including the encoding of "Count", thus S + 1
     %% in the test) is less than 256 bytes, we use the short form.
     %% Otherwise, we use the large form.
     if Count >= (256 - 1) orelse (S + 1) >= 256 ->
-            [<<16#d0, (S + 4):32/unsigned, Count:32/unsigned>>, Compound];
-        true ->
-            [<<16#c0, (S + 1):8/unsigned,  Count:8/unsigned>>,  Compound]
+           [<<16#d0, (S + 4):32, Count:32>>, Compound];
+       true ->
+           [16#c0, S + 1, Count, Compound]
     end;
 
-generate({map, ListOfPairs}) ->
-    Count = length(ListOfPairs) * 2,
+generate1({map, KvList}) ->
+    Count = length(KvList) * 2,
     Compound = lists:map(fun ({Key, Val}) ->
-                                 [(generate(Key)),
-                                  (generate(Val))]
-                         end, ListOfPairs),
+                                 [(generate1(Key)),
+                                  (generate1(Val))]
+                         end, KvList),
     S = iolist_size(Compound),
-    %% See generate({list, ...}) for an explanation of this test.
+    %% See generate1({list, ...}) for an explanation of this test.
     if Count >= (256 - 1) orelse (S + 1) >= 256 ->
-            [<<16#d1, (S + 4):32, Count:32>>, Compound];
-        true ->
-            [<<16#c1, (S + 1):8,  Count:8>>,  Compound]
+           [<<16#d1, (S + 4):32, Count:32>>, Compound];
+       true ->
+           [16#c1, S + 1, Count, Compound]
     end;
 
-generate({array, Type, List}) ->
+generate1({array, Type, List}) ->
     Count = length(List),
-    Body = iolist_to_binary([constructor(Type),
-                             [generate(Type, I) || I <- List]]),
-    S = size(Body),
-    %% See generate({list, ...}) for an explanation of this test.
+    Array = [constructor(Type),
+             [generate2(Type, I) || I <- List]],
+    S = iolist_size(Array),
+    %% See generate1({list, ...}) for an explanation of this test.
     if Count >= (256 - 1) orelse (S + 1) >= 256 ->
-            [<<16#f0, (S + 4):32/unsigned, Count:32/unsigned>>, Body];
-        true ->
-            [<<16#e0, (S + 1):8/unsigned,  Count:8/unsigned>>,  Body]
+           [<<16#f0, (S + 4):32, Count:32>>, Array];
+       true ->
+           [16#e0, S + 1, Count, Array]
     end;
 
-generate({as_is, TypeCode, Bin}) ->
+generate1({as_is, TypeCode, Bin}) ->
     <>.
 
-%% TODO again these are a stub to get SASL working. New codec? Will
-%% that ever happen? If not we really just need to split generate/1
-%% up into things like these...
-%% for these constructors map straight-forwardly
-constructor(symbol) -> <<16#b3>>;
-constructor(ubyte) -> <<16#50>>;
-constructor(ushort) -> <<16#60>>;
-constructor(short) -> <<16#61>>;
-constructor(uint) -> <<16#70>>;
-constructor(ulong) -> <<16#80>>;
-constructor(byte) -> <<16#51>>;
-constructor(int) -> <<16#71>>;
-constructor(long) -> <<16#81>>;
-constructor(float) -> <<16#72>>;
-constructor(double) -> <<16#82>>;
-constructor(char) -> <<16#73>>;
-constructor(timestamp) -> <<16#83>>;
-constructor(uuid) -> <<16#98>>;
-constructor(null) -> <<16#40>>;
-constructor(boolean) -> <<16#56>>;
-constructor(array) -> <<16#f0>>; % use large array type for all nested arrays
-constructor(utf8) -> <<16#b1>>;
+constructor(symbol) -> 16#b3;
+constructor(ubyte) -> 16#50;
+constructor(ushort) -> 16#60;
+constructor(short) -> 16#61;
+constructor(uint) -> 16#70;
+constructor(ulong) -> 16#80;
+constructor(byte) -> 16#51;
+constructor(int) -> 16#71;
+constructor(long) -> 16#81;
+constructor(float) -> 16#72;
+constructor(double) -> 16#82;
+constructor(char) -> 16#73;
+constructor(timestamp) -> 16#83;
+constructor(uuid) -> 16#98;
+constructor(null) -> 16#40;
+constructor(boolean) -> 16#56;
+constructor(binary) -> 16#b0;
+constructor(utf8) -> 16#b1;
+constructor(list) -> 16#d0;  % use large list type for all array elements
+constructor(map) -> 16#d1;   % use large map type for all array elements
+constructor(array) -> 16#f0; % use large array type for all nested arrays
 constructor({described, Descriptor, Primitive}) ->
-    [<<16#00>>, generate(Descriptor), constructor(Primitive)].
-
-% returns io_list
-generate(symbol, {symbol, V}) -> [<<(size(V)):32>>, V];
-generate(utf8, {utf8, V}) -> [<<(size(V)):32>>, V];
-generate(boolean, true) -> <<16#01>>;
-generate(boolean, false) -> <<16#00>>;
-generate(boolean, {boolean, true}) -> <<16#01>>;
-generate(boolean, {boolean, false}) -> <<16#00>>;
-generate(ubyte, {ubyte, V}) -> <>;
-generate(byte, {byte, V}) -> <>;
-generate(ushort, {ushort, V}) -> <>;
-generate(short, {short, V}) -> <>;
-generate(uint, {uint, V}) -> <>;
-generate(int, {int, V}) -> <>;
-generate(ulong, {ulong, V}) -> <>;
-generate(long, {long, V}) -> <>;
-generate({described, D, P}, {described, D, V}) ->
-    generate(P, V);
-generate(array, {array, Type, List}) ->
+    [16#00, generate1(Descriptor), constructor(Primitive)].
+
+generate2(symbol, {symbol, V}) -> [<<(size(V)):32>>, V];
+generate2(utf8, {utf8, V}) -> [<<(size(V)):32>>, V];
+generate2(binary, {binary, V}) -> [<<(size(V)):32>>, V];
+generate2(boolean, true) -> 16#01;
+generate2(boolean, false) -> 16#00;
+generate2(boolean, {boolean, true}) -> 16#01;
+generate2(boolean, {boolean, false}) -> 16#00;
+generate2(null, null) -> 16#40;
+generate2(char, {char,V}) when V>=0 andalso V=<16#10ffff -> <>;
+generate2(ubyte, {ubyte, V}) -> V;
+generate2(byte, {byte, V}) -> <>;
+generate2(ushort, {ushort, V}) -> <>;
+generate2(short, {short, V}) -> <>;
+generate2(uint, {uint, V}) -> <>;
+generate2(int, {int, V}) -> <>;
+generate2(ulong, {ulong, V}) -> <>;
+generate2(long, {long, V}) -> <>;
+generate2(float, {float, V}) -> <>;
+generate2(double, {double, V}) -> <>;
+generate2(timestamp, {timestamp,V}) -> <>;
+generate2(uuid, {uuid, V}) -> <>;
+generate2({described, D, P}, {described, D, V}) ->
+    generate2(P, V);
+generate2(list, {list, List}) ->
+    Count = length(List),
+    Compound = lists:map(fun generate1/1, List),
+    S = iolist_size(Compound),
+    [<<(S + 4):32, Count:32>>, Compound];
+generate2(map, {map, KvList}) ->
+    Count = length(KvList) * 2,
+    Compound = lists:map(fun ({Key, Val}) ->
+                                 [(generate1(Key)),
+                                  (generate1(Val))]
+                         end, KvList),
+    S = iolist_size(Compound),
+    [<<(S + 4):32, Count:32>>, Compound];
+generate2(array, {array, Type, List}) ->
     Count = length(List),
-    Body = iolist_to_binary([constructor(Type),
-                             [generate(Type, I) || I <- List]]),
-    S = size(Body),
-    %% See generate({list, ...}) for an explanation of this test.
-    [<<(S + 4):32/unsigned, Count:32/unsigned>>, Body].
+    Array = [constructor(Type),
+             [generate2(Type, I) || I <- List]],
+    S = iolist_size(Array),
+    [<<(S + 4):32, Count:32>>, Array].
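Based on the generator clauses above, small unsigned integers and short strings use the compact one- or two-byte encodings. A small example of what generate/1 should return for a few values (iodata, shown before flattening):

    [16#43]               = amqp10_binary_generator:generate({uint, 0}),   %% zero-value encoding
    [16#52, 5]            = amqp10_binary_generator:generate({uint, 5}),   %% smalluint
    [16#a1, 3, <<"abc">>] = amqp10_binary_generator:generate({utf8, <<"abc">>}).
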
diff --git a/deps/amqp10_common/src/amqp10_binary_parser.erl b/deps/amqp10_common/src/amqp10_binary_parser.erl
index 04d7ea04fb71..c0275302d5f9 100644
--- a/deps/amqp10_common/src/amqp10_binary_parser.erl
+++ b/deps/amqp10_common/src/amqp10_binary_parser.erl
@@ -2,137 +2,118 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% NB: When compiling this file with "ERL_COMPILER_OPTIONS=bin_opt_info"
+%% make sure that all code outputs "OPTIMIZED: match context reused",
+%% i.e. neither "BINARY CREATED" nor "NOT OPTIMIZED" should be output.
+%% The only exception is arrays, since arrays aren't used in the hot path.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 -module(amqp10_binary_parser).
 
--export([parse/1, parse_all/1]).
-
 -include("amqp10_framing.hrl").
 
--ifdef(TEST).
+-export([parse/1,
+         parse_many/2]).
 
--export([parse_all_int/1]).
+%% §1.6
+-define(CODE_ULONG, 16#80).
+-define(CODE_SMALL_ULONG, 16#53).
+-define(CODE_SYM_8, 16#a3).
+-define(CODE_SYM_32, 16#b3).
+%% §3.2
+-define(DESCRIPTOR_CODE_PROPERTIES, 16#73).
+-define(DESCRIPTOR_CODE_APPLICATION_PROPERTIES, 16#74).
+-define(DESCRIPTOR_CODE_DATA, 16#75).
+-define(DESCRIPTOR_CODE_AMQP_SEQUENCE, 16#76).
+-define(DESCRIPTOR_CODE_AMQP_VALUE, 16#77).
 
-parse_all_int(ValueBin) when is_binary(ValueBin) ->
-    lists:reverse(parse_all_int([], parse(ValueBin))).
 
-parse_all_int(Acc, {Value, <<>>}) -> [Value | Acc];
-parse_all_int(Acc, {Value, Rest}) -> parse_all_int([Value | Acc], parse(Rest)).
+%% server_mode is a special parsing mode used by RabbitMQ when parsing
+%% AMQP message sections from an AMQP client. This mode:
+%% 1. stops parsing when the body starts, and
+%% 2. returns the start byte position of each parsed bare message section.
+-type opts() :: [server_mode].
 
--endif.
+-export_type([opts/0]).
 
+%% Parses only the first AMQP type (including any nested AMQP types).
 -spec parse(binary()) ->
-    {amqp10_binary_generator:amqp10_type(), Rest :: binary()}.
-parse(<>) ->
-    parse_described(Rest);
-parse(Rest) ->
-    parse_primitive0(Rest).
-
-parse_described(Bin) ->
-    {Descriptor, Rest1} = parse(Bin),
-    {Value, Rest2} = parse(Rest1),
-    {{described, Descriptor, Value}, Rest2}.
-
-parse_primitive0(<>) ->
-    parse_primitive(Type, Rest).
-
-%% Constants
-parse_primitive(16#40, R) -> {null, R};
-parse_primitive(16#41, R) -> {true, R};
-parse_primitive(16#42, R) -> {false, R};
-parse_primitive(16#43, R) -> {{uint, 0}, R};
-parse_primitive(16#44, R) -> {{ulong, 0}, R};
-
+    {amqp10_binary_generator:amqp10_type(), BytesParsed :: non_neg_integer()}.
+parse(Binary) ->
+    parse(Binary, 0).
+
+parse(<>, B) ->
+    {Descriptor, B1} = parse(Rest),
+    <<_ParsedDescriptorBin:B1/binary, Rest1/binary>> = Rest,
+    {Value, B2} = parse(Rest1),
+    {{described, Descriptor, Value}, B+1+B1+B2};
+parse(<<16#40, _/binary>>, B) -> {null,        B+1};
+parse(<<16#41, _/binary>>, B) -> {true,        B+1};
+parse(<<16#42, _/binary>>, B) -> {false,       B+1};
+parse(<<16#43, _/binary>>, B) -> {{uint, 0},   B+1};
+parse(<<16#44, _/binary>>, B) -> {{ulong, 0},  B+1};
 %% Fixed-widths. Most integral types have a compact encoding as a byte.
-parse_primitive(16#50, <>) -> {{ubyte, V},      R};
-parse_primitive(16#51, <>) -> {{byte, V},       R};
-parse_primitive(16#52, <>) -> {{uint, V},       R};
-parse_primitive(16#53, <>) -> {{ulong, V},      R};
-parse_primitive(16#54, <>) -> {{int, V},        R};
-parse_primitive(16#55, <>) -> {{long, V},       R};
-parse_primitive(16#56, <<0:8/unsigned,  R/binary>>) -> {{boolean, false},R};
-parse_primitive(16#56, <<1:8/unsigned,  R/binary>>) -> {{boolean, true}, R};
-parse_primitive(16#60, <>) -> {{ushort, V},     R};
-parse_primitive(16#61, <>) -> {{short, V},      R};
-parse_primitive(16#70, <>) -> {{uint, V},       R};
-parse_primitive(16#71, <>) -> {{int, V},        R};
-parse_primitive(16#72, <>) -> {{float, V},      R};
-parse_primitive(16#73, <>) -> {{char, Utf32},   R};
-parse_primitive(16#80, <>) -> {{ulong, V},      R};
-parse_primitive(16#81, <>) -> {{long, V},       R};
-parse_primitive(16#82, <>) -> {{double, V},     R};
-parse_primitive(16#83, <>) -> {{timestamp, TS}, R};
-parse_primitive(16#98, <>) -> {{uuid, Uuid},    R};
-
+parse(<<16#50, V:8/unsigned,  _/binary>>, B) -> {{ubyte, V},      B+2};
+parse(<<16#51, V:8/signed,    _/binary>>, B) -> {{byte, V},       B+2};
+parse(<<16#52, V:8/unsigned,  _/binary>>, B) -> {{uint, V},       B+2};
+parse(<>, B) -> {{ulong, V}, B+2};
+parse(<<16#54, V:8/signed,    _/binary>>, B) -> {{int, V},        B+2};
+parse(<<16#55, V:8/signed,    _/binary>>, B) -> {{long, V},       B+2};
+parse(<<16#56, 0:8/unsigned,  _/binary>>, B) -> {false,           B+2};
+parse(<<16#56, 1:8/unsigned,  _/binary>>, B) -> {true,            B+2};
+parse(<<16#60, V:16/unsigned, _/binary>>, B) -> {{ushort, V},     B+3};
+parse(<<16#61, V:16/signed,   _/binary>>, B) -> {{short, V},      B+3};
+parse(<<16#70, V:32/unsigned, _/binary>>, B) -> {{uint, V},       B+5};
+parse(<<16#71, V:32/signed,   _/binary>>, B) -> {{int, V},        B+5};
+parse(<<16#72, V:32/float,    _/binary>>, B) -> {{float, V},      B+5};
+parse(<<16#73, V:32,          _/binary>>, B) -> {{char, V},       B+5};
+parse(<>, B) -> {{ulong, V},B+9};
+parse(<<16#81, V:64/signed,   _/binary>>, B) -> {{long, V},       B+9};
+parse(<<16#82, V:64/float,    _/binary>>, B) -> {{double, V},     B+9};
+parse(<<16#83, TS:64/signed,  _/binary>>, B) -> {{timestamp, TS}, B+9};
+parse(<<16#98, Uuid:16/binary,_/binary>>, B) -> {{uuid, Uuid},    B+17};
 %% Variable-widths
-parse_primitive(16#a0,<>)-> {{binary, V}, R};
-parse_primitive(16#a1,<>)-> {{utf8, V},   R};
-parse_primitive(16#a3,<>)-> {{symbol, V}, R};
-parse_primitive(16#b3,<>)-> {{symbol, V}, R};
-parse_primitive(16#b0,<>)-> {{binary, V}, R};
-parse_primitive(16#b1,<>)-> {{utf8, V},   R};
-
+parse(<<16#a0, S:8, V:S/binary,_/binary>>, B)-> {{binary, V}, B+2+S};
+parse(<<16#a1, S:8, V:S/binary,_/binary>>, B)-> {{utf8, V},   B+2+S};
+parse(<>, B) -> {{symbol, V}, B+2+S};
+parse(<>, B) -> {{symbol, V}, B+5+S};
+parse(<<16#b0, S:32,V:S/binary,_/binary>>, B)-> {{binary, V}, B+5+S};
+parse(<<16#b1, S:32,V:S/binary,_/binary>>, B)-> {{utf8, V},   B+5+S};
 %% Compounds
-parse_primitive(16#45, R) ->
-    {{list, []}, R};
-parse_primitive(16#c0,<>) ->
-    {{list, parse_compound(8, CountAndValue)}, R};
-parse_primitive(16#c1,<>) ->
-    List = parse_compound(8, CountAndValue),
-    {{map, mapify(List)}, R};
-parse_primitive(16#d0,<>) ->
-    {{list, parse_compound(32, CountAndValue)}, R};
-parse_primitive(16#d1,<>) ->
-    List = parse_compound(32, CountAndValue),
-    {{map, mapify(List)}, R};
-
+parse(<<16#45, _/binary>>, B) ->
+    {{list, []}, B+1};
+parse(<<16#c0, Size, _IgnoreCount, Value:(Size-1)/binary, _/binary>>, B) ->
+    {{list, parse_many(Value, [])}, B+2+Size};
+parse(<<16#c1, Size, _IgnoreCount, Value:(Size-1)/binary, _/binary>>, B) ->
+    List = parse_many(Value, []),
+    {{map, mapify(List)}, B+2+Size};
+parse(<<16#d0, Size:32, _IgnoreCount:32, Value:(Size-4)/binary, _/binary>>, B) ->
+    {{list, parse_many(Value, [])}, B+5+Size};
+parse(<<16#d1, Size:32, _IgnoreCount:32, Value:(Size-4)/binary, _/binary>>, B) ->
+    List = parse_many(Value, []),
+    {{map, mapify(List)}, B+5+Size};
 %% Arrays
-parse_primitive(16#e0,<>) ->
-    {parse_array(8, CountAndV), R};
-parse_primitive(16#f0,<>) ->
-    {parse_array(32, CountAndV), R};
-
+parse(<<16#e0, S:8,CountAndV:S/binary,_/binary>>, B) ->
+    {parse_array(8, CountAndV), B+2+S};
+parse(<<16#f0, S:32,CountAndV:S/binary,_/binary>>, B) ->
+    {parse_array(32, CountAndV), B+5+S};
 %% NaN or +-inf
-parse_primitive(16#72, <>) ->
-    {{as_is, 16#72, <>}, R};
-parse_primitive(16#82, <>) ->
-    {{as_is, 16#82, <>}, R};
-
+parse(<<16#72, V:32, _/binary>>, B) ->
+    {{as_is, 16#72, <>}, B+5};
+parse(<<16#82, V:64, _/binary>>, B) ->
+    {{as_is, 16#82, <>}, B+9};
 %% decimals
-parse_primitive(16#74, <>) ->
-    {{as_is, 16#74, <>}, R};
-parse_primitive(16#84, <>) ->
-    {{as_is, 16#84, <>}, R};
-parse_primitive(16#94, <>) ->
-    {{as_is, 16#94, <>}, R};
-
-parse_primitive(Type, _Bin) ->
-    throw({primitive_type_unsupported, Type, _Bin}).
-
-parse_compound(UnitSize, Bin) ->
-    <> = Bin,
-    parse_compound1(Count, Bin1, []).
-
-parse_compound1(0, <<>>, List) ->
-    lists:reverse(List);
-parse_compound1(_Left, <<>>, List) ->
-    case application:get_env(rabbitmq_amqp1_0, protocol_strict_mode) of
-        {ok, false} -> lists:reverse(List); %% ignore miscount
-        {ok, true}  -> throw(compound_datatype_miscount)
-    end;
-parse_compound1(Count, Bin, Acc) ->
-    {Value, Rest} = parse(Bin),
-    parse_compound1(Count - 1, Rest, [Value | Acc]).
-
-parse_array_primitive(16#40, <<_:8/unsigned, R/binary>>) -> {null, R};
-parse_array_primitive(16#41, <<_:8/unsigned, R/binary>>) -> {true, R};
-parse_array_primitive(16#42, <<_:8/unsigned, R/binary>>) -> {false, R};
-parse_array_primitive(16#43, <<_:8/unsigned, R/binary>>) -> {{uint, 0}, R};
-parse_array_primitive(16#44, <<_:8/unsigned, R/binary>>) -> {{ulong, 0}, R};
-parse_array_primitive(ElementType, Data) ->
-    parse_primitive(ElementType, Data).
+parse(<<16#74, V:32, _/binary>>, B) ->
+    {{as_is, 16#74, <>}, B+5};
+parse(<<16#84, V:64, _/binary>>, B) ->
+    {{as_is, 16#84, <>}, B+9};
+parse(<<16#94, V:128, _/binary>>, B) ->
+    {{as_is, 16#94, <>}, B+17};
+parse(<>, B) ->
+    throw({primitive_type_unsupported, Type, {position, B}}).
 
 %% array structure is {array, Ctor, [Data]}
 %% e.g. {array, symbol, [<<"amqp:accepted:list">>]}
@@ -141,7 +122,8 @@ parse_array(UnitSize, Bin) ->
     parse_array1(Count, Bin1).
 
 parse_array1(Count, <>) ->
-    {Descriptor, Rest1} = parse(Rest),
+    {Descriptor, B1} = parse(Rest),
+    <<_ParsedDescriptorBin:B1/binary, Rest1/binary>> = Rest,
     {array, Type, List} = parse_array1(Count, Rest1),
     Values = lists:map(fun (Value) ->
                                {described, Descriptor, Value}
@@ -158,12 +140,15 @@ parse_array2(0, Type, Bin, Acc) ->
 parse_array2(Count, Type, <<>>, Acc) when Count > 0 ->
     exit({failed_to_parse_array_insufficient_input, Type, Count, Acc});
 parse_array2(Count, Type, Bin, Acc) ->
-    {Value, Rest} = parse_array_primitive(Type, Bin),
+    {Value, B} = parse_array_primitive(Type, Bin),
+    <<_ParsedValue:B/binary, Rest/binary>> = Bin,
     parse_array2(Count - 1, Type, Rest, [Value | Acc]).
 
-parse_constructor(16#a3) -> symbol;
-parse_constructor(16#b3) -> symbol;
+parse_constructor(?CODE_SYM_8) -> symbol;
+parse_constructor(?CODE_SYM_32) -> symbol;
+parse_constructor(16#a0) -> binary;
 parse_constructor(16#a1) -> utf8;
+parse_constructor(16#b0) -> binary;
 parse_constructor(16#b1) -> utf8;
 parse_constructor(16#50) -> ubyte;
 parse_constructor(16#51) -> byte;
@@ -171,118 +156,184 @@ parse_constructor(16#60) -> ushort;
 parse_constructor(16#61) -> short;
 parse_constructor(16#70) -> uint;
 parse_constructor(16#71) -> int;
-parse_constructor(16#80) -> ulong;
+parse_constructor(16#72) -> float;
+parse_constructor(16#73) -> char;
+parse_constructor(16#82) -> double;
+parse_constructor(?CODE_ULONG) -> ulong;
 parse_constructor(16#81) -> long;
 parse_constructor(16#40) -> null;
 parse_constructor(16#56) -> boolean;
+parse_constructor(16#83) -> timestamp;
+parse_constructor(16#98) -> uuid;
+parse_constructor(16#d0) -> list;
+parse_constructor(16#d1) -> map;
 parse_constructor(16#f0) -> array;
 parse_constructor(0) -> described;
 parse_constructor(X) ->
     exit({failed_to_parse_constructor, X}).
 
+parse_array_primitive(16#40, <<_:8/unsigned, _/binary>>) -> {null, 1};
+parse_array_primitive(16#41, <<_:8/unsigned, _/binary>>) -> {true, 1};
+parse_array_primitive(16#42, <<_:8/unsigned, _/binary>>) -> {false, 1};
+parse_array_primitive(16#43, <<_:8/unsigned, _/binary>>) -> {{uint, 0}, 1};
+parse_array_primitive(16#44, <<_:8/unsigned, _/binary>>) -> {{ulong, 0}, 1};
+parse_array_primitive(ElementType, Data) ->
+    {Val, B} = parse(<>),
+    {Val, B-1}.
+
 mapify([]) ->
     [];
 mapify([Key, Value | Rest]) ->
     [{Key, Value} | mapify(Rest)].
 
-%% parse_all/1 is much faster and much more memory efficient than parse/1.
-%%
-%% When compiling this module with environment variable ERL_COMPILER_OPTIONS=bin_opt_info,
-%% for parse/1 the compiler prints many times:
-%% "BINARY CREATED: binary is used in a term that is returned from the function"
-%% because sub binaries are created.
-%%
-%% For parse_all/1 the compiler prints many times:
-%% "OPTIMIZED: match context reused"
-%% because sub binaries are not created.
-%%
-%% See also https://www.erlang.org/doc/efficiency_guide/binaryhandling.html
--spec parse_all(binary()) ->
-    [amqp10_binary_generator:amqp10_type()].
-
-parse_all(<<>>) ->
+%% Parses all AMQP types (or, in server_mode, stops when the body is reached).
+%% This is an optimisation over calling parse/1 repeatedly.
+%% We re-use the match context, avoiding the creation of sub binaries.
+-spec parse_many(binary(), opts()) ->
+    [amqp10_binary_generator:amqp10_type() |
+     {{pos, non_neg_integer()}, amqp10_binary_generator:amqp10_type() | body}].
+parse_many(Binary, Opts) ->
+    OptionServerMode = lists:member(server_mode, Opts),
+    pm(Binary, OptionServerMode, 0).
+
+pm(<<>>, _, _) ->
     [];
 
+%% We put function clauses that are more likely to match at the top, as this results in better performance.
+%% Constants.
+pm(<<16#40, R/binary>>, O, B) -> [null | pm(R, O, B+1)];
+pm(<<16#41, R/binary>>, O, B) -> [true | pm(R, O, B+1)];
+pm(<<16#42, R/binary>>, O, B) -> [false | pm(R, O, B+1)];
+pm(<<16#43, R/binary>>, O, B) -> [{uint, 0} | pm(R, O, B+1)];
+%% Fixed-widths.
+pm(<<16#44, R/binary>>, O, B)                            -> [{ulong, 0} | pm(R, O, B+1)];
+pm(<<16#50, V:8/unsigned,  R/binary>>, O, B)             -> [{ubyte, V} | pm(R, O, B+2)];
+pm(<<16#52, V:8/unsigned,  R/binary>>, O, B)             -> [{uint, V} | pm(R, O, B+2)];
+pm(<>, O, B) -> [{ulong, V} | pm(R, O, B+2)];
+pm(<<16#70, V:32/unsigned, R/binary>>, O, B)             -> [{uint, V} | pm(R, O, B+5)];
+pm(<>, O, B)       -> [{ulong, V} | pm(R, O, B+9)];
+%% Variable-widths
+pm(<<16#a0, S:8, V:S/binary,R/binary>>, O, B)            -> [{binary, V} | pm(R, O, B+2+S)];
+pm(<<16#a1, S:8, V:S/binary,R/binary>>, O, B)            -> [{utf8, V} | pm(R, O, B+2+S)];
+pm(<>, O, B)      -> [{symbol, V} | pm(R, O, B+2+S)];
+%% Compounds
+pm(<<16#45, R/binary>>, O, B) ->
+    [{list, []} | pm(R, O, B+1)];
+pm(<<16#c0, S:8,CountAndValue:S/binary,R/binary>>, O, B) ->
+    [{list, pm_compound(8, CountAndValue, O, B+2)} | pm(R, O, B+2+S)];
+pm(<<16#c1, S:8,CountAndValue:S/binary,R/binary>>, O, B) ->
+    List = pm_compound(8, CountAndValue, O, B+2),
+    [{map, mapify(List)} | pm(R, O, B+2+S)];
+
+%% We avoid guard tests: they improve readability, but result in worse performance.
+%%
+%% In server mode:
+%% * Stop when we reach the message body (data or amqp-sequence or amqp-value section).
+%% * Include byte positions for parsed bare message sections.
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_DATA);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_AMQP_SEQUENCE);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_AMQP_VALUE);
+pm(<>, O = true, B) ->
+    [Value | Rest] = pm(Rest0, O, B+3),
+    [{{pos, B}, {described, {ulong, ?DESCRIPTOR_CODE_PROPERTIES}, Value}} | Rest];
+pm(<>, O = true, B) ->
+    [Value | Rest] = pm(Rest0, O, B+3),
+    [{{pos, B}, {described, {ulong, ?DESCRIPTOR_CODE_APPLICATION_PROPERTIES}, Value}} | Rest];
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_DATA);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_AMQP_SEQUENCE);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_AMQP_VALUE);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_DATA);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_AMQP_SEQUENCE);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_AMQP_VALUE);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_DATA);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_AMQP_SEQUENCE);
+pm(<>, true, B) ->
+    reached_body(B, ?DESCRIPTOR_CODE_AMQP_VALUE);
+pm(<>, O = true, B) ->
+    [Value | Rest] = pm(Rest0, O, B+10),
+    [{{pos, B}, {described, {ulong, ?DESCRIPTOR_CODE_PROPERTIES}, Value}} | Rest];
+pm(<>, O = true, B) ->
+    [Value | Rest] = pm(Rest0, O, B+10),
+    [{{pos, B}, {described, {ulong, ?DESCRIPTOR_CODE_APPLICATION_PROPERTIES}, Value}} | Rest];
+pm(<>, O = true, B) ->
+    [Value | Rest] = pm(Rest0, O, B+23),
+    [{{pos, B}, {described, {symbol, <<"amqp:properties:list">>}, Value}} | Rest];
+pm(<>, O = true, B) ->
+    [Value | Rest] = pm(Rest0, O, B+34),
+    [{{pos, B}, {described, {symbol, <<"amqp:application-properties:map">>}, Value}} | Rest];
+pm(<>, O = true, B) ->
+    [Value | Rest] = pm(Rest0, O, B+26),
+    [{{pos, B}, {described, {symbol, <<"amqp:properties:list">>}, Value}} | Rest];
+pm(<>, O = true, B) ->
+    [Value | Rest] = pm(Rest0, O, B+37),
+    [{{pos, B}, {described, {symbol, <<"amqp:application-properties:map">>}, Value}} | Rest];
+
 %% Described Types
-parse_all(<>) ->
-    [Descriptor, Value | Rest] = parse_all(Rest0),
+pm(<>, O, B) ->
+    [Descriptor, Value | Rest] = pm(Rest0, O, B+1),
     [{described, Descriptor, Value} | Rest];
 
 %% Primitives Types
 %%
-%% Constants
-parse_all(<<16#40, R/binary>>) -> [null | parse_all(R)];
-parse_all(<<16#41, R/binary>>) -> [true | parse_all(R)];
-parse_all(<<16#42, R/binary>>) -> [false | parse_all(R)];
-parse_all(<<16#43, R/binary>>) -> [{uint, 0} | parse_all(R)];
-parse_all(<<16#44, R/binary>>) -> [{ulong, 0} | parse_all(R)];
-
-%% Fixed-widths. Most integral types have a compact encoding as a byte.
-parse_all(<<16#50, V:8/unsigned,  R/binary>>) -> [{ubyte, V} | parse_all(R)];
-parse_all(<<16#51, V:8/signed,    R/binary>>) -> [{byte, V} | parse_all(R)];
-parse_all(<<16#52, V:8/unsigned,  R/binary>>) -> [{uint, V} | parse_all(R)];
-parse_all(<<16#53, V:8/unsigned,  R/binary>>) -> [{ulong, V} | parse_all(R)];
-parse_all(<<16#54, V:8/signed,    R/binary>>) -> [{int, V} | parse_all(R)];
-parse_all(<<16#55, V:8/signed,    R/binary>>) -> [{long, V} | parse_all(R)];
-parse_all(<<16#56, 0:8/unsigned,  R/binary>>) -> [{boolean, false} | parse_all(R)];
-parse_all(<<16#56, 1:8/unsigned,  R/binary>>) -> [{boolean, true} | parse_all(R)];
-parse_all(<<16#60, V:16/unsigned, R/binary>>) -> [{ushort, V} | parse_all(R)];
-parse_all(<<16#61, V:16/signed,   R/binary>>) -> [{short, V} | parse_all(R)];
-parse_all(<<16#70, V:32/unsigned, R/binary>>) -> [{uint, V} | parse_all(R)];
-parse_all(<<16#71, V:32/signed,   R/binary>>) -> [{int, V} | parse_all(R)];
-parse_all(<<16#72, V:32/float,    R/binary>>) -> [{float, V} | parse_all(R)];
-parse_all(<<16#73, Utf32:4/binary,R/binary>>) -> [{char, Utf32} | parse_all(R)];
-parse_all(<<16#80, V:64/unsigned, R/binary>>) -> [{ulong, V} | parse_all(R)];
-parse_all(<<16#81, V:64/signed,   R/binary>>) -> [{long, V} | parse_all(R)];
-parse_all(<<16#82, V:64/float,    R/binary>>) -> [{double, V} | parse_all(R)];
-parse_all(<<16#83, TS:64/signed,  R/binary>>) -> [{timestamp, TS} | parse_all(R)];
-parse_all(<<16#98, Uuid:16/binary,R/binary>>) -> [{uuid, Uuid} | parse_all(R)];
-
+%% Fixed-widths.
+pm(<<16#51, V:8/signed,    R/binary>>, O, B) -> [{byte, V} | pm(R, O, B+2)];
+pm(<<16#54, V:8/signed,    R/binary>>, O, B) -> [{int, V} | pm(R, O, B+2)];
+pm(<<16#55, V:8/signed,    R/binary>>, O, B) -> [{long, V} | pm(R, O, B+2)];
+pm(<<16#56, 0:8/unsigned,  R/binary>>, O, B) -> [false | pm(R, O, B+2)];
+pm(<<16#56, 1:8/unsigned,  R/binary>>, O, B) -> [true  | pm(R, O, B+2)];
+pm(<<16#60, V:16/unsigned, R/binary>>, O, B) -> [{ushort, V} | pm(R, O, B+3)];
+pm(<<16#61, V:16/signed,   R/binary>>, O, B) -> [{short, V} | pm(R, O, B+3)];
+pm(<<16#71, V:32/signed,   R/binary>>, O, B) -> [{int, V} | pm(R, O, B+5)];
+pm(<<16#72, V:32/float,    R/binary>>, O, B) -> [{float, V} | pm(R, O, B+5)];
+pm(<<16#73, V:32,          R/binary>>, O, B) -> [{char, V} | pm(R, O, B+5)];
+pm(<<16#81, V:64/signed,   R/binary>>, O, B) -> [{long, V} | pm(R, O, B+9)];
+pm(<<16#82, V:64/float,    R/binary>>, O, B) -> [{double, V} | pm(R, O, B+9)];
+pm(<<16#83, TS:64/signed,  R/binary>>, O, B) -> [{timestamp, TS} | pm(R, O, B+9)];
+pm(<<16#98, Uuid:16/binary,R/binary>>, O, B) -> [{uuid, Uuid} | pm(R, O, B+17)];
 %% Variable-widths
-parse_all(<<16#a0, S:8/unsigned, V:S/binary,R/binary>>) -> [{binary, V} | parse_all(R)];
-parse_all(<<16#a1, S:8/unsigned, V:S/binary,R/binary>>) -> [{utf8, V} | parse_all(R)];
-parse_all(<<16#a3, S:8/unsigned, V:S/binary,R/binary>>) -> [{symbol, V} | parse_all(R)];
-parse_all(<<16#b3, S:32/unsigned,V:S/binary,R/binary>>) -> [{symbol, V} | parse_all(R)];
-parse_all(<<16#b0, S:32/unsigned,V:S/binary,R/binary>>) -> [{binary, V} | parse_all(R)];
-parse_all(<<16#b1, S:32/unsigned,V:S/binary,R/binary>>) -> [{utf8, V} | parse_all(R)];
-
+pm(<>, O, B) -> [{symbol, V} | pm(R, O, B+5+S)];
+pm(<<16#b0, S:32,V:S/binary,R/binary>>, O, B)        -> [{binary, V} | pm(R, O, B+5+S)];
+pm(<<16#b1, S:32,V:S/binary,R/binary>>, O, B)        -> [{utf8, V} | pm(R, O, B+5+S)];
 %% Compounds
-parse_all(<<16#45, R/binary>>) ->
-    [{list, []} | parse_all(R)];
-parse_all(<<16#c0, S:8/unsigned,CountAndValue:S/binary,R/binary>>) ->
-    [{list, parse_compound_all(8, CountAndValue)} | parse_all(R)];
-parse_all(<<16#c1, S:8/unsigned,CountAndValue:S/binary,R/binary>>) ->
-    List = parse_compound_all(8, CountAndValue),
-    [{map, mapify(List)} | parse_all(R)];
-parse_all(<<16#d0, S:32/unsigned,CountAndValue:S/binary,R/binary>>) ->
-    [{list, parse_compound_all(32, CountAndValue)} | parse_all(R)];
-parse_all(<<16#d1, S:32/unsigned,CountAndValue:S/binary,R/binary>>) ->
-    List = parse_compound_all(32, CountAndValue),
-    [{map, mapify(List)} | parse_all(R)];
-
+pm(<<16#d0, S:32,CountAndValue:S/binary,R/binary>>, O, B) ->
+    [{list, pm_compound(32, CountAndValue, O, B+5)} | pm(R, O, B+5+S)];
+pm(<<16#d1, S:32,CountAndValue:S/binary,R/binary>>, O, B) ->
+    List = pm_compound(32, CountAndValue, O, B+5),
+    [{map, mapify(List)} | pm(R, O, B+5+S)];
 %% Arrays
-parse_all(<<16#e0, S:8/unsigned,CountAndV:S/binary,R/binary>>) ->
-    [parse_array(8, CountAndV) | parse_all(R)];
-parse_all(<<16#f0, S:32/unsigned,CountAndV:S/binary,R/binary>>) ->
-    [parse_array(32, CountAndV) | parse_all(R)];
-
+pm(<<16#e0, S:8,CountAndV:S/binary,R/binary>>, O, B) ->
+    [parse_array(8, CountAndV) | pm(R, O, B+2+S)];
+pm(<<16#f0, S:32,CountAndV:S/binary,R/binary>>, O, B) ->
+    [parse_array(32, CountAndV) | pm(R, O, B+5+S)];
 %% NaN or +-inf
-parse_all(<<16#72, V:32, R/binary>>) ->
-    [{as_is, 16#72, <>} | parse_all(R)];
-parse_all(<<16#82, V:64, R/binary>>) ->
-    [{as_is, 16#82, <>} | parse_all(R)];
-
+pm(<<16#72, V:32, R/binary>>, O, B) ->
+    [{as_is, 16#72, <>} | pm(R, O, B+5)];
+pm(<<16#82, V:64, R/binary>>, O, B) ->
+    [{as_is, 16#82, <>} | pm(R, O, B+9)];
 %% decimals
-parse_all(<<16#74, V:32, R/binary>>) ->
-    [{as_is, 16#74, <>} | parse_all(R)];
-parse_all(<<16#84, V:64, R/binary>>) ->
-    [{as_is, 16#84, <>} | parse_all(R)];
-parse_all(<<16#94, V:128, R/binary>>) ->
-    [{as_is, 16#94, <>} | parse_all(R)];
-
-parse_all(<>) ->
-    throw({primitive_type_unsupported, Type, _Bin}).
-
-parse_compound_all(UnitSize, Bin) ->
-    <<_Count:UnitSize, Bin1/binary>> = Bin,
-    parse_all(Bin1).
+pm(<<16#74, V:32, R/binary>>, O, B) ->
+    [{as_is, 16#74, <>} | pm(R, O, B+5)];
+pm(<<16#84, V:64, R/binary>>, O, B) ->
+    [{as_is, 16#84, <>} | pm(R, O, B+9)];
+pm(<<16#94, V:128, R/binary>>, O, B) ->
+    [{as_is, 16#94, <>} | pm(R, O, B+17)];
+pm(<>, _O, B) ->
+    throw({primitive_type_unsupported, Type, {position, B}}).
+
+pm_compound(UnitSize, Bin, O, B) ->
+    <<_IgnoreCount:UnitSize, Value/binary>> = Bin,
+    pm(Value, O, B + UnitSize div 8).
+
+reached_body(Position, DescriptorCode) ->
+    [{{pos, Position}, {body, DescriptorCode}}].
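The key interface change here is that parse/1 now returns the value plus the number of bytes parsed instead of the value plus the rest of the binary. A small round-trip against the generator, based on the clauses shown above:

    Bin = iolist_to_binary(amqp10_binary_generator:generate({utf8, <<"abc">>})),
    {{utf8, <<"abc">>}, 5} = amqp10_binary_parser:parse(Bin),
    [{utf8, <<"abc">>}]    = amqp10_binary_parser:parse_many(Bin, []).
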
diff --git a/deps/amqp10_common/src/amqp10_framing.erl b/deps/amqp10_common/src/amqp10_framing.erl
index f85ce2302730..4742a639766a 100644
--- a/deps/amqp10_common/src/amqp10_framing.erl
+++ b/deps/amqp10_common/src/amqp10_framing.erl
@@ -2,13 +2,21 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 -module(amqp10_framing).
 
--export([encode/1, encode_described/3, decode/1, version/0,
-         symbol_for/1, number_for/1, encode_bin/1, decode_bin/1, pprint/1]).
+-export([version/0,
+         encode/1,
+         encode_described/3,
+         encode_bin/1,
+         decode/1,
+         decode_bin/1,
+         decode_bin/2,
+         symbol_for/1,
+         number_for/1,
+         pprint/1]).
 
 %% debug
 -export([fill_from_list/2, fill_from_map/2]).
@@ -100,14 +108,16 @@ symbolify(FieldName) when is_atom(FieldName) ->
 
 %% A sequence comes as an arbitrary list of values; it's not a
 %% composite type.
-decode({described, Descriptor, {list, Fields}}) ->
+decode({described, Descriptor, {list, Fields} = Type}) ->
     case amqp10_framing0:record_for(Descriptor) of
         #'v1_0.amqp_sequence'{} ->
             #'v1_0.amqp_sequence'{content = [decode(F) || F <- Fields]};
+        #'v1_0.amqp_value'{} ->
+            #'v1_0.amqp_value'{content = Type};
         Else ->
             fill_from_list(Else, Fields)
     end;
-decode({described, Descriptor, {map, Fields}}) ->
+decode({described, Descriptor, {map, Fields} = Type}) ->
     case amqp10_framing0:record_for(Descriptor) of
         #'v1_0.application_properties'{} ->
             #'v1_0.application_properties'{content = decode_map(Fields)};
@@ -117,13 +127,15 @@ decode({described, Descriptor, {map, Fields}}) ->
             #'v1_0.message_annotations'{content = decode_map(Fields)};
         #'v1_0.footer'{} ->
             #'v1_0.footer'{content = decode_map(Fields)};
+        #'v1_0.amqp_value'{} ->
+            #'v1_0.amqp_value'{content = Type};
         Else ->
             fill_from_map(Else, Fields)
     end;
-decode({described, Descriptor, {binary, Field}}) ->
+decode({described, Descriptor, {binary, Field} = Type}) ->
     case amqp10_framing0:record_for(Descriptor) of
         #'v1_0.amqp_value'{} ->
-            #'v1_0.amqp_value'{content = {binary, Field}};
+            #'v1_0.amqp_value'{content = Type};
         #'v1_0.data'{} ->
             #'v1_0.data'{content = Field}
     end;
@@ -157,7 +169,8 @@ encode_described(map, CodeNumber,
 encode_described(map, CodeNumber,
                  #'v1_0.message_annotations'{content = Content}) ->
     {described, {ulong, CodeNumber}, {map, Content}};
-encode_described(map, CodeNumber, #'v1_0.footer'{content = Content}) ->
+encode_described(map, CodeNumber,
+                 #'v1_0.footer'{content = Content}) ->
     {described, {ulong, CodeNumber}, {map, Content}};
 encode_described(binary, CodeNumber, #'v1_0.data'{content = Content}) ->
     {described, {ulong, CodeNumber}, {binary, Content}};
@@ -169,11 +182,21 @@ encode_described(annotations, CodeNumber, Frame) ->
 encode(X) ->
     amqp10_framing0:encode(X).
 
+-spec encode_bin(term()) -> iodata().
 encode_bin(X) ->
     amqp10_binary_generator:generate(encode(X)).
 
-decode_bin(X) ->
-    [decode(PerfDesc) || PerfDesc <- amqp10_binary_parser:parse_all(X)].
+-spec decode_bin(binary()) -> [term()].
+decode_bin(Binary) ->
+    [decode(Section) || Section <- amqp10_binary_parser:parse_many(Binary, [])].
+
+-spec decode_bin(binary(), amqp10_binary_parser:opts()) -> [term()].
+decode_bin(Binary, Opts) ->
+    lists:map(fun({Pos = {pos, _}, Section}) ->
+                      {Pos, decode(Section)};
+                 (Section) ->
+                      decode(Section)
+              end, amqp10_binary_parser:parse_many(Binary, Opts)).
 
 symbol_for(X) ->
     amqp10_framing0:symbol_for(X).
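
A note on the new `decode_bin/2` (not part of the diff): the options are passed straight through to `amqp10_binary_parser:parse_many/2`, and entries can come back either as plain decoded sections or as `{{pos, Position}, Section}` pairs carrying the byte offset of that section in the input binary; the property suite below uses this to locate the first bare-message and body sections. A hedged sketch of consuming both shapes, where `Payload` is a placeholder for an encoded message and `server_mode` is the option exercised by that suite:

%% Sketch only; Payload stands in for an AMQP 1.0 encoded message binary.
Decoded = amqp10_framing:decode_bin(Payload, [server_mode]),
lists:foreach(fun({{pos, Pos}, Section}) ->
                      io:format("section ~tp starts at byte ~b~n", [Section, Pos]);
                 (Section) ->
                      io:format("section ~tp~n", [Section])
              end, Decoded).
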
diff --git a/deps/amqp10_common/src/amqp10_util.erl b/deps/amqp10_common/src/amqp10_util.erl
new file mode 100644
index 000000000000..9df68c1b0339
--- /dev/null
+++ b/deps/amqp10_common/src/amqp10_util.erl
@@ -0,0 +1,20 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  All rights reserved.
+%%
+
+-module(amqp10_util).
+-include_lib("amqp10_common/include/amqp10_types.hrl").
+-export([link_credit_snd/3]).
+
+%% AMQP 1.0 §2.6.7
+-spec link_credit_snd(sequence_no(), uint(), sequence_no()) -> uint().
+link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd) ->
+    LinkCreditSnd = serial_number:diff(
+                      serial_number:add(DeliveryCountRcv, LinkCreditRcv),
+                      DeliveryCountSnd),
+    %% LinkCreditSnd can be negative when receiver decreases credits
+    %% while messages are in flight. Maintain a floor of zero.
+    max(0, LinkCreditSnd).
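
Worked examples for `link_credit_snd/3` (illustration only, not part of the diff): the credit left at the sender is the receiver's delivery-count plus its granted credit, minus the sender's own delivery-count, computed with serial-number arithmetic and clamped at zero.

3 = amqp10_util:link_credit_snd(10, 5, 12),
%% serial-number arithmetic keeps the result correct across the 32-bit wrap:
6 = amqp10_util:link_credit_snd(16#fffffffe, 10, 2),
%% and the floor of zero applies when the receiver shrinks the window in flight:
0 = amqp10_util:link_credit_snd(10, 1, 12).
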
diff --git a/deps/amqp10_common/src/serial_number.erl b/deps/amqp10_common/src/serial_number.erl
new file mode 100644
index 000000000000..8f6cabcf1515
--- /dev/null
+++ b/deps/amqp10_common/src/serial_number.erl
@@ -0,0 +1,131 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+
+%% https://www.ietf.org/rfc/rfc1982.txt
+-module(serial_number).
+-include("amqp10_types.hrl").
+
+-export([add/2,
+         compare/2,
+         ranges/1,
+         in_range/3,
+         diff/2,
+         foldl/4]).
+
+-ifdef(TEST).
+-export([usort/1]).
+-endif.
+
+-type serial_number() :: sequence_no().
+-export_type([serial_number/0]).
+
+%% SERIAL_BITS = 32
+%% 2 ^ SERIAL_BITS
+-define(SERIAL_SPACE, 16#100000000).
+%% 2 ^ (SERIAL_BITS - 1) - 1
+-define(SERIAL_MAX_ADDEND, 16#7fffffff).
+
+-spec add(serial_number(), non_neg_integer()) ->
+    serial_number().
+add(S, N)
+  when N >= 0 andalso
+       N =< ?SERIAL_MAX_ADDEND ->
+    (S + N) rem ?SERIAL_SPACE;
+add(S, N) ->
+    exit({undefined_serial_addition, S, N}).
+
+%% 2 ^ (SERIAL_BITS - 1)
+-define(COMPARE, 2_147_483_648).
+
+-spec compare(serial_number(), serial_number()) ->
+    equal | less | greater.
+compare(A, B) ->
+    if A =:= B ->
+           equal;
+       (A < B andalso B - A < ?COMPARE) orelse
+       (A > B andalso A - B > ?COMPARE) ->
+           less;
+       (A < B andalso B - A > ?COMPARE) orelse
+       (A > B andalso A - B < ?COMPARE) ->
+           greater;
+       true ->
+           exit({undefined_serial_comparison, A, B})
+    end.
+
+-spec usort([serial_number()]) ->
+    [serial_number()].
+usort(L) ->
+    lists:usort(fun(A, B) ->
+                        compare(A, B) =/= greater
+                end, L).
+
+%% Takes a list of serial numbers and returns tuples
+%% {First, Last} representing contiguous serial numbers.
+-spec ranges([serial_number()]) ->
+    [{First :: serial_number(), Last :: serial_number()}].
+ranges([]) ->
+    [];
+ranges(SerialNumbers) ->
+    [First | Rest] = usort(SerialNumbers),
+    ranges0(Rest, [{First, First}]).
+
+ranges0([], Acc) ->
+    lists:reverse(Acc);
+ranges0([H | Rest], [{First, Last} | AccRest] = Acc0) ->
+    case add(Last, 1) of
+        H ->
+            Acc = [{First, H} | AccRest],
+            ranges0(Rest, Acc);
+        _ ->
+            Acc = [{H, H} | Acc0],
+            ranges0(Rest, Acc)
+    end.
+
+-spec in_range(serial_number(), serial_number(), serial_number()) ->
+    boolean().
+in_range(S, First, Last) ->
+    case compare(S, First) of
+        less ->
+            false;
+        _ ->
+            case compare(S, Last) of
+                greater ->
+                    false;
+                _ ->
+                    true
+            end
+    end.
+
+-define(SERIAL_DIFF_BOUND, 16#80000000).
+-spec diff(serial_number(), serial_number()) -> integer().
+diff(A, B) ->
+    Diff = A - B,
+    if Diff > (?SERIAL_DIFF_BOUND) ->
+           %% B is actually greater than A
+           - (?SERIAL_SPACE - Diff);
+       Diff < - (?SERIAL_DIFF_BOUND) ->
+           ?SERIAL_SPACE + Diff;
+       Diff < ?SERIAL_DIFF_BOUND andalso Diff > -?SERIAL_DIFF_BOUND ->
+           Diff;
+       true ->
+           exit({undefined_serial_diff, A, B})
+    end.
+
+-spec foldl(Fun, Acc0, First, Last) -> Acc1 when
+      Fun :: fun((serial_number(), AccIn) -> AccOut),
+                 Acc0 :: term(),
+                 Acc1 :: term(),
+                 AccIn :: term(),
+                 AccOut :: term(),
+                 First :: serial_number(),
+                 Last :: serial_number().
+
+foldl(Fun, Acc0, Current, Last) ->
+    Acc = Fun(Current, Acc0),
+    case compare(Current, Last) of
+        less -> foldl(Fun, Acc, add(Current, 1), Last);
+        equal -> Acc
+    end.
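
Quick orientation on the RFC 1982 semantics above (illustration only): all arithmetic is modulo 2^32, so values just past the wrap point still compare as greater than values just before it, and `ranges/1` treats the wrap as contiguous.

1 = serial_number:add(16#ffffffff, 2),
less = serial_number:compare(16#fffffffe, 1),
3 = serial_number:diff(1, 16#fffffffe),
[{16#ffffffff, 1}] = serial_number:ranges([16#ffffffff, 0, 1]).
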
diff --git a/deps/amqp10_common/test/binary_generator_SUITE.erl b/deps/amqp10_common/test/binary_generator_SUITE.erl
index 6fdc64a6299d..ac63d1b7a661 100644
--- a/deps/amqp10_common/test/binary_generator_SUITE.erl
+++ b/deps/amqp10_common/test/binary_generator_SUITE.erl
@@ -67,9 +67,8 @@ null(_Config) ->
 booleans(_Config) ->
     roundtrip(true),
     roundtrip(false),
-    roundtrip({boolean, false}),
-    roundtrip({boolean, true}),
-    ok.
+    ?assertEqual(true, roundtrip_return({boolean, true})),
+    ?assertEqual(false, roundtrip_return({boolean, false})).
 
 symbol(_Config) ->
     roundtrip({symbol, <<"SYMB">>}),
@@ -110,21 +109,22 @@ numerals(_Config) ->
 
 utf8(_Config) ->
     roundtrip({utf8, <<"hi">>}),
-    roundtrip({utf8, binary:copy(<<"asdfghjk">>, 64)}),
+    roundtrip({utf8, binary:copy(<<"abcdefgh">>, 64)}),
     ok.
 
 char(_Config) ->
-    roundtrip({char, <<$A/utf32>>}),
+    roundtrip({char, $🎉}),
     ok.
 
 list(_Config) ->
     %% list:list0
     roundtrip({list, []}),
     %% list:list8
-    roundtrip({list, [{utf8, <<"hi">>},
+    roundtrip({list, [
+                      {utf8, <<"hi">>},
                       {int, 123},
                       {binary, <<"data">>},
-                      {array, int, [{int, 1}, {int, 2}, {int, 3}]},
+                      {array, int, [{int, 1}, {int, -2147483648}, {int, 2147483647}]},
                       {described,
                        {utf8, <<"URL">>},
                        {utf8, <<"http://example.org/hello-world">>}}
@@ -164,24 +164,64 @@ array(_Config) ->
     roundtrip({array, ulong, [{ulong, 0}, {ulong, 16#FFFFFFFFFFFFFFFF}]}),
     roundtrip({array, long, [{long, 0}, {long, -16#8000000000000},
                              {long, 16#7FFFFFFFFFFFFF}]}),
-    roundtrip({array, boolean, [{boolean, true}, {boolean, false}]}),
-    % array of arrays
-    % TODO: does the inner type need to be consistent across the array?
+    roundtrip({array, boolean, [true, false]}),
+
+    ?assertEqual({array, boolean, [true, false]},
+                 roundtrip_return({array, boolean, [{boolean, true}, {boolean, false}]})),
+
+    %% array of arrays
     roundtrip({array, array, []}),
     roundtrip({array, array, [{array, symbol, [{symbol, <<"ANONYMOUS">>}]}]}),
+    roundtrip({array, array, [{array, symbol, [{symbol, <<"ANONYMOUS">>}]},
+                              {array, symbol, [{symbol, <<"sym1">>},
+                                               {symbol, <<"sym2">>}]}]}),
+
+    %% array of lists
+    roundtrip({array, list, []}),
+    roundtrip({array, list, [{list, [{symbol, <<"sym">>}]},
+                             {list, [null,
+                                     {described,
+                                      {utf8, <<"URL">>},
+                                      {utf8, <<"http://example.org/hello-world">>}}]},
+                             {list, []},
+                             {list, [true, false, {byte, -128}]}
+                            ]}),
+
+    %% array of maps
+    roundtrip({array, map, []}),
+    roundtrip({array, map, [{map, [{{symbol, <<"k1">>}, {utf8, <<"v1">>}}]},
+                            {map, []},
+                            {map, [{{described,
+                                     {utf8, <<"URL">>},
+                                     {utf8, <<"http://example.org/hello-world">>}},
+                                    {byte, -1}},
+                                   {{int, 0}, {ulong, 0}}
+                                  ]}
+                           ]}),
 
     Desc = {utf8, <<"URL">>},
-    roundtrip({array, {described, Desc, utf8},
-               [{described, Desc, {utf8, <<"http://example.org/hello">>}}]}),
     roundtrip({array, {described, Desc, utf8}, []}),
+    roundtrip({array, {described, Desc, utf8},
+               [{described, Desc, {utf8, <<"http://example.org/hello1">>}},
+                {described, Desc, {utf8, <<"http://example.org/hello2">>}}]}),
+
     %% array:array32
-    roundtrip({array, boolean, [{boolean, true} || _ <- lists:seq(1, 256)]}),
+    roundtrip({array, boolean, [true || _ <- lists:seq(1, 256)]}),
     ok.
 
 %% Utility
 
 roundtrip(Term) ->
     Bin = iolist_to_binary(amqp10_binary_generator:generate(Term)),
-    % generate returns an iolist but parse expects a binary
-    ?assertMatch({Term, _}, amqp10_binary_parser:parse(Bin)),
-    ?assertMatch([Term | _], amqp10_binary_parser:parse_all(Bin)).
+    ?assertEqual({Term, size(Bin)}, amqp10_binary_parser:parse(Bin)),
+    ?assertEqual([Term], amqp10_binary_parser:parse_many(Bin, [])).
+
+%% Return the roundtripped term.
+roundtrip_return(Term) ->
+    Bin = iolist_to_binary(amqp10_binary_generator:generate(Term)),
+    %% We assert only that amqp10_binary_parser:parse/1 and
+    %% amqp10_binary_parser:parse_many/2 return the same term.
+    {RoundTripTerm, BytesParsed} = amqp10_binary_parser:parse(Bin),
+    ?assertEqual(size(Bin), BytesParsed),
+    ?assertEqual([RoundTripTerm], amqp10_binary_parser:parse_many(Bin, [])),
+    RoundTripTerm.
diff --git a/deps/amqp10_common/test/binary_parser_SUITE.erl b/deps/amqp10_common/test/binary_parser_SUITE.erl
index 4aec1f5dac28..a217509c5325 100644
--- a/deps/amqp10_common/test/binary_parser_SUITE.erl
+++ b/deps/amqp10_common/test/binary_parser_SUITE.erl
@@ -5,7 +5,6 @@
 -export([
          ]).
 
--include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
 %%%===================================================================
@@ -30,38 +29,24 @@ groups() ->
      {tests, [parallel], all_tests()}
     ].
 
-init_per_suite(Config) ->
-    Config.
-
-end_per_suite(_Config) ->
-    ok.
-
-init_per_group(_Group, Config) ->
-    Config.
-
-end_per_group(_Group, _Config) ->
-    ok.
-
-init_per_testcase(_TestCase, Config) ->
-    Config.
-
-end_per_testcase(_TestCase, _Config) ->
-    ok.
-
-%%%===================================================================
-%%% Test cases
-%%%===================================================================
-
 roundtrip(_Config) ->
-    Terms = [null,
+    Terms = [
+             null,
              {described,
               {symbol, <<"URL">>},
               {utf8, <<"http://example.org/hello-world">>}},
              {described,
               {symbol, <<"URL">>},
               {binary, <<"https://rabbitmq.com">>}},
+             %% "The descriptor portion of a described format code is itself
+             %% any valid AMQP encoded value, including other described values." [1.2]
+             {described,
+              {described,
+               {symbol, <<"inner constructor">>},
+               {binary, <<"inner value">>}},
+              {binary, <<"outer value">>}},
              {array, ubyte, [{ubyte, 1}, {ubyte, 255}]},
-             {boolean, false},
+             true,
              {list, [{utf8, <<"hi">>},
                      {described,
                       {symbol, <<"URL">>},
@@ -75,7 +60,8 @@ roundtrip(_Config) ->
                     {{utf8, <<"key2">>}, {int, 33}}
                    ]},
              {array, {described, {utf8, <<"URL">>}, utf8}, []},
-             false],
+             false
+            ],
 
     Bin = lists:foldl(
             fun(T, Acc) ->
@@ -83,8 +69,7 @@ roundtrip(_Config) ->
                     <<Acc/binary, (iolist_to_binary(amqp10_binary_generator:generate(T)))/binary>>
             end, <<>>, Terms),
 
-    ?assertEqual(Terms, amqp10_binary_parser:parse_all_int(Bin)),
-    ?assertEqual(Terms, amqp10_binary_parser:parse_all(Bin)).
+    ?assertEqual(Terms, amqp10_binary_parser:parse_many(Bin, [])).
 
 array_with_extra_input(_Config) ->
     Bin = <<83,16,192,85,10,177,0,0,0,1,48,161,12,114,97,98,98,105,116, 109,113,45,98,111,120,112,255,255,0,0,96,0,50,112,0,0,19,136,163,5,101,110,45,85,83,224,14,2,65,5,102,105,45,70,73,5,101,110,45,85,83,64,64,193,24,2,163,20,68,69,70,69,78,83,73,67,83,46,84,69,83,84,46,83,85,73,84,69,65>>,
@@ -93,11 +78,10 @@ array_with_extra_input(_Config) ->
                 %% element type, input, accumulated result
                 65, <<105,45,70,73,5,101,110,45,85,83>>, [true,true]},
 
-    ?assertExit(Expected, amqp10_binary_parser:parse_all_int(Bin)),
-    ?assertExit(Expected, amqp10_binary_parser:parse_all(Bin)).
+    ?assertExit(Expected, amqp10_binary_parser:parse_many(Bin, [])).
 
 unsupported_type(_Config) ->
-    Bin = <<2/integer, "hey">>,
-    Expected = {primitive_type_unsupported, 16#02, <<"hey">>},
-    ?assertThrow(Expected, amqp10_binary_parser:parse_all_int(Bin)),
-    ?assertThrow(Expected, amqp10_binary_parser:parse_all(Bin)).
+    UnsupportedType = 16#02,
+    Bin = <<UnsupportedType, "hey">>,
+    Expected = {primitive_type_unsupported, UnsupportedType, {position, 0}},
+    ?assertThrow(Expected, amqp10_binary_parser:parse_many(Bin, [])).
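
One more illustration of the position reporting (not an added test case): the offset in the throw is the absolute position of the offending constructor, so a valid one-byte value in front shifts it accordingly. This assumes null (16#40) is parsed as a single-byte primitive, as it is elsewhere in the parser.

Input = <<16#40, 16#02, "hey">>,
?assertThrow({primitive_type_unsupported, 16#02, {position, 1}},
             amqp10_binary_parser:parse_many(Input, [])).
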
diff --git a/deps/amqp10_common/test/prop_SUITE.erl b/deps/amqp10_common/test/prop_SUITE.erl
new file mode 100644
index 000000000000..4cb04f594f37
--- /dev/null
+++ b/deps/amqp10_common/test/prop_SUITE.erl
@@ -0,0 +1,436 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
+
+-module(prop_SUITE).
+-compile([export_all, nowarn_export_all]).
+
+-include_lib("proper/include/proper.hrl").
+-include("amqp10_framing.hrl").
+
+-import(rabbit_ct_proper_helpers, [run_proper/3]).
+
+all() ->
+    [{group, tests}].
+
+groups() ->
+    [
+     {tests, [parallel],
+      [
+       prop_single_primitive_type_parse,
+       prop_single_primitive_type_parse_many,
+       prop_many_primitive_types_parse,
+       prop_many_primitive_types_parse_many,
+       prop_annotated_message,
+       prop_server_mode_body,
+       prop_server_mode_bare_message
+      ]}
+    ].
+
+%%%%%%%%%%%%%%%%%%
+%%% Properties %%%
+%%%%%%%%%%%%%%%%%%
+
+prop_single_primitive_type_parse(_Config) ->
+    run_proper(
+      fun() -> ?FORALL(Val,
+                       oneof(primitive_types()),
+                       begin
+                           Bin = iolist_to_binary(amqp10_binary_generator:generate(Val)),
+                           equals({Val, size(Bin)}, amqp10_binary_parser:parse(Bin))
+                       end)
+      end, [], 10_000).
+
+prop_single_primitive_type_parse_many(_Config) ->
+    run_proper(
+      fun() -> ?FORALL(Val,
+                       oneof(primitive_types()),
+                       begin
+                           Bin = iolist_to_binary(amqp10_binary_generator:generate(Val)),
+                           equals([Val], amqp10_binary_parser:parse_many(Bin, []))
+                       end)
+      end, [], 10_000).
+
+prop_many_primitive_types_parse(_Config) ->
+    run_proper(
+      fun() -> ?FORALL(Vals,
+                       list(oneof(primitive_types())),
+                       begin
+                           Bin = iolist_to_binary([amqp10_binary_generator:generate(V) || V <- Vals]),
+                           PosValList = parse(Bin, 0, []),
+                           equals(Vals, [Val || {_Pos, Val} <- PosValList])
+                       end)
+      end, [], 1000).
+
+prop_many_primitive_types_parse_many(_Config) ->
+    run_proper(
+      fun() -> ?FORALL(Vals,
+                       list(oneof(primitive_types())),
+                       begin
+                           Bin = iolist_to_binary([amqp10_binary_generator:generate(V) || V <- Vals]),
+                           equals(Vals, amqp10_binary_parser:parse_many(Bin, []))
+                       end)
+      end, [], 1000).
+
+prop_annotated_message(_Config) ->
+    run_proper(
+      fun() -> ?FORALL(Sections,
+                       annotated_message(),
+                       begin
+                           Bin = iolist_to_binary([amqp10_framing:encode_bin(S) || S <- Sections]),
+                           equals(Sections, amqp10_framing:decode_bin(Bin))
+                       end)
+      end, [], 1000).
+
+prop_server_mode_body(_Config) ->
+    run_proper(
+      fun() -> ?FORALL(Sections,
+                       annotated_message(),
+                       begin
+                           {value,
+                            FirstBodySection} = lists:search(
+                                                  fun(#'v1_0.data'{}) -> true;
+                                                     (#'v1_0.amqp_sequence'{}) -> true;
+                                                     (#'v1_0.amqp_value'{}) -> true;
+                                                     (_) -> false
+                                                  end, Sections),
+
+                           Bin = iolist_to_binary([amqp10_framing:encode_bin(S) || S <- Sections]),
+                           %% Invariant 1: Decoder should return the correct
+                           %% byte position of the first body section.
+                           Decoded = amqp10_framing:decode_bin(Bin, [server_mode]),
+                           {value,
+                            {{pos, Pos},
+                             {body, Code}}} = lists:search(fun(({{pos, _Pos}, {body, _Code}})) ->
+                                                                   true;
+                                                              (_) ->
+                                                                   false
+                                                           end, Decoded),
+                           FirstBodySectionBin = binary_part(Bin, Pos, size(Bin) - Pos),
+                           {Section, _NumBytes} = amqp10_binary_parser:parse(FirstBodySectionBin),
+                           %% Invariant 2: Decoder should have returned the
+                           %% correct descriptor code of the first body section.
+                           {described, {ulong, Code}, _Val} = Section,
+                           equals(FirstBodySection, amqp10_framing:decode(Section))
+                       end)
+      end, [], 1000).
+
+prop_server_mode_bare_message(_Config) ->
+    run_proper(
+      fun() -> ?FORALL(Sections,
+                       annotated_message(),
+                       begin
+                           {value,
+                            FirstBareMsgSection} = lists:search(
+                                                     fun(#'v1_0.properties'{}) -> true;
+                                                        (#'v1_0.application_properties'{}) -> true;
+                                                        (#'v1_0.data'{}) -> true;
+                                                        (#'v1_0.amqp_sequence'{}) -> true;
+                                                        (#'v1_0.amqp_value'{}) -> true;
+                                                        (_) -> false
+                                                     end, Sections),
+
+                           Bin = iolist_to_binary([amqp10_framing:encode_bin(S) || S <- Sections]),
+                           %% Invariant: Decoder should return the correct
+                           %% byte position of the first bare message section.
+                           Decoded = amqp10_framing:decode_bin(Bin, [server_mode]),
+                           {value,
+                            {{pos, Pos}, _Sect}} = lists:search(fun(({{pos, _Pos}, _Sect})) ->
+                                                                        true;
+                                                                   (_) ->
+                                                                        false
+                                                                end, Decoded),
+                           FirstBareMsgSectionBin = binary_part(Bin, Pos, size(Bin) - Pos),
+                           {Section, _NumBytes} = amqp10_binary_parser:parse(FirstBareMsgSectionBin),
+                           equals(FirstBareMsgSection, amqp10_framing:decode(Section))
+                       end)
+      end, [], 1000).
+
+%%%%%%%%%%%%%%%
+%%% Helpers %%%
+%%%%%%%%%%%%%%%
+
+parse(Bin, Parsed, PosVal)
+  when size(Bin) =:= Parsed ->
+    lists:reverse(PosVal);
+parse(Bin, Parsed, PosVal)
+  when size(Bin) > Parsed ->
+    BinPart = binary_part(Bin, Parsed, size(Bin) - Parsed),
+    {Val, NumBytes} = amqp10_binary_parser:parse(BinPart),
+    parse(Bin, Parsed + NumBytes, [{Parsed, Val} | PosVal]).
+
+%%%%%%%%%%%%%%%%%%
+%%% Generators %%%
+%%%%%%%%%%%%%%%%%%
+
+primitive_types() ->
+    fixed_and_variable_width_types() ++
+    compound_types() ++
+    [amqp_array()].
+
+fixed_and_variable_width_types() ->
+    [
+     amqp_null(),
+     amqp_boolean(),
+     amqp_ubyte(),
+     amqp_ushort(),
+     amqp_uint(),
+     amqp_ulong(),
+     amqp_byte(),
+     amqp_short(),
+     amqp_int(),
+     amqp_long(),
+     amqp_float(),
+     amqp_double(),
+     amqp_char(),
+     amqp_timestamp(),
+     amqp_uuid(),
+     amqp_binary(),
+     amqp_string(),
+     amqp_symbol()
+    ].
+
+compound_types() ->
+    [
+     amqp_list(),
+     amqp_map()
+    ].
+
+amqp_null() ->
+    null.
+
+amqp_boolean() ->
+    boolean().
+
+amqp_ubyte() ->
+    {ubyte, integer(0, 16#ff)}.
+
+amqp_ushort() ->
+    {ushort, integer(0, 16#ff_ff)}.
+
+amqp_uint() ->
+    Lim = 16#ff_ff_ff_ff,
+    {uint, oneof([
+                  integer(0, Lim),
+                  ?SIZED(Size, resize(Size * 100, integer(0, Lim)))
+                 ])}.
+
+amqp_ulong() ->
+    Lim = 16#ff_ff_ff_ff_ff_ff_ff_ff,
+    {ulong, oneof([
+                   integer(0, Lim),
+                   ?SIZED(Size, resize(Size * 100_000, integer(0, Lim)))
+                  ])}.
+
+amqp_byte() ->
+    Lim = 16#ff div 2,
+    {byte, integer(-Lim - 1, Lim)}.
+
+amqp_short() ->
+    Lim = 16#ff_ff div 2,
+    {short, integer(-Lim - 1, Lim)}.
+
+amqp_int() ->
+    Lim = 16#ff_ff_ff_ff div 2,
+    {int, oneof([
+                 integer(-Lim - 1, Lim),
+                 ?SIZED(Size, resize(Size * 100, integer(-Lim - 1, Lim)))
+                ])}.
+
+amqp_long() ->
+    Lim = 16#ff_ff_ff_ff_ff_ff_ff_ff div 2,
+    {long, oneof([
+                  integer(-Lim - 1, Lim),
+                  ?SIZED(Size, resize(Size * 100, integer(-Lim - 1, Lim)))
+                 ])}.
+
+%% AMQP float is 32-bit whereas Erlang float is 64-bit.
+%% Therefore, 32-bit encoding any Erlang float will lose precision.
+%% Hence, we use some static floats where we know that they can be represented precisely using 32 bits.
+amqp_float() ->
+    {float, oneof([-1.5, -1.0, 0.0, 1.0, 1.5, 100.0])}.
+
+%% AMQP double and Erlang float are both 64-bit.
+amqp_double() ->
+    {double, float()}.
+
+amqp_char() ->
+    {char, char()}.
+
+amqp_timestamp() ->
+    Now = erlang:system_time(millisecond),
+    YearMillis = 1000 * 60 * 60 * 24 * 365,
+    TimestampMillis1950 = -631_152_000_000,
+    TimestampMillis2200 = 7_258_118_400_000,
+    {timestamp, oneof([integer(Now - YearMillis, Now + YearMillis),
+                       integer(TimestampMillis1950, TimestampMillis2200)
+                      ])}.
+
+amqp_uuid() ->
+    {uuid, binary(16)}.
+
+amqp_binary() ->
+    {binary, oneof([
+                    binary(),
+                    ?SIZED(Size, resize(Size * 10, binary()))
+                   ])}.
+
+amqp_string() ->
+    {utf8, utf8()}.
+
+amqp_symbol() ->
+    {symbol, ?LET(L,
+                  ?SIZED(Size, resize(Size * 10, list(ascii_char()))),
+                  list_to_binary(L))}.
+
+ascii_char() ->
+    integer(0, 127).
+
+amqp_list() ->
+    {list, list(prefer_simple_type())}.
+
+amqp_map() ->
+    {map, ?LET(KvList,
+               list({prefer_simple_type(),
+                     prefer_simple_type()}),
+               lists:uniq(fun({K, _V}) -> K end, KvList)
+              )}.
+
+amqp_array() ->
+    Gens = fixed_and_variable_width_types(),
+    ?LET(N,
+         integer(1, length(Gens)),
+         begin
+             Gen = lists:nth(N, Gens),
+             ?LET(Instance,
+                  Gen,
+                  begin
+                      Constructor = case Instance of
+                                        {T, _V} -> T;
+                                        null -> null;
+                                        V when is_boolean(V) -> boolean
+                                    end,
+                      {array, Constructor, list(Gen)}
+                  end)
+         end).
+
+prefer_simple_type() ->
+    frequency([{200, oneof(fixed_and_variable_width_types())},
+               {1, ?LAZY(oneof(compound_types()))},
+               {1, ?LAZY(amqp_array())}
+              ]).
+
+zero_or_one(Section) ->
+    oneof([
+           [],
+           [Section]
+          ]).
+
+optional(Field) ->
+    oneof([
+           undefined,
+           Field
+          ]).
+
+annotated_message() ->
+    ?LET(H,
+         zero_or_one(header_section()),
+         ?LET(DA,
+              zero_or_one(delivery_annotation_section()),
+              ?LET(MA,
+                   zero_or_one(message_annotation_section()),
+                   ?LET(P,
+                        zero_or_one(properties_section()),
+                        ?LET(AP,
+                             zero_or_one(application_properties_section()),
+                             ?LET(B,
+                                  body(),
+                                  ?LET(F,
+                                       zero_or_one(footer_section()),
+                                       lists:append([H, DA, MA, P, AP, B, F])
+                                      ))))))).
+
+%% "The body consists of one of the following three choices: one or more data sections,
+%% one or more amqp-sequence sections, or a single amqp-value section." [§3.2]
+body() ->
+    oneof([
+           non_empty(list(data_section())),
+           non_empty(list(amqp_sequence_section())),
+           [amqp_value_section()]
+          ]).
+
+header_section() ->
+    #'v1_0.header'{
+       durable = optional(amqp_boolean()),
+       priority = optional(amqp_ubyte()),
+       ttl = optional(milliseconds()),
+       first_acquirer = optional(amqp_boolean()),
+       delivery_count = optional(amqp_uint())}.
+
+delivery_annotation_section() ->
+    #'v1_0.delivery_annotations'{content = annotations()}.
+
+message_annotation_section() ->
+    #'v1_0.message_annotations'{content = annotations()}.
+
+properties_section() ->
+    #'v1_0.properties'{
+       message_id = optional(message_id()),
+       user_id = optional(amqp_binary()),
+       to = optional(address_string()),
+       subject = optional(amqp_string()),
+       reply_to = optional(address_string()),
+       correlation_id = optional(message_id()),
+       content_type = optional(amqp_symbol()),
+       content_encoding = optional(amqp_symbol()),
+       absolute_expiry_time = optional(amqp_timestamp()),
+       creation_time = optional(amqp_timestamp()),
+       group_id = optional(amqp_string()),
+       group_sequence = optional(sequence_no()),
+       reply_to_group_id = optional(amqp_string())}.
+
+application_properties_section() ->
+    Gen = ?LET(KvList,
+               list({amqp_string(),
+                     oneof(fixed_and_variable_width_types() -- [amqp_null()])}),
+               lists:uniq(fun({K, _V}) -> K end, KvList)),
+    #'v1_0.application_properties'{content = Gen}.
+
+data_section() ->
+    #'v1_0.data'{content = binary()}.
+
+amqp_sequence_section() ->
+    #'v1_0.amqp_sequence'{content = list(oneof(primitive_types() -- [amqp_null()]))}.
+
+amqp_value_section() ->
+    #'v1_0.amqp_value'{content = oneof(primitive_types())}.
+
+footer_section() ->
+    #'v1_0.footer'{content = annotations()}.
+
+annotations() ->
+    ?LET(KvList,
+         list({oneof([amqp_symbol(),
+                      amqp_ulong()]),
+               prefer_simple_type()}),
+         begin
+             KvList1 = lists:uniq(fun({K, _V}) -> K end, KvList),
+             lists:filter(fun({_K, V}) -> V =/= null end, KvList1)
+         end).
+
+sequence_no() ->
+    amqp_uint().
+
+milliseconds() ->
+    amqp_uint().
+
+message_id() ->
+    oneof([amqp_ulong(),
+           amqp_uuid(),
+           amqp_binary(),
+           amqp_string()]).
+
+address_string() ->
+    amqp_string().
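
The generators above can also be sampled interactively (illustration only, assuming PropEr's `proper_gen:pick/1` is available in the shell), which is handy when narrowing down a failing section combination:

{ok, Sections} = proper_gen:pick(annotated_message()),
Bin = iolist_to_binary([amqp10_framing:encode_bin(S) || S <- Sections]),
Sections = amqp10_framing:decode_bin(Bin).
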
diff --git a/deps/amqp10_common/test/serial_number_SUITE.erl b/deps/amqp10_common/test/serial_number_SUITE.erl
new file mode 100644
index 000000000000..3d1b2108945d
--- /dev/null
+++ b/deps/amqp10_common/test/serial_number_SUITE.erl
@@ -0,0 +1,152 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%%
+
+-module(serial_number_SUITE).
+-include_lib("eunit/include/eunit.hrl").
+
+-compile([export_all,
+          nowarn_export_all]).
+
+-import(serial_number, [add/2,
+                        compare/2,
+                        usort/1,
+                        ranges/1,
+                        in_range/3,
+                        diff/2,
+                        foldl/4]).
+
+all() -> [test_add,
+          test_compare,
+          test_usort,
+          test_ranges,
+          test_in_range,
+          test_diff,
+          test_foldl].
+
+-dialyzer({nowarn_function, test_add/1}).
+test_add(_Config) ->
+    ?assertEqual(1, add(0, 1)),
+    %% "Addition of a value outside the range
+    %% [0 .. (2^(SERIAL_BITS - 1) - 1)] is undefined."
+    MaxAddend = round(math:pow(2, 32 - 1) - 1),
+    MinAddend = 0,
+    ?assertEqual(MaxAddend, add(0, MaxAddend)),
+    ?assertEqual(MinAddend, add(0, MinAddend)),
+    ?assertEqual(0, add(16#ffffffff, 1)),
+    ?assertEqual(1, add(16#ffffffff, 2)),
+    AddendTooLarge = MaxAddend + 1,
+    ?assertExit({undefined_serial_addition, 0, AddendTooLarge},
+                add(0, AddendTooLarge)),
+    AddendTooSmall = MinAddend - 1,
+    ?assertExit({undefined_serial_addition, 0, AddendTooSmall},
+                add(0, AddendTooSmall)).
+
+test_compare(_Config) ->
+    ?assertEqual(equal, compare(0, 0)),
+    ?assertEqual(equal, compare(16#ffffffff, 16#ffffffff)),
+    ?assertEqual(less, compare(0, 1)),
+    ?assertEqual(greater, compare(1, 0)),
+    ?assertEqual(less, compare(0, 2)),
+    ?assertEqual(less, compare(0, round(math:pow(2, 32 - 1)) - 1)),
+    ?assertExit({undefined_serial_comparison, 0, _},
+                compare(0, round(math:pow(2, 32 - 1)))),
+    ?assertEqual(less, compare(16#ffffffff - 5, 30_000)),
+    ?assertEqual(greater, compare(1, 0)),
+    ?assertEqual(greater, compare(2147483647, 0)),
+    ?assertExit({undefined_serial_comparison, 2147483648, 0},
+                compare(2147483648, 0)).
+
+test_usort(_Config) ->
+    ?assertEqual([],
+                 usort([])),
+    ?assertEqual([3],
+                 usort([3])),
+    ?assertEqual([0],
+                 usort([0, 0])),
+    ?assertEqual([4294967000, 4294967293, 4294967294, 4294967295, 0, 3, 4],
+                 usort([3, 4294967295, 4294967295, 4294967293, 4294967000, 4294967294, 0, 4])).
+
+test_ranges(_Config) ->
+    ?assertEqual([],
+                 ranges([])),
+    ?assertEqual([{0, 0}],
+                 ranges([0])),
+    ?assertEqual([{0, 1}],
+                 ranges([0, 1])),
+    ?assertEqual([{0, 1}],
+                 ranges([1, 0])),
+    ?assertEqual([{0, 0}, {2, 2}],
+                 ranges([0, 2])),
+    ?assertEqual([{0, 0}, {2, 2}],
+                 ranges([2, 0])),
+    %% 2 ^ 32 - 1 = 4294967295
+    ?assertEqual([{4294967290, 4294967290}, {4294967295, 4294967295}],
+                 ranges([4294967290, 4294967295])),
+    ?assertEqual([{4294967290, 4294967290}, {4294967295, 4294967295}],
+                 ranges([4294967295, 4294967290])),
+    ?assertEqual([{4294967294, 4294967294}, {0, 0}],
+                 ranges([4294967294, 0])),
+    ?assertEqual([{4294967294, 4294967294}, {0, 0}],
+                 ranges([0, 4294967294])),
+    ?assertEqual([{4294967295, 0}],
+                 ranges([4294967295, 0])),
+    ?assertEqual([{4294967294, 1}, {3, 5}, {10, 10}, {18, 19}],
+                 ranges([4294967294, 4294967295, 0, 1, 3, 4, 5, 10, 18, 19])),
+    ?assertEqual([{4294967294, 1}, {3, 5}, {10, 10}, {18, 19}],
+                 ranges([1, 10, 4294967294, 0, 3, 4, 5, 19, 18, 4294967295])).
+
+test_in_range(_Config) ->
+    ?assert(in_range(0, 0, 0)),
+    ?assert(in_range(0, 0, 1)),
+    ?assert(in_range(4294967295, 4294967295, 4294967295)),
+    ?assert(in_range(4294967295, 4294967295, 0)),
+    ?assert(in_range(0, 4294967295, 0)),
+    ?assert(in_range(4294967230, 4294967200, 1000)),
+    ?assert(in_range(88, 4294967200, 1000)),
+
+    ?assertNot(in_range(1, 0, 0)),
+    ?assertNot(in_range(4294967295, 0, 0)),
+    ?assertNot(in_range(0, 1, 1)),
+    ?assertNot(in_range(10, 1, 9)),
+    ?assertNot(in_range(1005, 4294967200, 1000)),
+    ?assertNot(in_range(4294967190, 4294967200, 1000)),
+
+    %% Pass wrong First and Last.
+    ?assertNot(in_range(1, 3, 2)),
+    ?assertNot(in_range(2, 3, 2)),
+    ?assertNot(in_range(3, 3, 2)),
+    ?assertNot(in_range(4, 3, 2)),
+
+    ?assertExit({undefined_serial_comparison, 0, 16#80000000},
+                in_range(0, 16#80000000, 16#80000000)).
+
+test_diff(_Config) ->
+    ?assertEqual(0, diff(0, 0)),
+    ?assertEqual(0, diff(1, 1)),
+    ?assertEqual(0, diff(16#ffffffff, 16#ffffffff)),
+    ?assertEqual(1, diff(1, 0)),
+    ?assertEqual(2, diff(1, 16#ffffffff)),
+    ?assertEqual(6, diff(0, 16#fffffffa)),
+    ?assertEqual(206, diff(200, 16#fffffffa)),
+    ?assertEqual(-2, diff(16#ffffffff, 1)),
+    ?assertExit({undefined_serial_diff, _, _},
+                diff(0, 16#80000000)),
+    ?assertExit({undefined_serial_diff, _, _},
+                diff(16#ffffffff, 16#7fffffff)).
+
+test_foldl(_Config) ->
+    ?assertEqual(
+       [16#ffffffff - 1, 16#ffffffff, 0, 1],
+       foldl(fun(S, Acc) ->
+                     Acc ++ [S]
+             end, [], 16#ffffffff - 1, 1)),
+
+    ?assertEqual(
+       [0],
+       foldl(fun(S, Acc) ->
+                     Acc ++ [S]
+             end, [], 0, 0)).
diff --git a/deps/amqp_client/.gitignore b/deps/amqp_client/.gitignore
deleted file mode 100644
index 394996242c71..000000000000
--- a/deps/amqp_client/.gitignore
+++ /dev/null
@@ -1,22 +0,0 @@
-.sw?
-.*.sw?
-*.beam
-*.coverdata
-/.erlang.mk/
-/_build/
-/cover/
-/deps/
-/doc/
-/ebin/
-/escript/
-/git-revisions.txt
-/logs/
-/PACKAGES/
-*.plt
-/plugins/
-/rebar.config
-/rebar.lock
-/sbin
-/test/ct.cover.spec
-/xrefr
-/amqp_client.d
diff --git a/deps/amqp_client/BUILD.bazel b/deps/amqp_client/BUILD.bazel
index 4ed5876ff299..ed36ed8b6b79 100644
--- a/deps/amqp_client/BUILD.bazel
+++ b/deps/amqp_client/BUILD.bazel
@@ -107,6 +107,7 @@ rabbitmq_home(
     name = "broker-for-tests-home",
     plugins = [
         "//deps/rabbit:erlang_app",
+        "//deps/rabbitmq_cli:erlang_app",
     ],
 )
 
diff --git a/deps/amqp_client/include/amqp_client.hrl b/deps/amqp_client/include/amqp_client.hrl
index 2f1aad5e5a2d..d2f9d0c9fb29 100644
--- a/deps/amqp_client/include/amqp_client.hrl
+++ b/deps/amqp_client/include/amqp_client.hrl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 -ifndef(AMQP_CLIENT_HRL).
diff --git a/deps/amqp_client/include/amqp_client_internal.hrl b/deps/amqp_client/include/amqp_client_internal.hrl
index 5b0a02563ff0..ad0a80816f47 100644
--- a/deps/amqp_client/include/amqp_client_internal.hrl
+++ b/deps/amqp_client/include/amqp_client_internal.hrl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 -include("amqp_client.hrl").
diff --git a/deps/amqp_client/include/amqp_gen_consumer_spec.hrl b/deps/amqp_client/include/amqp_gen_consumer_spec.hrl
index 29a305352a25..aa6ce5c7b0db 100644
--- a/deps/amqp_client/include/amqp_gen_consumer_spec.hrl
+++ b/deps/amqp_client/include/amqp_gen_consumer_spec.hrl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 -include("amqp_client.hrl").
diff --git a/deps/amqp_client/include/rabbit_routing_prefixes.hrl b/deps/amqp_client/include/rabbit_routing_prefixes.hrl
index 49c680deecba..0aa7e4727c2f 100644
--- a/deps/amqp_client/include/rabbit_routing_prefixes.hrl
+++ b/deps/amqp_client/include/rabbit_routing_prefixes.hrl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 -define(QUEUE_PREFIX, "/queue").
diff --git a/deps/amqp_client/src/amqp_auth_mechanisms.erl b/deps/amqp_client/src/amqp_auth_mechanisms.erl
index 6a111d58f559..8080c3ffde91 100644
--- a/deps/amqp_client/src/amqp_auth_mechanisms.erl
+++ b/deps/amqp_client/src/amqp_auth_mechanisms.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 %% @private
diff --git a/deps/amqp_client/src/amqp_channel.erl b/deps/amqp_client/src/amqp_channel.erl
index 115d0b3e859f..90735e3ef823 100644
--- a/deps/amqp_client/src/amqp_channel.erl
+++ b/deps/amqp_client/src/amqp_channel.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 %% @type close_reason(Type) = {shutdown, amqp_reason(Type)}.
@@ -91,7 +91,7 @@
                 waiting_set        = gb_trees:empty(),
                 only_acks_received = true,
 
-                %% true | false, only relevant in the direct
+                %% boolean(), only relevant in the direct
                 %% client case.
                 %% when true, consumers will manually notify
                 %% queue pids using rabbit_amqqueue_common:notify_sent/2
@@ -797,11 +797,6 @@ handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
                            #state{confirm_handler = {CH, _Ref}} = State) ->
     CH ! BasicNack,
     {noreply, update_confirm_set(BasicNack, State)};
-
-handle_method_from_server1(#'basic.credit_drained'{} = CreditDrained, none,
-                           #state{consumer = Consumer} = State) ->
-    Consumer ! CreditDrained,
-    {noreply, State};
 handle_method_from_server1(Method, none, State) ->
     {noreply, rpc_bottom_half(Method, State)};
 handle_method_from_server1(Method, Content, State) ->
@@ -844,7 +839,11 @@ handle_channel_exit(Reason = #amqp_error{name = ErrorName, explanation = Expl},
     handle_shutdown({connection_closing, ReportedReason}, State);
 handle_channel_exit(Reason, State) ->
     %% Unexpected death of a channel infrastructure process
-    {stop, {infrastructure_died, Reason}, State}.
+    Reason1 = case Reason of
+                  {shutdown, R} -> R;
+                  _             -> Reason
+              end,
+    {stop, {infrastructure_died, Reason1}, State}.
 
 handle_shutdown({_, 200, _}, State) ->
     {stop, normal, State};
@@ -877,11 +876,8 @@ do(Method, Content, Flow, #state{driver = direct, writer = W}) ->
 
 
 flush_writer(#state{driver = network, writer = Writer}) ->
-    try
-        rabbit_writer:flush(Writer)
-    catch
-        exit:noproc -> ok
-    end;
+    _ = catch rabbit_writer:flush(Writer),
+    ok;
 flush_writer(#state{driver = direct}) ->
     ok.
 amqp_msg(none) ->
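
The `handle_channel_exit/2` change above strips a `{shutdown, Reason}` wrapper from infrastructure exits before reporting them, so callers see `{infrastructure_died, Reason}` rather than a doubly wrapped tuple. A hypothetical helper mirroring that clause (illustration only):

%% Hypothetical sketch, not part of the module.
unwrap_infrastructure_exit({shutdown, Reason}) -> {infrastructure_died, Reason};
unwrap_infrastructure_exit(Reason)             -> {infrastructure_died, Reason}.
%% e.g. unwrap_infrastructure_exit({shutdown, noproc}) =:= {infrastructure_died, noproc}.
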
diff --git a/deps/amqp_client/src/amqp_channel_sup.erl b/deps/amqp_client/src/amqp_channel_sup.erl
index ea3093c45dcc..c9a97456238e 100644
--- a/deps/amqp_client/src/amqp_channel_sup.erl
+++ b/deps/amqp_client/src/amqp_channel_sup.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 %% @private
diff --git a/deps/amqp_client/src/amqp_channel_sup_sup.erl b/deps/amqp_client/src/amqp_channel_sup_sup.erl
index 9753b4e0a5a4..4b7b5f5169dd 100644
--- a/deps/amqp_client/src/amqp_channel_sup_sup.erl
+++ b/deps/amqp_client/src/amqp_channel_sup_sup.erl
@@ -2,14 +2,12 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 %% @private
 -module(amqp_channel_sup_sup).
 
--include("amqp_client.hrl").
-
 -behaviour(supervisor).
 
 -export([start_link/3, start_channel_sup/4]).
diff --git a/deps/amqp_client/src/amqp_channels_manager.erl b/deps/amqp_client/src/amqp_channels_manager.erl
index 9de4ce66123d..b4c0715a5e25 100644
--- a/deps/amqp_client/src/amqp_channels_manager.erl
+++ b/deps/amqp_client/src/amqp_channels_manager.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 %% @private
diff --git a/deps/amqp_client/src/amqp_client.erl b/deps/amqp_client/src/amqp_client.erl
index 693aab36e22f..09e639a55cb5 100644
--- a/deps/amqp_client/src/amqp_client.erl
+++ b/deps/amqp_client/src/amqp_client.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 %% @private
diff --git a/deps/amqp_client/src/amqp_connection.erl b/deps/amqp_client/src/amqp_connection.erl
index 9f8c6ddb92ed..46440c50eda3 100644
--- a/deps/amqp_client/src/amqp_connection.erl
+++ b/deps/amqp_client/src/amqp_connection.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates.  All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 
 %% @type close_reason(Type) = {shutdown, amqp_reason(Type)}.
@@ -65,7 +65,8 @@
 -export([error_atom/1]).
 -export([info/2, info_keys/1, info_keys/0]).
 -export([connection_name/1, update_secret/3]).
--export([socket_adapter_info/2]).
+-export([socket_adapter_info/2,
+         socket_adapter_info/3]).
 
 -define(DEFAULT_CONSUMER, {amqp_selective_consumer, []}).
 
@@ -87,8 +88,10 @@
 %% </li>
 %% <li>node :: atom() - The node the broker runs on (direct only)
 %% </li>
 %% <li>adapter_info :: amqp_adapter_info() - Extra management information for if
 %%     this connection represents a non-AMQP network connection.
-%% </li>
-%% <li>client_properties :: [{binary(), atom(), binary()}] - A list of extra
-%%     client properties to be sent to the server, defaults to []
+%% </li>
+%% <li>client_properties :: [{binary(), atom(), binary()}]
+%%                          | #{binary() => binary()}
+%%     - A list (or a map) of extra client properties to be sent to the server,
+%%     defaults to []
 %% </li>
 %% </ul>
 %%
 %% @type amqp_params_network() = #amqp_params_network{}.
@@ -115,8 +118,10 @@
 %%     defaults to 30000 (network only)
 %% </li>
 %% <li>ssl_options :: term() - The second parameter to be used with the
 %%     ssl:connect/2 function, defaults to 'none' (network only)
-%% </li>
-%% <li>client_properties :: [{binary(), atom(), binary()}] - A list of extra
-%%     client properties to be sent to the server, defaults to []
+%% </li>
+%% <li>client_properties :: [{binary(), atom(), binary()}]
+%%                          | #{binary() => binary()}
+%%     - A list (or a map) of extra client properties to be sent to the server,
+%%     defaults to []
 %% </li>
  • socket_options :: [any()] - Extra socket options. These are %% appended to the default options. See %% inet:setopts/2 @@ -154,10 +159,12 @@ start(AmqpParams, ConnName) when ConnName == undefined; is_binary(ConnName) -> ensure_started(), AmqpParams0 = case AmqpParams of - #amqp_params_direct{password = Password} -> - AmqpParams#amqp_params_direct{password = credentials_obfuscation:encrypt(Password)}; - #amqp_params_network{password = Password} -> - AmqpParams#amqp_params_network{password = credentials_obfuscation:encrypt(Password)} + #amqp_params_direct{password = Password, client_properties = Props} -> + AmqpParams#amqp_params_direct{password = credentials_obfuscation:encrypt(Password), + client_properties = rabbit_data_coercion:to_proplist(Props)}; + #amqp_params_network{password = Password, client_properties = Props} -> + AmqpParams#amqp_params_network{password = credentials_obfuscation:encrypt(Password), + client_properties = rabbit_data_coercion:to_proplist(Props)} end, AmqpParams1 = case AmqpParams0 of @@ -379,7 +386,12 @@ info_keys() -> %% @doc Takes a socket and a protocol, returns an #amqp_adapter_info{} %% based on the socket for the protocol given. socket_adapter_info(Sock, Protocol) -> - amqp_direct_connection:socket_adapter_info(Sock, Protocol). + socket_adapter_info(Sock, Protocol, undefined). + +%% @doc Takes a socket and a protocol, returns an #amqp_adapter_info{} +%% based on the socket for the protocol given. +socket_adapter_info(Sock, Protocol, UniqueId) -> + amqp_direct_connection:socket_adapter_info(Sock, Protocol, UniqueId). %% @spec (ConnectionPid) -> ConnectionName %% where diff --git a/deps/amqp_client/src/amqp_connection_sup.erl b/deps/amqp_client/src/amqp_connection_sup.erl index fead03426aa4..5ec61501ea68 100644 --- a/deps/amqp_client/src/amqp_connection_sup.erl +++ b/deps/amqp_client/src/amqp_connection_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @private diff --git a/deps/amqp_client/src/amqp_connection_type_sup.erl b/deps/amqp_client/src/amqp_connection_type_sup.erl index 73416caabe6b..146cec175164 100644 --- a/deps/amqp_client/src/amqp_connection_type_sup.erl +++ b/deps/amqp_client/src/amqp_connection_type_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @private diff --git a/deps/amqp_client/src/amqp_direct_connection.erl b/deps/amqp_client/src/amqp_direct_connection.erl index 2b4b637b5e13..e40aeec13833 100644 --- a/deps/amqp_client/src/amqp_direct_connection.erl +++ b/deps/amqp_client/src/amqp_direct_connection.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. %% %% @private @@ -17,7 +17,8 @@ -export([init/0, terminate/2, connect/4, do/2, open_channel_args/1, i/2, info_keys/0, handle_message/2, closing/3, channels_terminated/1]). --export([socket_adapter_info/2]). +-export([socket_adapter_info/2, + socket_adapter_info/3]). -record(state, {node, user, @@ -176,17 +177,26 @@ ensure_adapter_info(A = #amqp_adapter_info{name = unknown}) -> ensure_adapter_info(Info) -> Info. socket_adapter_info(Sock, Protocol) -> + socket_adapter_info(Sock, Protocol, undefined). + +socket_adapter_info(Sock, Protocol, UniqueId) -> {PeerHost, PeerPort, Host, Port} = - case rabbit_net:socket_ends(Sock, inbound) of - {ok, Res} -> Res; - _ -> {unknown, unknown, unknown, unknown} - end, - Name = case rabbit_net:connection_string(Sock, inbound) of - {ok, Res1} -> Res1; - _Error -> "(unknown)" + case rabbit_net:socket_ends(Sock, inbound) of + {ok, Res} -> Res; + _ -> {unknown, unknown, unknown, unknown} + end, + ConnectionString = case rabbit_net:connection_string(Sock, inbound) of + {ok, Res1} -> Res1; + _Error -> "(unknown)" + end, + Name = case UniqueId of + undefined -> + rabbit_data_coercion:to_binary(ConnectionString); + _ -> + rabbit_data_coercion:to_binary(rabbit_misc:format("~s (~tp)", [ConnectionString, UniqueId])) end, #amqp_adapter_info{protocol = Protocol, - name = list_to_binary(Name), + name = Name, host = Host, port = Port, peer_host = PeerHost, diff --git a/deps/amqp_client/src/amqp_direct_consumer.erl b/deps/amqp_client/src/amqp_direct_consumer.erl index 2b76d3622623..0e5268886b5a 100644 --- a/deps/amqp_client/src/amqp_direct_consumer.erl +++ b/deps/amqp_client/src/amqp_direct_consumer.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc This module is an implementation of the amqp_gen_consumer @@ -31,8 +31,6 @@ %% This module has no public functions. -module(amqp_direct_consumer). --include("amqp_gen_consumer_spec.hrl"). - -behaviour(amqp_gen_consumer). -export([init/1, handle_consume_ok/3, handle_consume/3, handle_cancel_ok/3, diff --git a/deps/amqp_client/src/amqp_gen_connection.erl b/deps/amqp_client/src/amqp_gen_connection.erl index b383de973f0b..1f7da65b03d6 100644 --- a/deps/amqp_client/src/amqp_gen_connection.erl +++ b/deps/amqp_client/src/amqp_gen_connection.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @private diff --git a/deps/amqp_client/src/amqp_gen_consumer.erl b/deps/amqp_client/src/amqp_gen_consumer.erl index 88c2124f316d..2f05b32365c6 100644 --- a/deps/amqp_client/src/amqp_gen_consumer.erl +++ b/deps/amqp_client/src/amqp_gen_consumer.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc A behaviour module for implementing consumers for diff --git a/deps/amqp_client/src/amqp_main_reader.erl b/deps/amqp_client/src/amqp_main_reader.erl index e2e326dc4520..461cf783fe47 100644 --- a/deps/amqp_client/src/amqp_main_reader.erl +++ b/deps/amqp_client/src/amqp_main_reader.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @private diff --git a/deps/amqp_client/src/amqp_network_connection.erl b/deps/amqp_client/src/amqp_network_connection.erl index 6322508cf375..a5ef739ea0f3 100644 --- a/deps/amqp_client/src/amqp_network_connection.erl +++ b/deps/amqp_client/src/amqp_network_connection.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @private @@ -118,7 +118,6 @@ do_connect({Addr, Family}, connection_timeout = Timeout, socket_options = ExtraOpts}, SIF, State) -> - ok = obtain(), case gen_tcp:connect(Addr, Port, [Family | ?RABBIT_TCP_OPTS] ++ ExtraOpts, Timeout) of @@ -134,7 +133,6 @@ do_connect({Addr, Family}, SIF, State) -> {ok, GlobalSslOpts} = application:get_env(amqp_client, ssl_options), app_utils:start_applications([asn1, crypto, public_key, ssl]), - ok = obtain(), case gen_tcp:connect(Addr, Port, [Family | ?RABBIT_TCP_OPTS] ++ ExtraOpts, Timeout) of @@ -317,7 +315,7 @@ client_properties(UserProperties) -> {<<"version">>, longstr, list_to_binary(Vsn)}, {<<"platform">>, longstr, <<"Erlang">>}, {<<"copyright">>, longstr, - <<"Copyright (c) 2007-2023 VMware, Inc. or its affiliates.">>}, + <<"Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.">>}, {<<"information">>, longstr, <<"Licensed under the MPL. " "See https://www.rabbitmq.com/">>}, @@ -379,11 +377,5 @@ handshake_recv(Expecting) -> end end. -obtain() -> - case code:is_loaded(file_handle_cache) of - false -> ok; - _ -> file_handle_cache:obtain() - end. - get_reason(#'connection.close'{reply_code = ErrCode}) -> ?PROTOCOL:amqp_exception(ErrCode). diff --git a/deps/amqp_client/src/amqp_rpc_client.erl b/deps/amqp_client/src/amqp_rpc_client.erl index 2d4b4a769e9c..ce1ed1dfa965 100644 --- a/deps/amqp_client/src/amqp_rpc_client.erl +++ b/deps/amqp_client/src/amqp_rpc_client.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% @doc This module allows the simple execution of an asynchronous RPC over diff --git a/deps/amqp_client/src/amqp_rpc_server.erl b/deps/amqp_client/src/amqp_rpc_server.erl index c71a02cdde5a..d27a475f5953 100644 --- a/deps/amqp_client/src/amqp_rpc_server.erl +++ b/deps/amqp_client/src/amqp_rpc_server.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc This is a utility module that is used to expose an arbitrary function diff --git a/deps/amqp_client/src/amqp_selective_consumer.erl b/deps/amqp_client/src/amqp_selective_consumer.erl index 80726225d5d7..205d260cd5d6 100644 --- a/deps/amqp_client/src/amqp_selective_consumer.erl +++ b/deps/amqp_client/src/amqp_selective_consumer.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc This module is an implementation of the amqp_gen_consumer @@ -176,10 +176,7 @@ handle_info({'DOWN', _MRef, process, Pid, _Info}, _ -> {ok, State} %% unnamed consumer went down %% before receiving consume_ok end - end; -handle_info(#'basic.credit_drained'{} = Method, State) -> - deliver_to_consumer_or_die(Method, Method, State), - {ok, State}. + end. %% @private handle_call({register_default_consumer, Pid}, _From, @@ -246,8 +243,7 @@ tag(#'basic.consume'{consumer_tag = Tag}) -> Tag; tag(#'basic.consume_ok'{consumer_tag = Tag}) -> Tag; tag(#'basic.cancel'{consumer_tag = Tag}) -> Tag; tag(#'basic.cancel_ok'{consumer_tag = Tag}) -> Tag; -tag(#'basic.deliver'{consumer_tag = Tag}) -> Tag; -tag(#'basic.credit_drained'{consumer_tag = Tag}) -> Tag. +tag(#'basic.deliver'{consumer_tag = Tag}) -> Tag. add_to_monitor_dict(Pid, Monitors) -> case maps:find(Pid, Monitors) of diff --git a/deps/amqp_client/src/amqp_sup.erl b/deps/amqp_client/src/amqp_sup.erl index 13a37920d94a..fecd182a1db2 100644 --- a/deps/amqp_client/src/amqp_sup.erl +++ b/deps/amqp_client/src/amqp_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @private diff --git a/deps/amqp_client/src/amqp_uri.erl b/deps/amqp_client/src/amqp_uri.erl index 7ca723d1e12b..b88d5a835b6c 100644 --- a/deps/amqp_client/src/amqp_uri.erl +++ b/deps/amqp_client/src/amqp_uri.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(amqp_uri). 
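%% Illustrative sketch, not part of this patch: a protocol plugin could use the
%% new socket_adapter_info/3 shown in the hunks above to make the reported
%% connection name unique. `Sock` is an already-accepted socket; the protocol
%% tuple and the exported arity-3 wrapper in amqp_connection are assumptions
%% based on those hunks.
build_adapter_info(Sock, ClientId) ->
    %% With a unique id the adapter name becomes
    %% "<connection string> (<ClientId>)" rather than just the connection string.
    amqp_connection:socket_adapter_info(Sock, {'MQTT', "3.1.1"}, ClientId).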
diff --git a/deps/amqp_client/src/overview.edoc.in b/deps/amqp_client/src/overview.edoc.in index 799b293239d1..ece36f0d0850 100644 --- a/deps/amqp_client/src/overview.edoc.in +++ b/deps/amqp_client/src/overview.edoc.in @@ -1,6 +1,6 @@ -@title AMQP Client for Erlang -@author GoPivotal Inc. -@copyright 2007-2020 VMware, Inc. or its affiliates. +@title AMQP 0-9-1 Client for Erlang +@author Broadcom Inc. +@copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. @version %%VERSION%% diff --git a/deps/amqp_client/src/rabbit_routing_util.erl b/deps/amqp_client/src/rabbit_routing_util.erl index fe458744ed1f..6e7ddcb0b72b 100644 --- a/deps/amqp_client/src/rabbit_routing_util.erl +++ b/deps/amqp_client/src/rabbit_routing_util.erl @@ -2,15 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2013-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_routing_util). -export([init_state/0, dest_prefixes/0, all_dest_prefixes/0]). -export([ensure_endpoint/4, ensure_endpoint/5, ensure_binding/3]). --export([parse_endpoint/1, parse_endpoint/2]). --export([parse_routing/1, dest_temp_queue/1]). +-export([dest_temp_queue/1]). -include("amqp_client.hrl"). -include("rabbit_routing_prefixes.hrl"). @@ -26,50 +25,6 @@ all_dest_prefixes() -> [?TEMP_QUEUE_PREFIX | dest_prefixes()]. %% -------------------------------------------------------------------------- -parse_endpoint(Destination) -> - parse_endpoint(Destination, false). - -parse_endpoint(undefined, AllowAnonymousQueue) -> - parse_endpoint("/queue", AllowAnonymousQueue); - -parse_endpoint(Destination, AllowAnonymousQueue) when is_binary(Destination) -> - parse_endpoint(unicode:characters_to_list(Destination), - AllowAnonymousQueue); -parse_endpoint(Destination, AllowAnonymousQueue) when is_list(Destination) -> - case re:split(Destination, "/", [{return, list}]) of - [Name] -> - {ok, {queue, unescape(Name)}}; - ["", Type | Rest] - when Type =:= "exchange" orelse Type =:= "queue" orelse - Type =:= "topic" orelse Type =:= "temp-queue" -> - parse_endpoint0(atomise(Type), Rest, AllowAnonymousQueue); - ["", "amq", "queue" | Rest] -> - parse_endpoint0(amqqueue, Rest, AllowAnonymousQueue); - ["", "reply-queue" = Prefix | [_|_]] -> - parse_endpoint0(reply_queue, - [lists:nthtail(2 + length(Prefix), Destination)], - AllowAnonymousQueue); - _ -> - {error, {unknown_destination, Destination}} - end. - -parse_endpoint0(exchange, ["" | _] = Rest, _) -> - {error, {invalid_destination, exchange, to_url(Rest)}}; -parse_endpoint0(exchange, [Name], _) -> - {ok, {exchange, {unescape(Name), undefined}}}; -parse_endpoint0(exchange, [Name, Pattern], _) -> - {ok, {exchange, {unescape(Name), unescape(Pattern)}}}; -parse_endpoint0(queue, [], false) -> - {error, {invalid_destination, queue, []}}; -parse_endpoint0(queue, [], true) -> - {ok, {queue, undefined}}; -parse_endpoint0(Type, [[_|_]] = [Name], _) -> - {ok, {Type, unescape(Name)}}; -parse_endpoint0(Type, Rest, _) -> - {error, {invalid_destination, Type, to_url(Rest)}}. - -%% -------------------------------------------------------------------------- - ensure_endpoint(Dir, Channel, Endpoint, State) -> ensure_endpoint(Dir, Channel, Endpoint, [], State). 
@@ -140,16 +95,6 @@ ensure_binding(Queue, {Exchange, RoutingKey}, Channel) -> %% -------------------------------------------------------------------------- -parse_routing({exchange, {Name, undefined}}) -> - {Name, ""}; -parse_routing({exchange, {Name, Pattern}}) -> - {Name, Pattern}; -parse_routing({topic, Name}) -> - {"amq.topic", Name}; -parse_routing({Type, Name}) - when Type =:= queue orelse Type =:= reply_queue orelse Type =:= amqqueue -> - {"", Name}. - dest_temp_queue({temp_queue, Name}) -> Name; dest_temp_queue(_) -> none. @@ -206,17 +151,3 @@ queue_declare_method(#'queue.declare'{} = Method, Type, Params) -> _ -> Method2 end. - -%% -------------------------------------------------------------------------- - -to_url([]) -> []; -to_url(Lol) -> "/" ++ string:join(Lol, "/"). - -atomise(Name) when is_list(Name) -> - list_to_atom(re:replace(Name, "-", "_", [{return,list}, global])). - -unescape(Str) -> unescape(Str, []). - -unescape("%2F" ++ Str, Acc) -> unescape(Str, [$/ | Acc]); -unescape([C | Str], Acc) -> unescape(Str, [C | Acc]); -unescape([], Acc) -> lists:reverse(Acc). diff --git a/deps/amqp_client/src/uri_parser.erl b/deps/amqp_client/src/uri_parser.erl index 77d7eff3cce8..c5e0b3f255e2 100644 --- a/deps/amqp_client/src/uri_parser.erl +++ b/deps/amqp_client/src/uri_parser.erl @@ -1,7 +1,7 @@ %% This file is a copy of http_uri.erl from the R13B-1 Erlang/OTP %% distribution with several modifications. -%% All modifications are Copyright (c) 2009-2023 VMware, Inc. or its affiliates. +%% All modifications are Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% ``The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in diff --git a/deps/amqp_client/test/system_SUITE.erl b/deps/amqp_client/test/system_SUITE.erl index 9340ddd20ffb..fe8309ce473a 100644 --- a/deps/amqp_client/test/system_SUITE.erl +++ b/deps/amqp_client/test/system_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(system_SUITE). @@ -1342,12 +1342,18 @@ channel_writer_death(Config) -> Ret = amqp_channel:call(Channel, QoS), throw({unexpected_success, Ret}) catch - exit:{{function_clause, - [{rabbit_channel, check_user_id_header, _, _} | _]}, _} - when ConnType =:= direct -> ok; + exit:{{{badrecord, <<>>}, + [{rabbit_channel, _, _, _} | _]}, _} + when ConnType =:= direct -> ok; exit:{{infrastructure_died, {unknown_properties_record, <<>>}}, _} - when ConnType =:= network -> ok + when ConnType =:= network -> ok; + + %% The writer process exited before the call and the amqp_channel_sup + %% supervisor shut the supervision tree down because the channel is + %% significant. The call happened at that shutdown time or just after. + exit:{shutdown, {gen_server, call, _}} -> ok; + exit:{noproc, {gen_server, call, _}} -> ok end, wait_for_death(Channel), wait_for_death(Connection). 
@@ -1435,7 +1441,12 @@ shortstr_overflow_property(Config) -> Ret = amqp_channel:call(Channel, QoS), throw({unexpected_success, Ret}) catch - exit:{{infrastructure_died, content_properties_shortstr_overflow}, _} -> ok + exit:{{infrastructure_died, content_properties_shortstr_overflow}, _} -> ok; + %% The writer process exited before the call and the amqp_channel_sup + %% supervisor shut the supervision tree down because the channel is + %% significant. The call happened at that shutdown time or just after. + exit:{shutdown, {gen_server, call, _}} -> ok; + exit:{noproc, {gen_server, call, _}} -> ok end, wait_for_death(Channel), wait_for_death(Connection). @@ -1457,7 +1468,12 @@ shortstr_overflow_field(Config) -> consumer_tag = SentString}), throw({unexpected_success, Ret}) catch - exit:{{infrastructure_died, method_field_shortstr_overflow}, _} -> ok + exit:{{infrastructure_died, method_field_shortstr_overflow}, _} -> ok; + %% The writer process exited before the call and the amqp_channel_sup + %% supervisor shut the supervision tree down because the channel is + %% significant. The call happened at that shutdown time or just after. + exit:{shutdown, {gen_server, call, _}} -> ok; + exit:{noproc, {gen_server, call, _}} -> ok end, wait_for_death(Channel), wait_for_death(Connection). diff --git a/deps/amqp_client/test/unit_SUITE.erl b/deps/amqp_client/test/unit_SUITE.erl index aeee31ec9a7a..5aee3bb18664 100644 --- a/deps/amqp_client/test/unit_SUITE.erl +++ b/deps/amqp_client/test/unit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_SUITE). @@ -322,9 +322,9 @@ route_destination_parsing(_Config) -> ok. parse_dest(Destination, Params) -> - rabbit_routing_util:parse_endpoint(Destination, Params). + rabbit_routing_parser:parse_endpoint(Destination, Params). parse_dest(Destination) -> - rabbit_routing_util:parse_endpoint(Destination). + rabbit_routing_parser:parse_endpoint(Destination). 
%% ------------------------------------------------------------------- %% Topic variable map diff --git a/deps/oauth2_client/BUILD.bazel b/deps/oauth2_client/BUILD.bazel new file mode 100644 index 000000000000..be565ee245d8 --- /dev/null +++ b/deps/oauth2_client/BUILD.bazel @@ -0,0 +1,125 @@ +load("@rules_erlang//:eunit2.bzl", "eunit") +load("@rules_erlang//:xref2.bzl", "xref") +load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") +load( + "//:rabbitmq.bzl", + "RABBITMQ_DIALYZER_OPTS", + "assert_suites", + "broker_for_integration_suites", + "rabbitmq_app", + "rabbitmq_integration_suite", + "rabbitmq_suite", +) +load( + ":app.bzl", + "all_beam_files", + "all_srcs", + "all_test_beam_files", + "test_suite_beam_files", +) + +APP_NAME = "oauth2_client" + +APP_DESCRIPTION = "OAuth 2.0 client from the RabbitMQ Project" + +APP_MODULE = "oauth2_client_app" + +# gazelle:erlang_app_extra_app ssl +# gazelle:erlang_app_extra_app inets +# gazelle:erlang_app_extra_app crypto +# gazelle:erlang_app_extra_app public_key + +# gazelle:erlang_app_dep_exclude rabbit + +rabbitmq_app( + name = "erlang_app", + srcs = [":all_srcs"], + hdrs = [":public_hdrs"], + app_description = APP_DESCRIPTION, + app_module = APP_MODULE, + app_name = APP_NAME, + beam_files = [":beam_files"], + extra_apps = [ + "crypto", + "inets", + "ssl", + "public_key", + ], + license_files = [":license_files"], + priv = [":priv"], + deps = [ + "//deps/rabbit_common:erlang_app", + "@jose//:erlang_app", + ], +) + +xref( + name = "xref", + additional_libs = [ + "//deps/rabbit:erlang_app", # keep + ], + target = ":erlang_app", +) + +plt( + name = "deps_plt", + for_target = ":erlang_app", + ignore_warnings = True, + plt = "//:base_plt", + deps = [ + "//deps/rabbit:erlang_app", # keep + ], +) + +dialyze( + name = "dialyze", + dialyzer_opts = RABBITMQ_DIALYZER_OPTS, + plt = ":deps_plt", + target = ":erlang_app", +) + +eunit( + name = "eunit", + compiled_suites = [ + ":test_oauth_http_mock_beam", + ":test_oauth2_client_test_util_beam", + ], + target = ":test_erlang_app", +) + +all_srcs(name = "all_srcs") + +all_beam_files(name = "all_beam_files") + +all_test_beam_files(name = "all_test_beam_files") + +test_suite_beam_files(name = "test_suite_beam_files") + +alias( + name = "oauth2_client", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) + +broker_for_integration_suites() + +rabbitmq_integration_suite( + name = "system_SUITE", + size = "small", + additional_beam = [ + "test/oauth_http_mock.beam", + ], + runtime_deps = [ + "@cowboy//:erlang_app", + ], +) + +rabbitmq_suite( + name = "unit_SUITE", + size = "small", + additional_beam = [ + "test/oauth2_client_test_util.beam", + ], +) + +assert_suites() diff --git a/deps/rabbitmq_amqp1_0/CODE_OF_CONDUCT.md b/deps/oauth2_client/CODE_OF_CONDUCT.md similarity index 100% rename from deps/rabbitmq_amqp1_0/CODE_OF_CONDUCT.md rename to deps/oauth2_client/CODE_OF_CONDUCT.md diff --git a/deps/rabbitmq_amqp1_0/CONTRIBUTING.md b/deps/oauth2_client/CONTRIBUTING.md similarity index 100% rename from deps/rabbitmq_amqp1_0/CONTRIBUTING.md rename to deps/oauth2_client/CONTRIBUTING.md diff --git a/deps/rabbitmq_amqp1_0/LICENSE b/deps/oauth2_client/LICENSE similarity index 81% rename from deps/rabbitmq_amqp1_0/LICENSE rename to deps/oauth2_client/LICENSE index e75136bfb5f8..1699234a3e89 100644 --- a/deps/rabbitmq_amqp1_0/LICENSE +++ b/deps/oauth2_client/LICENSE @@ -1,3 +1,4 @@ This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ. 
-If you have any questions regarding licensing, please contact us at rabbitmq-core@groups.vmware.com. +If you have any questions regarding licensing, please contact us at +rabbitmq-core@groups.vmware.com. diff --git a/deps/rabbitmq_amqp1_0/LICENSE-MPL-RabbitMQ b/deps/oauth2_client/LICENSE-MPL-RabbitMQ similarity index 100% rename from deps/rabbitmq_amqp1_0/LICENSE-MPL-RabbitMQ rename to deps/oauth2_client/LICENSE-MPL-RabbitMQ diff --git a/deps/oauth2_client/Makefile b/deps/oauth2_client/Makefile new file mode 100644 index 000000000000..2acf3a7c2d0d --- /dev/null +++ b/deps/oauth2_client/Makefile @@ -0,0 +1,21 @@ +PROJECT = oauth2_client +PROJECT_DESCRIPTION = OAuth2 client from the RabbitMQ Project +PROJECT_MOD = oauth2_client_app + +BUILD_DEPS = rabbit +DEPS = rabbit_common jose +TEST_DEPS = rabbitmq_ct_helpers cowboy +LOCAL_DEPS = ssl inets crypto public_key + +PLT_APPS = rabbit + +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ + rabbit_common/mk/rabbitmq-hexpm.mk \ + rabbit_common/mk/rabbitmq-dist.mk \ + rabbit_common/mk/rabbitmq-run.mk \ + rabbit_common/mk/rabbitmq-test.mk \ + rabbit_common/mk/rabbitmq-tools.mk + +include rabbitmq-components.mk +include erlang.mk diff --git a/deps/oauth2_client/README.md b/deps/oauth2_client/README.md new file mode 100644 index 000000000000..6f186c0bd492 --- /dev/null +++ b/deps/oauth2_client/README.md @@ -0,0 +1,7 @@ +# Erlang OAuth 2.0 client + +This is an [Erlang client for the OAuth 2.0](https://www.amqp.org/resources/specifications) protocol. + +It's primary purpose is to be used in RabbitMQ related projects. + +## Usage diff --git a/deps/oauth2_client/app.bzl b/deps/oauth2_client/app.bzl new file mode 100644 index 000000000000..6b4b31789a16 --- /dev/null +++ b/deps/oauth2_client/app.bzl @@ -0,0 +1,111 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlang_bytecode") +load("@rules_erlang//:filegroup.bzl", "filegroup") + +def all_beam_files(name = "all_beam_files"): + filegroup( + name = "beam_files", + srcs = [":other_beam"], + ) + erlang_bytecode( + name = "other_beam", + srcs = [ + "src/jwt_helper.erl", + "src/oauth2_client.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "oauth2_client", + dest = "ebin", + erlc_opts = "//:erlc_opts", + deps = ["@jose//:erlang_app"], + ) + +def all_test_beam_files(name = "all_test_beam_files"): + filegroup( + name = "test_beam_files", + testonly = True, + srcs = [":test_other_beam"], + ) + erlang_bytecode( + name = "test_other_beam", + testonly = True, + srcs = [ + "src/jwt_helper.erl", + "src/oauth2_client.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "oauth2_client", + dest = "test", + erlc_opts = "//:test_erlc_opts", + deps = ["@jose//:erlang_app"], + ) + +def all_srcs(name = "all_srcs"): + filegroup( + name = "all_srcs", + srcs = [":public_and_private_hdrs", ":srcs"], + ) + filegroup( + name = "public_and_private_hdrs", + srcs = [":private_hdrs", ":public_hdrs"], + ) + filegroup( + name = "priv", + ) + + filegroup( + name = "srcs", + srcs = [ + "src/jwt_helper.erl", + "src/oauth2_client.erl", + ], + ) + filegroup( + name = "private_hdrs", + ) + filegroup( + name = "public_hdrs", + srcs = ["include/oauth2_client.hrl"], + ) + filegroup( + name = "license_files", + srcs = [ + "LICENSE", + "LICENSE-MPL-RabbitMQ", + ], + ) + +def test_suite_beam_files(name = "test_suite_beam_files"): + erlang_bytecode( + name = "test_oauth_http_mock_beam", + testonly = True, + srcs = ["test/oauth_http_mock.erl"], + outs = 
["test/oauth_http_mock.beam"], + app_name = "oauth2_client", + erlc_opts = "//:test_erlc_opts", + ) + erlang_bytecode( + name = "system_SUITE_beam_files", + testonly = True, + srcs = ["test/system_SUITE.erl"], + outs = ["test/system_SUITE.beam"], + hdrs = ["include/oauth2_client.hrl"], + app_name = "oauth2_client", + erlc_opts = "//:test_erlc_opts", + ) + erlang_bytecode( + name = "unit_SUITE_beam_files", + testonly = True, + srcs = ["test/unit_SUITE.erl"], + outs = ["test/unit_SUITE.beam"], + hdrs = ["include/oauth2_client.hrl"], + app_name = "oauth2_client", + erlc_opts = "//:test_erlc_opts", + ) + erlang_bytecode( + name = "test_oauth2_client_test_util_beam", + testonly = True, + srcs = ["test/oauth2_client_test_util.erl"], + outs = ["test/oauth2_client_test_util.beam"], + app_name = "oauth2_client", + erlc_opts = "//:test_erlc_opts", + ) diff --git a/deps/oauth2_client/erlang.mk b/deps/oauth2_client/erlang.mk new file mode 120000 index 000000000000..59af4a527a9d --- /dev/null +++ b/deps/oauth2_client/erlang.mk @@ -0,0 +1 @@ +../../erlang.mk \ No newline at end of file diff --git a/deps/oauth2_client/include/oauth2_client.hrl b/deps/oauth2_client/include/oauth2_client.hrl new file mode 100644 index 000000000000..745eeec33a53 --- /dev/null +++ b/deps/oauth2_client/include/oauth2_client.hrl @@ -0,0 +1,98 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% + + +% define access token request common constants + +-define(DEFAULT_HTTP_TIMEOUT, 60000). + +% Refresh tome this number of seconds before expires_in token's attribute +-define(REFRESH_IN_BEFORE_EXPIRES_IN, 5). + +-define(DEFAULT_OPENID_CONFIGURATION_PATH, "/.well-known/openid-configuration"). + +% define access token request constants +-define(CONTENT_URLENCODED, "application/x-www-form-urlencoded"). +-define(CONTENT_JSON, "application/json"). +-define(REQUEST_GRANT_TYPE, "grant_type"). +-define(CLIENT_CREDENTIALS_GRANT_TYPE, "client_credentials"). +-define(REFRESH_TOKEN_GRANT_TYPE, "refresh_token"). + +-define(REQUEST_CLIENT_ID, "client_id"). +-define(REQUEST_CLIENT_SECRET, "client_secret"). +-define(REQUEST_SCOPE, "scope"). +-define(REQUEST_REFRESH_TOKEN, "refresh_token"). + +% define access token response constants +-define(BEARER_TOKEN_TYPE, <<"Bearer">>). + +-define(RESPONSE_ACCESS_TOKEN, <<"access_token">>). +-define(RESPONSE_TOKEN_TYPE, <<"token_type">>). +-define(RESPONSE_EXPIRES_IN, <<"expires_in">>). +-define(RESPONSE_REFRESH_TOKEN, <<"refresh_token">>). + +-define(RESPONSE_ERROR, <<"error">>). +-define(RESPONSE_ERROR_DESCRIPTION, <<"error_description">>). + +-define(RESPONSE_ISSUER, <<"issuer">>). +-define(RESPONSE_TOKEN_ENDPOINT, <<"token_endpoint">>). +-define(RESPONSE_AUTHORIZATION_ENDPOINT, <<"authorization_endpoint">>). +-define(RESPONSE_END_SESSION_ENDPOINT, <<"end_session_endpoint">>). +-define(RESPONSE_JWKS_URI, <<"jwks_uri">>). +-define(RESPONSE_TLS_OPTIONS, <<"ssl_options">>). + +%% The closest we have to a type import in Erlang +-type option(T) :: rabbit_types:option(T). 
+ +-record(oauth_provider, { + issuer :: option(uri_string:uri_string()), + token_endpoint :: option(uri_string:uri_string()), + authorization_endpoint :: option(uri_string:uri_string()), + end_session_endpoint :: option(uri_string:uri_string()), + jwks_uri :: option(uri_string:uri_string()), + ssl_options :: option(list()) + }). + +-type oauth_provider() :: #oauth_provider{}. +-type oauth_provider_id() :: binary(). + +-record(access_token_request, { + client_id :: string() | binary(), + client_secret :: string() | binary(), + scope :: string() | binary() | undefined, + timeout :: option(integer()) + }). + +-type access_token_request() :: #access_token_request{}. + +-record(successful_access_token_response, { + access_token :: binary(), + token_type :: binary(), + refresh_token :: option(binary()), % A refresh token SHOULD NOT be included + % .. for client-credentials flow. + % https://www.rfc-editor.org/rfc/rfc6749#section-4.4.3 + expires_in :: option(integer()) +}). + +-type successful_access_token_response() :: #successful_access_token_response{}. + +-record(unsuccessful_access_token_response, { + error :: integer(), + error_description :: binary() | string() | undefined +}). + +-type unsuccessful_access_token_response() :: #unsuccessful_access_token_response{}. + +-record(refresh_token_request, { + client_id :: string() | binary(), + client_secret :: string() | binary(), + scope :: string() | binary() | undefined, + refresh_token :: binary(), + timeout :: option(integer()) + }). + +-type refresh_token_request() :: #refresh_token_request{}. diff --git a/deps/oauth2_client/rabbitmq-components.mk b/deps/oauth2_client/rabbitmq-components.mk new file mode 120000 index 000000000000..43c0d3567154 --- /dev/null +++ b/deps/oauth2_client/rabbitmq-components.mk @@ -0,0 +1 @@ +../../rabbitmq-components.mk \ No newline at end of file diff --git a/deps/oauth2_client/src/jwt_helper.erl b/deps/oauth2_client/src/jwt_helper.erl new file mode 100644 index 000000000000..88a70c787070 --- /dev/null +++ b/deps/oauth2_client/src/jwt_helper.erl @@ -0,0 +1,22 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(jwt_helper). + +-export([decode/1, get_expiration_time/1]). + +-include_lib("jose/include/jose_jwt.hrl"). + +decode(Token) -> + try + #jose_jwt{fields = Fields} = jose_jwt:peek_payload(Token), + Fields + catch Type:Err:Stacktrace -> + {error, {invalid_token, Type, Err, Stacktrace}} + end. + +get_expiration_time(#{<<"exp">> := Exp}) when is_integer(Exp) -> {ok, Exp}; +get_expiration_time(#{}) -> {error, missing_exp_field}. diff --git a/deps/oauth2_client/src/oauth2_client.erl b/deps/oauth2_client/src/oauth2_client.erl new file mode 100644 index 000000000000..cb667ee72615 --- /dev/null +++ b/deps/oauth2_client/src/oauth2_client.erl @@ -0,0 +1,514 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(oauth2_client). 
+-export([get_access_token/2, get_expiration_time/1, + refresh_access_token/2, + get_oauth_provider/1, get_oauth_provider/2, + extract_ssl_options_as_list/1 + ]). + +-include("oauth2_client.hrl"). +-spec get_access_token(oauth_provider(), access_token_request()) -> + {ok, successful_access_token_response()} | {error, unsuccessful_access_token_response() | any()}. +get_access_token(OAuthProvider, Request) -> + rabbit_log:debug("get_access_token using OAuthProvider:~p and client_id:~p", + [OAuthProvider, Request#access_token_request.client_id]), + URL = OAuthProvider#oauth_provider.token_endpoint, + Header = [], + Type = ?CONTENT_URLENCODED, + Body = build_access_token_request_body(Request), + HTTPOptions = get_ssl_options_if_any(OAuthProvider) ++ + get_timeout_of_default(Request#access_token_request.timeout), + Options = [], + Response = httpc:request(post, {URL, Header, Type, Body}, HTTPOptions, Options), + parse_access_token_response(Response). + +-spec refresh_access_token(oauth_provider(), refresh_token_request()) -> + {ok, successful_access_token_response()} | {error, unsuccessful_access_token_response() | any()}. +refresh_access_token(OAuthProvider, Request) -> + URL = OAuthProvider#oauth_provider.token_endpoint, + Header = [], + Type = ?CONTENT_URLENCODED, + Body = build_refresh_token_request_body(Request), + HTTPOptions = get_ssl_options_if_any(OAuthProvider) ++ + get_timeout_of_default(Request#refresh_token_request.timeout), + Options = [], + Response = httpc:request(post, {URL, Header, Type, Body}, HTTPOptions, Options), + parse_access_token_response(Response). + +append_paths(Path1, Path2) -> + erlang:iolist_to_binary([Path1, Path2]). + +-spec get_openid_configuration(uri_string:uri_string(), erlang:iodata() | <<>>, ssl:tls_option() | []) -> {ok, oauth_provider()} | {error, term()}. +get_openid_configuration(IssuerURI, OpenIdConfigurationPath, TLSOptions) -> + URLMap = uri_string:parse(IssuerURI), + Path = case maps:get(path, URLMap) of + "/" -> OpenIdConfigurationPath; + "" -> OpenIdConfigurationPath; + P -> append_paths(P, OpenIdConfigurationPath) + end, + URL = uri_string:resolve(Path, IssuerURI), + rabbit_log:debug("get_openid_configuration issuer URL ~p (~p)", [URL, TLSOptions]), + Options = [], + Response = httpc:request(get, {URL, []}, TLSOptions, Options), + enrich_oauth_provider(parse_openid_configuration_response(Response), TLSOptions). + +-spec get_openid_configuration(uri_string:uri_string(), ssl:tls_option() | []) -> {ok, oauth_provider()} | {error, term()}. +get_openid_configuration(IssuerURI, TLSOptions) -> + get_openid_configuration(IssuerURI, ?DEFAULT_OPENID_CONFIGURATION_PATH, TLSOptions). + +-spec get_expiration_time(successful_access_token_response()) -> + {ok, [{expires_in, integer() }| {exp, integer() }]} | {error, missing_exp_field}. +get_expiration_time(#successful_access_token_response{expires_in = ExpiresInSec, + access_token = AccessToken}) -> + case ExpiresInSec of + undefined -> + case jwt_helper:get_expiration_time(jwt_helper:decode(AccessToken)) of + {ok, Exp} -> {ok, [{exp, Exp}]}; + {error, _} = Error -> Error + end; + _ -> {ok, [{expires_in, ExpiresInSec}]} + end. + +update_oauth_provider_endpoints_configuration(OAuthProvider) -> + LockId = lock(), + try do_update_oauth_provider_endpoints_configuration(OAuthProvider) of + V -> V + after + unlock(LockId) + end. 
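%% Minimal usage sketch for the API exported above; not part of this patch.
%% The endpoint, client id, secret, scope and CA file are placeholder values.
-include_lib("oauth2_client/include/oauth2_client.hrl").

request_token_example() ->
    Provider = #oauth_provider{
        token_endpoint = "https://idp.example.com/oauth/token",
        ssl_options = [{verify, verify_peer},
                       {cacertfile, "/path/to/ca_certificate.pem"}]},
    Request = #access_token_request{
        client_id = <<"some_client">>,
        client_secret = <<"some_secret">>,
        scope = <<"rabbitmq.read:*/*">>,
        timeout = 10000},
    case oauth2_client:get_access_token(Provider, Request) of
        {ok, #successful_access_token_response{access_token = Token}} ->
            {ok, Token};
        {error, #unsuccessful_access_token_response{error = Err,
                                                    error_description = Desc}} ->
            {error, {Err, Desc}};
        {error, Other} ->
            {error, Other}
    end.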
+ +update_oauth_provider_endpoints_configuration(OAuthProviderId, OAuthProvider) -> + LockId = lock(), + try do_update_oauth_provider_endpoints_configuration(OAuthProviderId, OAuthProvider) of + V -> V + after + unlock(LockId) + end. + +do_update_oauth_provider_endpoints_configuration(OAuthProvider) -> + case OAuthProvider#oauth_provider.token_endpoint of + undefined -> + do_nothing; + TokenEndpoint -> + application:set_env(rabbitmq_auth_backend_oauth2, token_endpoint, TokenEndpoint) + end, + case OAuthProvider#oauth_provider.authorization_endpoint of + undefined -> + do_nothing; + AuthzEndpoint -> + application:set_env(rabbitmq_auth_backend_oauth2, authorization_endpoint, AuthzEndpoint) + end, + case OAuthProvider#oauth_provider.end_session_endpoint of + undefined -> + do_nothing; + EndSessionEndpoint -> + application:set_env(rabbitmq_auth_backend_oauth2, end_session_endpoint, EndSessionEndpoint) + end, + List = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), + ModifiedList = case OAuthProvider#oauth_provider.jwks_uri of + undefined -> List; + JwksEndPoint -> [{jwks_url, JwksEndPoint} | List] + end, + application:set_env(rabbitmq_auth_backend_oauth2, key_config, ModifiedList), + rabbit_log:debug("Updated oauth_provider details: ~p ", [ OAuthProvider]), + OAuthProvider. + +do_update_oauth_provider_endpoints_configuration(OAuthProviderId, OAuthProvider) -> + OAuthProviders = application:get_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{}), + LookupProviderPropList = maps:get(OAuthProviderId, OAuthProviders), + ModifiedList0 = case OAuthProvider#oauth_provider.token_endpoint of + undefined -> LookupProviderPropList; + TokenEndpoint -> [{token_endpoint, TokenEndpoint} | LookupProviderPropList] + end, + ModifiedList1 = case OAuthProvider#oauth_provider.authorization_endpoint of + undefined -> ModifiedList0; + AuthzEndpoint -> [{authorization_endpoint, AuthzEndpoint} | ModifiedList0] + end, + ModifiedList2 = case OAuthProvider#oauth_provider.end_session_endpoint of + undefined -> ModifiedList1; + EndSessionEndpoint -> [{end_session_endpoint, EndSessionEndpoint} | ModifiedList1] + end, + ModifiedList3 = case OAuthProvider#oauth_provider.jwks_uri of + undefined -> ModifiedList2; + JwksEndPoint -> [{jwks_uri, JwksEndPoint} | ModifiedList2] + end, + ModifiedOAuthProviders = maps:put(OAuthProviderId, ModifiedList3, OAuthProviders), + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, ModifiedOAuthProviders), + rabbit_log:debug("Replacing oauth_providers ~p", [ ModifiedOAuthProviders]), + OAuthProvider. + +use_global_locks_on_all_nodes() -> + case application:get_env(rabbitmq_auth_backend_oauth2, use_global_locks, true) of + true -> {rabbit_nodes:list_running(), rabbit_nodes:lock_retries()}; + _ -> {} + end. + +lock() -> + case use_global_locks_on_all_nodes() of + {} -> + case global:set_lock({oauth2_config_lock, rabbitmq_auth_backend_oauth2}) of + true -> rabbitmq_auth_backend_oauth2; + false -> undefined + end; + {Nodes, Retries} -> + case global:set_lock({oauth2_config_lock, rabbitmq_auth_backend_oauth2}, Nodes, Retries) of + true -> rabbitmq_auth_backend_oauth2; + false -> undefined + end + end. + +unlock(LockId) -> + case LockId of + undefined -> ok; + Value -> + case use_global_locks_on_all_nodes() of + {} -> global:del_lock({oauth2_config_lock, Value}); + {Nodes, _Retries} -> global:del_lock({oauth2_config_lock, Value}, Nodes) + end + end. + +-spec get_oauth_provider(list()) -> {ok, oauth_provider()} | {error, any()}. 
+get_oauth_provider(ListOfRequiredAttributes) -> + case application:get_env(rabbitmq_auth_backend_oauth2, default_oauth_provider) of + undefined -> get_oauth_provider_from_keyconfig(ListOfRequiredAttributes); + {ok, DefaultOauthProvider} -> + rabbit_log:debug("Using default_oauth_provider ~p", [DefaultOauthProvider]), + get_oauth_provider(DefaultOauthProvider, ListOfRequiredAttributes) + end. + +get_oauth_provider_from_keyconfig(ListOfRequiredAttributes) -> + OAuthProvider = lookup_oauth_provider_from_keyconfig(), + rabbit_log:debug("Using oauth_provider ~p from keyconfig", [OAuthProvider]), + case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of + [] -> + {ok, OAuthProvider}; + _ -> + Result2 = case OAuthProvider#oauth_provider.issuer of + undefined -> {error, {missing_oauth_provider_attributes, [issuer]}}; + Issuer -> + rabbit_log:debug("Downloading oauth_provider using issuer ~p", [Issuer]), + case get_openid_configuration(Issuer, get_ssl_options_if_any(OAuthProvider)) of + {ok, OauthProvider} -> + {ok, update_oauth_provider_endpoints_configuration(OauthProvider)}; + {error, _} = Error2 -> Error2 + end + end, + case Result2 of + {ok, OAuthProvider2} -> + case find_missing_attributes(OAuthProvider2, ListOfRequiredAttributes) of + [] -> + rabbit_log:debug("Resolved oauth_provider ~p", [OAuthProvider]), + {ok, OAuthProvider2}; + _ = Attrs-> + {error, {missing_oauth_provider_attributes, Attrs}} + end; + {error, _} = Error3 -> Error3 + end + end. + + +-spec get_oauth_provider(oauth_provider_id(), list()) -> {ok, oauth_provider()} | {error, any()}. +get_oauth_provider(OAuth2ProviderId, ListOfRequiredAttributes) when is_list(OAuth2ProviderId) -> + get_oauth_provider(list_to_binary(OAuth2ProviderId), ListOfRequiredAttributes); + +get_oauth_provider(OAuth2ProviderId, ListOfRequiredAttributes) when is_binary(OAuth2ProviderId) -> + rabbit_log:debug("get_oauth_provider ~p with at least these attributes: ~p", [OAuth2ProviderId, ListOfRequiredAttributes]), + case lookup_oauth_provider_config(OAuth2ProviderId) of + {error, _} = Error0 -> + rabbit_log:debug("Failed to find oauth_provider ~p configuration due to ~p", + [OAuth2ProviderId, Error0]), + Error0; + Config -> + rabbit_log:debug("Found oauth_provider configuration ~p", [Config]), + OAuthProvider = case Config of + {error,_} = Error -> Error; + _ -> map_to_oauth_provider(Config) + end, + rabbit_log:debug("Resolved oauth_provider ~p", [OAuthProvider]), + case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of + [] -> + {ok, OAuthProvider}; + _ -> + Result2 = case OAuthProvider#oauth_provider.issuer of + undefined -> {error, {missing_oauth_provider_attributes, [issuer]}}; + Issuer -> + rabbit_log:debug("Downloading oauth_provider ~p using issuer ~p", + [OAuth2ProviderId, Issuer]), + case get_openid_configuration(Issuer, get_ssl_options_if_any(OAuthProvider)) of + {ok, OauthProvider} -> + {ok, update_oauth_provider_endpoints_configuration(OAuth2ProviderId, OauthProvider)}; + {error, _} = Error2 -> Error2 + end + end, + case Result2 of + {ok, OAuthProvider2} -> + case find_missing_attributes(OAuthProvider2, ListOfRequiredAttributes) of + [] -> + rabbit_log:debug("Resolved oauth_provider ~p", [OAuthProvider]), + {ok, OAuthProvider2}; + _ = Attrs-> + {error, {missing_oauth_provider_attributes, Attrs}} + end; + {error, _} = Error3 -> Error3 + end + end + end. 
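%% Sketch of the configuration shape that get_oauth_provider/2 reads, with
%% placeholder values; the provider id and URLs are illustrative only.
%% In advanced.config terms:
%%
%% {rabbitmq_auth_backend_oauth2, [
%%   {oauth_providers, #{
%%     <<"uaa">> => [
%%       {issuer, "https://uaa.example.com"},
%%       {token_endpoint, "https://uaa.example.com/oauth/token"},
%%       {https, [{verify, verify_peer},
%%                {cacertfile, "/path/to/ca_certificate.pem"}]}
%%     ]
%%   }}
%% ]}
%%
%% Any endpoint missing from the per-provider proplist is filled in, when
%% possible, from the issuer's OpenID discovery document, as implemented above.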
+ +%% HELPER functions + + +oauth_provider_to_proplists(#oauth_provider{} = OAuthProvider) -> + lists:zip(record_info(fields, oauth_provider), tl(tuple_to_list(OAuthProvider))). +filter_undefined_props(PropList) -> + lists:foldl(fun(Prop, Acc) -> + case Prop of + {Name, undefined} -> Acc ++ [Name]; + _ -> Acc + end end, [], PropList). + +-spec intersection(list(), list()) -> list(). +intersection(L1, L2) -> + S1 = sets:from_list(L1), + S2 = sets:from_list(L2), + lists:usort(sets:to_list(sets:intersection(S1, S2))). + +find_missing_attributes(#oauth_provider{} = OAuthProvider, RequiredAttributes) -> + PropList = oauth_provider_to_proplists(OAuthProvider), + Filtered = filter_undefined_props(PropList), + intersection(Filtered, RequiredAttributes). + +lookup_oauth_provider_from_keyconfig() -> + Issuer = application:get_env(rabbitmq_auth_backend_oauth2, issuer, undefined), + TokenEndpoint = application:get_env(rabbitmq_auth_backend_oauth2, token_endpoint, undefined), + AuthorizationEndpoint = application:get_env(rabbitmq_auth_backend_oauth2, authorization_endpoint, undefined), + EndSessionEndpoint = application:get_env(rabbitmq_auth_backend_oauth2, end_session_endpoint, undefined), + Map = maps:from_list(application:get_env(rabbitmq_auth_backend_oauth2, key_config, [])), + #oauth_provider{ + issuer = Issuer, + jwks_uri = maps:get(jwks_url, Map, undefined), %% jwks_url not uri . _url is the legacy name + token_endpoint = TokenEndpoint, + authorization_endpoint = AuthorizationEndpoint, + end_session_endpoint = EndSessionEndpoint, + ssl_options = extract_ssl_options_as_list(Map) + }. + + + +-spec extract_ssl_options_as_list(#{atom() => any()}) -> proplists:proplist(). +extract_ssl_options_as_list(Map) -> + {Verify, CaCerts, CaCertFile} = case get_verify_or_peer_verification(Map, verify_peer) of + verify_peer -> + case maps:get(cacertfile, Map, undefined) of + undefined -> + case public_key:cacerts_get() of + [] -> {verify_none, undefined, undefined}; + Certs -> {verify_peer, Certs, undefined} + end; + CaCert -> {verify_peer, undefined, CaCert} + end; + verify_none -> {verify_none, undefined, undefined} + end, + + [ {verify, Verify} ] + ++ + case Verify of + verify_none -> []; + _ -> + [ + {depth, maps:get(depth, Map, 10)}, + {crl_check, maps:get(crl_check, Map, false)}, + {fail_if_no_peer_cert, maps:get(fail_if_no_peer_cert, Map, false)} + ] + end + ++ + case Verify of + verify_none -> []; + _ -> + case {CaCerts, CaCertFile} of + {_, undefined} -> [{cacerts, CaCerts}]; + {undefined, _} -> [{cacertfile, CaCertFile}] + end + end + ++ + case maps:get(hostname_verification, Map, none) of + wildcard -> + [{customize_hostname_check, [{match_fun, public_key:pkix_verify_hostname_match_fun(https)}]}]; + none -> + [] + end. + +% Replace peer_verification with verify to make it more consistent with other +% ssl_options in RabbitMQ and Erlang's ssl options +% Eventually, peer_verification will be removed. For now, both are allowed +-spec get_verify_or_peer_verification(#{atom() => any()}, verify_none | verify_peer ) -> verify_none | verify_peer. +get_verify_or_peer_verification(Ssl_options, Default) -> + case maps:get(verify, Ssl_options, undefined) of + undefined -> + case maps:get(peer_verification, Ssl_options, undefined) of + undefined -> Default; + PeerVerification -> PeerVerification + end; + Verify -> Verify + end. 
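%% Worked example, not from the patch, of extract_ssl_options_as_list/1 above.
example_ssl_options() ->
    %% Following the clauses above, this returns
    %%   [{verify, verify_peer}, {depth, 10}, {crl_check, false},
    %%    {fail_if_no_peer_cert, false}, {cacertfile, "/path/to/ca.pem"}],
    %% while #{peer_verification => verify_none} would collapse to
    %%   [{verify, verify_none}].
    oauth2_client:extract_ssl_options_as_list(#{cacertfile => "/path/to/ca.pem"}).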
+ +lookup_oauth_provider_config(OAuth2ProviderId) -> + case application:get_env(rabbitmq_auth_backend_oauth2, oauth_providers) of + undefined -> {error, oauth_providers_not_found}; + {ok, MapOfProviders} when is_map(MapOfProviders) -> + case maps:get(OAuth2ProviderId, MapOfProviders, undefined) of + undefined -> + {error, {oauth_provider_not_found, OAuth2ProviderId}}; + Value -> Value + end; + _ -> {error, invalid_oauth_provider_configuration} + end. + +build_access_token_request_body(Request) -> + uri_string:compose_query([ + grant_type_request_parameter(?CLIENT_CREDENTIALS_GRANT_TYPE), + client_id_request_parameter(Request#access_token_request.client_id), + client_secret_request_parameter(Request#access_token_request.client_secret)] + ++ scope_request_parameter_or_default(Request#access_token_request.scope, [])). + +build_refresh_token_request_body(Request) -> + uri_string:compose_query([ + grant_type_request_parameter(?REFRESH_TOKEN_GRANT_TYPE), + refresh_token_request_parameter(Request#refresh_token_request.refresh_token), + client_id_request_parameter(Request#refresh_token_request.client_id), + client_secret_request_parameter(Request#refresh_token_request.client_secret)] + ++ scope_request_parameter_or_default(Request#refresh_token_request.scope, [])). + +grant_type_request_parameter(Type) -> + {?REQUEST_GRANT_TYPE, Type}. +client_id_request_parameter(Client_id) -> + {?REQUEST_CLIENT_ID, binary_to_list(Client_id)}. +client_secret_request_parameter(Client_secret) -> + {?REQUEST_CLIENT_SECRET, binary_to_list(Client_secret)}. +refresh_token_request_parameter(RefreshToken) -> + {?REQUEST_REFRESH_TOKEN, RefreshToken}. +scope_request_parameter_or_default(Scope, Default) -> + case Scope of + undefined -> Default; + <<>> -> Default; + Scope -> [{?REQUEST_SCOPE, Scope}] + end. + +get_ssl_options_if_any(OAuthProvider) -> + case OAuthProvider#oauth_provider.ssl_options of + undefined -> []; + Options -> [{ssl, Options}] + end. +get_timeout_of_default(Timeout) -> + case Timeout of + undefined -> [{timeout, ?DEFAULT_HTTP_TIMEOUT}]; + Timeout -> [{timeout, Timeout}] + end. + +is_json(?CONTENT_JSON) -> true; +is_json(_) -> false. + +-spec decode_body(string(), string() | binary() | term()) -> 'false' | 'null' | 'true' | + binary() | [any()] | number() | map() | {error, term()}. + +decode_body(_, []) -> []; +decode_body(?CONTENT_JSON, Body) -> + case rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) of + {ok, Value} -> + Value; + {error, _} = Error -> + Error + end; +decode_body(MimeType, Body) -> + Items = string:split(MimeType, ";"), + case lists:any(fun is_json/1, Items) of + true -> decode_body(?CONTENT_JSON, Body); + false -> {error, mime_type_is_not_json} + end. + + +map_to_successful_access_token_response(Map) -> + #successful_access_token_response{ + access_token = maps:get(?RESPONSE_ACCESS_TOKEN, Map), + token_type = maps:get(?RESPONSE_TOKEN_TYPE, Map, undefined), + refresh_token = maps:get(?RESPONSE_REFRESH_TOKEN, Map, undefined), + expires_in = maps:get(?RESPONSE_EXPIRES_IN, Map, undefined) + }. + +map_to_unsuccessful_access_token_response(Map) -> + #unsuccessful_access_token_response{ + error = maps:get(?RESPONSE_ERROR, Map), + error_description = maps:get(?RESPONSE_ERROR_DESCRIPTION, Map, undefined) + }. 
+ + +map_to_oauth_provider(Map) when is_map(Map) -> + #oauth_provider{ + issuer = maps:get(?RESPONSE_ISSUER, Map), + token_endpoint = maps:get(?RESPONSE_TOKEN_ENDPOINT, Map, undefined), + authorization_endpoint = maps:get(?RESPONSE_AUTHORIZATION_ENDPOINT, Map, undefined), + end_session_endpoint = maps:get(?RESPONSE_END_SESSION_ENDPOINT, Map, undefined), + jwks_uri = maps:get(?RESPONSE_JWKS_URI, Map, undefined) + }; + +map_to_oauth_provider(PropList) when is_list(PropList) -> + #oauth_provider{ + issuer = proplists:get_value(issuer, PropList), + token_endpoint = proplists:get_value(token_endpoint, PropList), + authorization_endpoint = proplists:get_value(authorization_endpoint, PropList, undefined), + end_session_endpoint = proplists:get_value(end_session_endpoint, PropList, undefined), + jwks_uri = proplists:get_value(jwks_uri, PropList, undefined), + ssl_options = extract_ssl_options_as_list(maps:from_list(proplists:get_value(https, PropList, []))) + }. + + +enrich_oauth_provider({ok, OAuthProvider}, TLSOptions) -> + {ok, OAuthProvider#oauth_provider{ssl_options=TLSOptions}}; +enrich_oauth_provider(Response, _) -> + Response. + +map_to_access_token_response(Code, Reason, Headers, Body) -> + case decode_body(proplists:get_value("content-type", Headers, ?CONTENT_JSON), Body) of + {error, {error, InternalError}} -> + {error, InternalError}; + {error, _} = Error -> + Error; + Value -> + case Code of + 200 -> {ok, map_to_successful_access_token_response(Value)}; + 201 -> {ok, map_to_successful_access_token_response(Value)}; + 204 -> {ok, []}; + 400 -> {error, map_to_unsuccessful_access_token_response(Value)}; + 401 -> {error, map_to_unsuccessful_access_token_response(Value)}; + _ -> {error, Reason} + end + end. + +map_response_to_oauth_provider(Code, Reason, Headers, Body) -> + case decode_body(proplists:get_value("content-type", Headers, ?CONTENT_JSON), Body) of + {error, {error, InternalError}} -> + {error, InternalError}; + {error, _} = Error -> + Error; + Value -> + case Code of + 200 -> {ok, map_to_oauth_provider(Value)}; + 201 -> {ok, map_to_oauth_provider(Value)}; + _ -> {error, Reason} + end + end. + + +parse_access_token_response({error, Reason}) -> + {error, Reason}; +parse_access_token_response({ok,{{_,Code,Reason}, Headers, Body}}) -> + map_to_access_token_response(Code, Reason, Headers, Body). + +parse_openid_configuration_response({error, Reason}) -> + {error, Reason}; +parse_openid_configuration_response({ok,{{_,Code,Reason}, Headers, Body}}) -> + map_response_to_oauth_provider(Code, Reason, Headers, Body). diff --git a/deps/oauth2_client/test/oauth2_client_test_util.erl b/deps/oauth2_client/test/oauth2_client_test_util.erl new file mode 100644 index 000000000000..761814d2dd98 --- /dev/null +++ b/deps/oauth2_client/test/oauth2_client_test_util.erl @@ -0,0 +1,154 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(oauth2_client_test_util). + +-compile(export_all). + +-define(DEFAULT_EXPIRATION_IN_SECONDS, 2). + +%% +%% API +%% + +sign_token_hs(Token, #{<<"kid">> := TokenKey} = Jwk) -> + sign_token_hs(Token, Jwk, TokenKey). + +sign_token_hs(Token, Jwk, TokenKey) -> + Jws = #{ + <<"alg">> => <<"HS256">>, + <<"kid">> => TokenKey + }, + sign_token(Token, Jwk, Jws). 
+ +sign_token_rsa(Token, Jwk, TokenKey) -> + Jws = #{ + <<"alg">> => <<"RS256">>, + <<"kid">> => TokenKey + }, + sign_token(Token, Jwk, Jws). + +sign_token_no_kid(Token, Jwk) -> + Signed = jose_jwt:sign(Jwk, Token), + jose_jws:compact(Signed). + +sign_token(Token, Jwk, Jws) -> + Signed = jose_jwt:sign(Jwk, Jws, Token), + jose_jws:compact(Signed). + +fixture_jwk() -> + fixture_jwk(<<"token-key">>). + +fixture_jwk(TokenKey) -> + fixture_jwk(TokenKey, <<"dG9rZW5rZXk">>). + +fixture_jwk(TokenKey, K) -> + #{<<"alg">> => <<"HS256">>, + <<"k">> => K, + <<"kid">> => TokenKey, + <<"kty">> => <<"oct">>, + <<"use">> => <<"sig">>, + <<"value">> => TokenKey}. + +full_permission_scopes() -> + [<<"rabbitmq.configure:*/*">>, + <<"rabbitmq.write:*/*">>, + <<"rabbitmq.read:*/*">>]. + +expirable_token() -> + expirable_token(?DEFAULT_EXPIRATION_IN_SECONDS). + +expirable_token(Seconds) -> + TokenPayload = fixture_token(), + %% expiration is a timestamp with precision in seconds + TokenPayload#{<<"exp">> := os:system_time(seconds) + Seconds}. + +expirable_token_with_expiration_time(ExpiresIn) -> + TokenPayload = fixture_token(), + %% expiration is a timestamp with precision in seconds + TokenPayload#{<<"exp">> := ExpiresIn}. + +expired_token() -> + expired_token_with_scopes(full_permission_scopes()). + +expired_token_with_scopes(Scopes) -> + token_with_scopes_and_expiration(Scopes, seconds_in_the_past(10)). + +fixture_token_with_scopes(Scopes) -> + token_with_scopes_and_expiration(Scopes, default_expiration_moment()). + +token_with_scopes_and_expiration(Scopes, Expiration) -> + %% expiration is a timestamp with precision in seconds + #{<<"exp">> => Expiration, + <<"iss">> => <<"unit_test">>, + <<"foo">> => <<"bar">>, + <<"aud">> => [<<"rabbitmq">>], + <<"scope">> => Scopes}. + +token_without_scopes() -> + %% expiration is a timestamp with precision in seconds + #{ + <<"iss">> => <<"unit_test">>, + <<"foo">> => <<"bar">>, + <<"aud">> => [<<"rabbitmq">>] + }. + +fixture_token() -> + fixture_token([]). + +token_with_sub(TokenFixture, Sub) -> + maps:put(<<"sub">>, Sub, TokenFixture). +token_with_scopes(TokenFixture, Scopes) -> + maps:put(<<"scope">>, Scopes, TokenFixture). + +fixture_token(ExtraScopes) -> + Scopes = [<<"rabbitmq.configure:vhost/foo">>, + <<"rabbitmq.write:vhost/foo">>, + <<"rabbitmq.read:vhost/foo">>, + <<"rabbitmq.read:vhost/bar">>, + <<"rabbitmq.read:vhost/bar/%23%2Ffoo">>] ++ ExtraScopes, + fixture_token_with_scopes(Scopes). + +fixture_token_with_full_permissions() -> + fixture_token_with_scopes(full_permission_scopes()). + +plain_token_without_scopes_and_aud() -> + %% expiration is a timestamp with precision in seconds + #{<<"exp">> => default_expiration_moment(), + <<"iss">> => <<"unit_test">>, + <<"foo">> => <<"bar">>}. + +token_with_scope_alias_in_scope_field(Value) -> + %% expiration is a timestamp with precision in seconds + #{<<"exp">> => default_expiration_moment(), + <<"iss">> => <<"unit_test">>, + <<"foo">> => <<"bar">>, + <<"aud">> => [<<"rabbitmq">>], + <<"scope">> => Value}. + +token_with_scope_alias_in_claim_field(Claims, Scopes) -> + %% expiration is a timestamp with precision in seconds + #{<<"exp">> => default_expiration_moment(), + <<"iss">> => <<"unit_test">>, + <<"foo">> => <<"bar">>, + <<"aud">> => [<<"rabbitmq">>], + <<"scope">> => Scopes, + <<"claims">> => Claims}. + +seconds_in_the_future() -> + seconds_in_the_future(30). + +seconds_in_the_future(N) -> + os:system_time(seconds) + N. + +seconds_in_the_past() -> + seconds_in_the_past(10). 
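%% Sketch, not part of the patch: signing a fixture token with the helpers in
%% this module and reading it back with jwt_helper. jose_jws:compact/1 returns
%% a {Modules, CompactToken} pair, so the compact token is the second element.
signed_fixture_example() ->
    Jwk = fixture_jwk(),
    {_, Compact} = sign_token_hs(expirable_token(), Jwk),
    Fields = jwt_helper:decode(Compact),
    %% => {ok, Exp} where Exp is a POSIX timestamp in seconds
    jwt_helper:get_expiration_time(Fields).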
+ +seconds_in_the_past(N) -> + os:system_time(seconds) - N. + +default_expiration_moment() -> + seconds_in_the_future(30). diff --git a/deps/oauth2_client/test/oauth_http_mock.erl b/deps/oauth2_client/test/oauth_http_mock.erl new file mode 100644 index 000000000000..f299b9704aca --- /dev/null +++ b/deps/oauth2_client/test/oauth_http_mock.erl @@ -0,0 +1,59 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(oauth_http_mock). +-include_lib("eunit/include/eunit.hrl"). + +-export([init/2]). + +%%% CALLBACKS + +init(Req, #{request := ExpectedRequest, response := ExpectedResponse} = Expected) -> + ct:log("init oauth_http_mock Req:~p", [Req]), + match_request(Req, ExpectedRequest), + {Code, Headers, JsonPayload} = produce_expected_response(ExpectedResponse), + {ok, case JsonPayload of + undefined -> cowboy_req:reply(Code, Req); + _ -> cowboy_req:reply(Code, Headers, JsonPayload, Req) + end, Expected}. + +match_request_parameters_in_body(Req, #{parameters := Parameters}) -> + ?assertEqual(true, cowboy_req:has_body(Req)), + {ok, KeyValues, _Req2} = cowboy_req:read_urlencoded_body(Req), + [ ?assertEqual(Value, proplists:get_value(list_to_binary(Parameter), KeyValues)) + || {Parameter, Value} <- Parameters]. + +match_request(Req, #{method := Method} = ExpectedRequest) -> + ?assertEqual(Method, maps:get(method, Req)), + case maps:is_key(parameters, ExpectedRequest) of + true -> match_request_parameters_in_body(Req, ExpectedRequest); + false -> ok + end. + +produce_expected_response(ExpectedResponse) -> + case proplists:is_defined(content_type, ExpectedResponse) of + true -> + Payload = proplists:get_value(payload, ExpectedResponse), + case is_proplist(Payload) of + true -> + { proplists:get_value(code, ExpectedResponse), + #{<<"content-type">> => proplists:get_value(content_type, ExpectedResponse)}, + rabbit_json:encode(Payload) + }; + _ -> + { proplists:get_value(code, ExpectedResponse), + #{<<"content-type">> => proplists:get_value(content_type, ExpectedResponse)}, + Payload + } + end; + false -> + {proplists:get_value(code, ExpectedResponse), undefined, undefined} + end. + + +is_proplist([{_Key, _Val}|_] = List) -> lists:all(fun({_K, _V}) -> true; (_) -> false end, List); +is_proplist(_) -> false. diff --git a/deps/oauth2_client/test/system_SUITE.erl b/deps/oauth2_client/test/system_SUITE.erl new file mode 100644 index 000000000000..1be0acc72815 --- /dev/null +++ b/deps/oauth2_client/test/system_SUITE.erl @@ -0,0 +1,545 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(system_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-include_lib("oauth2_client.hrl"). + +-compile(export_all). + +-define(MOCK_TOKEN_ENDPOINT, <<"/token">>). +-define(AUTH_PORT, 8000). 
+-define(GRANT_ACCESS_TOKEN, +#{request => + #{ + method => <<"POST">>, + path => ?MOCK_TOKEN_ENDPOINT, + parameters => [ + {?REQUEST_CLIENT_ID, <<"guest">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>} + ] + }, + response => [ + {code, 200}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {access_token, <<"some access token">>}, + {token_type, <<"Bearer">>} + ]} + ] +}). +-define(DENIES_ACCESS_TOKEN, +#{request => + #{ + method => <<"POST">>, + path => ?MOCK_TOKEN_ENDPOINT, + parameters => [ + {?REQUEST_CLIENT_ID, <<"invalid_client">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>} + ] + }, + response => [ + {code, 400}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {error, <<"invalid_client">>}, + {error_description, <<"invalid client found">>} + ]} + ] +}). + +-define(AUTH_SERVER_ERROR, +#{request => + #{ + method => <<"POST">>, + path => ?MOCK_TOKEN_ENDPOINT, + parameters => [ + {?REQUEST_CLIENT_ID, <<"guest">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>} + ] + }, + response => [ + {code, 500} + ] +}). + +-define(NON_JSON_PAYLOAD, +#{request => + #{ + method => <<"POST">>, + path => ?MOCK_TOKEN_ENDPOINT, + parameters => [ + {?REQUEST_CLIENT_ID, <<"guest">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>} + ] + }, + response => [ + {code, 400}, + {content_type, ?CONTENT_JSON}, + {payload, <<"{ some illegal json}">>} + ] +}). + +-define(GET_OPENID_CONFIGURATION, +#{request => + #{ + method => <<"GET">>, + path => ?DEFAULT_OPENID_CONFIGURATION_PATH + }, + response => [ + {code, 200}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {issuer, build_issuer("http") }, + {authorization_endpoint, <<"http://localhost:8000/authorize">>}, + {token_endpoint, build_token_endpoint_uri("http")}, + {end_session_endpoint, <<"http://localhost:8000/logout">>}, + {jwks_uri, build_jwks_uri("http")} + ]} + ] +}). +-define(GET_OPENID_CONFIGURATION_WITH_SSL, +#{request => + #{ + method => <<"GET">>, + path => ?DEFAULT_OPENID_CONFIGURATION_PATH + }, + response => [ + {code, 200}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {issuer, build_issuer("https") }, + {authorization_endpoint, <<"https://localhost:8000/authorize">>}, + {token_endpoint, build_token_endpoint_uri("https")}, + {end_session_endpoint, <<"http://localhost:8000/logout">>}, + {jwks_uri, build_jwks_uri("https")} + ]} + ] +}). +-define(GRANTS_REFRESH_TOKEN, + #{request => #{ + method => <<"POST">>, + path => ?MOCK_TOKEN_ENDPOINT, + parameters => [ + {?REQUEST_CLIENT_ID, <<"guest">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>}, + {?REQUEST_REFRESH_TOKEN, <<"some refresh token">>} + ] + }, + response => [ + {code, 200}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {access_token, <<"some refreshed access token">>}, + {token_type, <<"Bearer">>} + ]} + ] +}). + +all() -> +[ + {group, http_up}, + {group, http_down}, + {group, https} +]. 
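
The parameters lists in the expectation macros above describe the x-www-form-urlencoded body the client is expected to POST to the token endpoint. Roughly, assuming ?REQUEST_CLIENT_ID and ?REQUEST_CLIENT_SECRET expand to the standard OAuth 2.0 parameter names (an assumption, they are defined in the shared hrl):

%% Illustrative sketch only; parameter names are assumed, not taken from the hrl.
example_token_request_body() ->
    uri_string:compose_query([{"client_id", "guest"},
                              {"client_secret", "password"}]).
%% => "client_id=guest&client_secret=password"
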
+ +groups() -> +[ + {http_up, [], [ + {group, verify_access_token}, + {group, with_all_oauth_provider_settings}, + {group, without_all_oauth_providers_settings} + ]}, + {with_all_oauth_provider_settings, [], [ + {group, verify_get_oauth_provider} + ]}, + {without_all_oauth_providers_settings, [], [ + {group, verify_get_oauth_provider} + ]}, + {verify_access_token, [], [ + grants_access_token, + denies_access_token, + auth_server_error, + non_json_payload, + grants_refresh_token + ]}, + {verify_get_oauth_provider, [], [ + get_oauth_provider, + get_oauth_provider_given_oauth_provider_id + ]}, + + {http_down, [], [ + connection_error + ]}, + {https, [], [ + grants_access_token, + grants_refresh_token, + ssl_connection_error, + {group, with_all_oauth_provider_settings}, + {group, without_all_oauth_providers_settings} + ]} +]. + +init_per_suite(Config) -> + [ + {denies_access_token, [ {token_endpoint, ?DENIES_ACCESS_TOKEN} ]}, + {auth_server_error, [ {token_endpoint, ?AUTH_SERVER_ERROR} ]}, + {non_json_payload, [ {token_endpoint, ?NON_JSON_PAYLOAD} ]}, + {grants_refresh_token, [ {token_endpoint, ?GRANTS_REFRESH_TOKEN} ]} + | Config]. + +end_per_suite(Config) -> + Config. + +init_per_group(https, Config) -> + {ok, _} = application:ensure_all_started(ssl), + application:ensure_all_started(cowboy), + Config0 = rabbit_ct_helpers:run_setup_steps(Config), + CertsDir = ?config(rmq_certsdir, Config0), + CaCertFile = filename:join([CertsDir, "testca", "cacert.pem"]), + WrongCaCertFile = filename:join([CertsDir, "server", "server.pem"]), + [{group, https}, + {oauth_provider_id, <<"uaa">>}, + {oauth_provider, build_https_oauth_provider(CaCertFile)}, + {oauth_provider_with_issuer, keep_only_issuer_and_ssl_options(build_https_oauth_provider(CaCertFile))}, + {issuer, build_issuer("https")}, + {oauth_provider_with_wrong_ca, build_https_oauth_provider(WrongCaCertFile)} | + Config0]; + +init_per_group(http_up, Config) -> + {ok, _} = application:ensure_all_started(inets), + application:ensure_all_started(cowboy), + [{group, http_up}, + {oauth_provider_id, <<"uaa">>}, + {issuer, build_issuer("http")}, + {oauth_provider_with_issuer, keep_only_issuer_and_ssl_options(build_http_oauth_provider())}, + {oauth_provider, build_http_oauth_provider()} | Config]; + +init_per_group(http_down, Config) -> + [{issuer, build_issuer("http")}, + {oauth_provider_id, <<"uaa">>}, + {oauth_provider, build_http_oauth_provider()} | Config]; + +init_per_group(with_all_oauth_provider_settings, Config) -> + [{with_all_oauth_provider_settings, true} | Config]; + +init_per_group(without_all_oauth_providers_settings, Config) -> + [{with_all_oauth_provider_settings, false} | Config]; + +init_per_group(_, Config) -> + Config. + + +get_http_oauth_server_expectations(TestCase, Config) -> + case ?config(TestCase, Config) of + undefined -> + case ?config(group, Config) of + https -> [ + {token_endpoint, ?GRANT_ACCESS_TOKEN}, + {get_openid_configuration, ?GET_OPENID_CONFIGURATION_WITH_SSL } + ]; + _ -> [ + {token_endpoint, ?GRANT_ACCESS_TOKEN}, + {get_openid_configuration, ?GET_OPENID_CONFIGURATION } + ] + end; + Expectations -> Expectations + end. + +lookup_expectation(Endpoint, Config) -> + proplists:get_value(Endpoint, ?config(oauth_server_expectations, Config)). 
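
Expectation lookup is therefore two-step: init_per_suite/1 registers per-case overrides, get_http_oauth_server_expectations/2 falls back to the group defaults, and init_per_testcase/2 (below) stores the chosen list under oauth_server_expectations for lookup_expectation/2. A small sketch, not part of the suite:

%% Illustrative sketch only.
resolve_expectation_example(Config) ->
    %% denies_access_token has an override registered in init_per_suite/1;
    %% a case without one falls back to the group defaults (grant + openid config).
    Expectations = get_http_oauth_server_expectations(denies_access_token, Config),
    proplists:get_value(token_endpoint, Expectations).
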
+
+configure_all_oauth_provider_settings(Config) ->
+    OAuthProvider = ?config(oauth_provider, Config),
+    OAuthProviders = #{ ?config(oauth_provider_id, Config) => oauth_provider_to_proplist(OAuthProvider) },
+
+    application:set_env(rabbitmq_auth_backend_oauth2, issuer, OAuthProvider#oauth_provider.issuer),
+    application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders),
+    application:set_env(rabbitmq_auth_backend_oauth2, token_endpoint, OAuthProvider#oauth_provider.token_endpoint),
+    application:set_env(rabbitmq_auth_backend_oauth2, end_session_endpoint, OAuthProvider#oauth_provider.end_session_endpoint),
+    application:set_env(rabbitmq_auth_backend_oauth2, authorization_endpoint, OAuthProvider#oauth_provider.authorization_endpoint),
+    KeyConfig = [ { jwks_url, OAuthProvider#oauth_provider.jwks_uri } ] ++
+        case OAuthProvider#oauth_provider.ssl_options of
+            undefined ->
+                [];
+            _ ->
+                [ {peer_verification, proplists:get_value(verify, OAuthProvider#oauth_provider.ssl_options) },
+                  {cacertfile, proplists:get_value(cacertfile, OAuthProvider#oauth_provider.ssl_options) } ]
+        end,
+    application:set_env(rabbitmq_auth_backend_oauth2, key_config, KeyConfig).
+
+configure_minimum_oauth_provider_settings(Config) ->
+    OAuthProvider = ?config(oauth_provider_with_issuer, Config),
+    OAuthProviders = #{ ?config(oauth_provider_id, Config) => oauth_provider_to_proplist(OAuthProvider) },
+    application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders),
+    application:set_env(rabbitmq_auth_backend_oauth2, issuer, OAuthProvider#oauth_provider.issuer),
+    KeyConfig =
+        case OAuthProvider#oauth_provider.ssl_options of
+            undefined ->
+                [];
+            _ ->
+                [ {peer_verification, proplists:get_value(verify, OAuthProvider#oauth_provider.ssl_options) },
+                  {cacertfile, proplists:get_value(cacertfile, OAuthProvider#oauth_provider.ssl_options) } ]
+        end,
+    application:set_env(rabbitmq_auth_backend_oauth2, key_config, KeyConfig).
+
+init_per_testcase(TestCase, Config) ->
+    application:set_env(rabbitmq_auth_backend_oauth2, use_global_locks, false),
+
+    case ?config(with_all_oauth_provider_settings, Config) of
+        false -> configure_minimum_oauth_provider_settings(Config);
+        true -> configure_all_oauth_provider_settings(Config);
+        undefined -> configure_all_oauth_provider_settings(Config)
+    end,
+
+    HttpOauthServerExpectations = get_http_oauth_server_expectations(TestCase, Config),
+    ListOfExpectations = maps:values(proplists:to_map(HttpOauthServerExpectations)),
+
+    case ?config(group, Config) of
+        http_up ->
+            start_http_oauth_server(?AUTH_PORT, ListOfExpectations);
+        https ->
+            start_https_oauth_server(?AUTH_PORT, ?config(rmq_certsdir, Config), ListOfExpectations);
+        _ ->
+            ok
+    end,
+    [{oauth_server_expectations, HttpOauthServerExpectations} | Config ].
+
+end_per_testcase(_, Config) ->
+    application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers),
+    application:unset_env(rabbitmq_auth_backend_oauth2, issuer),
+    application:unset_env(rabbitmq_auth_backend_oauth2, token_endpoint),
+    application:unset_env(rabbitmq_auth_backend_oauth2, authorization_endpoint),
+    application:unset_env(rabbitmq_auth_backend_oauth2, end_session_endpoint),
+    application:unset_env(rabbitmq_auth_backend_oauth2, key_config),
+    case ?config(group, Config) of
+        http_up ->
+            stop_http_auth_server();
+        https ->
+            stop_http_auth_server();
+        _ ->
+            ok
+    end,
+    Config.
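
A quick, hypothetical sanity check of what the setup above leaves in the application environment; the keys are the ones written by configure_all_oauth_provider_settings/1, and the function itself is not part of the suite.

%% Illustrative sketch only.
assert_oauth_env_configured() ->
    {ok, _Issuer} = application:get_env(rabbitmq_auth_backend_oauth2, issuer),
    {ok, KeyConfig} = application:get_env(rabbitmq_auth_backend_oauth2, key_config),
    true = proplists:is_defined(jwks_url, KeyConfig).
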
+ +end_per_group(https_and_rabbitmq_node, Config) -> + rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:teardown_steps()); + +end_per_group(_, Config) -> + Config. + +grants_access_token_dynamically_resolving_oauth_provider(Config) -> + #{request := #{parameters := Parameters}, + response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } = lookup_expectation(token_endpoint, Config), + + {ok, #successful_access_token_response{access_token = AccessToken, token_type = TokenType} } = + oauth2_client:get_access_token(?config(oauth_provider_id, Config), build_access_token_request(Parameters)), + + ?assertEqual(proplists:get_value(token_type, JsonPayload), TokenType), + ?assertEqual(proplists:get_value(access_token, JsonPayload), AccessToken). + +grants_access_token(Config) -> + #{request := #{parameters := Parameters}, + response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } + = lookup_expectation(token_endpoint, Config), + + {ok, #successful_access_token_response{access_token = AccessToken, token_type = TokenType} } = + oauth2_client:get_access_token(?config(oauth_provider, Config), build_access_token_request(Parameters)), + ?assertEqual(proplists:get_value(token_type, JsonPayload), TokenType), + ?assertEqual(proplists:get_value(access_token, JsonPayload), AccessToken). + +grants_refresh_token(Config) -> + #{request := #{parameters := Parameters}, + response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } + = lookup_expectation(token_endpoint, Config), + + {ok, #successful_access_token_response{access_token = AccessToken, token_type = TokenType} } = + oauth2_client:refresh_access_token(?config(oauth_provider, Config), build_refresh_token_request(Parameters)), + ?assertEqual(proplists:get_value(token_type, JsonPayload), TokenType), + ?assertEqual(proplists:get_value(access_token, JsonPayload), AccessToken). + +denies_access_token(Config) -> + #{request := #{parameters := Parameters}, + response := [ {code, 400}, {content_type, _CT}, {payload, JsonPayload}] } + = lookup_expectation(token_endpoint, Config), + {error, #unsuccessful_access_token_response{error = Error, error_description = ErrorDescription} } = + oauth2_client:get_access_token(?config(oauth_provider, Config),build_access_token_request(Parameters)), + ?assertEqual(proplists:get_value(error, JsonPayload), Error), + ?assertEqual(proplists:get_value(error_description, JsonPayload), ErrorDescription). + +auth_server_error(Config) -> + #{request := #{parameters := Parameters}, + response := [ {code, 500} ] } = lookup_expectation(token_endpoint, Config), + {error, "Internal Server Error"} = + oauth2_client:get_access_token(?config(oauth_provider, Config), build_access_token_request(Parameters)). + +non_json_payload(Config) -> + #{request := #{parameters := Parameters}} = lookup_expectation(token_endpoint, Config), + {error, {failed_to_decode_json, _ErrorArgs}} = + oauth2_client:get_access_token(?config(oauth_provider, Config), build_access_token_request(Parameters)). + +connection_error(Config) -> + #{request := #{parameters := Parameters}} = lookup_expectation(token_endpoint, Config), + {error, {failed_connect, _ErrorArgs} } = oauth2_client:get_access_token( + ?config(oauth_provider, Config), build_access_token_request(Parameters)). 
+ + +ssl_connection_error(Config) -> + #{request := #{parameters := Parameters}} = lookup_expectation(token_endpoint, Config), + + {error, {failed_connect, _} } = oauth2_client:get_access_token( + ?config(oauth_provider_with_wrong_ca, Config), build_access_token_request(Parameters)). + +get_oauth_provider(Config) -> + #{response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } + = lookup_expectation(get_openid_configuration, Config), + + {ok, #oauth_provider{issuer = Issuer, token_endpoint = TokenEndPoint, jwks_uri = Jwks_uri}} = + oauth2_client:get_oauth_provider([issuer, token_endpoint, jwks_uri]), + + ?assertEqual(proplists:get_value(issuer, JsonPayload), Issuer), + ?assertEqual(proplists:get_value(token_endpoint, JsonPayload), TokenEndPoint), + ?assertEqual(proplists:get_value(jwks_uri, JsonPayload), Jwks_uri). + +get_oauth_provider_given_oauth_provider_id(Config) -> + #{response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } + = lookup_expectation(get_openid_configuration, Config), + + ct:log("get_oauth_provider ~p", [?config(oauth_provider_id, Config)]), + {ok, #oauth_provider{ + issuer = Issuer, + token_endpoint = TokenEndPoint, + authorization_endpoint = AuthorizationEndpoint, + end_session_endpoint = EndSessionEndpoint, + jwks_uri = Jwks_uri}} = + oauth2_client:get_oauth_provider(?config(oauth_provider_id, Config), + [issuer, token_endpoint, jwks_uri, authorization_endpoint, end_session_endpoint]), + + ?assertEqual(proplists:get_value(issuer, JsonPayload), Issuer), + ?assertEqual(proplists:get_value(token_endpoint, JsonPayload), TokenEndPoint), + ?assertEqual(proplists:get_value(authorization_endpoint, JsonPayload), AuthorizationEndpoint), + ?assertEqual(proplists:get_value(end_session_endpoint, JsonPayload), EndSessionEndpoint), + ?assertEqual(proplists:get_value(jwks_uri, JsonPayload), Jwks_uri). + + +%%% HELPERS +build_issuer(Scheme) -> + uri_string:recompose(#{scheme => Scheme, + host => "localhost", + port => rabbit_data_coercion:to_integer(?AUTH_PORT), + path => ""}). + +build_token_endpoint_uri(Scheme) -> + uri_string:recompose(#{scheme => Scheme, + host => "localhost", + port => rabbit_data_coercion:to_integer(?AUTH_PORT), + path => "/token"}). + +build_jwks_uri(Scheme) -> + uri_string:recompose(#{scheme => Scheme, + host => "localhost", + port => rabbit_data_coercion:to_integer(?AUTH_PORT), + path => "/certs"}). + +build_access_token_request(Request) -> + #access_token_request { + client_id = proplists:get_value(?REQUEST_CLIENT_ID, Request), + client_secret = proplists:get_value(?REQUEST_CLIENT_SECRET, Request) + }. +build_refresh_token_request(Request) -> + #refresh_token_request{ + client_id = proplists:get_value(?REQUEST_CLIENT_ID, Request), + client_secret = proplists:get_value(?REQUEST_CLIENT_SECRET, Request), + refresh_token = proplists:get_value(?REQUEST_REFRESH_TOKEN, Request) + }. +build_http_oauth_provider() -> + #oauth_provider { + issuer = build_issuer("http"), + token_endpoint = build_token_endpoint_uri("http"), + jwks_uri = build_jwks_uri("http") + }. +keep_only_issuer_and_ssl_options(OauthProvider) -> + #oauth_provider { + issuer = OauthProvider#oauth_provider.issuer, + ssl_options = OauthProvider#oauth_provider.ssl_options + }. +build_https_oauth_provider(CaCertFile) -> + #oauth_provider { + issuer = build_issuer("https"), + token_endpoint = build_token_endpoint_uri("https"), + jwks_uri = build_jwks_uri("https"), + ssl_options = ssl_options(verify_peer, false, CaCertFile) + }. 
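
For reference, a hand-built request against one of the providers constructed above; the record fields mirror what build_access_token_request/1 extracts from the expectation parameters, and the credentials are simply the fixture values. Not part of the suite.

%% Illustrative sketch only.
manual_token_request_example() ->
    Provider = build_http_oauth_provider(),
    Request = #access_token_request{client_id = <<"guest">>,
                                    client_secret = <<"password">>},
    oauth2_client:get_access_token(Provider, Request).
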
+oauth_provider_to_proplist(#oauth_provider{ issuer = Issuer, token_endpoint = TokenEndpoint, + ssl_options = SslOptions, jwks_uri = Jwks_url}) -> + [ { issuer, Issuer}, + {token_endpoint, TokenEndpoint}, + { https, + case SslOptions of + undefined -> []; + Value -> Value + end}, + {jwks_url, Jwks_url} ]. + +start_http_oauth_server(Port, Expectations) when is_list(Expectations) -> + Dispatch = cowboy_router:compile([ + {'_', [{Path, oauth_http_mock, Expected} || #{request := #{path := Path}} = Expected <- Expectations ]} + ]), + ct:log("start_http_oauth_server with expectation list : ~p -> dispatch: ~p", [Expectations, Dispatch]), + {ok, _} = cowboy:start_clear(mock_http_auth_listener,[ {port, Port} ], + #{env => #{dispatch => Dispatch}}); + +start_http_oauth_server(Port, #{request := #{path := Path}} = Expected) -> + Dispatch = cowboy_router:compile([ + {'_', [{Path, oauth_http_mock, Expected}]} + ]), + ct:log("start_http_oauth_server with expectation : ~p -> dispatch: ~p ", [Expected, Dispatch]), + {ok, _} = cowboy:start_clear( + mock_http_auth_listener, + [{port, Port} + ], + #{env => #{dispatch => Dispatch}}). + + +start_https_oauth_server(Port, CertsDir, Expectations) when is_list(Expectations) -> + Dispatch = cowboy_router:compile([ + {'_', [{Path, oauth_http_mock, Expected} || #{request := #{path := Path}} = Expected <- Expectations ]} + ]), + ct:log("start_https_oauth_server with expectation list : ~p -> dispatch: ~p", [Expectations, Expectations]), + {ok, _} = cowboy:start_tls( + mock_http_auth_listener, + [{port, Port}, + {certfile, filename:join([CertsDir, "server", "cert.pem"])}, + {keyfile, filename:join([CertsDir, "server", "key.pem"])} + ], + #{env => #{dispatch => Dispatch}}); + +start_https_oauth_server(Port, CertsDir, #{request := #{path := Path}} = Expected) -> + Dispatch = cowboy_router:compile([{'_', [{Path, oauth_http_mock, Expected}]}]), + ct:log("start_https_oauth_server with expectation : ~p -> dispatch: ~p", [Expected, Dispatch]), + {ok, _} = cowboy:start_tls( + mock_http_auth_listener, + [{port, Port}, + {certfile, filename:join([CertsDir, "server", "cert.pem"])}, + {keyfile, filename:join([CertsDir, "server", "key.pem"])} + ], + #{env => #{dispatch => Dispatch}}). + +stop_http_auth_server() -> + cowboy:stop_listener(mock_http_auth_listener). + +-spec ssl_options(ssl:verify_type(), boolean(), file:filename()) -> list(). +ssl_options(PeerVerification, FailIfNoPeerCert, CaCertFile) -> + [{verify, PeerVerification}, + {depth, 10}, + {fail_if_no_peer_cert, FailIfNoPeerCert}, + {crl_check, false}, + {crl_cache, {ssl_crl_cache, {internal, [{http, 10000}]}}}, + {cacertfile, CaCertFile}]. diff --git a/deps/oauth2_client/test/unit_SUITE.erl b/deps/oauth2_client/test/unit_SUITE.erl new file mode 100644 index 000000000000..0ffa6304ad14 --- /dev/null +++ b/deps/oauth2_client/test/unit_SUITE.erl @@ -0,0 +1,147 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(unit_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-include_lib("oauth2_client.hrl"). +-include_lib("public_key/include/public_key.hrl"). + +-compile(export_all). + +-define(UTIL_MOD, oauth2_client_test_util). 
+ +all() -> +[ + {group, ssl_options}, + {group, get_expiration_time} +]. + +groups() -> +[ + {ssl_options, [], [ + no_ssl_options_triggers_verify_peer, + choose_verify_over_peer_verification, + verify_set_to_verify_none, + peer_verification_set_to_verify_none, + peer_verification_set_to_verify_peer_with_cacertfile, + verify_set_to_verify_peer_with_cacertfile + ]}, + {get_expiration_time, [], [ + access_token_response_without_expiration_time, + access_token_response_with_expires_in, + access_token_response_with_exp_in_access_token + ]} +]. + +no_ssl_options_triggers_verify_peer(_) -> + ?assertMatch([ + {verify, verify_peer}, + {depth, 10}, + {crl_check,false}, + {fail_if_no_peer_cert,false}, + {cacerts, _CaCerts} + ], oauth2_client:extract_ssl_options_as_list(#{})). + +choose_verify_over_peer_verification(_) -> + Expected1 = [ + {verify, verify_none} + ], + ?assertEqual(Expected1, oauth2_client:extract_ssl_options_as_list( + #{verify => verify_none, peer_verification => verify_peer })). + +verify_set_to_verify_none(_) -> + Expected1 = [ + {verify, verify_none} + ], + ?assertEqual(Expected1, oauth2_client:extract_ssl_options_as_list(#{verify => verify_none})), + + Expected2 = [ + {verify, verify_none} + ], + ?assertEqual(Expected2, oauth2_client:extract_ssl_options_as_list(#{ + verify => verify_none, + cacertfile => "/tmp" + })). + + +peer_verification_set_to_verify_none(_) -> + Expected1 = [ + {verify, verify_none} + ], + ?assertEqual(Expected1, oauth2_client:extract_ssl_options_as_list(#{peer_verification => verify_none})), + + Expected2 = [ + {verify, verify_none} + ], + ?assertEqual(Expected2, oauth2_client:extract_ssl_options_as_list(#{ + peer_verification => verify_none, + cacertfile => "/tmp" + })). + + +peer_verification_set_to_verify_peer_with_cacertfile(_) -> + Expected = [ + {verify, verify_peer}, + {depth, 10}, + {crl_check,false}, + {fail_if_no_peer_cert,false}, + {cacertfile, "/tmp"} + ], + ?assertEqual(Expected, oauth2_client:extract_ssl_options_as_list(#{ + cacertfile => "/tmp", + peer_verification => verify_peer + })). + + +verify_set_to_verify_peer_with_cacertfile(_) -> + Expected = [ + {verify, verify_peer}, + {depth, 10}, + {crl_check,false}, + {fail_if_no_peer_cert,false}, + {cacertfile, "/tmp"} + ], + ?assertEqual(Expected, oauth2_client:extract_ssl_options_as_list(#{ + cacertfile => "/tmp", + verify => verify_peer + })). + +access_token_response_with_expires_in(_) -> + Jwk = ?UTIL_MOD:fixture_jwk(), + ExpiresIn = os:system_time(seconds), + AccessToken = ?UTIL_MOD:expirable_token_with_expiration_time(ExpiresIn), + {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(AccessToken, Jwk), + AccessTokenResponse = #successful_access_token_response{ + access_token = EncodedToken, + expires_in = ExpiresIn + }, + ?assertEqual({ok, [{expires_in, ExpiresIn}]}, oauth2_client:get_expiration_time(AccessTokenResponse)). + +access_token_response_with_exp_in_access_token(_) -> + Jwk = ?UTIL_MOD:fixture_jwk(), + ExpiresIn = os:system_time(seconds), + AccessToken = ?UTIL_MOD:expirable_token_with_expiration_time(ExpiresIn), + {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(AccessToken, Jwk), + AccessTokenResponse = #successful_access_token_response{ + access_token = EncodedToken + }, + ?assertEqual({ok, [{exp, ExpiresIn}]}, oauth2_client:get_expiration_time(AccessTokenResponse)). 
+ +access_token_response_without_expiration_time(_) -> + Jwk = ?UTIL_MOD:fixture_jwk(), + AccessToken = maps:remove(<<"exp">>, ?UTIL_MOD:fixture_token()), + ct:log("AccesToken ~p", [AccessToken]), + {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(AccessToken, Jwk), + AccessTokenResponse = #successful_access_token_response{ + access_token = EncodedToken + }, + ct:log("AccessTokenResponse ~p", [AccessTokenResponse]), + ?assertEqual({error, missing_exp_field}, oauth2_client:get_expiration_time(AccessTokenResponse)). + diff --git a/deps/rabbit/.gitignore b/deps/rabbit/.gitignore index dc870136e8ec..7f6246dc7b9e 100644 --- a/deps/rabbit/.gitignore +++ b/deps/rabbit/.gitignore @@ -1,42 +1,8 @@ -*~ -.sw? -.*.sw? -*.beam -*.coverdata -MnesiaCore.* -/.erlang.mk/ -/cover/ /debug/ -/deps/ -/debug/ -/doc/ -/ebin/ -/escript/ -/escript.lock /etc/ -/logs/ -/plugins/ -/plugins.lock -/test/ct.cover.spec /test/config_schema_SUITE_data/schema/** -/xrefr -/sbin/ -/sbin.lock -rabbit.d - -# Generated documentation. -docs/*.html - -# Dialyzer -*.plt - -# Tracing tools -*-ttb -*.ti -*.lz4* -callgrind.out* -callgraph.dot* - -PACKAGES/* rabbit-rabbitmq-deps.mk + +[Bb]in/ +[Oo]bj/ diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index 0134351bfda2..c91cd890ff2c 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -5,7 +5,6 @@ load("//:rabbitmq_home.bzl", "rabbitmq_home") load("//:rabbitmq_run.bzl", "rabbitmq_run") load( "//:rabbitmq.bzl", - "ENABLE_FEATURE_MAYBE_EXPR", "RABBITMQ_DIALYZER_OPTS", "assert_suites", "rabbitmq_app", @@ -40,7 +39,6 @@ _APP_ENV = """[ {vm_memory_calculation_strategy, rss}, {memory_monitor_interval, 2500}, {disk_free_limit, 50000000}, %% 50MB - {msg_store_index_module, rabbit_msg_store_ets_index}, {backing_queue_module, rabbit_variable_queue}, %% 0 ("no limit") would make a better default, but that %% breaks the QPid Java client @@ -67,7 +65,9 @@ _APP_ENV = """[ {collect_statistics_interval, 5000}, {mnesia_table_loading_retry_timeout, 30000}, {mnesia_table_loading_retry_limit, 10}, - {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, + {anonymous_login_user, <<"guest">>}, + {anonymous_login_pass, <<"guest">>}, + {auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']}, {auth_backends, [rabbit_auth_backend_internal]}, {delegate_count, 16}, {trace_vhosts, []}, @@ -88,9 +88,6 @@ _APP_ENV = """[ {ssl_apps, [asn1, crypto, public_key, ssl]}, %% classic queue storage implementation version {classic_queue_default_version, 2}, - %% see rabbitmq-server#114 - {mirroring_flow_control, true}, - {mirroring_sync_batch_size, 4096}, %% see rabbitmq-server#227 and related tickets. %% msg_store_credit_disc_bound only takes effect when %% messages are persisted to the message store. 
If messages @@ -117,9 +114,6 @@ _APP_ENV = """[ {cluster_nodes, {[], disc}}, {config_entry_decoder, [{passphrase, undefined}]}, - - %% rabbitmq-server#973 - {queue_explicit_gc_run_operation_threshold, 1000}, {background_gc_enabled, false}, {background_gc_target_interval, 60000}, %% rabbitmq-server#589 @@ -134,8 +128,8 @@ _APP_ENV = """[ {default_consumer_prefetch, {false, 0}}, %% interval at which the channel can perform periodic actions {channel_tick_interval, 60000}, - %% Default max message size is 128 MB - {max_message_size, 134217728}, + %% Default max message size is 16 MB + {max_message_size, 16777216}, %% Socket writer will run GC every 1 GB of outgoing data {writer_gc_threshold, 1000000000}, %% interval at which connection/channel tracking executes post operations @@ -147,7 +141,11 @@ _APP_ENV = """[ {dead_letter_worker_publisher_confirm_timeout, 180000}, %% EOL date for the current release series, if known/announced - {release_series_eol_date, none} + {release_series_eol_date, none}, + + {vhost_process_reconciliation_run_interval, 30}, + %% for testing + {vhost_process_reconciliation_enabled, true} ] """ @@ -213,9 +211,12 @@ rabbitmq_app( priv = [":priv"], deps = [ "//deps/amqp10_common:erlang_app", - "//deps/rabbit/apps/rabbitmq_prelaunch:erlang_app", "//deps/rabbit_common:erlang_app", + "//deps/rabbitmq_prelaunch:erlang_app", "@cuttlefish//:erlang_app", + "@gen_batch_server//:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", "@observer_cli//:erlang_app", "@osiris//:erlang_app", "@ra//:erlang_app", @@ -232,6 +233,9 @@ rabbitmq_app( xref( name = "xref", + additional_libs = [ + "//deps/rabbitmq_cli:erlang_app", # keep + ], target = ":erlang_app", ) @@ -243,9 +247,10 @@ plt( ], for_target = ":erlang_app", ignore_warnings = True, + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", deps = [ - "@looking_glass//:erlang_app", # keep + "//deps/rabbitmq_cli:erlang_app", # keep ], ) @@ -271,6 +276,7 @@ rabbitmq_home( plugins = [ ":test_erlang_app", "//deps/rabbitmq_ct_client_helpers:erlang_app", + "//deps/rabbitmq_amqp1_0:erlang_app", "@inet_tcp_proxy_dist//:erlang_app", "@meck//:erlang_app", ], @@ -312,30 +318,37 @@ rabbitmq_integration_suite( rabbitmq_integration_suite( name = "classic_queue_prop_SUITE", size = "large", - shard_count = 6, - sharding_method = "case", deps = [ "@proper//:erlang_app", ], ) rabbitmq_integration_suite( - name = "cluster_rename_SUITE", - size = "large", - flaky = True, - shard_count = 2, + name = "cluster_SUITE", + size = "medium", ) rabbitmq_integration_suite( - name = "cluster_SUITE", + name = "clustering_events_SUITE", size = "medium", + additional_beam = [ + ":test_event_recorder_beam", + ], ) rabbitmq_integration_suite( name = "quorum_queue_member_reconciliation_SUITE", size = "medium", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", + ], +) + +rabbitmq_integration_suite( + name = "cluster_limit_SUITE", + size = "medium", + additional_beam = [ + ":test_queue_utils_beam", ], ) @@ -345,15 +358,17 @@ rabbitmq_integration_suite( additional_beam = [ ":test_clustering_utils_beam", ], - flaky = True, - shard_count = 18, + shard_count = 45, sharding_method = "case", ) rabbitmq_integration_suite( name = "clustering_recovery_SUITE", size = "medium", - shard_count = 2, + additional_beam = [ + ":test_clustering_utils_beam", + ], + shard_count = 8, sharding_method = "case", ) @@ -374,7 +389,7 @@ rabbitmq_integration_suite( name = "consumer_timeout_SUITE", size = "medium", additional_beam = 
[ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ], ) @@ -387,9 +402,19 @@ rabbitmq_integration_suite( name = "dead_lettering_SUITE", size = "large", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ], - shard_count = 7, + shard_count = 6, +) + +rabbitmq_integration_suite( + name = "amqpl_consumer_ack_SUITE", +) + +rabbitmq_integration_suite( + name = "message_containers_deaths_v2_SUITE", + size = "medium", + shard_count = 1, ) rabbitmq_integration_suite( @@ -415,41 +440,17 @@ rabbitmq_integration_suite( size = "medium", ) -rabbitmq_integration_suite( - name = "dynamic_ha_SUITE", - size = "large", - flaky = True, - shard_count = 20, - sharding_method = "case", - deps = [ - "@proper//:erlang_app", - ], -) - rabbitmq_integration_suite( name = "dynamic_qq_SUITE", size = "large", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ], - flaky = True, deps = [ "@proper//:erlang_app", ], ) -rabbitmq_integration_suite( - name = "eager_sync_SUITE", - size = "large", - additional_beam = [ - ":sync_detection_SUITE_beam_files", - ], - flaky = True, - shard_count = 5, - sharding_method = "case", - tags = ["classic-queue"], -) - rabbitmq_integration_suite( name = "feature_flags_SUITE", size = "large", @@ -513,16 +514,7 @@ rabbitmq_integration_suite( name = "maintenance_mode_SUITE", size = "medium", additional_beam = [ - ":test_quorum_queue_utils_beam", - ], -) - -rabbitmq_integration_suite( - name = "many_node_ha_SUITE", - size = "medium", - additional_beam = [ - ":test_rabbit_ha_test_consumer_beam", - ":test_rabbit_ha_test_producer_beam", + ":test_queue_utils_beam", ], ) @@ -541,11 +533,6 @@ rabbitmq_integration_suite( size = "medium", ) -rabbitmq_integration_suite( - name = "message_containers_SUITE", - size = "medium", -) - rabbitmq_integration_suite( name = "metrics_SUITE", size = "medium", @@ -573,7 +560,6 @@ rabbitmq_suite( rabbitmq_integration_suite( name = "peer_discovery_classic_config_SUITE", size = "large", - flaky = True, ) rabbitmq_integration_suite( @@ -581,6 +567,11 @@ rabbitmq_integration_suite( size = "medium", ) +rabbitmq_integration_suite( + name = "peer_discovery_tmp_hidden_node_SUITE", + size = "large", +) + rabbitmq_integration_suite( name = "per_user_connection_channel_limit_partitions_SUITE", size = "large", @@ -589,11 +580,13 @@ rabbitmq_integration_suite( rabbitmq_integration_suite( name = "per_user_connection_channel_limit_SUITE", size = "medium", + shard_count = 4, ) rabbitmq_integration_suite( name = "per_user_connection_channel_tracking_SUITE", size = "medium", + shard_count = 4, ) rabbitmq_integration_suite( @@ -609,6 +602,7 @@ rabbitmq_integration_suite( rabbitmq_integration_suite( name = "per_vhost_connection_limit_SUITE", size = "medium", + shard_count = 5, ) rabbitmq_integration_suite( @@ -650,7 +644,7 @@ rabbitmq_integration_suite( name = "publisher_confirms_parallel_SUITE", size = "medium", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ], ) @@ -659,36 +653,38 @@ rabbitmq_integration_suite( size = "medium", ) -rabbitmq_integration_suite( - name = "queue_master_location_SUITE", - size = "large", - shard_count = 2, -) - rabbitmq_integration_suite( name = "queue_parallel_SUITE", size = "large", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ], - shard_count = 6, + shard_count = 3, ) rabbitmq_integration_suite( name = "queue_type_SUITE", size = "medium", + additional_beam = [ + ":test_queue_utils_beam", + ], ) 
rabbitmq_integration_suite( name = "quorum_queue_SUITE", size = "large", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", + ":test_clustering_utils_beam", ], - flaky = True, shard_count = 6, ) +rabbitmq_integration_suite( + name = "classic_queue_SUITE", + size = "medium", +) + rabbitmq_suite( name = "rabbit_confirms_SUITE", size = "small", @@ -709,8 +705,12 @@ rabbitmq_suite( rabbitmq_suite( name = "rabbit_fifo_int_SUITE", size = "medium", + additional_beam = [ + ":test_test_util_beam", + ], deps = [ "//deps/rabbit_common:erlang_app", + "//deps/rabbitmq_ct_helpers:erlang_app", "@aten//:erlang_app", "@gen_batch_server//:erlang_app", "@meck//:erlang_app", @@ -726,6 +726,7 @@ rabbitmq_suite( ], deps = [ "//deps/rabbit_common:erlang_app", + "@meck//:erlang_app", "@proper//:erlang_app", "@ra//:erlang_app", ], @@ -739,12 +740,21 @@ rabbitmq_suite( ], ) +rabbitmq_suite( + name = "rabbit_fifo_q_SUITE", + size = "small", + deps = [ + "//deps/rabbit_common:erlang_app", + "@proper//:erlang_app", + ], +) + rabbitmq_integration_suite( name = "rabbit_fifo_dlx_integration_SUITE", size = "medium", additional_beam = [ ":test_test_util_beam", - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ":quorum_queue_SUITE_beam_files", ], deps = [ @@ -781,8 +791,8 @@ rabbitmq_suite( ) rabbitmq_suite( - name = "rabbit_msg_record_SUITE", - size = "medium", + name = "mc_unit_SUITE", + size = "small", deps = [ "//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app", @@ -790,23 +800,24 @@ rabbitmq_suite( ) rabbitmq_suite( - name = "mc_SUITE", - size = "small", + name = "rabbit_stream_coordinator_SUITE", deps = [ - "//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app", ], ) rabbitmq_suite( - name = "rabbit_stream_coordinator_SUITE", + name = "rabbit_stream_sac_coordinator_SUITE", + runtime_deps = [ + "@meck//:erlang_app", + ], deps = [ "//deps/rabbit_common:erlang_app", ], ) rabbitmq_suite( - name = "rabbit_stream_sac_coordinator_SUITE", + name = "rabbit_access_control_SUITE", runtime_deps = [ "@meck//:erlang_app", ], @@ -819,9 +830,9 @@ rabbitmq_integration_suite( name = "rabbit_stream_queue_SUITE", size = "large", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ], - shard_count = 21, + shard_count = 19, deps = [ "@proper//:erlang_app", ], @@ -840,7 +851,6 @@ rabbitmq_integration_suite( rabbitmq_integration_suite( name = "rabbitmqctl_integration_SUITE", size = "medium", - flaky = True, ) rabbitmq_integration_suite( @@ -853,29 +863,14 @@ rabbitmq_integration_suite( size = "medium", ) -rabbitmq_integration_suite( - name = "simple_ha_SUITE", - size = "large", - additional_beam = [ - ":test_rabbit_ha_test_consumer_beam", - ":test_rabbit_ha_test_producer_beam", - ], - shard_count = 4, -) - rabbitmq_integration_suite( name = "single_active_consumer_SUITE", size = "medium", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ], ) -rabbitmq_integration_suite( - name = "sync_detection_SUITE", - size = "medium", -) - rabbitmq_integration_suite( name = "term_to_binary_compat_prop_SUITE", deps = [ @@ -888,6 +883,11 @@ rabbitmq_integration_suite( size = "medium", ) +rabbitmq_integration_suite( + name = "transactions_SUITE", + size = "medium", +) + rabbitmq_integration_suite( name = "unit_access_control_authn_authz_context_propagation_SUITE", size = "medium", @@ -926,6 +926,11 @@ rabbitmq_integration_suite( size = "medium", ) +rabbitmq_suite( + name = "unit_quorum_queue_SUITE", + size = "medium", 
+) + rabbitmq_integration_suite( name = "unit_app_management_SUITE", size = "medium", @@ -939,6 +944,14 @@ rabbitmq_suite( ], ) +rabbitmq_suite( + name = "unit_cluster_formation_sort_nodes_SUITE", + size = "small", + deps = [ + "@meck//:erlang_app", + ], +) + rabbitmq_suite( name = "unit_collections_SUITE", size = "small", @@ -951,8 +964,8 @@ rabbitmq_suite( name = "unit_config_value_encryption_SUITE", size = "medium", deps = [ - "//deps/rabbit/apps/rabbitmq_prelaunch:test_erlang_app", "//deps/rabbit_common:test_erlang_app", + "//deps/rabbitmq_prelaunch:test_erlang_app", "@credentials_obfuscation//:erlang_app", ], ) @@ -982,15 +995,6 @@ rabbitmq_integration_suite( size = "medium", ) -rabbitmq_suite( - name = "unit_gm_SUITE", - size = "small", - deps = [ - "//deps/rabbitmq_ct_helpers:erlang_app", - "@meck//:erlang_app", - ], -) - rabbitmq_integration_suite( name = "unit_log_management_SUITE", size = "medium", @@ -1037,6 +1041,14 @@ rabbitmq_suite( size = "small", ) +rabbitmq_suite( + name = "unit_queue_location_SUITE", + size = "small", + deps = [ + "@meck//:erlang_app", + ], +) + rabbitmq_integration_suite( name = "unit_stats_and_metrics_SUITE", size = "medium", @@ -1071,27 +1083,19 @@ rabbitmq_integration_suite( ], ) -rabbitmq_suite( - name = "unit_classic_mirrored_queue_sync_throttling_SUITE", - size = "small", - deps = [ - "//deps/rabbit_common:erlang_app", - ], -) - -rabbitmq_suite( - name = "unit_classic_mirrored_queue_throughput_SUITE", - size = "small", - deps = [ - "//deps/rabbit_common:erlang_app", +rabbitmq_integration_suite( + name = "direct_exchange_routing_v2_SUITE", + size = "medium", + additional_beam = [ + ":test_queue_utils_beam", ], ) rabbitmq_integration_suite( - name = "direct_exchange_routing_v2_SUITE", - size = "medium", + name = "rabbit_local_random_exchange_SUITE", + size = "small", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ], ) @@ -1112,7 +1116,7 @@ rabbitmq_integration_suite( name = "exchanges_SUITE", size = "medium", additional_beam = [ - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ], ) @@ -1164,11 +1168,114 @@ rabbitmq_integration_suite( ], ) +rabbitmq_integration_suite( + name = "metadata_store_clustering_SUITE", + size = "large", + shard_count = 18, + sharding_method = "case", +) + +rabbitmq_integration_suite( + name = "metadata_store_phase1_SUITE", + size = "small", + deps = [ + "@khepri//:erlang_app", + ], +) + +rabbitmq_integration_suite( + name = "metadata_store_migration_SUITE", + size = "small", +) + rabbitmq_integration_suite( name = "routing_SUITE", size = "large", ) +rabbitmq_integration_suite( + name = "cli_forget_cluster_node_SUITE", + size = "medium", + additional_beam = [ + ":test_clustering_utils_beam", + ], +) + +rabbitmq_integration_suite( + name = "cluster_minority_SUITE", + size = "medium", + additional_beam = [ + ":test_clustering_utils_beam", + ], +) + +rabbitmq_integration_suite( + name = "cluster_upgrade_SUITE", + size = "medium", + additional_beam = [ + ":test_queue_utils_beam", + ], +) + +rabbitmq_integration_suite( + name = "amqp_client_SUITE", + size = "large", + additional_beam = [ + ":test_event_recorder_beam", + ], + shard_count = 3, + runtime_deps = [ + "//deps/rabbitmq_amqp_client:erlang_app", + ], +) + +rabbitmq_integration_suite( + name = "amqp_proxy_protocol_SUITE", + size = "medium", +) + +rabbitmq_integration_suite( + name = "amqp_system_SUITE", + flaky = True, + shard_count = 2, + tags = [ + "dotnet", + ], + test_env = { + "TMPDIR": "$TEST_TMPDIR", + }, +) + 
+rabbitmq_integration_suite( + name = "amqp_auth_SUITE", + additional_beam = [ + ":test_event_recorder_beam", + ], + shard_count = 2, + runtime_deps = [ + "//deps/rabbitmq_amqp_client:erlang_app", + ], +) + +rabbitmq_integration_suite( + name = "amqp_address_SUITE", + shard_count = 2, + runtime_deps = [ + "//deps/rabbitmq_amqp_client:erlang_app", + ], +) + +rabbitmq_integration_suite( + name = "amqp_credit_api_v2_SUITE", + runtime_deps = [ + "//deps/rabbitmq_amqp_client:erlang_app", + ], +) + +rabbitmq_integration_suite( + name = "amqpl_direct_reply_to_SUITE", +) + assert_suites() filegroup( @@ -1256,16 +1363,17 @@ eunit( ":test_dummy_supervisor2_beam", ":test_failing_dummy_interceptor_beam", ":test_mirrored_supervisor_SUITE_gs_beam", - ":test_quorum_queue_utils_beam", + ":test_queue_utils_beam", ":test_rabbit_auth_backend_context_propagation_mock_beam", ":test_rabbit_dummy_protocol_connection_info_beam", ":test_rabbit_foo_protocol_connection_info_beam", - ":test_rabbit_ha_test_consumer_beam", - ":test_rabbit_ha_test_producer_beam", ":test_test_util_beam", ":test_test_rabbit_event_handler_beam", ":test_clustering_utils_beam", + ":test_event_recorder_beam", ], - erl_extra_args = [ENABLE_FEATURE_MAYBE_EXPR], target = ":test_erlang_app", + test_env = { + "COVERDATA_TO_LCOV_APPS_DIRS": "deps:deps/rabbit/apps", + }, ) diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile index bb2fb67eb498..aa1c78bbac40 100644 --- a/deps/rabbit/Makefile +++ b/deps/rabbit/Makefile @@ -19,7 +19,6 @@ define PROJECT_ENV {vm_memory_calculation_strategy, rss}, {memory_monitor_interval, 2500}, {disk_free_limit, 50000000}, %% 50MB - {msg_store_index_module, rabbit_msg_store_ets_index}, {backing_queue_module, rabbit_variable_queue}, %% 0 ("no limit") would make a better default, but that %% breaks the QPid Java client @@ -46,7 +45,12 @@ define PROJECT_ENV {collect_statistics_interval, 5000}, {mnesia_table_loading_retry_timeout, 30000}, {mnesia_table_loading_retry_limit, 10}, - {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, + %% The identity to act as for anonymous logins. + {anonymous_login_user, <<"guest">>}, + {anonymous_login_pass, <<"guest">>}, + %% "The server mechanisms are ordered in decreasing level of preference." + %% AMQP §5.3.3.1 + {auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']}, {auth_backends, [rabbit_auth_backend_internal]}, {delegate_count, 16}, {trace_vhosts, []}, @@ -67,9 +71,6 @@ define PROJECT_ENV {ssl_apps, [asn1, crypto, public_key, ssl]}, %% classic queue storage implementation version {classic_queue_default_version, 2}, - %% see rabbitmq-server#114 - {mirroring_flow_control, true}, - {mirroring_sync_batch_size, 4096}, %% see rabbitmq-server#227 and related tickets. %% msg_store_credit_disc_bound only takes effect when %% messages are persisted to the message store. 
If messages @@ -99,9 +100,6 @@ define PROJECT_ENV {cluster_nodes, {[], disc}}, {config_entry_decoder, [{passphrase, undefined}]}, - - %% rabbitmq-server#973 - {queue_explicit_gc_run_operation_threshold, 1000}, {background_gc_enabled, false}, {background_gc_target_interval, 60000}, %% rabbitmq-server#589 @@ -116,42 +114,36 @@ define PROJECT_ENV {default_consumer_prefetch, {false, 0}}, %% interval at which the channel can perform periodic actions {channel_tick_interval, 60000}, - %% Default max message size is 128 MB - {max_message_size, 134217728}, + %% Default max message size is 16 MB + {max_message_size, 16777216}, %% Socket writer will run GC every 1 GB of outgoing data {writer_gc_threshold, 1000000000}, %% interval at which connection/channel tracking executes post operations {tracking_execution_timeout, 15000}, {stream_messages_soft_limit, 256}, - {track_auth_attempt_source, false}, - {credentials_obfuscation_fallback_secret, <<"nocookie">>}, - {dead_letter_worker_consumer_prefetch, 32}, - {dead_letter_worker_publisher_confirm_timeout, 180000}, - - %% EOL date for the current release series, if known/announced - {release_series_eol_date, none} + {track_auth_attempt_source, false}, + {credentials_obfuscation_fallback_secret, <<"nocookie">>}, + {dead_letter_worker_consumer_prefetch, 32}, + {dead_letter_worker_publisher_confirm_timeout, 180000}, + %% EOL date for the current release series, if known/announced + {release_series_eol_date, none}, + {vhost_process_reconciliation_run_interval, 30}, + %% for testing + {vhost_process_reconciliation_enabled, true} ] endef -# With Erlang.mk default behavior, the value of `$(APPS_DIR)` is always -# relative to the top-level executed Makefile. In our case, it could be -# a plugin for instance. However, the rabbitmq_prelaunch application is -# in this repository, not the plugin's. That's why we need to override -# this value here. 
-APPS_DIR := $(CURDIR)/apps - -LOCAL_DEPS = sasl rabbitmq_prelaunch os_mon inets compiler public_key crypto ssl syntax_tools xmerl +LOCAL_DEPS = sasl os_mon inets compiler public_key crypto ssl syntax_tools xmerl BUILD_DEPS = rabbitmq_cli -DEPS = ranch rabbit_common ra sysmon_handler stdout_formatter recon redbug observer_cli osiris amqp10_common syslog systemd seshat -TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client meck proper +DEPS = ranch rabbit_common amqp10_common rabbitmq_prelaunch ra sysmon_handler stdout_formatter recon redbug observer_cli osiris syslog systemd seshat khepri khepri_mnesia_migration cuttlefish gen_batch_server +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers meck proper amqp_client rabbitmq_amqp_client rabbitmq_amqp1_0 -PLT_APPS += mnesia +PLT_APPS += mnesia runtime_tools dep_syslog = git https://github.com/schlagert/syslog 4.0.0 -dep_osiris = git https://github.com/rabbitmq/osiris v1.6.4 +dep_osiris = git https://github.com/rabbitmq/osiris v1.8.2 dep_systemd = hex 0.6.1 -dep_seshat = hex 0.4.0 define usage_xml_to_erl $(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, src/rabbit_%_usage.erl, $(subst -,_,$(1)))) @@ -160,6 +152,7 @@ endef DOCS_DIR = docs MANPAGES = $(wildcard $(DOCS_DIR)/*.[0-9]) WEB_MANPAGES = $(patsubst %,%.html,$(MANPAGES)) +MD_MANPAGES = $(patsubst %,%.md,$(MANPAGES)) DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ @@ -171,9 +164,6 @@ DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ include ../../rabbitmq-components.mk include ../../erlang.mk -# See above why we mess with `$(APPS_DIR)`. -unexport APPS_DIR - ifeq ($(strip $(BATS)),) BATS := $(ERLANG_MK_TMP)/bats/bin/bats endif @@ -219,7 +209,6 @@ SLOW_CT_SUITES := backing_queue \ priority_queue \ priority_queue_recovery \ publisher_confirms_parallel \ - queue_master_location \ queue_parallel \ quorum_queue \ rabbit_core_metrics_gc \ @@ -255,21 +244,10 @@ ifdef CREDIT_FLOW_TRACING RMQ_ERLC_OPTS += -DCREDIT_FLOW_TRACING=true endif -ifdef DEBUG_FF -RMQ_ERLC_OPTS += -DDEBUG_QUORUM_QUEUE_FF=true -endif - ifdef TRACE_SUPERVISOR2 RMQ_ERLC_OPTS += -DTRACE_SUPERVISOR2=true endif -ifndef USE_PROPER_QC -# PropEr needs to be installed for property checking -# http://proper.softlab.ntua.gr/ -USE_PROPER_QC := $(shell $(ERL) -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().') -RMQ_ERLC_OPTS += $(if $(filter true,$(USE_PROPER_QC)),-Duse_proper_qc) -endif - # -------------------------------------------------------------------- # Documentation. 
# -------------------------------------------------------------------- @@ -281,7 +259,7 @@ docs:: manpages web-manpages manpages: $(MANPAGES) @: -web-manpages: $(WEB_MANPAGES) +web-manpages: $(WEB_MANPAGES) $(MD_MANPAGES) @: # We use mandoc(1) to convert manpages to HTML plus an awk script which @@ -309,7 +287,12 @@ web-manpages: $(WEB_MANPAGES) } } \ ' > "$@" +%.md: % + $(gen_verbose) mandoc -T markdown -O 'fragment,man=%N.%S.md' "$<" | \ + sed -E -e 's/\{/\{/g' \ + > "$@" + distclean:: distclean-manpages distclean-manpages:: - $(gen_verbose) rm -f $(WEB_MANPAGES) + $(gen_verbose) rm -f $(WEB_MANPAGES) $(MD_MANPAGES) diff --git a/deps/rabbit/README.md b/deps/rabbit/README.md index a41294770b2e..3424377e3cad 100644 --- a/deps/rabbit/README.md +++ b/deps/rabbit/README.md @@ -1,6 +1,3 @@ -[![OTP v22.3](https://img.shields.io/github/workflow/status/rabbitmq/rabbitmq-server/Test%20-%20Erlang%2022.3/main?label=Erlang%2022.3)](https://github.com/rabbitmq/rabbitmq-server/actions?query=workflow%3A%22Test+-+Erlang+22.3%22+branch%3A%22main%22) -[![OTP v23](https://img.shields.io/github/workflow/status/rabbitmq/rabbitmq-server/Test%20-%20Erlang%2023.1/main?label=Erlang%2023.1)](https://github.com/rabbitmq/rabbitmq-server/actions?query=workflow%3A%22Test+-+Erlang+23.1%22+branch%3Amain) - # RabbitMQ Server [RabbitMQ](https://rabbitmq.com) is a [feature rich](https://rabbitmq.com/documentation.html), multi-protocol messaging broker. It supports: @@ -62,4 +59,4 @@ RabbitMQ server is [licensed under the MPL 2.0](LICENSE-MPL-RabbitMQ). ## Copyright -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. diff --git a/deps/rabbit/app.bzl b/deps/rabbit/app.bzl index 7432ed8694ae..3cb3ca4c2bc5 100644 --- a/deps/rabbit/app.bzl +++ b/deps/rabbit/app.bzl @@ -9,14 +9,11 @@ def all_beam_files(name = "all_beam_files"): erlang_bytecode( name = "behaviours", srcs = [ - "src/gm.erl", "src/mc.erl", "src/rabbit_backing_queue.erl", "src/rabbit_credential_validator.erl", "src/rabbit_exchange_type.erl", - "src/rabbit_mirror_queue_mode.erl", "src/rabbit_policy_merge_strategy.erl", - "src/rabbit_queue_master_locator.erl", "src/rabbit_queue_type.erl", "src/rabbit_tracking.erl", ], @@ -24,7 +21,7 @@ def all_beam_files(name = "all_beam_files"): app_name = "rabbit", dest = "ebin", erlc_opts = "//:erlc_opts", - deps = ["//deps/rabbit_common:erlang_app"], + deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app"], ) erlang_bytecode( name = "other_beam", @@ -33,6 +30,7 @@ def all_beam_files(name = "all_beam_files"): "src/background_gc.erl", "src/code_server_cache.erl", "src/gatherer.erl", + "src/gm.erl", "src/internal_user.erl", "src/lqueue.erl", "src/mc_amqp.erl", @@ -46,6 +44,13 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit.erl", "src/rabbit_access_control.erl", "src/rabbit_alarm.erl", + "src/rabbit_amqp1_0.erl", + "src/rabbit_amqp_management.erl", + "src/rabbit_amqp_reader.erl", + "src/rabbit_amqp_session.erl", + "src/rabbit_amqp_session_sup.erl", + "src/rabbit_amqp_util.erl", + "src/rabbit_amqp_writer.erl", "src/rabbit_amqqueue.erl", "src/rabbit_amqqueue_control.erl", "src/rabbit_amqqueue_process.erl", @@ -53,6 +58,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_amqqueue_sup_sup.erl", "src/rabbit_auth_backend_internal.erl", "src/rabbit_auth_mechanism_amqplain.erl", + "src/rabbit_auth_mechanism_anonymous.erl", "src/rabbit_auth_mechanism_cr_demo.erl", 
"src/rabbit_auth_mechanism_plain.erl", "src/rabbit_autoheal.erl", @@ -85,22 +91,32 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_cuttlefish.erl", "src/rabbit_db.erl", "src/rabbit_db_binding.erl", + "src/rabbit_db_binding_m2k_converter.erl", "src/rabbit_db_cluster.erl", "src/rabbit_db_exchange.erl", + "src/rabbit_db_exchange_m2k_converter.erl", + "src/rabbit_db_m2k_converter.erl", "src/rabbit_db_maintenance.erl", + "src/rabbit_db_maintenance_m2k_converter.erl", "src/rabbit_db_msup.erl", + "src/rabbit_db_msup_m2k_converter.erl", "src/rabbit_db_policy.erl", "src/rabbit_db_queue.erl", + "src/rabbit_db_queue_m2k_converter.erl", "src/rabbit_db_rtparams.erl", + "src/rabbit_db_rtparams_m2k_converter.erl", "src/rabbit_db_topic_exchange.erl", "src/rabbit_db_user.erl", + "src/rabbit_db_user_m2k_converter.erl", "src/rabbit_db_vhost.erl", "src/rabbit_db_vhost_defaults.erl", + "src/rabbit_db_vhost_m2k_converter.erl", "src/rabbit_dead_letter.erl", "src/rabbit_definitions.erl", "src/rabbit_definitions_hashing.erl", "src/rabbit_definitions_import_https.erl", "src/rabbit_definitions_import_local_filesystem.erl", + "src/rabbit_depr_ff_extra.erl", "src/rabbit_deprecated_features.erl", "src/rabbit_diagnostics.erl", "src/rabbit_direct.erl", @@ -115,6 +131,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_exchange_type_fanout.erl", "src/rabbit_exchange_type_headers.erl", "src/rabbit_exchange_type_invalid.erl", + "src/rabbit_exchange_type_local_random.erl", "src/rabbit_exchange_type_topic.erl", "src/rabbit_feature_flags.erl", "src/rabbit_ff_controller.erl", @@ -130,12 +147,15 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_fifo_dlx_sup.erl", "src/rabbit_fifo_dlx_worker.erl", "src/rabbit_fifo_index.erl", + "src/rabbit_fifo_q.erl", "src/rabbit_fifo_v0.erl", "src/rabbit_fifo_v1.erl", + "src/rabbit_fifo_v3.erl", "src/rabbit_file.erl", "src/rabbit_global_counters.erl", "src/rabbit_guid.erl", "src/rabbit_health_check.erl", + "src/rabbit_khepri.erl", "src/rabbit_limiter.erl", "src/rabbit_log_channel.erl", "src/rabbit_log_connection.erl", @@ -144,25 +164,12 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_log_queue.erl", "src/rabbit_log_tail.erl", "src/rabbit_logger_exchange_h.erl", - "src/rabbit_looking_glass.erl", "src/rabbit_maintenance.erl", - "src/rabbit_memory_monitor.erl", "src/rabbit_message_interceptor.erl", "src/rabbit_metrics.erl", - "src/rabbit_mirror_queue_coordinator.erl", - "src/rabbit_mirror_queue_master.erl", "src/rabbit_mirror_queue_misc.erl", - "src/rabbit_mirror_queue_mode_all.erl", - "src/rabbit_mirror_queue_mode_exactly.erl", - "src/rabbit_mirror_queue_mode_nodes.erl", - "src/rabbit_mirror_queue_slave.erl", - "src/rabbit_mirror_queue_sync.erl", "src/rabbit_mnesia.erl", - "src/rabbit_mnesia_rename.erl", - "src/rabbit_msg_file.erl", - "src/rabbit_msg_record.erl", "src/rabbit_msg_store.erl", - "src/rabbit_msg_store_ets_index.erl", "src/rabbit_msg_store_gc.erl", "src/rabbit_networking.erl", "src/rabbit_networking_store.erl", @@ -183,18 +190,12 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_prelaunch_enabled_plugins_file.erl", "src/rabbit_prelaunch_feature_flags.erl", "src/rabbit_prelaunch_logging.erl", - "src/rabbit_prequeue.erl", "src/rabbit_priority_queue.erl", "src/rabbit_process.erl", "src/rabbit_queue_consumers.erl", "src/rabbit_queue_decorator.erl", "src/rabbit_queue_index.erl", "src/rabbit_queue_location.erl", - "src/rabbit_queue_location_client_local.erl", - "src/rabbit_queue_location_min_masters.erl", - 
"src/rabbit_queue_location_random.erl", - "src/rabbit_queue_location_validator.erl", - "src/rabbit_queue_master_location_misc.erl", "src/rabbit_queue_type_util.erl", "src/rabbit_quorum_memory_manager.erl", "src/rabbit_quorum_queue.erl", @@ -219,6 +220,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_trace.erl", "src/rabbit_tracking_store.erl", "src/rabbit_upgrade_preparation.erl", + "src/rabbit_uri.erl", "src/rabbit_variable_queue.erl", "src/rabbit_version.erl", "src/rabbit_vhost.erl", @@ -228,6 +230,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_vhost_sup.erl", "src/rabbit_vhost_sup_sup.erl", "src/rabbit_vhost_sup_wrapper.erl", + "src/rabbit_vhosts.erl", "src/rabbit_vm.erl", "src/supervised_lifecycle.erl", "src/tcp_listener.erl", @@ -243,6 +246,8 @@ def all_beam_files(name = "all_beam_files"): deps = [ "//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", "@ra//:erlang_app", "@ranch//:erlang_app", "@stdout_formatter//:erlang_app", @@ -259,14 +264,11 @@ def all_test_beam_files(name = "all_test_beam_files"): name = "test_behaviours", testonly = True, srcs = [ - "src/gm.erl", "src/mc.erl", "src/rabbit_backing_queue.erl", "src/rabbit_credential_validator.erl", "src/rabbit_exchange_type.erl", - "src/rabbit_mirror_queue_mode.erl", "src/rabbit_policy_merge_strategy.erl", - "src/rabbit_queue_master_locator.erl", "src/rabbit_queue_type.erl", "src/rabbit_tracking.erl", ], @@ -274,7 +276,7 @@ def all_test_beam_files(name = "all_test_beam_files"): app_name = "rabbit", dest = "test", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app"], + deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app"], ) erlang_bytecode( name = "test_other_beam", @@ -284,6 +286,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/background_gc.erl", "src/code_server_cache.erl", "src/gatherer.erl", + "src/gm.erl", "src/internal_user.erl", "src/lqueue.erl", "src/mc_amqp.erl", @@ -297,6 +300,13 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit.erl", "src/rabbit_access_control.erl", "src/rabbit_alarm.erl", + "src/rabbit_amqp1_0.erl", + "src/rabbit_amqp_management.erl", + "src/rabbit_amqp_reader.erl", + "src/rabbit_amqp_session.erl", + "src/rabbit_amqp_session_sup.erl", + "src/rabbit_amqp_util.erl", + "src/rabbit_amqp_writer.erl", "src/rabbit_amqqueue.erl", "src/rabbit_amqqueue_control.erl", "src/rabbit_amqqueue_process.erl", @@ -304,6 +314,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_amqqueue_sup_sup.erl", "src/rabbit_auth_backend_internal.erl", "src/rabbit_auth_mechanism_amqplain.erl", + "src/rabbit_auth_mechanism_anonymous.erl", "src/rabbit_auth_mechanism_cr_demo.erl", "src/rabbit_auth_mechanism_plain.erl", "src/rabbit_autoheal.erl", @@ -336,22 +347,32 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_cuttlefish.erl", "src/rabbit_db.erl", "src/rabbit_db_binding.erl", + "src/rabbit_db_binding_m2k_converter.erl", "src/rabbit_db_cluster.erl", "src/rabbit_db_exchange.erl", + "src/rabbit_db_exchange_m2k_converter.erl", + "src/rabbit_db_m2k_converter.erl", "src/rabbit_db_maintenance.erl", + "src/rabbit_db_maintenance_m2k_converter.erl", "src/rabbit_db_msup.erl", + "src/rabbit_db_msup_m2k_converter.erl", "src/rabbit_db_policy.erl", "src/rabbit_db_queue.erl", + "src/rabbit_db_queue_m2k_converter.erl", "src/rabbit_db_rtparams.erl", + "src/rabbit_db_rtparams_m2k_converter.erl", 
"src/rabbit_db_topic_exchange.erl", "src/rabbit_db_user.erl", + "src/rabbit_db_user_m2k_converter.erl", "src/rabbit_db_vhost.erl", "src/rabbit_db_vhost_defaults.erl", + "src/rabbit_db_vhost_m2k_converter.erl", "src/rabbit_dead_letter.erl", "src/rabbit_definitions.erl", "src/rabbit_definitions_hashing.erl", "src/rabbit_definitions_import_https.erl", "src/rabbit_definitions_import_local_filesystem.erl", + "src/rabbit_depr_ff_extra.erl", "src/rabbit_deprecated_features.erl", "src/rabbit_diagnostics.erl", "src/rabbit_direct.erl", @@ -366,6 +387,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_exchange_type_fanout.erl", "src/rabbit_exchange_type_headers.erl", "src/rabbit_exchange_type_invalid.erl", + "src/rabbit_exchange_type_local_random.erl", "src/rabbit_exchange_type_topic.erl", "src/rabbit_feature_flags.erl", "src/rabbit_ff_controller.erl", @@ -381,12 +403,15 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_fifo_dlx_sup.erl", "src/rabbit_fifo_dlx_worker.erl", "src/rabbit_fifo_index.erl", + "src/rabbit_fifo_q.erl", "src/rabbit_fifo_v0.erl", "src/rabbit_fifo_v1.erl", + "src/rabbit_fifo_v3.erl", "src/rabbit_file.erl", "src/rabbit_global_counters.erl", "src/rabbit_guid.erl", "src/rabbit_health_check.erl", + "src/rabbit_khepri.erl", "src/rabbit_limiter.erl", "src/rabbit_log_channel.erl", "src/rabbit_log_connection.erl", @@ -395,25 +420,12 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_log_queue.erl", "src/rabbit_log_tail.erl", "src/rabbit_logger_exchange_h.erl", - "src/rabbit_looking_glass.erl", "src/rabbit_maintenance.erl", - "src/rabbit_memory_monitor.erl", "src/rabbit_message_interceptor.erl", "src/rabbit_metrics.erl", - "src/rabbit_mirror_queue_coordinator.erl", - "src/rabbit_mirror_queue_master.erl", "src/rabbit_mirror_queue_misc.erl", - "src/rabbit_mirror_queue_mode_all.erl", - "src/rabbit_mirror_queue_mode_exactly.erl", - "src/rabbit_mirror_queue_mode_nodes.erl", - "src/rabbit_mirror_queue_slave.erl", - "src/rabbit_mirror_queue_sync.erl", "src/rabbit_mnesia.erl", - "src/rabbit_mnesia_rename.erl", - "src/rabbit_msg_file.erl", - "src/rabbit_msg_record.erl", "src/rabbit_msg_store.erl", - "src/rabbit_msg_store_ets_index.erl", "src/rabbit_msg_store_gc.erl", "src/rabbit_networking.erl", "src/rabbit_networking_store.erl", @@ -434,18 +446,12 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_prelaunch_enabled_plugins_file.erl", "src/rabbit_prelaunch_feature_flags.erl", "src/rabbit_prelaunch_logging.erl", - "src/rabbit_prequeue.erl", "src/rabbit_priority_queue.erl", "src/rabbit_process.erl", "src/rabbit_queue_consumers.erl", "src/rabbit_queue_decorator.erl", "src/rabbit_queue_index.erl", "src/rabbit_queue_location.erl", - "src/rabbit_queue_location_client_local.erl", - "src/rabbit_queue_location_min_masters.erl", - "src/rabbit_queue_location_random.erl", - "src/rabbit_queue_location_validator.erl", - "src/rabbit_queue_master_location_misc.erl", "src/rabbit_queue_type_util.erl", "src/rabbit_quorum_memory_manager.erl", "src/rabbit_quorum_queue.erl", @@ -470,6 +476,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_trace.erl", "src/rabbit_tracking_store.erl", "src/rabbit_upgrade_preparation.erl", + "src/rabbit_uri.erl", "src/rabbit_variable_queue.erl", "src/rabbit_version.erl", "src/rabbit_vhost.erl", @@ -479,6 +486,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_vhost_sup.erl", "src/rabbit_vhost_sup_sup.erl", "src/rabbit_vhost_sup_wrapper.erl", + "src/rabbit_vhosts.erl", 
"src/rabbit_vm.erl", "src/supervised_lifecycle.erl", "src/tcp_listener.erl", @@ -494,6 +502,8 @@ def all_test_beam_files(name = "all_test_beam_files"): deps = [ "//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", "@ra//:erlang_app", "@ranch//:erlang_app", "@stdout_formatter//:erlang_app", @@ -514,8 +524,9 @@ def all_srcs(name = "all_srcs"): srcs = [ "include/amqqueue.hrl", "include/amqqueue_v2.hrl", - "include/gm_specs.hrl", + "include/internal_user.hrl", "include/mc.hrl", + "include/rabbit_amqp.hrl", "include/rabbit_global_counters.hrl", "include/vhost.hrl", "include/vhost_v2.hrl", @@ -529,11 +540,14 @@ def all_srcs(name = "all_srcs"): filegroup( name = "private_hdrs", srcs = [ + "src/mirrored_supervisor.hrl", "src/rabbit_feature_flags.hrl", + "src/rabbit_ff_registry.hrl", "src/rabbit_fifo.hrl", "src/rabbit_fifo_dlx.hrl", "src/rabbit_fifo_v0.hrl", "src/rabbit_fifo_v1.hrl", + "src/rabbit_fifo_v3.hrl", "src/rabbit_stream_coordinator.hrl", "src/rabbit_stream_sac_coordinator.hrl", ], @@ -560,6 +574,13 @@ def all_srcs(name = "all_srcs"): "src/rabbit.erl", "src/rabbit_access_control.erl", "src/rabbit_alarm.erl", + "src/rabbit_amqp1_0.erl", + "src/rabbit_amqp_management.erl", + "src/rabbit_amqp_reader.erl", + "src/rabbit_amqp_session.erl", + "src/rabbit_amqp_session_sup.erl", + "src/rabbit_amqp_util.erl", + "src/rabbit_amqp_writer.erl", "src/rabbit_amqqueue.erl", "src/rabbit_amqqueue_control.erl", "src/rabbit_amqqueue_process.erl", @@ -567,6 +588,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_amqqueue_sup_sup.erl", "src/rabbit_auth_backend_internal.erl", "src/rabbit_auth_mechanism_amqplain.erl", + "src/rabbit_auth_mechanism_anonymous.erl", "src/rabbit_auth_mechanism_cr_demo.erl", "src/rabbit_auth_mechanism_plain.erl", "src/rabbit_autoheal.erl", @@ -601,22 +623,32 @@ def all_srcs(name = "all_srcs"): "src/rabbit_cuttlefish.erl", "src/rabbit_db.erl", "src/rabbit_db_binding.erl", + "src/rabbit_db_binding_m2k_converter.erl", "src/rabbit_db_cluster.erl", "src/rabbit_db_exchange.erl", + "src/rabbit_db_exchange_m2k_converter.erl", + "src/rabbit_db_m2k_converter.erl", "src/rabbit_db_maintenance.erl", + "src/rabbit_db_maintenance_m2k_converter.erl", "src/rabbit_db_msup.erl", + "src/rabbit_db_msup_m2k_converter.erl", "src/rabbit_db_policy.erl", "src/rabbit_db_queue.erl", + "src/rabbit_db_queue_m2k_converter.erl", "src/rabbit_db_rtparams.erl", + "src/rabbit_db_rtparams_m2k_converter.erl", "src/rabbit_db_topic_exchange.erl", "src/rabbit_db_user.erl", + "src/rabbit_db_user_m2k_converter.erl", "src/rabbit_db_vhost.erl", "src/rabbit_db_vhost_defaults.erl", + "src/rabbit_db_vhost_m2k_converter.erl", "src/rabbit_dead_letter.erl", "src/rabbit_definitions.erl", "src/rabbit_definitions_hashing.erl", "src/rabbit_definitions_import_https.erl", "src/rabbit_definitions_import_local_filesystem.erl", + "src/rabbit_depr_ff_extra.erl", "src/rabbit_deprecated_features.erl", "src/rabbit_diagnostics.erl", "src/rabbit_direct.erl", @@ -632,6 +664,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_exchange_type_fanout.erl", "src/rabbit_exchange_type_headers.erl", "src/rabbit_exchange_type_invalid.erl", + "src/rabbit_exchange_type_local_random.erl", "src/rabbit_exchange_type_topic.erl", "src/rabbit_feature_flags.erl", "src/rabbit_ff_controller.erl", @@ -647,12 +680,15 @@ def all_srcs(name = "all_srcs"): "src/rabbit_fifo_dlx_sup.erl", "src/rabbit_fifo_dlx_worker.erl", "src/rabbit_fifo_index.erl", + "src/rabbit_fifo_q.erl", 
"src/rabbit_fifo_v0.erl", "src/rabbit_fifo_v1.erl", + "src/rabbit_fifo_v3.erl", "src/rabbit_file.erl", "src/rabbit_global_counters.erl", "src/rabbit_guid.erl", "src/rabbit_health_check.erl", + "src/rabbit_khepri.erl", "src/rabbit_limiter.erl", "src/rabbit_log_channel.erl", "src/rabbit_log_connection.erl", @@ -661,26 +697,12 @@ def all_srcs(name = "all_srcs"): "src/rabbit_log_queue.erl", "src/rabbit_log_tail.erl", "src/rabbit_logger_exchange_h.erl", - "src/rabbit_looking_glass.erl", "src/rabbit_maintenance.erl", - "src/rabbit_memory_monitor.erl", "src/rabbit_message_interceptor.erl", "src/rabbit_metrics.erl", - "src/rabbit_mirror_queue_coordinator.erl", - "src/rabbit_mirror_queue_master.erl", "src/rabbit_mirror_queue_misc.erl", - "src/rabbit_mirror_queue_mode.erl", - "src/rabbit_mirror_queue_mode_all.erl", - "src/rabbit_mirror_queue_mode_exactly.erl", - "src/rabbit_mirror_queue_mode_nodes.erl", - "src/rabbit_mirror_queue_slave.erl", - "src/rabbit_mirror_queue_sync.erl", "src/rabbit_mnesia.erl", - "src/rabbit_mnesia_rename.erl", - "src/rabbit_msg_file.erl", - "src/rabbit_msg_record.erl", "src/rabbit_msg_store.erl", - "src/rabbit_msg_store_ets_index.erl", "src/rabbit_msg_store_gc.erl", "src/rabbit_networking.erl", "src/rabbit_networking_store.erl", @@ -702,19 +724,12 @@ def all_srcs(name = "all_srcs"): "src/rabbit_prelaunch_enabled_plugins_file.erl", "src/rabbit_prelaunch_feature_flags.erl", "src/rabbit_prelaunch_logging.erl", - "src/rabbit_prequeue.erl", "src/rabbit_priority_queue.erl", "src/rabbit_process.erl", "src/rabbit_queue_consumers.erl", "src/rabbit_queue_decorator.erl", "src/rabbit_queue_index.erl", "src/rabbit_queue_location.erl", - "src/rabbit_queue_location_client_local.erl", - "src/rabbit_queue_location_min_masters.erl", - "src/rabbit_queue_location_random.erl", - "src/rabbit_queue_location_validator.erl", - "src/rabbit_queue_master_location_misc.erl", - "src/rabbit_queue_master_locator.erl", "src/rabbit_queue_type.erl", "src/rabbit_queue_type_util.erl", "src/rabbit_quorum_memory_manager.erl", @@ -741,6 +756,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_tracking.erl", "src/rabbit_tracking_store.erl", "src/rabbit_upgrade_preparation.erl", + "src/rabbit_uri.erl", "src/rabbit_variable_queue.erl", "src/rabbit_version.erl", "src/rabbit_vhost.erl", @@ -750,6 +766,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_vhost_sup.erl", "src/rabbit_vhost_sup_sup.erl", "src/rabbit_vhost_sup_wrapper.erl", + "src/rabbit_vhosts.erl", "src/rabbit_vm.erl", "src/supervised_lifecycle.erl", "src/tcp_listener.erl", @@ -824,16 +841,16 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app"], ) - erlang_bytecode( - name = "cluster_rename_SUITE_beam_files", + name = "clustering_events_SUITE_beam_files", testonly = True, - srcs = ["test/cluster_rename_SUITE.erl"], - outs = ["test/cluster_rename_SUITE.beam"], + srcs = ["test/clustering_events_SUITE.erl"], + outs = ["test/clustering_events_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) + erlang_bytecode( name = "clustering_management_SUITE_beam_files", testonly = True, @@ -911,7 +928,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/definition_import_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app", 
"//deps/rabbitmq_ct_helpers:erlang_app"], + deps = ["//deps/rabbitmq_ct_helpers:erlang_app"], ) erlang_bytecode( name = "deprecated_features_SUITE_beam_files", @@ -947,15 +964,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "dynamic_ha_SUITE_beam_files", - testonly = True, - srcs = ["test/dynamic_ha_SUITE.erl"], - outs = ["test/dynamic_ha_SUITE.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app", "@proper//:erlang_app"], - ) erlang_bytecode( name = "dynamic_qq_SUITE_beam_files", testonly = True, @@ -965,15 +973,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) - erlang_bytecode( - name = "eager_sync_SUITE_beam_files", - testonly = True, - srcs = ["test/eager_sync_SUITE.erl"], - outs = ["test/eager_sync_SUITE.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], - ) erlang_bytecode( name = "feature_flags_SUITE_beam_files", testonly = True, @@ -990,7 +989,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/feature_flags_v2_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app"], ) erlang_bytecode( name = "feature_flags_with_unpriveleged_user_SUITE_beam_files", @@ -1025,7 +1023,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/logging_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app"], + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) erlang_bytecode( name = "lqueue_SUITE_beam_files", @@ -1045,15 +1043,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) - erlang_bytecode( - name = "many_node_ha_SUITE_beam_files", - testonly = True, - srcs = ["test/many_node_ha_SUITE.erl"], - outs = ["test/many_node_ha_SUITE.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], - ) erlang_bytecode( name = "message_size_limit_SUITE_beam_files", testonly = True, @@ -1097,7 +1086,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/peer_discovery_classic_config_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], + deps = ["//deps/rabbitmq_ct_helpers:erlang_app"], ) erlang_bytecode( name = "peer_discovery_dns_SUITE_beam_files", @@ -1108,6 +1097,15 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app"], ) + erlang_bytecode( + name = "peer_discovery_tmp_hidden_node_SUITE_beam_files", + testonly = True, + srcs = ["test/peer_discovery_tmp_hidden_node_SUITE.erl"], + outs = ["test/peer_discovery_tmp_hidden_node_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/rabbit_common:erlang_app"], + ) erlang_bytecode( name = "per_user_connection_channel_limit_SUITE_beam_files", testonly = True, @@ -1160,7 +1158,7 @@ def test_suite_beam_files(name = 
"test_suite_beam_files"): outs = ["test/per_vhost_connection_limit_partitions_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], + deps = ["//deps/rabbitmq_ct_helpers:erlang_app"], ) erlang_bytecode( name = "per_vhost_msg_store_SUITE_beam_files", @@ -1187,7 +1185,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/policy_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) erlang_bytecode( name = "priority_queue_SUITE_beam_files", @@ -1242,15 +1240,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app"], ) - erlang_bytecode( - name = "queue_master_location_SUITE_beam_files", - testonly = True, - srcs = ["test/queue_master_location_SUITE.erl"], - outs = ["test/queue_master_location_SUITE.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], - ) erlang_bytecode( name = "queue_parallel_SUITE_beam_files", testonly = True, @@ -1309,7 +1298,10 @@ def test_suite_beam_files(name = "test_suite_beam_files"): testonly = True, srcs = ["test/rabbit_fifo_SUITE.erl"], outs = ["test/rabbit_fifo_SUITE.beam"], - hdrs = ["src/rabbit_fifo.hrl"], + hdrs = [ + "src/rabbit_fifo.hrl", + "src/rabbit_fifo_dlx.hrl", + ], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", deps = ["//deps/rabbit_common:erlang_app"], @@ -1319,7 +1311,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): testonly = True, srcs = ["test/rabbit_fifo_dlx_SUITE.erl"], outs = ["test/rabbit_fifo_dlx_SUITE.beam"], - hdrs = ["src/rabbit_fifo.hrl", "src/rabbit_fifo_dlx.hrl"], + hdrs = ["src/rabbit_fifo.hrl"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", deps = ["//deps/rabbit_common:erlang_app"], @@ -1362,15 +1354,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/rabbit_common:erlang_app"], ) - erlang_bytecode( - name = "rabbit_msg_record_SUITE_beam_files", - testonly = True, - srcs = ["test/rabbit_msg_record_SUITE.erl"], - outs = ["test/rabbit_msg_record_SUITE.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app"], - ) + erlang_bytecode( name = "rabbit_stream_coordinator_SUITE_beam_files", testonly = True, @@ -1399,6 +1383,15 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) + erlang_bytecode( + name = "rabbit_access_control_SUITE_beam_files", + testonly = True, + srcs = ["test/rabbit_access_control_SUITE.erl"], + outs = ["test/rabbit_access_control_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/rabbit_common:erlang_app"], + ) erlang_bytecode( name = "rabbitmq_queues_cli_integration_SUITE_beam_files", testonly = True, @@ -1415,7 +1408,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/rabbitmqctl_integration_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) erlang_bytecode( name = "rabbitmqctl_shutdown_SUITE_beam_files", @@ -1424,7 +1417,6 @@ def 
test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/rabbitmqctl_shutdown_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], ) erlang_bytecode( name = "signal_handling_SUITE_beam_files", @@ -1434,15 +1426,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "simple_ha_SUITE_beam_files", - testonly = True, - srcs = ["test/simple_ha_SUITE.erl"], - outs = ["test/simple_ha_SUITE.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], - ) erlang_bytecode( name = "single_active_consumer_SUITE_beam_files", testonly = True, @@ -1452,15 +1435,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app"], ) - erlang_bytecode( - name = "sync_detection_SUITE_beam_files", - testonly = True, - srcs = ["test/sync_detection_SUITE.erl"], - outs = ["test/sync_detection_SUITE.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], - ) erlang_bytecode( name = "term_to_binary_compat_prop_SUITE_beam_files", testonly = True, @@ -1468,7 +1442,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/term_to_binary_compat_prop_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app", "@proper//:erlang_app"], + deps = ["@proper//:erlang_app"], ) erlang_bytecode( name = "test_channel_operation_timeout_test_queue_beam", @@ -1536,10 +1510,10 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", ) erlang_bytecode( - name = "test_quorum_queue_utils_beam", + name = "test_queue_utils_beam", testonly = True, - srcs = ["test/quorum_queue_utils.erl"], - outs = ["test/quorum_queue_utils.beam"], + srcs = ["test/queue_utils.erl"], + outs = ["test/queue_utils.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) @@ -1568,24 +1542,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "test_rabbit_ha_test_consumer_beam", - testonly = True, - srcs = ["test/rabbit_ha_test_consumer.erl"], - outs = ["test/rabbit_ha_test_consumer.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], - ) - erlang_bytecode( - name = "test_rabbit_ha_test_producer_beam", - testonly = True, - srcs = ["test/rabbit_ha_test_producer.erl"], - outs = ["test/rabbit_ha_test_producer.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], - ) erlang_bytecode( name = "test_test_util_beam", testonly = True, @@ -1603,6 +1559,15 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/rabbit_common:erlang_app"], ) + erlang_bytecode( + name = "transactions_SUITE_beam_files", + testonly = True, + srcs = ["test/transactions_SUITE.erl"], + outs = ["test/transactions_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) erlang_bytecode( name = "unit_access_control_SUITE_beam_files", testonly = True, @@ -1649,36 +1614,35 @@ def test_suite_beam_files(name = "test_suite_beam_files"): deps = ["//deps/amqp_client:erlang_app"], ) erlang_bytecode( - name = 
"unit_app_management_SUITE_beam_files", + name = "unit_quorum_queue_SUITE_beam_files", testonly = True, - srcs = ["test/unit_app_management_SUITE.erl"], - outs = ["test/unit_app_management_SUITE.beam"], + srcs = ["test/unit_quorum_queue_SUITE.erl"], + outs = ["test/unit_quorum_queue_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], ) erlang_bytecode( - name = "unit_classic_mirrored_queue_sync_throttling_SUITE_beam_files", + name = "unit_app_management_SUITE_beam_files", testonly = True, - srcs = ["test/unit_classic_mirrored_queue_sync_throttling_SUITE.erl"], - outs = ["test/unit_classic_mirrored_queue_sync_throttling_SUITE.beam"], + srcs = ["test/unit_app_management_SUITE.erl"], + outs = ["test/unit_app_management_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app"], + deps = ["//deps/amqp_client:erlang_app"], ) erlang_bytecode( - name = "unit_classic_mirrored_queue_throughput_SUITE_beam_files", + name = "unit_cluster_formation_locking_mocks_SUITE_beam_files", testonly = True, - srcs = ["test/unit_classic_mirrored_queue_throughput_SUITE.erl"], - outs = ["test/unit_classic_mirrored_queue_throughput_SUITE.beam"], + srcs = ["test/unit_cluster_formation_locking_mocks_SUITE.erl"], + outs = ["test/unit_cluster_formation_locking_mocks_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) erlang_bytecode( - name = "unit_cluster_formation_locking_mocks_SUITE_beam_files", + name = "unit_cluster_formation_sort_nodes_SUITE_beam_files", testonly = True, - srcs = ["test/unit_cluster_formation_locking_mocks_SUITE.erl"], - outs = ["test/unit_cluster_formation_locking_mocks_SUITE.beam"], + srcs = ["test/unit_cluster_formation_sort_nodes_SUITE.erl"], + outs = ["test/unit_cluster_formation_sort_nodes_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) @@ -1740,16 +1704,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "unit_gm_SUITE_beam_files", - testonly = True, - srcs = ["test/unit_gm_SUITE.erl"], - outs = ["test/unit_gm_SUITE.beam"], - hdrs = ["include/gm_specs.hrl"], - app_name = "rabbit", - beam = ["ebin/gm.beam"], - erlc_opts = "//:test_erlc_opts", - ) erlang_bytecode( name = "unit_log_management_SUITE_beam_files", testonly = True, @@ -1929,7 +1883,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/rabbit_db_msup_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app"], ) erlang_bytecode( name = "rabbit_db_policy_SUITE_beam_files", @@ -2010,27 +1963,37 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/quorum_queue_member_reconciliation_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], + deps = ["//deps/amqp_client:erlang_app"], ) + erlang_bytecode( - name = "message_containers_SUITE_beam_files", + name = "cluster_limit_SUITE_beam_files", testonly = True, - srcs = ["test/message_containers_SUITE.erl"], - outs = ["test/message_containers_SUITE.beam"], + srcs = ["test/cluster_limit_SUITE.erl"], + outs = ["test/cluster_limit_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app"], ) erlang_bytecode( - name = "mc_SUITE_beam_files", + name = "metadata_store_clustering_SUITE_beam_files", testonly = 
True, - srcs = ["test/mc_SUITE.erl"], - outs = ["test/mc_SUITE.beam"], - hdrs = ["include/mc.hrl"], + srcs = ["test/metadata_store_clustering_SUITE.erl"], + outs = ["test/metadata_store_clustering_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app"], + deps = ["//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "metadata_store_migration_SUITE_beam_files", + testonly = True, + srcs = ["test/metadata_store_migration_SUITE.erl"], + outs = ["test/metadata_store_migration_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], ) + erlang_bytecode( name = "routing_SUITE_beam_files", testonly = True, @@ -2040,3 +2003,174 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app"], ) + erlang_bytecode( + name = "metadata_store_phase1_SUITE_beam_files", + testonly = True, + srcs = ["test/metadata_store_phase1_SUITE.erl"], + outs = ["test/metadata_store_phase1_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/rabbit_common:erlang_app", "@khepri//:erlang_app"], + ) + erlang_bytecode( + name = "mc_unit_SUITE_beam_files", + testonly = True, + srcs = ["test/mc_unit_SUITE.erl"], + outs = ["test/mc_unit_SUITE.beam"], + hdrs = ["include/mc.hrl"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app"], + ) + erlang_bytecode( + name = "cli_forget_cluster_node_SUITE_beam_files", + testonly = True, + srcs = ["test/cli_forget_cluster_node_SUITE.erl"], + outs = ["test/cli_forget_cluster_node_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], + ) + erlang_bytecode( + name = "cluster_minority_SUITE_beam_files", + testonly = True, + srcs = ["test/cluster_minority_SUITE.erl"], + outs = ["test/cluster_minority_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "test_event_recorder_beam", + testonly = True, + srcs = ["test/event_recorder.erl"], + outs = ["test/event_recorder.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/rabbit_common:erlang_app"], + ) + erlang_bytecode( + name = "amqp_auth_SUITE_beam_files", + testonly = True, + srcs = ["test/amqp_auth_SUITE.erl"], + outs = ["test/amqp_auth_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp10_common:erlang_app", "//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "amqp_client_SUITE_beam_files", + testonly = True, + srcs = ["test/amqp_client_SUITE.erl"], + outs = ["test/amqp_client_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp10_common:erlang_app", "//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "amqp_credit_api_v2_SUITE_beam_files", + testonly = True, + srcs = ["test/amqp_credit_api_v2_SUITE.erl"], + outs = ["test/amqp_credit_api_v2_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "amqp_proxy_protocol_SUITE_beam_files", + testonly = True, + srcs = ["test/amqp_proxy_protocol_SUITE.erl"], + outs = ["test/amqp_proxy_protocol_SUITE.beam"], + app_name = 
"rabbit", + erlc_opts = "//:test_erlc_opts", + ) + erlang_bytecode( + name = "amqp_system_SUITE_beam_files", + testonly = True, + srcs = ["test/amqp_system_SUITE.erl"], + outs = ["test/amqp_system_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/rabbit_common:erlang_app"], + ) + erlang_bytecode( + name = "amqp_address_SUITE_beam_files", + testonly = True, + srcs = ["test/amqp_address_SUITE.erl"], + outs = ["test/amqp_address_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbitmq_amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "message_containers_deaths_v2_SUITE_beam_files", + testonly = True, + srcs = ["test/message_containers_deaths_v2_SUITE.erl"], + outs = ["test/message_containers_deaths_v2_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], + ) + erlang_bytecode( + name = "amqpl_direct_reply_to_SUITE_beam_files", + testonly = True, + srcs = ["test/amqpl_direct_reply_to_SUITE.erl"], + outs = ["test/amqpl_direct_reply_to_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "rabbit_local_random_exchange_SUITE_beam_files", + testonly = True, + srcs = ["test/rabbit_local_random_exchange_SUITE.erl"], + outs = ["test/rabbit_local_random_exchange_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "amqpl_consumer_ack_SUITE_beam_files", + testonly = True, + srcs = ["test/amqpl_consumer_ack_SUITE.erl"], + outs = ["test/amqpl_consumer_ack_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "unit_queue_location_SUITE_beam_files", + testonly = True, + srcs = ["test/unit_queue_location_SUITE.erl"], + outs = ["test/unit_queue_location_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + ) + erlang_bytecode( + name = "classic_queue_SUITE_beam_files", + testonly = True, + srcs = ["test/classic_queue_SUITE.erl"], + outs = ["test/classic_queue_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "rabbit_fifo_q_SUITE_beam_files", + testonly = True, + srcs = ["test/rabbit_fifo_q_SUITE.erl"], + outs = ["test/rabbit_fifo_q_SUITE.beam"], + hdrs = ["src/rabbit_fifo.hrl"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["@proper//:erlang_app"], + ) + erlang_bytecode( + name = "cluster_upgrade_SUITE_beam_files", + testonly = True, + srcs = ["test/cluster_upgrade_SUITE.erl"], + outs = ["test/cluster_upgrade_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/.gitignore b/deps/rabbit/apps/rabbitmq_prelaunch/.gitignore deleted file mode 100644 index aaf249068ce8..000000000000 --- a/deps/rabbit/apps/rabbitmq_prelaunch/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ -*~ -.sw? -.*.sw? 
-*.beam -*.coverdata -/ebin/ -/.erlang.mk/ -/logs/ -/rabbitmq_prelaunch.d -/xrefr - -# Dialyzer -*.plt diff --git a/deps/rabbit/docs/.gitignore b/deps/rabbit/docs/.gitignore new file mode 100644 index 000000000000..1342b3e396fc --- /dev/null +++ b/deps/rabbit/docs/.gitignore @@ -0,0 +1,3 @@ +*.html +*.md + diff --git a/deps/rabbit/docs/README.md b/deps/rabbit/docs/README.md index df2a126466b8..b47fc721dd2c 100644 --- a/deps/rabbit/docs/README.md +++ b/deps/rabbit/docs/README.md @@ -1,31 +1,63 @@ # Manual Pages and Documentation Extras -This directory contains [CLI tool](https://rabbitmq.com/cli.html) man page sources as well as a few documentation extras: +This directory contains [CLI tools](https://rabbitmq.com/docs/cli/) man page sources as well as a few documentation extras: - * An [annotated rabbitmq.conf example](./rabbitmq.conf.example) (see [new style configuration format](https://www.rabbitmq.com/configure.html#config-file-formats)) - * An [annotated advanced.config example](./advanced.config.example) (see [The advanced.config file](https://www.rabbitmq.com/configure.html#advanced-config-file)) + * An [annotated rabbitmq.conf example](./rabbitmq.conf.example) (see [new style configuration format](https://www.rabbitmq.com/docs/configure#config-file-formats)) + * An [annotated advanced.config example](./advanced.config.example) (see [The advanced.config file](https://www.rabbitmq.com/docs/configure#advanced-config-file)) * A [systemd unit file example](./rabbitmq-server.service.example) -Please [see rabbitmq.com](https://rabbitmq.com/documentation.html) for documentation guides. +Please [see rabbitmq.com](https://rabbitmq.com/docs/) for documentation guides. -## Classic Config File Format Example +## man Pages -Feeling nostalgic and looking for the [classic configuration file example](https://github.com/rabbitmq/rabbitmq-server/blob/v3.7.x/docs/rabbitmq.config.example)? -Now that's old school! Keep in mind that classic configuration file **should be considered deprecated**. -Prefer `rabbitmq.conf` (see [new style configuration format](https://www.rabbitmq.com/configure.html#config-file-formats)) -with an `advanced.config` to complement it as needed. +### Dependencies + * `man` + * [`tidy5`](https://binaries.html-tidy.org/) (a.k.a. `tidy-html5`) -## man Pages +On macOS, `tidy5` can be installed with Homebrew: + +``` shell +brew install tidy-html5 +``` + +and then be found under the `bin` directory of the Homebrew cellar: + +``` shell +/opt/homebrew/bin/tidy --help +``` ### Source Files -This directory contains man pages that are converted to HTML using `mandoc`: +This directory contains man pages in ntroff, the man page format. + +To inspect a local version, use `man`: + +``` shell +man docs/rabbitmq-diagnostics.8 + +man docs/rabbitmq-queues.8 +``` + +To converted all man pages to HTML using `mandoc`: + +``` shell +gmake web-manpages +``` - gmake web-manpages +The result then must be post-processed and copied to the website repository: -The result is then copied to the [website repository](https://github.com/rabbitmq/rabbitmq-website/tree/live/site/man) +``` shell +# cd deps/rabbit/docs +# +# clear all generated HTML and Markdown files +rm *.html *.md +# export tidy5 path +export TIDY5_BIN=/opt/homebrew/bin/tidy; +# run the post-processing script, in this case it updates the 3.13.x version of the docs +./postprocess_man_html.sh . 
/path/to/rabbitmq-website.git/versioned_docs/version-3.13/man/ +``` ### Contributions diff --git a/deps/rabbit/docs/postprocess_man_html.sh b/deps/rabbit/docs/postprocess_man_html.sh new file mode 100755 index 000000000000..82c4e622ee09 --- /dev/null +++ b/deps/rabbit/docs/postprocess_man_html.sh @@ -0,0 +1,92 @@ +#!/bin/sh + +set -e + +srcdir="$1" +destdir="$2" + +tidy_bin=${TIDY5_BIN:-"tidy5"} + +for src in "$srcdir"/*.html; do + name=$(basename "$src" .html) + dest="$destdir/$name.md" + echo "src=$src" "dest=$dest" "name=$name" + + cat < "$dest" +--- +title: $name +--- +EOF + +$tidy_bin -i --wrap 0 \ + --asxhtml \ + --show-body-only yes \ + --drop-empty-elements yes \ + --drop-empty-paras yes \ + --enclose-block-text yes \ + --enclose-text yes "$src" \ + | \ + awk ' + / */, "", title); + + print level, title, "{#" id "}"; + next; + } + /dt id="/ { + id = $0; + sub(/.*(id|name)="/, "", id); + sub(/".*/, "", id); + + line = $0; + sub(/id="[^"]*"/, "", line); + print line; + + next; + } + /a class="permalink"/ { + title = $0; + sub(/ *]*>/, "", title); + sub(/<\/a>/, "", title); + sub(/]*>/, "", title); + gsub(/>\*\\*<", title); + + print level "#", title, "{#" id "}"; + next; + } + { + line = $0; + gsub(/{/, "\\{", line); + gsub(/
  • /, "
  • \n", line); + gsub(/<\/li>/, "\n
  • ", line); + gsub(/<\/ul>/, "\n", line); + gsub(/]*>/, "", line); + gsub(/<\/div>]/, "<\/div>\n]", line); + gsub(/style="[^"]*"/, "", line); + print line; + next; + } + ' > "$dest" +done \ No newline at end of file diff --git a/deps/rabbit/docs/rabbitmq-diagnostics.8 b/deps/rabbit/docs/rabbitmq-diagnostics.8 index a6f0d279f736..5045b8493ce0 100644 --- a/deps/rabbit/docs/rabbitmq-diagnostics.8 +++ b/deps/rabbit/docs/rabbitmq-diagnostics.8 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. .\" .Dd June 22, 2023 .Dt RABBITMQ-DIAGNOSTICS 8 @@ -697,10 +697,10 @@ See .Cm quorum_status in .Xr rabbitmq-queues 8 -.It Cm check_if_node_is_mirror_sync_critical +.It Cm check_if_cluster_has_classic_queue_mirroring_policy .Pp See -.Cm check_if_node_is_mirror_sync_critical +.Cm check_if_cluster_has_classic_queue_mirroring_policy in .Xr rabbitmq-queues 8 .It Cm check_if_node_is_quorum_critical @@ -723,4 +723,4 @@ in .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-echopid.8 b/deps/rabbit/docs/rabbitmq-echopid.8 index 70bd782ca09e..4985aee3ca20 100644 --- a/deps/rabbit/docs/rabbitmq-echopid.8 +++ b/deps/rabbit/docs/rabbitmq-echopid.8 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. .\" .Dd June 22, 2023 .Dt RABBITMQ-ECHOPID.BAT 8 @@ -67,4 +67,4 @@ The short-name form of the RabbitMQ node name. .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-env.conf.5 b/deps/rabbit/docs/rabbitmq-env.conf.5 index 5c1ec08128ac..bc198e697142 100644 --- a/deps/rabbit/docs/rabbitmq-env.conf.5 +++ b/deps/rabbit/docs/rabbitmq-env.conf.5 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
.\" .Dd June 22, 2023 .Dt RABBITMQ-ENV.CONF 5 @@ -84,4 +84,4 @@ file RabbitMQ configuration file location is changed to "/data/services/rabbitmq .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-plugins.8 b/deps/rabbit/docs/rabbitmq-plugins.8 index 0ea367a50fe5..794c3b2d6ba4 100644 --- a/deps/rabbit/docs/rabbitmq-plugins.8 +++ b/deps/rabbit/docs/rabbitmq-plugins.8 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. .\" .Dd June 22, 2023 .Dt RABBITMQ-PLUGINS 8 @@ -252,4 +252,4 @@ plugin and its dependencies and disables everything else: .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-queues.8 b/deps/rabbit/docs/rabbitmq-queues.8 index c857dfb63c59..caefb4740d49 100644 --- a/deps/rabbit/docs/rabbitmq-queues.8 +++ b/deps/rabbit/docs/rabbitmq-queues.8 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. .\" .Dd June 22, 2023 .Dt RABBITMQ-QUEUES 8 @@ -182,13 +182,14 @@ This command is currently only supported by quorum queues. Example: .Sp .Dl rabbitmq-queues peek --vhost Qo a-vhost Qc Qo a-queue Qc Qo 1 Qc -.It Cm check_if_node_is_mirror_sync_critical +.It Cm check_if_cluster_has_classic_queue_mirroring_policy .Pp -Health check that exits with a non-zero code if there are classic mirrored queues without online synchronised mirrors (queues that would potentially lose data if the target node is shut down). +Health check that exits with a non-zero code if there are policies in the cluster that enable classic queue mirroring. +Classic queue mirroring has been deprecated since 2021 and was completely removed in the RabbitMQ 4.0 development cycle. .Pp Example: .Sp -.Dl rabbitmq-queues check_if_node_is_mirror_sync_critical +.Dl rabbitmq-queues check_if_cluster_has_classic_queue_mirroring_policy .It Cm check_if_node_is_quorum_critical .Pp Health check that exits with a non-zero code if there are queues with minimum online quorum (queues that would lose their quorum if the target node is shut down). 
@@ -210,4 +211,4 @@ Example: .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-server.8 b/deps/rabbit/docs/rabbitmq-server.8 index e93f91d8b08c..32c536a73569 100644 --- a/deps/rabbit/docs/rabbitmq-server.8 +++ b/deps/rabbit/docs/rabbitmq-server.8 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. .\" .Dd June 22, 2023 .Dt RABBITMQ-SERVER 8 @@ -96,4 +96,4 @@ For example, runs RabbitMQ AMQP server in the background: .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-service.8 b/deps/rabbit/docs/rabbitmq-service.8 index 58a89ae90cd3..e405836fe5cc 100644 --- a/deps/rabbit/docs/rabbitmq-service.8 +++ b/deps/rabbit/docs/rabbitmq-service.8 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. .\" .Dd June 22, 2023 .Dt RABBITMQ-SERVICE.BAT 8 @@ -150,4 +150,4 @@ is to discard the server output. .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-streams.8 b/deps/rabbit/docs/rabbitmq-streams.8 index 9ed511ce6ba0..b139826aeed2 100644 --- a/deps/rabbit/docs/rabbitmq-streams.8 +++ b/deps/rabbit/docs/rabbitmq-streams.8 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. .\" .Dd June 22, 2023 .Dt RABBITMQ-STREAMS 8 @@ -119,6 +119,15 @@ Example: .Sp .Dl rabbitmq-streams stream_status --vhost Qo a-vhost Qc Qo a-stream Qc .\" ------------------------------------ +.It Cm restart_stream Ar stream Fl -vhost Ar virtual-host Fl --preferred-leader-node Ar node +.Pp +Restarts a stream including all of it's replicas. The optional preferred +node flag instructs the command to try to place the leader on a specific node during the restart. 
+.Pp +Example: +.Sp +.Dl rabbitmq-streams restart_stream --vhost Qo a-vhost Qc Qo a-stream Qc --preferred-leader-node Qo node +.\" ------------------------------------ .El .Ss Policies .Bl -tag -width Ds @@ -307,7 +316,7 @@ for each producer: .sp .Dl rabbitmq-streams list_stream_publishers connection_pid publisher_id stream .\" ------------------------------------------------------------------ -.It Cm add_super_stream Ar super-stream Oo Fl -vhost Ar vhost Oc Oo Fl -partitions Ar partitions Oc Oo Fl -routing-keys Ar routing-keys Oc Oo Fl -max-length-bytes Ar max-length-bytes Oc Oo Fl -max-age Ar max-age Oc Oo Fl -stream-max-segment-size-bytes Ar stream-max-segment-size-bytes Oc Oo Fl -leader-locator Ar leader-locator Oc Oo Fl -initial-cluster-size Ar initial-cluster-size Oc +.It Cm add_super_stream Ar super-stream Oo Fl -vhost Ar vhost Oc Oo Fl -partitions Ar partitions Oc Oo Fl -binding-keys Ar binding-keys Oc Oo Fl -max-length-bytes Ar max-length-bytes Oc Oo Fl -max-age Ar max-age Oc Oo Fl -stream-max-segment-size-bytes Ar stream-max-segment-size-bytes Oc Oo Fl -leader-locator Ar leader-locator Oc Oo Fl -initial-cluster-size Ar initial-cluster-size Oc .Bl -tag -width Ds .It Ar super-stream The name of the super stream to create. @@ -315,8 +324,8 @@ The name of the super stream to create. The name of the virtual host to create the super stream into. .It Ar partitions The number of partitions the super stream will have. -.It Ar routing-keys -Comma-separated list of routing keys. +.It Ar binding-keys +Comma-separated list of binding keys. .It Ar max-length-bytes The maximum size of partition streams, example values: 20gb, 500mb. .It Ar max-age @@ -438,4 +447,4 @@ for each consumer attached to the stream-1 stream and belonging to the stream-1 .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-upgrade.8 b/deps/rabbit/docs/rabbitmq-upgrade.8 index 0211620333e6..2a00f0f1f8e7 100644 --- a/deps/rabbit/docs/rabbitmq-upgrade.8 +++ b/deps/rabbit/docs/rabbitmq-upgrade.8 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. .\" .Dd June 22, 2023 .Dt RABBITMQ-UPGRADE 8 @@ -127,4 +127,4 @@ To learn more, see the .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq.conf.example b/deps/rabbit/docs/rabbitmq.conf.example index ddbcafc5ea1f..2a3f3f590d4f 100644 --- a/deps/rabbit/docs/rabbitmq.conf.example +++ b/deps/rabbit/docs/rabbitmq.conf.example @@ -6,11 +6,11 @@ ## copying the entire (large!) file, create or generate a new rabbitmq.conf for the target system ## and populate it with the necessary settings. 
## -## See https://rabbitmq.com/configure.html to learn about how to configure RabbitMQ, +## See https://www.rabbitmq.com/docs/configure to learn about how to configure RabbitMQ, ## the ini-style format used by rabbitmq.conf, how it is different from `advanced.config`, ## how to verify effective configuration, and so on. ## -## See https://rabbitmq.com/documentation.html for the rest of RabbitMQ documentation. +## See https://www.rabbitmq.com/docs/documentation for the rest of RabbitMQ documentation. ## ## In case you have questions, please use RabbitMQ community Slack and the rabbitmq-users Google group ## instead of GitHub issues. @@ -23,7 +23,7 @@ ## Networking ## ==================== ## -## Related doc guide: https://rabbitmq.com/networking.html. +## Related doc guide: https://www.rabbitmq.com/docs/networking. ## ## By default, RabbitMQ will listen on all interfaces, using ## the standard (reserved) AMQP 0-9-1 and 1.0 port. @@ -84,7 +84,7 @@ ## ============== ## -## Related doc guide: https://rabbitmq.com/access-control.html. +## Related doc guide: https://www.rabbitmq.com/docs/access-control. ## The default "guest" user is only permitted to access the server ## via a loopback interface (e.g. localhost). @@ -98,7 +98,7 @@ ## TLS configuration. ## -## Related doc guide: https://rabbitmq.com/ssl.html. +## Related doc guide: https://www.rabbitmq.com/docs/ssl. ## # listeners.ssl.1 = 5671 # @@ -192,8 +192,8 @@ ## ## Related doc guides: ## -## * https://rabbitmq.com/plugins.html -## * https://rabbitmq.com/access-control.html +## * https://www.rabbitmq.com/docs/plugins +## * https://www.rabbitmq.com/docs/access-control ## # auth_backends.1 = rabbit_auth_backend_internal @@ -209,8 +209,8 @@ ## ## Relevant doc guides: ## -## * https://rabbitmq.com/ldap.html -## * https://rabbitmq.com/access-control.html +## * https://www.rabbitmq.com/docs/ldap +## * https://www.rabbitmq.com/docs/access-control ## ## uses LDAP for both authentication and authorisation # auth_backends.1 = rabbit_auth_backend_ldap @@ -228,14 +228,15 @@ ## 'AMQPLAIN', and 'EXTERNAL' Additional mechanisms can be added via ## plugins. ## -## Related doc guide: https://rabbitmq.com/authentication.html. +## Related doc guide: https://www.rabbitmq.com/docs/authentication. ## # auth_mechanisms.1 = PLAIN # auth_mechanisms.2 = AMQPLAIN +# auth_mechanisms.3 = ANONYMOUS ## The rabbitmq-auth-mechanism-ssl plugin makes it possible to ## authenticate a user based on the client's x509 (TLS) certificate. -## Related doc guide: https://rabbitmq.com/authentication.html. +## Related doc guide: https://www.rabbitmq.com/docs/authentication. ## ## To use auth-mechanism-ssl, the EXTERNAL mechanism should ## be enabled: @@ -278,13 +279,6 @@ ## # load_definitions = /path/to/definitions/file.json - -## -## Cluster name -## ==================== -## -# cluster_name = dev3.eng.megacorp.local - ## Password hashing implementation. Will only affect newly ## created users. To recalculate hash for an existing user ## it's necessary to update her password. @@ -306,7 +300,7 @@ ## On first start RabbitMQ will create a vhost and a user. These ## config items control what gets created. -## Relevant doc guide: https://rabbitmq.com/access-control.html +## Relevant doc guide: https://www.rabbitmq.com/docs/access-control ## # default_vhost = / # default_user = guest @@ -319,7 +313,7 @@ ## Tags for default user ## ## For more details about tags, see the documentation for the -## Management Plugin at https://rabbitmq.com/management.html. 
+## Management Plugin at https://www.rabbitmq.com/docs/management. ## # default_user_tags.administrator = true @@ -342,8 +336,8 @@ ## ## Related doc guides: ## -## * https://rabbitmq.com/heartbeats.html -## * https://rabbitmq.com/networking.html +## * https://www.rabbitmq.com/docs/heartbeats +## * https://www.rabbitmq.com/docs/networking ## # heartbeat = 60 @@ -365,7 +359,7 @@ ## ## Related doc guides: ## -## * https://rabbitmq.com/networking.html +## * https://www.rabbitmq.com/docs/networking ## * https://www.erlang.org/doc/man/inet.html#setopts-2 ## @@ -384,7 +378,7 @@ ## Resource Limits & Flow Control ## ============================== ## -## Related doc guide: https://rabbitmq.com/memory.html. +## Related doc guide: https://www.rabbitmq.com/docs/memory. ## Memory-based Flow Control threshold. ## @@ -423,7 +417,7 @@ ## ## Another alternative is to configure queues to page all messages (both ## persistent and transient) to disk as quickly -## as possible, see https://rabbitmq.com/lazy-queues.html. +## as possible, see https://www.rabbitmq.com/docs/lazy-queues. ## # vm_memory_high_watermark_paging_ratio = 0.5 @@ -463,6 +457,32 @@ ## Clustering ## ===================== ## + +## By default cluster name is set to the name of the first +## node to have formed the cluster. It can be overridden +## to make it easier for (human) operators to tell one cluster from another. +# cluster_name = dev3.eng.megacorp.local + +## Selects the default strategy used to pick a node to place a new queue leader replica +## on. Can be overridden by the `x-queue-leader-locator` optional queue argument +## at declaration time. +## +## "balanced" (the default) is recommended for most environments. It works +## like so: +## +## 1. When there are many queues in the cluster, the candidate node is picked randomly +## 2. When there are few queues, uses their current replica count to pick the node +## with the smallest number of replicas. +## +## Supported non-deprecated values are: "balanced", "client-local" +# queue_leader_locator = balanced + + +## Partition handling strategy. +## Primarily affects deployments that use Mnesia and classic queues. +## Khepri and quorum queues, streams will always use Raft's failure recovery +## strategy. +## # cluster_partition_handling = ignore ## Pauses all nodes on the minority side of a partition. The cluster @@ -479,12 +499,6 @@ # cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@localhost # cluster_partition_handling.pause_if_all_down.nodes.2 = hare@localhost -## Mirror sync batch size, in messages. Increasing this will speed -## up syncing but total batch size in bytes must not exceed 2 GiB. -## Available in RabbitMQ 3.6.0 or later. -## -# mirroring_sync_batch_size = 4096 - ## Make clustering happen *automatically* at startup. Only applied ## to nodes that have just been reset or started for the first time. ## @@ -530,19 +544,21 @@ # # This value is no longer meant to be configured directly. # -# See https://www.rabbitmq.com/management.html#fine-stats. +# See https://www.rabbitmq.com/docs/management#fine-stats. ## -## Ra Settings +## Raft settings ## ===================== ## +## These set the defaults that quorum queues, streams, Khepri, and other Raft-based features use. +## # raft.segment_max_entries = 65536 # raft.wal_max_size_bytes = 1048576 # raft.wal_max_batch_size = 4096 # raft.snapshot_chunk_size = 1000000 ## -## Misc/Advanced Options +## Misc and Advanced Options ## ===================== ## ## NB: Change these only if you understand what you are doing! 
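As a rough illustration of how the clustering and Raft settings above fit together, a fragment like the following could be appended to rabbitmq.conf. This is a sketch only: the target path is an assumed (typical) location, and the values are the documented defaults and examples, not tuning advice.

``` shell
# Illustrative sketch: writes a rabbitmq.conf fragment using keys shown above.
# /etc/rabbitmq/rabbitmq.conf is an assumed (typical) location; the values are
# the documented defaults/examples, not tuning recommendations.
cat >> /etc/rabbitmq/rabbitmq.conf <<'EOF'
# human-friendly cluster name, overriding the first node's name
cluster_name = dev3.eng.megacorp.local

# default placement strategy for new queue leader replicas
queue_leader_locator = balanced

# Raft defaults shared by quorum queues, streams and Khepri
raft.segment_max_entries = 65536
raft.wal_max_size_bytes = 1048576
EOF
```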
@@ -570,15 +586,17 @@ ## Timeout used when waiting for Mnesia tables in a cluster to ## become available. ## +## Related doc guide: https://www.rabbitmq.com/docs/clustering#restarting # mnesia_table_loading_retry_timeout = 30000 ## Retries when waiting for Mnesia tables in the cluster startup. Note that ## this setting is not applied to Mnesia upgrades or node deletions. ## +## Related doc guide: https://www.rabbitmq.com/docs/clustering#restarting # mnesia_table_loading_retry_limit = 10 ## Size in bytes below which to embed messages in the queue index. -## Related doc guide: https://rabbitmq.com/persistence-conf.html +## Related doc guide: https://www.rabbitmq.com/docs/persistence-conf ## # queue_index_embed_msgs_below = 4096 @@ -594,7 +612,7 @@ ## (see https://www.erlang-solutions.com/blog/erlang-garbage-collector.html). ## ## Before trying this option, please take a look at the memory -## breakdown (https://www.rabbitmq.com/memory-use.html). +## breakdown (https://www.rabbitmq.com/docs/memory-use). ## # background_gc_enabled = false @@ -636,7 +654,7 @@ ## ---------------------------------------------------------------------------- ## Advanced Erlang Networking/Clustering Options. ## -## Related doc guide: https://rabbitmq.com/clustering.html +## Related doc guide: https://www.rabbitmq.com/docs/clustering ## ---------------------------------------------------------------------------- # ====================================== @@ -644,7 +662,7 @@ # ====================================== ## Timeout used to detect peer unavailability, including CLI tools. -## Related doc guide: https://www.rabbitmq.com/nettick.html. +## Related doc guide: https://www.rabbitmq.com/docs/nettick. ## # net_ticktime = 60 @@ -657,7 +675,7 @@ ## ---------------------------------------------------------------------------- ## RabbitMQ Management Plugin ## -## Related doc guide: https://rabbitmq.com/management.html. +## Related doc guide: https://www.rabbitmq.com/docs/management. ## ---------------------------------------------------------------------------- # ======================================= @@ -665,7 +683,7 @@ # ======================================= ## Preload schema definitions from the following JSON file. -## Related doc guide: https://rabbitmq.com/management.html#load-definitions. +## Related doc guide: https://www.rabbitmq.com/docs/management#load-definitions. ## # management.load_definitions = /path/to/exported/definitions.json @@ -674,7 +692,7 @@ # management.http_log_dir = /path/to/access.log ## HTTP listener and embedded Web server settings. -# ## See https://rabbitmq.com/management.html for details. +# ## See https://www.rabbitmq.com/docs/management for details. # # management.tcp.port = 15672 # management.tcp.ip = 0.0.0.0 @@ -687,7 +705,7 @@ # management.tcp.compress = true ## HTTPS listener settings. -## See https://rabbitmq.com/management.html and https://rabbitmq.com/ssl.html for details. +## See https://www.rabbitmq.com/docs/management and https://www.rabbitmq.com/docs/ssl for details. ## # management.ssl.port = 15671 # management.ssl.cacertfile = /path/to/ca_certificate.pem @@ -721,12 +739,12 @@ # management.path_prefix = /a-prefix ## One of 'basic', 'detailed' or 'none'. See -## https://rabbitmq.com/management.html#fine-stats for more details. +## https://www.rabbitmq.com/docs/management#fine-stats for more details. # management.rates_mode = basic ## Configure how long aggregated data (such as message rates and queue ## lengths) is retained. 
Please read the plugin's documentation in -## https://rabbitmq.com/management.html#configuration for more +## https://www.rabbitmq.com/docs/management#configuration for more ## details. ## Your can use 'minute', 'hour' and 'day' keys or integer key (in seconds) # management.sample_retention_policies.global.minute = 5 @@ -741,7 +759,7 @@ ## ---------------------------------------------------------------------------- ## RabbitMQ Shovel Plugin ## -## Related doc guide: https://rabbitmq.com/shovel.html +## Related doc guide: https://www.rabbitmq.com/docs/shovel ## ---------------------------------------------------------------------------- ## See advanced.config.example for a Shovel plugin example @@ -750,14 +768,14 @@ ## ---------------------------------------------------------------------------- ## RabbitMQ STOMP Plugin ## -## Related doc guide: https://rabbitmq.com/stomp.html +## Related doc guide: https://www.rabbitmq.com/docs/stomp ## ---------------------------------------------------------------------------- # ======================================= # STOMP section # ======================================= -## See https://rabbitmq.com/stomp.html for details. +## See https://www.rabbitmq.com/docs/stomp for details. ## TCP listeners. ## @@ -781,7 +799,7 @@ # stomp.proxy_protocol = false ## TLS listeners -## See https://rabbitmq.com/stomp.html and https://rabbitmq.com/ssl.html for details. +## See https://www.rabbitmq.com/docs/stomp and https://www.rabbitmq.com/docs/ssl for details. # stomp.listeners.ssl.default = 61614 # # ssl_options.cacertfile = path/to/cacert.pem @@ -860,7 +878,7 @@ # mqtt.tcp_listen_options.send_timeout = 120000 ## TLS listener settings -## ## See https://rabbitmq.com/mqtt.html and https://rabbitmq.com/ssl.html for details. +## ## See https://www.rabbitmq.com/docs/mqtt and https://www.rabbitmq.com/docs/ssl for details. # # mqtt.listeners.ssl.default = 8883 # @@ -888,14 +906,8 @@ ## # mqtt.proxy_protocol = false -## Set the default user name and password used for anonymous connections (when client -## provides no credentials). Anonymous connections are highly discouraged! -## -# mqtt.default_user = guest -# mqtt.default_pass = guest - ## Enable anonymous connections. If this is set to false, clients MUST provide -## credentials in order to connect. See also the mqtt.default_user/mqtt.default_pass +## credentials in order to connect. See also the anonymous_login_user/anonymous_login_pass ## keys. Anonymous connections are highly discouraged! ## # mqtt.allow_anonymous = true @@ -954,13 +966,9 @@ ## # amqp1_0.default_user = guest -## Enable protocol strict mode. See the README for more information. -## -# amqp1_0.protocol_strict_mode = false - ## Logging settings. ## -## See https://rabbitmq.com/logging.html for details. +## See https://www.rabbitmq.com/docs/logging for details. ## ## Log directory, taken from the RABBITMQ_LOG_BASE env variable by default. @@ -1000,11 +1008,40 @@ # log.exchange.level = info +## File size-based log rotation + +## Note that `log.file.rotation.size` cannot be combined with `log.file.rotation.date`, +## the two options are mutually exclusive. 
+ +## rotate when the file reaches 10 MiB +# log.file.rotation.size = 10485760 + +## keep up to 5 archived log files in addition to the current one +# log.file.rotation.count = 5 + +## compress the archived logs +# log.file.rotation.compress = true + + +## Date-based log rotation + +## Note that `log.file.rotation.date` cannot be combined with `log.file.rotation.size`, +## the two options are mutually exclusive. + +## rotate every night at midnight +# log.file.rotation.date = $D0 + +## keep up to 5 archived log files in addition to the current one +# log.file.rotation.count = 5 + +## compress the archived logs +# log.file.rotation.compress = true + ## ---------------------------------------------------------------------------- ## RabbitMQ LDAP Plugin ## -## Related doc guide: https://rabbitmq.com/ldap.html. +## Related doc guide: https://www.rabbitmq.com/docs/ldap. ## ## ---------------------------------------------------------------------------- @@ -1037,7 +1074,7 @@ ## # auth_ldap.timeout = infinity -## Or number +## Or a number # auth_ldap.timeout = 500 ## Enable logging of LDAP queries. @@ -1054,6 +1091,39 @@ # auth_ldap.log = true # auth_ldap.log = network +## Client TLS settings for LDAP connections +## + +## enables TLS for connections to the LDAP server +# auth_ldap.use_ssl = true + +## local filesystem path to a CA certificate bundle file +# auth_ldap.ssl_options.cacertfile = /path/to/ca_certificate.pem + +## local filesystem path to a client certificate file +# auth_ldap.ssl_options.certfile = /path/to/client_certfile.pem + +## local filesystem path to a client private key file +# auth_ldap.ssl_options.keyfile = /path/to/client_key.pem + +## Sets Server Name Indication for LDAP connections. +## If an LDAP server host is available via multiple domain names, set this value +## to the preferred domain name target LDAP server +# auth_ldap.ssl_options.sni = ldap.identity.eng.megacorp.local + +## take wildcards into account when performing hostname verification +# auth_ldap.ssl_options.hostname_verification = wildcard + +## enables peer certificate chain verification +# auth_ldap.ssl_options.verify = verify_peer + +## disables peer certificate chain verification +# auth_ldap.ssl_options.verify = verify_none + +## if target LDAP server does not present a certificate, should the connection be aborted? +# auth_ldap.ssl_options.fail_if_no_peer_cert = true + + ## ## Authentication ## ============== @@ -1105,7 +1175,7 @@ ## The LDAP plugin can perform a variety of queries against your ## LDAP server to determine questions of authorisation. ## -## Related doc guide: https://rabbitmq.com/ldap.html#authorisation. +## Related doc guide: https://www.rabbitmq.com/docs/ldap#authorisation. ## Following configuration should be defined in advanced.config file ## DO NOT UNCOMMENT THESE LINES! diff --git a/deps/rabbit/docs/rabbitmqctl.8 b/deps/rabbit/docs/rabbitmqctl.8 index be18d312a960..d35a1541885d 100644 --- a/deps/rabbit/docs/rabbitmqctl.8 +++ b/deps/rabbit/docs/rabbitmqctl.8 @@ -3,7 +3,7 @@ .\" License, v. 2.0. If a copy of the MPL was not distributed with this .\" file, You can obtain one at https://mozilla.org/MPL/2.0/. .\" -.\" Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +.\" Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
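For illustration only, the LDAP TLS options introduced above (in rabbitmq.conf.example) could be combined along these lines; the certificate paths and the SNI hostname are placeholders, not defaults:

auth_ldap.use_ssl = true
auth_ldap.ssl_options.cacertfile = /path/to/ca_certificate.pem
auth_ldap.ssl_options.verify = verify_peer
auth_ldap.ssl_options.fail_if_no_peer_cert = true
auth_ldap.ssl_options.sni = ldap.identity.eng.megacorp.local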
.\" .Dd June 22, 2023 .Dt RABBITMQCTL 8 @@ -463,156 +463,6 @@ is part of, as a ram node: To learn more, see the .Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide". .\" ------------------------------------------------------------------ -.It Cm rename_cluster_node Ar oldnode1 Ar newnode1 Op Ar oldnode2 Ar newnode2 ... -.Pp -Supports renaming of cluster nodes in the local database. -.Pp -This subcommand causes -.Nm -to temporarily become the node in order to make the change. -The local cluster node must therefore be completely stopped; other nodes -can be online or offline. -.Pp -This subcommand takes an even number of arguments, in pairs representing -the old and new names for nodes. -You must specify the old and new names for this node and for any other -nodes that are stopped and being renamed at the same time. -.Pp -It is possible to stop all nodes and rename them all simultaneously (in -which case old and new names for all nodes must be given to every node) -or stop and rename nodes one at a time (in which case each node only -needs to be told how its own name is changing). -.Pp -For example, this command will rename the node -.Qq rabbit@misshelpful -to the node -.Qq rabbit@cordelia -.sp -.Dl rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia -.Pp -Note that this command only changes the local database. -It may also be necessary to rename the local database directories -and configure the new node name. -For example: -.sp -.Bl -enum -compact -.It -Stop the node: -.sp -.Dl rabbitmqctl stop rabbit@misshelpful -.sp -.It -Rename the node in the local database: -.sp -.Dl rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia -.sp -.It -Rename the local database directories (note, you do not need to do this -if you have set the RABBITMQ_MNESIA_DIR environment variable): -.sp -.Bd -literal -offset indent -compact -mv \\ - /var/lib/rabbitmq/mnesia/rabbit\\@misshelpful \\ - /var/lib/rabbitmq/mnesia/rabbit\\@cordelia -mv \\ - /var/lib/rabbitmq/mnesia/rabbit\\@misshelpful-rename \\ - /var/lib/rabbitmq/mnesia/rabbit\\@cordelia-rename -mv \\ - /var/lib/rabbitmq/mnesia/rabbit\\@misshelpful-plugins-expand \\ - /var/lib/rabbitmq/mnesia/rabbit\\@cordelia-plugins-expand -.Ed -.sp -.It -If node name is configured e.g. using -.Ar /etc/rabbitmq/rabbitmq-env.conf -, it also needs to be updated there. -.sp -.It -Start the node when ready -.El -.\" ------------------------------------------------------------------ -.It Cm update_cluster_nodes Ar clusternode -.Bl -tag -width Ds -.It Ar clusternode -The node to consult for up-to-date information. -.El -.Pp -Instructs an already clustered node to contact -.Ar clusternode -to cluster when booting up. -This is different from -.Cm join_cluster -since it does not join any cluster - it checks that the node is already -in a cluster with -.Ar clusternode . -.Pp -The need for this command is motivated by the fact that clusters can -change while a node is offline. -Consider a situation where node -.Va rabbit@A -and -.Va rabbit@B -are clustered. -.Va rabbit@A -goes down, -.Va rabbit@C -clusters with -.Va rabbit@B , -and then -.Va rabbit@B -leaves the cluster. -When -.Va rabbit@A -starts back up, it'll try to contact -.Va rabbit@B , -but this will fail since -.Va rabbit@B -is not in the cluster anymore. 
-The following command will rename node -.Va rabbit@B -to -.Va rabbit@C -on node -.Va rabbitA -.sp -.Dl update_cluster_nodes -n Va rabbit@A Va rabbit@B Va rabbit@C -.Pp -To learn more, see the -.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide" -.El -.\" ------------------------------------------------------------------ -.\" ## Classic Mirrored Queues -.\" ------------------------------------------------------------------ -.Ss Replication -.Bl -tag -width Ds -.\" ------------------------------------------------------------------ -.It Cm sync_queue Oo Fl p Ar vhost Oc Ar queue -.Bl -tag -width Ds -.It Ar queue -The name of the queue to synchronise. -.El -.Pp -Instructs a mirrored queue with unsynchronised mirrors (follower replicas) -to synchronise them. -The queue will block while synchronisation takes place (all publishers -and consumers using the queue will block or temporarily see no activity). -This command can only be used with mirrored queues. -To learn more, see the -.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide" -.Pp -Note that queues with unsynchronised replicas and active consumers -will become synchronised eventually (assuming that consumers make progress). -This command is primarily useful for queues that do not have active consumers. -.\" ------------------------------------------------------------------ -.It Cm cancel_sync_queue Oo Fl p Ar vhost Oc Ar queue -.Bl -tag -width Ds -.It Ar queue -The name of the queue to cancel synchronisation for. -.El -.Pp -Instructs a synchronising mirrored queue to stop synchronising itself. -.El -.\" ------------------------------------------------------------------ .\" ## User management .\" ------------------------------------------------------------------ .Ss User Management @@ -2607,4 +2457,4 @@ Reset the stats database for all nodes in the cluster. .\" ------------------------------------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/set_rabbitmq_policy.sh.example b/deps/rabbit/docs/set_rabbitmq_policy.sh.example index f46e901ad56b..da6637f606bf 100644 --- a/deps/rabbit/docs/set_rabbitmq_policy.sh.example +++ b/deps/rabbit/docs/set_rabbitmq_policy.sh.example @@ -1,4 +1,3 @@ # This script is called by rabbitmq-server-ha.ocf during RabbitMQ # cluster start up. It is a convenient place to set your cluster -# policy here, for example: -# ${OCF_RESKEY_ctl} set_policy ha-all "." '{"ha-mode":"all", "ha-sync-mode":"automatic"}' --apply-to all --priority 0 +# policy here. See https://www.rabbitmq.com/docs/parameters for examples diff --git a/deps/rabbit/include/amqqueue.hrl b/deps/rabbit/include/amqqueue.hrl index 4c5882d1966e..011da21cade0 100644 --- a/deps/rabbit/include/amqqueue.hrl +++ b/deps/rabbit/include/amqqueue.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -include("amqqueue_v2.hrl"). @@ -41,6 +41,9 @@ -define(amqqueue_is_stream(Q), ?amqqueue_type_is(Q, rabbit_stream_queue)). 
+-define(amqqueue_is_mqtt_qos0(Q), + ?amqqueue_type_is(Q, rabbit_mqtt_qos0_queue)). + -define(amqqueue_type_is(Q, Type), (?is_amqqueue_v2(Q) andalso ?amqqueue_v2_field_type(Q) =:= Type)). diff --git a/deps/rabbit/include/gm_specs.hrl b/deps/rabbit/include/gm_specs.hrl deleted file mode 100644 index 92d885e47dfc..000000000000 --- a/deps/rabbit/include/gm_specs.hrl +++ /dev/null @@ -1,10 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --type callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}. --type args() :: any(). --type members() :: [pid()]. diff --git a/deps/rabbit/include/internal_user.hrl b/deps/rabbit/include/internal_user.hrl new file mode 100644 index 000000000000..aa9c5db81fb7 --- /dev/null +++ b/deps/rabbit/include/internal_user.hrl @@ -0,0 +1,4 @@ +-define(is_internal_user(U), + (?is_internal_user_v2(U))). + +-define(is_internal_user_v2(U), is_record(U, internal_user, 6)). diff --git a/deps/rabbit/include/mc.hrl b/deps/rabbit/include/mc.hrl index 8e82a550f4be..abe38fb7c617 100644 --- a/deps/rabbit/include/mc.hrl +++ b/deps/rabbit/include/mc.hrl @@ -1,23 +1,36 @@ --type death_key() :: {Queue :: rabbit_misc:resource_name(), rabbit_dead_letter:reason()}. --type death_anns() :: #{first_time := non_neg_integer(), %% the timestamp of the first - last_time := non_neg_integer(), %% the timestamp of the last - ttl => non_neg_integer()}. --record(death, { - exchange :: rabbit_misc:resource_name(), - routing_keys = [] :: [rabbit_types:routing_key()], - count = 0 :: non_neg_integer(), - anns :: death_anns() - }). +%% good enough for most use cases +-define(IS_MC(Msg), element(1, Msg) == mc andalso tuple_size(Msg) == 5). --record(deaths, {first :: death_key(), - last :: death_key(), - records = #{} :: #{death_key() := #death{}}}). +%% "Short strings can carry up to 255 octets of UTF-8 data, but +%% may not contain binary zero octets." [AMQP 0.9.1 $4.2.5.3] +-define(IS_SHORTSTR_LEN(B), byte_size(B) < 256). +%% We keep the following atom annotation keys short as they are stored per message on disk. +-define(ANN_EXCHANGE, x). +-define(ANN_ROUTING_KEYS, rk). +-define(ANN_TIMESTAMP, ts). +-define(ANN_RECEIVED_AT_TIMESTAMP, rts). +-define(ANN_DURABLE, d). +-define(ANN_PRIORITY, p). -%% good enough for most use cases --define(IS_MC(Msg), element(1, Msg) == mc andalso tuple_size(Msg) == 5). +-define(FF_MC_DEATHS_V2, message_containers_deaths_v2). + +-type death_key() :: {SourceQueue :: rabbit_misc:resource_name(), rabbit_dead_letter:reason()}. +-type death_anns() :: #{%% timestamp of the first time this message + %% was dead lettered from this queue for this reason + first_time := pos_integer(), + %% timestamp of the last time this message + %% was dead lettered from this queue for this reason + last_time := pos_integer(), + ttl => OriginalTtlHeader :: non_neg_integer()}. + +-record(death, {exchange :: OriginalExchange :: rabbit_misc:resource_name(), + routing_keys :: OriginalRoutingKeys :: [rabbit_types:routing_key(),...], + %% how many times this message was dead lettered from this queue for this reason + count :: pos_integer(), + anns :: death_anns()}). -%% "Field names MUST start with a letter, '$' or '#' and may continue with letters, '$' or '#', digits, or -%% underlines, to a maximum length of 128 characters." 
[AMQP 0.9.1 4.2.5.5 Field Tables] -%% Given that the valid chars are ASCII chars, 1 char is encoded as 1 byte. --define(AMQP_LEGACY_FIELD_NAME_MAX_LEN, 128). +-record(deaths, {first :: death_key(), % redundant to mc annotations x-first-death-* + last :: death_key(), % redundant to mc annotations x-last-death-* + records :: #{death_key() := #death{}} + }). diff --git a/deps/rabbit/include/rabbit_amqp.hrl b/deps/rabbit/include/rabbit_amqp.hrl new file mode 100644 index 000000000000..84e98d5d565d --- /dev/null +++ b/deps/rabbit/include/rabbit_amqp.hrl @@ -0,0 +1,61 @@ +%% To enable AMQP trace logging, uncomment below line: +%-define(TRACE_AMQP, true). +-ifdef(TRACE_AMQP). +-warning("AMQP tracing is enabled"). +-define(TRACE(Format, Args), + rabbit_log:debug( + "~s:~s/~b ~b~n" ++ Format ++ "~n", + [?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY, ?LINE] ++ Args)). +-else. +-define(TRACE(_Format, _Args), ok). +-endif. + +%% General consts + +%% [2.8.19] +-define(MIN_MAX_FRAME_1_0_SIZE, 512). + +%% for rabbit_event user_authentication_success and user_authentication_failure +-define(AUTH_EVENT_KEYS, + [name, + host, + port, + peer_host, + peer_port, + protocol, + auth_mechanism, + ssl, + ssl_protocol, + ssl_key_exchange, + ssl_cipher, + ssl_hash, + peer_cert_issuer, + peer_cert_subject, + peer_cert_validity]). + +-define(ITEMS, + [pid, + frame_max, + timeout, + vhost, + user, + node + ] ++ ?AUTH_EVENT_KEYS). + +-define(INFO_ITEMS, + [connection_state, + recv_oct, + recv_cnt, + send_oct, + send_cnt + ] ++ ?ITEMS). + +%% for rabbit_event connection_created +-define(CONNECTION_EVENT_KEYS, + [type, + client_properties, + connected_at, + channel_max + ] ++ ?ITEMS). + +-include_lib("amqp10_common/include/amqp10_framing.hrl"). diff --git a/deps/rabbit/include/rabbit_global_counters.hrl b/deps/rabbit/include/rabbit_global_counters.hrl index ae19ce4f0909..023ac44be2c9 100644 --- a/deps/rabbit/include/rabbit_global_counters.hrl +++ b/deps/rabbit/include/rabbit_global_counters.hrl @@ -1,5 +1,4 @@ -define(NUM_PROTOCOL_COUNTERS, 8). --define(NUM_PROTOCOL_QUEUE_TYPE_COUNTERS, 8). %% Dead Letter counters: %% diff --git a/deps/rabbit/priv/schema/rabbit.schema b/deps/rabbit/priv/schema/rabbit.schema index 33e1373ab926..f4ea2f18e4a8 100644 --- a/deps/rabbit/priv/schema/rabbit.schema +++ b/deps/rabbit/priv/schema/rabbit.schema @@ -229,7 +229,12 @@ end}. [{datatype, {enum, [true, false]}}]}. {mapping, "definitions.tls.password", "rabbit.definitions.ssl_options.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. + +{translation, "rabbit.definitions.ssl_options.password", +fun(Conf) -> + rabbit_cuttlefish:optionally_tagged_string("definitions.tls.password", Conf) +end}. {mapping, "definitions.tls.secure_renegotiate", "rabbit.definitions.ssl_options.secure_renegotiate", [{datatype, {enum, [true, false]}}]}. @@ -395,7 +400,12 @@ end}. [{datatype, {enum, [true, false]}}]}. {mapping, "ssl_options.password", "rabbit.ssl_options.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. + +{translation, "rabbit.ssl_options.password", +fun(Conf) -> + rabbit_cuttlefish:optionally_tagged_binary("ssl_options.password", Conf) +end}. {mapping, "ssl_options.psk_identity", "rabbit.ssl_options.psk_identity", [{datatype, string}]}. @@ -427,25 +437,29 @@ end}. {mapping, "ssl_options.bypass_pem_cache", "ssl.bypass_pem_cache", [{datatype, {enum, [true, false]}}]}. + +{mapping, "metadata_store.khepri.default_timeout", "rabbit.khepri_default_timeout", + [{datatype, integer}]}. 
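%% Usage sketch for the new Khepri mapping above; the timeout value shown is
%% purely illustrative, not a documented default:
%%
%%   metadata_store.khepri.default_timeout = 30000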
+ %% =========================================================================== %% Choose the available SASL mechanism(s) to expose. -%% The two default (built in) mechanisms are 'PLAIN' and -%% 'AMQPLAIN'. Additional mechanisms can be added via -%% plugins. +%% The three default (built in) mechanisms are 'PLAIN', 'AMQPLAIN' and 'ANONYMOUS'. +%% Additional mechanisms can be added via plugins. %% -%% See https://www.rabbitmq.com/authentication.html for more details. +%% See https://www.rabbitmq.com/docs/access-control for more details. %% -%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, +%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']}, {mapping, "auth_mechanisms.$name", "rabbit.auth_mechanisms", [ {datatype, atom}]}. {translation, "rabbit.auth_mechanisms", -fun(Conf) -> - Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf), - [ V || {_, V} <- Settings ] -end}. + fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf), + Sorted = lists:keysort(1, Settings), + [V || {_, V} <- Sorted] + end}. %% Select an authentication backend to use. RabbitMQ provides an @@ -652,12 +666,12 @@ fun(Conf) -> end}. {mapping, "default_pass", "rabbit.default_pass", [ - {datatype, string} + {datatype, [tagged_binary, binary]} ]}. {translation, "rabbit.default_pass", fun(Conf) -> - list_to_binary(cuttlefish:conf_get("default_pass", Conf)) + rabbit_cuttlefish:optionally_tagged_binary("default_pass", Conf) end}. {mapping, "default_permissions.configure", "rabbit.default_permissions", [ @@ -692,7 +706,7 @@ end}. ]}. {mapping, "default_users.$name.password", "rabbit.default_users", [ - {datatype, string} + {datatype, [tagged_binary, binary]} ]}. {mapping, "default_users.$name.configure", "rabbit.default_users", [ @@ -721,6 +735,22 @@ end}. end end}. +%% Connections that skip SASL layer or use SASL mechanism ANONYMOUS will use this identity. +%% Setting this to a username will allow (anonymous) clients to connect and act as this +%% given user. For production environments, set this value to 'none'. +{mapping, "anonymous_login_user", "rabbit.anonymous_login_user", + [{datatype, [{enum, [none]}, binary]}]}. + +{mapping, "anonymous_login_pass", "rabbit.anonymous_login_pass", [ + {datatype, [tagged_binary, binary]} +]}. + +{translation, "rabbit.anonymous_login_pass", +fun(Conf) -> + rabbit_cuttlefish:optionally_tagged_binary("anonymous_login_pass", Conf) +end}. + + %% %% Default Policies %% ==================== @@ -782,6 +812,12 @@ end}. {datatype, string} ]}. +{mapping, "default_policies.operator.$id.classic_queues.queue_version", "rabbit.default_policies.operator", + [ + {validators, ["non_zero_positive_integer"]}, + {datatype, integer} + ]}. + {translation, "rabbit.default_policies.operator", fun(Conf) -> Props = rabbit_cuttlefish:aggregate_props( Conf, @@ -795,6 +831,8 @@ end}. _ -> V end, {["default_policies","operator",ID|T], NewV}; + ({["default_policies","operator",ID, "queue_pattern"], V}) -> + {["default_policies","operator",ID,"queue_pattern"], list_to_binary(V)}; (E) -> E end), case Props of @@ -877,6 +915,39 @@ end}. {mapping, "channel_max", "rabbit.channel_max", [{datatype, integer}]}. +{mapping, "channel_max_per_node", "rabbit.channel_max_per_node", + [{datatype, [{atom, infinity}, integer]}]}. 
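%% Illustrative rabbitmq.conf usage of the mapping above (the limit shown is
%% arbitrary); per the translation below, only positive integers or 'infinity'
%% are accepted:
%%
%%   channel_max_per_node = 500
%%   channel_max_per_node = infinity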
+ +{translation, "rabbit.channel_max_per_node", + fun(Conf) -> + case cuttlefish:conf_get("channel_max_per_node", Conf, undefined) of + undefined -> cuttlefish:unset(); + infinity -> infinity; + Val when is_integer(Val) andalso Val > 0 -> Val; + _ -> cuttlefish:invalid("should be positive integer or 'infinity'") + end + end +}. + +%% Set the max allowed number of consumers per channel. +%% `infinity` means "no limit". +%% +%% {consumer_max_per_channel, infinity}, + +{mapping, "consumer_max_per_channel", "rabbit.consumer_max_per_channel", + [{datatype, [{atom, infinity}, integer]}]}. + +{translation, "rabbit.consumer_max_per_channel", + fun(Conf) -> + case cuttlefish:conf_get("consumer_max_per_channel", Conf, undefined) of + undefined -> cuttlefish:unset(); + infinity -> infinity; + Val when is_integer(Val) andalso Val > 0 -> Val; + _ -> cuttlefish:invalid("should be positive integer or 'infinity'") + end + end +}. + %% Set the max permissible number of client connections per node. %% `infinity` means "no limit". %% @@ -1052,10 +1123,13 @@ end}. %% GB: gigabytes (10^9 - 1,000,000,000 bytes) {mapping, "vm_memory_high_watermark.relative", "rabbit.vm_memory_high_watermark", [ - {datatype, float}]}. + {datatype, float} +]}. {mapping, "vm_memory_high_watermark.absolute", "rabbit.vm_memory_high_watermark", [ - {datatype, [integer, string]}]}. + {datatype, [integer, string]}, + {validators, ["is_supported_information_unit"]} +]}. {translation, "rabbit.vm_memory_high_watermark", @@ -1114,7 +1188,6 @@ end}. %% {disk_free_limit, 50000000}, %% %% Or you can set it using memory units (same as in vm_memory_high_watermark) -%% with RabbitMQ 3.6.0+. %% {disk_free_limit, "50MB"}, %% {disk_free_limit, "50000kB"}, %% {disk_free_limit, "2GB"}, @@ -1128,7 +1201,9 @@ end}. {datatype, float}]}. {mapping, "disk_free_limit.absolute", "rabbit.disk_free_limit", [ - {datatype, [integer, string]}]}. + {datatype, [integer, string]}, + {validators, ["is_supported_information_unit"]} +]}. {translation, "rabbit.disk_free_limit", @@ -1277,28 +1352,6 @@ fun(Conf) -> end end}. -%% Cluster formation: Randomized startup delay -%% -%% DEPRECATED: This is a no-op. Old configs are still allowed, but a warning will be printed. - -{mapping, "cluster_formation.randomized_startup_delay_range.min", "rabbit.cluster_formation.randomized_startup_delay_range", []}. -{mapping, "cluster_formation.randomized_startup_delay_range.max", "rabbit.cluster_formation.randomized_startup_delay_range", []}. - -{translation, "rabbit.cluster_formation.randomized_startup_delay_range", -fun(Conf) -> - Min = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.min", Conf, undefined), - Max = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.max", Conf, undefined), - - case {Min, Max} of - {undefined, undefined} -> - ok; - _ -> - cuttlefish:warn("cluster_formation.randomized_startup_delay_range.min and " - "cluster_formation.randomized_startup_delay_range.max are deprecated") - end, - cuttlefish:unset() -end}. - %% Cluster formation: lock acquisition retries as passed to https://erlang.org/doc/man/global.html#set_lock-3 %% %% Currently used in classic, k8s, and aws peer discovery backends. @@ -1388,6 +1441,21 @@ fun(Conf) -> end}. +{mapping, "cluster_queue_limit", "rabbit.cluster_queue_limit", + [{datatype, [{atom, infinity}, integer]}]}. 
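%% Illustrative rabbitmq.conf usage of the mapping above (the figure is
%% arbitrary); as with channel_max_per_node, only positive integers or
%% 'infinity' are accepted by the translation that follows:
%%
%%   cluster_queue_limit = 10000
%%   cluster_queue_limit = infinity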
+ +{translation, "rabbit.cluster_queue_limit", + fun(Conf) -> + case cuttlefish:conf_get("cluster_queue_limit", Conf, undefined) of + undefined -> cuttlefish:unset(); + infinity -> infinity; + Val when is_integer(Val) andalso Val > 0 -> Val; + _ -> cuttlefish:invalid("should be positive integer or 'infinity'") + end + end +}. + + %% Interval (in milliseconds) at which we send keepalive messages %% to other cluster members. Note that this is not the same thing %% as net_ticktime; missed keepalive messages will not cause nodes @@ -1400,6 +1468,10 @@ end}. %% Queue master locator (classic queues) %% +%% For backwards compatibility only as of 4.0. +%% We still allow values of min-masters, random and client-local +%% but the behaviour is only local or balanced. +%% Use queue_leader_locator instead. {mapping, "queue_master_locator", "rabbit.queue_master_locator", [{datatype, string}]}. @@ -2353,7 +2425,7 @@ end}. {mapping, "raft.segment_max_entries", "ra.segment_max_entries", [ {datatype, integer}, - {validators, ["non_zero_positive_integer"]} + {validators, ["non_zero_positive_integer", "non_zero_positive_16_bit_integer"]} ]}. {translation, "ra.segment_max_entries", @@ -2462,6 +2534,22 @@ end}. end }. +{mapping, "default_queue_type", "rabbit.default_queue_type", [ + {datatype, atom} +]}. + +{translation, "rabbit.default_queue_type", +fun(Conf) -> + case cuttlefish:conf_get("default_queue_type", Conf, rabbit_classic_queue) of + classic -> rabbit_classic_queue; + quorum -> rabbit_quorum_queue; + stream -> rabbit_stream_queue; + Module -> Module + end +end}. + + + %% %% Backing queue version %% @@ -2474,7 +2562,7 @@ end}. {translation, "rabbit.classic_queue_default_version", fun(Conf) -> case cuttlefish:conf_get("classic_queue.default_version", Conf, 2) of - 1 -> 1; + 1 -> cuttlefish:invalid("Classic queues v1 are no longer supported"); 2 -> 2; _ -> cuttlefish:unset() end @@ -2555,6 +2643,32 @@ end}. end }. +{mapping, "stream.replication.port_range.min", "osiris.port_range", [ + {datatype, [integer]}, + {validators, ["non_zero_positive_integer"]} +]}. +{mapping, "stream.replication.port_range.max", "osiris.port_range", [ + {datatype, [integer]}, + {validators, ["non_zero_positive_integer"]} +]}. + +{translation, "osiris.port_range", +fun(Conf) -> + Min = cuttlefish:conf_get("stream.replication.port_range.min", Conf, undefined), + Max = cuttlefish:conf_get("stream.replication.port_range.max", Conf, undefined), + + case {Min, Max} of + {undefined, undefined} -> + cuttlefish:unset(); + {Mn, undefined} -> + {Mn, Mn + 500}; + {undefined, Mx} -> + {Mx - 500, Mx}; + _ -> + {Min, Max} + end +end}. + % =============================== % Validators % =============================== @@ -2617,8 +2731,37 @@ fun(Int) when is_integer(Int) -> Int >= 1 end}. +{validator, "non_zero_positive_16_bit_integer", "number should be between 1 and 65535", +fun(Int) when is_integer(Int) -> + (Int >= 1) and (Int =< 65535) +end}. + {validator, "valid_regex", "string must be a valid regular expression", fun("") -> false; (String) -> {Res, _ } = re:compile(String), Res =:= ok end}. + +{validator, "is_supported_information_unit", "supported formats: 500MB, 500MiB, 10GB, 10GiB, 2TB, 2TiB, 10000000000", + fun(S0) -> + case is_integer(S0) of + true -> true; + false -> + %% this is a string + S = string:strip(S0, right), + %% The suffix is optional + {ok, HasIUSuffix} = re:compile("([0-9]+)([a-zA-Z]){1,3}$", [dollar_endonly, caseless]), + %% Here are the prefixes we accept. 
This must match + %% what rabbit_resource_monitor_misc and 'rabbitmq-diagnostics status' can format. + {ok, SuffixExtractor} = re:compile("(k|ki|kb|kib|m|mi|mb|mib|g|gi|gb|gib|t|ti|tb|tib|p|pi|pb|pib)$", [dollar_endonly, caseless]), + case re:run(S, HasIUSuffix) of + nomatch -> false; + {match, _} -> + case re:split(S, SuffixExtractor) of + [] -> false; + [_CompleteMatch] -> false; + [_CompleteMatch, Suffix | _] -> true + end + end + end + end}. diff --git a/deps/rabbit/scripts/rabbitmq-defaults b/deps/rabbit/scripts/rabbitmq-defaults index 41d72c7da47e..2db59ff9499d 100755 --- a/deps/rabbit/scripts/rabbitmq-defaults +++ b/deps/rabbit/scripts/rabbitmq-defaults @@ -3,7 +3,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## ### next line potentially updated in package install steps diff --git a/deps/rabbit/scripts/rabbitmq-diagnostics b/deps/rabbit/scripts/rabbitmq-diagnostics index 7101f3cc9bfb..381661de7390 100755 --- a/deps/rabbit/scripts/rabbitmq-diagnostics +++ b/deps/rabbit/scripts/rabbitmq-diagnostics @@ -3,7 +3,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## # Exit immediately if a pipeline, which may consist of a single simple command, @@ -20,4 +20,18 @@ set -a # shellcheck source=./rabbitmq-env . "${0%/*}"/rabbitmq-env -run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-diagnostics "$@" +maybe_noinput='noinput' + +case "$@" in + *observer*) + maybe_noinput='input' + ;; + *remote_shell*) + maybe_noinput='input' + ;; + *) + maybe_noinput='noinput' + ;; +esac + +run_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-diagnostics "$maybe_noinput" "$@" diff --git a/deps/rabbit/scripts/rabbitmq-diagnostics.bat b/deps/rabbit/scripts/rabbitmq-diagnostics.bat index f5043e81fedb..bf7471a0d5e9 100644 --- a/deps/rabbit/scripts/rabbitmq-diagnostics.bat +++ b/deps/rabbit/scripts/rabbitmq-diagnostics.bat @@ -3,7 +3,7 @@ REM This Source Code Form is subject to the terms of the Mozilla Public REM License, v. 2.0. If a copy of the MPL was not distributed with this REM file, You can obtain one at https://mozilla.org/MPL/2.0/. REM -REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +REM Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. REM REM Scopes the variables to the current batch file @@ -42,9 +42,10 @@ if "%1"=="remote_shell" ( set ERL_CMD=erl.exe ) +REM Note: do NOT add -noinput because "observer" depends on it "!ERLANG_HOME!\bin\!ERL_CMD!" +B ^ -boot !CLEAN_BOOT_FILE! ^ --noinput -noshell -hidden -smp enable ^ +-noshell -hidden -smp enable ^ !RABBITMQ_CTL_ERL_ARGS! ^ -kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^ -kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! 
^ diff --git a/deps/rabbit/scripts/rabbitmq-env b/deps/rabbit/scripts/rabbitmq-env index c9b3a15ee746..6981cf3a805f 100755 --- a/deps/rabbit/scripts/rabbitmq-env +++ b/deps/rabbit/scripts/rabbitmq-env @@ -3,7 +3,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## if [ "$RABBITMQ_ENV_LOADED" = 1 ]; then @@ -178,25 +178,24 @@ _rmq_env_set_erl_libs() run_escript() { - escript_main="${1:?escript_main must be defined}" - shift escript="${1:?escript must be defined}" shift + maybe_noinput="${1:?maybe_noinput must be defined}" + shift + _rmq_env_set_erl_libs - # Important: do not quote RABBITMQ_CTL_ERL_ARGS as they must be - # word-split - # shellcheck disable=SC2086 - exec erl +B \ - -boot "$CLEAN_BOOT_FILE" \ - -noinput -noshell -hidden -smp enable \ - $RABBITMQ_CTL_ERL_ARGS \ - -kernel inet_dist_listen_min "$RABBITMQ_CTL_DIST_PORT_MIN" \ - -kernel inet_dist_listen_max "$RABBITMQ_CTL_DIST_PORT_MAX" \ - -run escript start \ - -escript main "$escript_main" \ - -extra "$escript" "$@" + tmp_erl_flags="-boot $CLEAN_BOOT_FILE $RABBITMQ_CTL_ERL_ARGS -kernel inet_dist_listen_min $RABBITMQ_CTL_DIST_PORT_MIN -kernel inet_dist_listen_max $RABBITMQ_CTL_DIST_PORT_MAX" + + if [ "$maybe_noinput" = 'noinput' ] + then + tmp_erl_flags="-noinput $tmp_erl_flags" + fi + + ERL_FLAGS="$tmp_erl_flags" "$escript" "$@" + + unset tmp_erl_flags } RABBITMQ_ENV_LOADED=1 diff --git a/deps/rabbit/scripts/rabbitmq-plugins b/deps/rabbit/scripts/rabbitmq-plugins index 1ec15b2ee9d4..e5ea06b2235f 100755 --- a/deps/rabbit/scripts/rabbitmq-plugins +++ b/deps/rabbit/scripts/rabbitmq-plugins @@ -3,7 +3,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## # Exit immediately if a pipeline, which may consist of a single simple command, @@ -20,4 +20,4 @@ set -a # shellcheck source=./rabbitmq-env . "${0%/*}"/rabbitmq-env -run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-plugins "$@" +run_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-plugins 'noinput' "$@" diff --git a/deps/rabbit/scripts/rabbitmq-plugins.bat b/deps/rabbit/scripts/rabbitmq-plugins.bat index e1f13b7073b2..718fee57f324 100644 --- a/deps/rabbit/scripts/rabbitmq-plugins.bat +++ b/deps/rabbit/scripts/rabbitmq-plugins.bat @@ -4,7 +4,7 @@ REM This Source Code Form is subject to the terms of the Mozilla Public REM License, v. 2.0. If a copy of the MPL was not distributed with this REM file, You can obtain one at https://mozilla.org/MPL/2.0/. REM -REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +REM Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. REM setlocal diff --git a/deps/rabbit/scripts/rabbitmq-queues b/deps/rabbit/scripts/rabbitmq-queues index 680076f962a6..4af980e79486 100755 --- a/deps/rabbit/scripts/rabbitmq-queues +++ b/deps/rabbit/scripts/rabbitmq-queues @@ -3,7 +3,7 @@ ## License, v. 
2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## # Exit immediately if a pipeline, which may consist of a single simple command, @@ -20,4 +20,4 @@ set -a # shellcheck source=./rabbitmq-env . "${0%/*}"/rabbitmq-env -run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-queues "$@" +run_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-queues 'noinput' "$@" diff --git a/deps/rabbit/scripts/rabbitmq-queues.bat b/deps/rabbit/scripts/rabbitmq-queues.bat index 99fce6479f4a..97f4c0ebc632 100644 --- a/deps/rabbit/scripts/rabbitmq-queues.bat +++ b/deps/rabbit/scripts/rabbitmq-queues.bat @@ -3,7 +3,7 @@ REM This Source Code Form is subject to the terms of the Mozilla Public REM License, v. 2.0. If a copy of the MPL was not distributed with this REM file, You can obtain one at https://mozilla.org/MPL/2.0/. REM -REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +REM Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. REM REM Scopes the variables to the current batch file diff --git a/deps/rabbit/scripts/rabbitmq-server b/deps/rabbit/scripts/rabbitmq-server index 3bd76a1cf68a..f5f5de84cd9d 100755 --- a/deps/rabbit/scripts/rabbitmq-server +++ b/deps/rabbit/scripts/rabbitmq-server @@ -4,7 +4,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## set -e @@ -77,14 +77,13 @@ start_rabbitmq_server() { -syslog logger '[]' \ -syslog syslog_error_logger false \ -kernel prevent_overlapping_partitions false \ - -enable-feature maybe_expr \ "$@" } stop_rabbitmq_server() { if test "$rabbitmq_server_pid"; then kill -TERM "$rabbitmq_server_pid" - wait "$rabbitmq_server_pid" || true + wait "$rabbitmq_server_pid" fi } @@ -130,23 +129,12 @@ else trap "stop_rabbitmq_server; exit 130" INT start_rabbitmq_server "$@" & - export rabbitmq_server_pid=$! + export rabbitmq_server_pid="$!" # Block until RabbitMQ exits or a signal is caught. # Waits for last command (which is start_rabbitmq_server) - # - # The "|| true" is here to work around an issue with Dash. Normally - # in a Bourne shell, if `wait` is interrupted by a signal, the - # signal handlers defined above are executed and the script - # terminates with the exit code of `wait` (unless the signal handler - # overrides that). - # In the case of Dash, it looks like `set -e` (set at the beginning - # of this script) gets precedence over signal handling. Therefore, - # when `wait` is interrupted, its exit code is non-zero and because - # of `set -e`, the script terminates immediately without running the - # signal handler. To work around this issue, we use "|| true" to - # force that statement to succeed and the signal handler to properly - # execute. Because the statement below has an exit code of 0, the - # signal handler has to restate the expected exit code. 
- wait "$rabbitmq_server_pid" || true + # In a POSIX Bourne shell, if `wait` is interrupted by a signal, the signal + # handlers defined above are executed and the script terminates with the + # exit code of `wait` (unless the signal handler overrides that). + wait "$rabbitmq_server_pid" fi diff --git a/deps/rabbit/scripts/rabbitmq-server.bat b/deps/rabbit/scripts/rabbitmq-server.bat index 76868b27c38e..37f2e9e0ffbf 100644 --- a/deps/rabbit/scripts/rabbitmq-server.bat +++ b/deps/rabbit/scripts/rabbitmq-server.bat @@ -3,7 +3,7 @@ REM This Source Code Form is subject to the terms of the Mozilla Public REM License, v. 2.0. If a copy of the MPL was not distributed with this REM file, You can obtain one at https://mozilla.org/MPL/2.0/. REM -REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +REM Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. REM setlocal @@ -71,7 +71,6 @@ if "!RABBITMQ_ALLOW_INPUT!"=="" ( -syslog logger [] ^ -syslog syslog_error_logger false ^ -kernel prevent_overlapping_partitions false ^ --enable-feature maybe_expr ^ !STAR! if ERRORLEVEL 1 ( diff --git a/deps/rabbit/scripts/rabbitmq-service.bat b/deps/rabbit/scripts/rabbitmq-service.bat index 809885a48f40..55b514d2d23d 100644 --- a/deps/rabbit/scripts/rabbitmq-service.bat +++ b/deps/rabbit/scripts/rabbitmq-service.bat @@ -3,7 +3,7 @@ REM This Source Code Form is subject to the terms of the Mozilla Public REM License, v. 2.0. If a copy of the MPL was not distributed with this REM file, You can obtain one at https://mozilla.org/MPL/2.0/. REM -REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +REM Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. REM setlocal @@ -201,7 +201,6 @@ set ERLANG_SERVICE_ARGUMENTS= ^ -syslog logger [] ^ -syslog syslog_error_logger false ^ -kernel prevent_overlapping_partitions false ^ --enable-feature maybe_expr ^ !STARVAR! set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\! diff --git a/deps/rabbit/scripts/rabbitmq-streams b/deps/rabbit/scripts/rabbitmq-streams index 66278fffdc0a..7c863ccfb44d 100755 --- a/deps/rabbit/scripts/rabbitmq-streams +++ b/deps/rabbit/scripts/rabbitmq-streams @@ -4,7 +4,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## # Exit immediately if a pipeline, which may consist of a single simple command, @@ -21,4 +21,4 @@ set -a # shellcheck source=./rabbitmq-env . "${0%/*}"/rabbitmq-env -run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-streams "$@" +run_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-streams 'noinput' "$@" diff --git a/deps/rabbit/scripts/rabbitmq-streams.bat b/deps/rabbit/scripts/rabbitmq-streams.bat index 6523201ed422..37d604f39cbe 100644 --- a/deps/rabbit/scripts/rabbitmq-streams.bat +++ b/deps/rabbit/scripts/rabbitmq-streams.bat @@ -4,7 +4,7 @@ REM This Source Code Form is subject to the terms of the Mozilla Public REM License, v. 2.0. If a copy of the MPL was not distributed with this REM file, You can obtain one at https://mozilla.org/MPL/2.0/. 
REM -REM Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +REM Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. REM REM Scopes the variables to the current batch file diff --git a/deps/rabbit/scripts/rabbitmq-upgrade b/deps/rabbit/scripts/rabbitmq-upgrade index 6d2bc3f94819..bbda0151fae4 100755 --- a/deps/rabbit/scripts/rabbitmq-upgrade +++ b/deps/rabbit/scripts/rabbitmq-upgrade @@ -3,7 +3,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## # Exit immediately if a pipeline, which may consist of a single simple command, @@ -20,4 +20,4 @@ set -a # shellcheck source=./rabbitmq-env . "${0%/*}"/rabbitmq-env -run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-upgrade "$@" +run_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-upgrade 'noinput' "$@" diff --git a/deps/rabbit/scripts/rabbitmq-upgrade.bat b/deps/rabbit/scripts/rabbitmq-upgrade.bat index 70b0eeee6219..bf452d842d2a 100644 --- a/deps/rabbit/scripts/rabbitmq-upgrade.bat +++ b/deps/rabbit/scripts/rabbitmq-upgrade.bat @@ -3,7 +3,7 @@ REM This Source Code Form is subject to the terms of the Mozilla Public REM License, v. 2.0. If a copy of the MPL was not distributed with this REM file, You can obtain one at https://mozilla.org/MPL/2.0/. REM -REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +REM Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. REM REM Scopes the variables to the current batch file diff --git a/deps/rabbit/scripts/rabbitmqctl b/deps/rabbit/scripts/rabbitmqctl index 8016dbe282f6..d99b0c76d76c 100755 --- a/deps/rabbit/scripts/rabbitmqctl +++ b/deps/rabbit/scripts/rabbitmqctl @@ -3,7 +3,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## # Exit immediately if a pipeline, which may consist of a single simple command, @@ -20,4 +20,135 @@ set -a # shellcheck source=./rabbitmq-env . "${0%/*}"/rabbitmq-env -run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmqctl "$@" +# Uncomment for debugging +# echo "\$# : $#" +# echo "\$0: $0" +# echo "\$1: $1" +# echo "\$2: $2" +# echo "\$3: $3" +# echo "\$4: $4" +# echo "\$5: $5" +# set -x + +_tmp_help_requested='false' + +for _tmp_argument in "$@" +do + if [ "$_tmp_argument" = '--help' ] + then + _tmp_help_requested='true' + break + fi +done + +if [ "$1" = 'help' ] || [ "$_tmp_help_requested" = 'true' ] +then + unset _tmp_help_requested + # In this case, we do not require input and can exit early since + # help was requested + # + run_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmqctl 'noinput' "$@" + exit "$?" 
+fi + +unset _tmp_help_requested + +maybe_noinput='noinput' + +case "$@" in + *add_user*) + if [ "$#" -eq 2 ] + then + # In this case, input is required to provide the password: + # + # rabbitmqctl add_user bob + # + maybe_noinput='input' + elif [ "$#" -eq 3 ] + then + # In these cases, input depends on the arguments provided: + # + # rabbitmqctl add_user bob --pre-hashed-password (input needed) + # rabbitmqctl add_user bob bobpassword (NO input needed) + # rabbitmqctl add_user --pre-hashed-password bob (input needed) + # + for _tmp_argument in "$@" + do + if [ "$_tmp_argument" = '--pre-hashed-password' ] + then + maybe_noinput='input' + break + fi + done + elif [ "$#" -gt 3 ] + then + # If there are 4 or more arguments, no input is needed: + # + # rabbitmqctl add_user bob --pre-hashed-password HASHVALUE + # rabbitmqctl add_user bob bobpassword IGNORED + # rabbitmqctl add_user --pre-hashed-password bob HASHVALUE + # + maybe_noinput='noinput' + fi + ;; + *authenticate_user*) + if [ "$#" -eq 2 ] + then + # In this case, input is required to provide the password: + # + # rabbitmqctl authenticate_user bob + # + maybe_noinput='input' + elif [ "$#" -gt 2 ] + then + # If there are 2 or more arguments, no input is needed: + # + maybe_noinput='noinput' + fi + ;; + *change_password*) + maybe_noinput='input' + if [ "$#" -gt 2 ] + then + # If there are 3 or more arguments, no input is needed: + # + # rabbitmqctl change_password sue foobar + # rabbitmqctl change_password sue newpassword IGNORED + # + maybe_noinput='noinput' + fi + ;; + *decode*|*encode*) + # It is unlikely that these commands will be run in a shell script loop + # with redirection, so always assume that stdin input is needed + # + maybe_noinput='input' + ;; + *eval*) + if [ "$#" -eq 1 ] + then + # If there is only one argument, 'eval', then input is required + # + # rabbitmqctl eval + # + maybe_noinput='input' + fi + ;; + *hash_password*) + if [ "$#" -eq 1 ] + then + # If there is only one argument, 'hash_password', then input is required + # + # rabbitmqctl hash_password + # + maybe_noinput='input' + fi + ;; + *) + maybe_noinput='noinput' + ;; +esac + +unset _tmp_argument + +run_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmqctl "$maybe_noinput" "$@" diff --git a/deps/rabbit/scripts/rabbitmqctl.bat b/deps/rabbit/scripts/rabbitmqctl.bat index 711ec6e990ec..18a757eccb6e 100644 --- a/deps/rabbit/scripts/rabbitmqctl.bat +++ b/deps/rabbit/scripts/rabbitmqctl.bat @@ -3,7 +3,7 @@ REM This Source Code Form is subject to the terms of the Mozilla Public REM License, v. 2.0. If a copy of the MPL was not distributed with this REM file, You can obtain one at https://mozilla.org/MPL/2.0/. REM -REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +REM Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. REM REM Scopes the variables to the current batch file diff --git a/deps/rabbit/scripts/vmware-rabbitmq b/deps/rabbit/scripts/vmware-rabbitmq index 6327dc39d299..2806b8c5a95e 100755 --- a/deps/rabbit/scripts/vmware-rabbitmq +++ b/deps/rabbit/scripts/vmware-rabbitmq @@ -3,7 +3,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. ## # Exit immediately if a pipeline, which may consist of a single simple command, @@ -20,4 +20,4 @@ set -a # shellcheck source=./rabbitmq-env . "${0%/*}"/rabbitmq-env -run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/vmware-rabbitmq "$@" +run_escript "${ESCRIPT_DIR:?must be defined}"/vmware-rabbitmq 'noinput' "$@" diff --git a/deps/rabbit/scripts/vmware-rabbitmq.bat b/deps/rabbit/scripts/vmware-rabbitmq.bat index 3c8591d0cdf4..bedd1621398c 100644 --- a/deps/rabbit/scripts/vmware-rabbitmq.bat +++ b/deps/rabbit/scripts/vmware-rabbitmq.bat @@ -3,7 +3,7 @@ REM This Source Code Form is subject to the terms of the Mozilla Public REM License, v. 2.0. If a copy of the MPL was not distributed with this REM file, You can obtain one at https://mozilla.org/MPL/2.0/. REM -REM Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +REM Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. REM REM Scopes the variables to the current batch file diff --git a/deps/rabbit/src/amqqueue.erl b/deps/rabbit/src/amqqueue.erl index a20c9b28b04b..03e4db1b115c 100644 --- a/deps/rabbit/src/amqqueue.erl +++ b/deps/rabbit/src/amqqueue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(amqqueue). %% Could become amqqueue_v2 in the future. @@ -28,9 +28,6 @@ set_decorators/2, % exclusive_owner get_exclusive_owner/1, - % gm_pids - get_gm_pids/1, - set_gm_pids/2, get_leader/1, % name (#resource) get_name/1, @@ -53,34 +50,26 @@ % type_state get_type_state/1, set_type_state/2, - % recoverable_slaves - get_recoverable_slaves/1, - set_recoverable_slaves/2, - % slave_pids - get_slave_pids/1, - set_slave_pids/2, - % slave_pids_pending_shutdown - get_slave_pids_pending_shutdown/1, - set_slave_pids_pending_shutdown/2, % state get_state/1, set_state/2, - % sync_slave_pids - get_sync_slave_pids/1, - set_sync_slave_pids/2, get_type/1, get_vhost/1, is_amqqueue/1, is_auto_delete/1, is_durable/1, + is_exclusive/1, is_classic/1, is_quorum/1, pattern_match_all/0, pattern_match_on_name/1, pattern_match_on_type/1, - reset_mirroring_and_decorators/1, + pattern_match_on_durable/1, + pattern_match_on_type_and_durable/2, + reset_decorators/1, set_immutable/1, qnode/1, + to_printable/1, macros/0]). -define(record_version, amqqueue_v2). @@ -100,12 +89,9 @@ arguments = [] :: rabbit_framing:amqp_table() | ets:match_pattern(), %% durable (just so we know home node) pid :: pid() | ra_server_id() | none | ets:match_pattern(), - %% transient - slave_pids = [] :: [pid()] | none | ets:match_pattern(), - %% transient - sync_slave_pids = [] :: [pid()] | none| ets:match_pattern(), - %% durable - recoverable_slaves = [] :: [atom()] | none | ets:match_pattern(), + slave_pids = [], %% reserved + sync_slave_pids = [], %% reserved + recoverable_slaves = [], %% reserved %% durable, implicit update as above policy :: proplists:proplist() | none | undefined | ets:match_pattern(), %% durable, implicit update as above @@ -117,7 +103,7 @@ %% durable (have we crashed?) 
state = live :: atom() | none | ets:match_pattern(), policy_version = 0 :: non_neg_integer() | ets:match_pattern(), - slave_pids_pending_shutdown = [] :: [pid()] | ets:match_pattern(), + slave_pids_pending_shutdown = [], %% reserved %% secondary index vhost :: rabbit_types:vhost() | undefined | ets:match_pattern(), options = #{} :: map() | ets:match_pattern(), @@ -380,18 +366,6 @@ set_decorators(#amqqueue{} = Queue, Decorators) -> get_exclusive_owner(#amqqueue{exclusive_owner = Owner}) -> Owner. -% gm_pids - --spec get_gm_pids(amqqueue()) -> [{pid(), pid()}] | none. - -get_gm_pids(#amqqueue{gm_pids = GMPids}) -> - GMPids. - --spec set_gm_pids(amqqueue(), [{pid(), pid()}] | none) -> amqqueue(). - -set_gm_pids(#amqqueue{} = Queue, GMPids) -> - Queue#amqqueue{gm_pids = GMPids}. - -spec get_leader(amqqueue_v2()) -> node(). get_leader(#amqqueue{type = rabbit_quorum_queue, pid = {_, Leader}}) -> Leader. @@ -462,18 +436,6 @@ get_policy_version(#amqqueue{policy_version = PV}) -> set_policy_version(#amqqueue{} = Queue, PV) -> Queue#amqqueue{policy_version = PV}. -% recoverable_slaves - --spec get_recoverable_slaves(amqqueue()) -> [atom()] | none. - -get_recoverable_slaves(#amqqueue{recoverable_slaves = Slaves}) -> - Slaves. - --spec set_recoverable_slaves(amqqueue(), [atom()] | none) -> amqqueue(). - -set_recoverable_slaves(#amqqueue{} = Queue, Slaves) -> - Queue#amqqueue{recoverable_slaves = Slaves}. - % type_state (new in v2) -spec get_type_state(amqqueue()) -> map(). @@ -488,31 +450,6 @@ set_type_state(#amqqueue{} = Queue, TState) -> set_type_state(Queue, _TState) -> Queue. -% slave_pids - --spec get_slave_pids(amqqueue()) -> [pid()] | none. - -get_slave_pids(#amqqueue{slave_pids = Slaves}) -> - Slaves. - --spec set_slave_pids(amqqueue(), [pid()] | none) -> amqqueue(). - -set_slave_pids(#amqqueue{} = Queue, SlavePids) -> - Queue#amqqueue{slave_pids = SlavePids}. - -% slave_pids_pending_shutdown - --spec get_slave_pids_pending_shutdown(amqqueue()) -> [pid()]. - -get_slave_pids_pending_shutdown( - #amqqueue{slave_pids_pending_shutdown = Slaves}) -> - Slaves. - --spec set_slave_pids_pending_shutdown(amqqueue(), [pid()]) -> amqqueue(). - -set_slave_pids_pending_shutdown(#amqqueue{} = Queue, SlavePids) -> - Queue#amqqueue{slave_pids_pending_shutdown = SlavePids}. - % state -spec get_state(amqqueue()) -> atom() | none. @@ -524,18 +461,6 @@ get_state(#amqqueue{state = State}) -> State. set_state(#amqqueue{} = Queue, State) -> Queue#amqqueue{state = State}. -% sync_slave_pids - --spec get_sync_slave_pids(amqqueue()) -> [pid()] | none. - -get_sync_slave_pids(#amqqueue{sync_slave_pids = Pids}) -> - Pids. - --spec set_sync_slave_pids(amqqueue(), [pid()] | none) -> amqqueue(). - -set_sync_slave_pids(#amqqueue{} = Queue, Pids) -> - Queue#amqqueue{sync_slave_pids = Pids}. - %% New in v2. -spec get_type(amqqueue()) -> atom(). @@ -555,6 +480,11 @@ is_auto_delete(#amqqueue{auto_delete = AutoDelete}) -> is_durable(#amqqueue{durable = Durable}) -> Durable. +-spec is_exclusive(amqqueue()) -> boolean(). + +is_exclusive(Queue) -> + is_pid(get_exclusive_owner(Queue)). + -spec is_classic(amqqueue()) -> boolean(). is_classic(Queue) -> @@ -590,22 +520,26 @@ pattern_match_on_name(Name) -> pattern_match_on_type(Type) -> #amqqueue{type = Type, _ = '_'}. --spec reset_mirroring_and_decorators(amqqueue()) -> amqqueue(). +-spec pattern_match_on_durable(boolean()) -> amqqueue_pattern(). + +pattern_match_on_durable(IsDurable) -> + #amqqueue{durable = IsDurable, _ = '_'}. 
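%% Hypothetical call site, for illustration: like the existing pattern_match_*
%% helpers, the function added above returns an #amqqueue{} match pattern that
%% can be fed to ets/Mnesia match operations, e.g.
%%
%%   Pattern = amqqueue:pattern_match_on_durable(true),
%%   DurableQs = ets:match_object(Table, Pattern)
%%
%% where Table stands for whichever table holds amqqueue records.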
-reset_mirroring_and_decorators(#amqqueue{} = Queue) -> - Queue#amqqueue{slave_pids = [], - sync_slave_pids = [], - gm_pids = [], - decorators = undefined}. +-spec pattern_match_on_type_and_durable(atom(), boolean()) -> + amqqueue_pattern(). + +pattern_match_on_type_and_durable(Type, IsDurable) -> + #amqqueue{type = Type, durable = IsDurable, _ = '_'}. + +-spec reset_decorators(amqqueue()) -> amqqueue(). + +reset_decorators(#amqqueue{} = Queue) -> + Queue#amqqueue{decorators = undefined}. -spec set_immutable(amqqueue()) -> amqqueue(). set_immutable(#amqqueue{} = Queue) -> Queue#amqqueue{pid = none, - slave_pids = [], - sync_slave_pids = none, - recoverable_slaves = none, - gm_pids = none, policy = none, decorators = none, state = none}. @@ -622,6 +556,14 @@ qnode(none) -> qnode({_, Node}) -> Node. +-spec to_printable(amqqueue()) -> #{binary() => any()}. +to_printable(#amqqueue{name = QName = #resource{name = Name}, + vhost = VHost, type = Type}) -> + #{<<"readable_name">> => rabbit_data_coercion:to_binary(rabbit_misc:rs(QName)), + <<"name">> => Name, + <<"virtual_host">> => VHost, + <<"type">> => Type}. + % private macros() -> diff --git a/deps/rabbit/src/background_gc.erl b/deps/rabbit/src/background_gc.erl index 0aab74afbedf..a9fc8d06b9cb 100644 --- a/deps/rabbit/src/background_gc.erl +++ b/deps/rabbit/src/background_gc.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(background_gc). diff --git a/deps/rabbit/src/code_server_cache.erl b/deps/rabbit/src/code_server_cache.erl index c395d1bf5269..2a656a280246 100644 --- a/deps/rabbit/src/code_server_cache.erl +++ b/deps/rabbit/src/code_server_cache.erl @@ -4,7 +4,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(code_server_cache). diff --git a/deps/rabbit/src/gatherer.erl b/deps/rabbit/src/gatherer.erl index 8a0ab570a390..e1d9e7e8c877 100644 --- a/deps/rabbit/src/gatherer.erl +++ b/deps/rabbit/src/gatherer.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(gatherer). diff --git a/deps/rabbit/src/gm.erl b/deps/rabbit/src/gm.erl index 62bf3f5a8755..d2b253c98051 100644 --- a/deps/rabbit/src/gm.erl +++ b/deps/rabbit/src/gm.erl @@ -2,1649 +2,26 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(gm). 
-%% Guaranteed Multicast -%% ==================== -%% -%% This module provides the ability to create named groups of -%% processes to which members can be dynamically added and removed, -%% and for messages to be broadcast within the group that are -%% guaranteed to reach all members of the group during the lifetime of -%% the message. The lifetime of a message is defined as being, at a -%% minimum, the time from which the message is first sent to any -%% member of the group, up until the time at which it is known by the -%% member who published the message that the message has reached all -%% group members. -%% -%% The guarantee given is that provided a message, once sent, makes it -%% to members who do not all leave the group, the message will -%% continue to propagate to all group members. -%% -%% Another way of stating the guarantee is that if member P publishes -%% messages m and m', then for all members P', if P' is a member of -%% the group prior to the publication of m, and P' receives m', then -%% P' will receive m. -%% -%% Note that only local-ordering is enforced: i.e. if member P sends -%% message m and then message m', then for-all members P', if P' -%% receives m and m', then they will receive m' after m. Causality -%% ordering is _not_ enforced. I.e. if member P receives message m -%% and as a result publishes message m', there is no guarantee that -%% other members P' will receive m before m'. -%% -%% -%% API Use -%% ------- -%% -%% Mnesia must be started. Use the idempotent create_tables/0 function -%% to create the tables required. -%% -%% start_link/3 -%% Provide the group name, the callback module name, and any arguments -%% you wish to be passed into the callback module's functions. The -%% joined/2 function will be called when we have joined the group, -%% with the arguments passed to start_link and a list of the current -%% members of the group. See the callbacks specs and the comments -%% below for further details of the callback functions. -%% -%% leave/1 -%% Provide the Pid. Removes the Pid from the group. The callback -%% handle_terminate/2 function will be called. -%% -%% broadcast/2 -%% Provide the Pid and a Message. The message will be sent to all -%% members of the group as per the guarantees given above. This is a -%% cast and the function call will return immediately. There is no -%% guarantee that the message will reach any member of the group. -%% -%% confirmed_broadcast/2 -%% Provide the Pid and a Message. As per broadcast/2 except that this -%% is a call, not a cast, and only returns 'ok' once the Message has -%% reached every member of the group. Do not call -%% confirmed_broadcast/2 directly from the callback module otherwise -%% you will deadlock the entire group. -%% -%% info/1 -%% Provide the Pid. Returns a proplist with various facts, including -%% the group name and the current group members. -%% -%% validate_members/2 -%% Check whether a given member list agrees with the chosen member's -%% view. Any differences will be communicated via the members_changed -%% callback. If there are no differences then there will be no reply. -%% Note that members will not necessarily share the same view. -%% -%% forget_group/1 -%% Provide the group name. Removes its mnesia record. Makes no attempt -%% to ensure the group is empty. -%% -%% Implementation Overview -%% ----------------------- -%% -%% One possible means of implementation would be a fan-out from the -%% sender to every member of the group. 
This would require that the -%% group is fully connected, and, in the event that the original -%% sender of the message disappears from the group before the message -%% has made it to every member of the group, raises questions as to -%% who is responsible for sending on the message to new group members. -%% In particular, the issue is with [ Pid ! Msg || Pid <- Members ] - -%% if the sender dies part way through, who is responsible for -%% ensuring that the remaining Members receive the Msg? In the event -%% that within the group, messages sent are broadcast from a subset of -%% the members, the fan-out arrangement has the potential to -%% substantially impact the CPU and network workload of such members, -%% as such members would have to accommodate the cost of sending each -%% message to every group member. -%% -%% Instead, if the members of the group are arranged in a chain, then -%% it becomes easier to reason about who within the group has received -%% each message and who has not. It eases issues of responsibility: in -%% the event of a group member disappearing, the nearest upstream -%% member of the chain is responsible for ensuring that messages -%% continue to propagate down the chain. It also results in equal -%% distribution of sending and receiving workload, even if all -%% messages are being sent from just a single group member. This -%% configuration has the further advantage that it is not necessary -%% for every group member to know of every other group member, and -%% even that a group member does not have to be accessible from all -%% other group members. -%% -%% Performance is kept high by permitting pipelining and all -%% communication between joined group members is asynchronous. In the -%% chain A -> B -> C -> D, if A sends a message to the group, it will -%% not directly contact C or D. However, it must know that D receives -%% the message (in addition to B and C) before it can consider the -%% message fully sent. A simplistic implementation would require that -%% D replies to C, C replies to B and B then replies to A. This would -%% result in a propagation delay of twice the length of the chain. It -%% would also require, in the event of the failure of C, that D knows -%% to directly contact B and issue the necessary replies. Instead, the -%% chain forms a ring: D sends the message on to A: D does not -%% distinguish A as the sender, merely as the next member (downstream) -%% within the chain (which has now become a ring). When A receives -%% from D messages that A sent, it knows that all members have -%% received the message. However, the message is not dead yet: if C -%% died as B was sending to C, then B would need to detect the death -%% of C and forward the message on to D instead: thus every node has -%% to remember every message published until it is told that it can -%% forget about the message. This is essential not just for dealing -%% with failure of members, but also for the addition of new members. -%% -%% Thus once A receives the message back again, it then sends to B an -%% acknowledgement for the message, indicating that B can now forget -%% about the message. B does so, and forwards the ack to C. C forgets -%% the message, and forwards the ack to D, which forgets the message -%% and finally forwards the ack back to A. At this point, A takes no -%% further action: the message and its acknowledgement have made it to -%% every member of the group. 
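To make the double traversal described above more concrete, here is a deliberately tiny toy model. It is not the gm implementation (which this diff removes) and all names are invented: four processes arranged in a ring forward a publication downstream until the publisher sees its own message come back, at which point it emits an acknowledgement that makes the same trip; once the publisher sees its own ack return, the message can be forgotten.

-module(gm_ring_toy).
-export([demo/0, member/2]).

demo() ->
    %% Build a ring a -> b -> c -> d -> a of four toy members.
    Pids = [A | _] = [spawn(?MODULE, member, [Name, self()])
                      || Name <- [a, b, c, d]],
    Ring = lists:zip(Pids, tl(Pids) ++ [A]),
    [P ! {set_right, R} || {P, R} <- Ring],
    A ! {broadcast, hello},
    receive {retired, a, hello} -> ok end.

member(Name, Owner) ->
    receive {set_right, Right} -> loop(Name, Owner, Right) end.

loop(Name, Owner, Right) ->
    receive
        {broadcast, Msg} ->
            Right ! {pub, self(), Msg},       %% start the first traversal
            loop(Name, Owner, Right);
        {pub, Sender, Msg} when Sender =:= self() ->
            Right ! {ack, self(), Msg},       %% our pub made it all the way round
            loop(Name, Owner, Right);
        {pub, Sender, Msg} ->
            Right ! {pub, Sender, Msg},       %% forward downstream
            loop(Name, Owner, Right);
        {ack, Sender, Msg} when Sender =:= self() ->
            Owner ! {retired, Name, Msg},     %% ack made it round: message is dead
            loop(Name, Owner, Right);
        {ack, Sender, Msg} ->
            Right ! {ack, Sender, Msg},
            loop(Name, Owner, Right)
    end.

The real module layers buffering, view versions, failure detection and inheritance of dead members on top of exactly this pub-then-ack circulation.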
The message is now dead, and any new -%% member joining the group at this point will not receive the -%% message. -%% -%% We therefore have two roles: -%% -%% 1. The sender, who upon receiving their own messages back, must -%% then send out acknowledgements, and upon receiving their own -%% acknowledgements back perform no further action. -%% -%% 2. The other group members who upon receiving messages and -%% acknowledgements must update their own internal state accordingly -%% (the sending member must also do this in order to be able to -%% accommodate failures), and forwards messages on to their downstream -%% neighbours. -%% -%% -%% Implementation: It gets trickier -%% -------------------------------- -%% -%% Chain A -> B -> C -> D -%% -%% A publishes a message which B receives. A now dies. B and D will -%% detect the death of A, and will link up, thus the chain is now B -> -%% C -> D. B forwards A's message on to C, who forwards it to D, who -%% forwards it to B. Thus B is now responsible for A's messages - both -%% publications and acknowledgements that were in flight at the point -%% at which A died. Even worse is that this is transitive: after B -%% forwards A's message to C, B dies as well. Now C is not only -%% responsible for B's in-flight messages, but is also responsible for -%% A's in-flight messages. -%% -%% Lemma 1: A member can only determine which dead members they have -%% inherited responsibility for if there is a total ordering on the -%% conflicting additions and subtractions of members from the group. -%% -%% Consider the simultaneous death of B and addition of B' that -%% transitions a chain from A -> B -> C to A -> B' -> C. Either B' or -%% C is responsible for in-flight messages from B. It is easy to -%% ensure that at least one of them thinks they have inherited B, but -%% if we do not ensure that exactly one of them inherits B, then we -%% could have B' converting publishes to acks, which then will crash C -%% as C does not believe it has issued acks for those messages. -%% -%% More complex scenarios are easy to concoct: A -> B -> C -> D -> E -%% becoming A -> C' -> E. Who has inherited which of B, C and D? -%% -%% However, for non-conflicting membership changes, only a partial -%% ordering is required. For example, A -> B -> C becoming A -> A' -> -%% B. The addition of A', between A and B can have no conflicts with -%% the death of C: it is clear that A has inherited C's messages. -%% -%% For ease of implementation, we adopt the simple solution, of -%% imposing a total order on all membership changes. -%% -%% On the death of a member, it is ensured the dead member's -%% neighbours become aware of the death, and the upstream neighbour -%% now sends to its new downstream neighbour its state, including the -%% messages pending acknowledgement. The downstream neighbour can then -%% use this to calculate which publishes and acknowledgements it has -%% missed out on, due to the death of its old upstream. Thus the -%% downstream can catch up, and continues the propagation of messages -%% through the group. -%% -%% Lemma 2: When a member is joining, it must synchronously -%% communicate with its upstream member in order to receive its -%% starting state atomically with its addition to the group. -%% -%% New members must start with the same state as their nearest -%% upstream neighbour. 
This ensures that it is not surprised by -%% acknowledgements they are sent, and that should their downstream -%% neighbour die, they are able to send the correct state to their new -%% downstream neighbour to ensure it can catch up. Thus in the -%% transition A -> B -> C becomes A -> A' -> B -> C becomes A -> A' -> -%% C, A' must start with the state of A, so that it can send C the -%% correct state when B dies, allowing C to detect any missed -%% messages. -%% -%% If A' starts by adding itself to the group membership, A could then -%% die, without A' having received the necessary state from A. This -%% would leave A' responsible for in-flight messages from A, but -%% having the least knowledge of all, of those messages. Thus A' must -%% start by synchronously calling A, which then immediately sends A' -%% back its state. A then adds A' to the group. If A dies at this -%% point then A' will be able to see this (as A' will fail to appear -%% in the group membership), and thus A' will ignore the state it -%% receives from A, and will simply repeat the process, trying to now -%% join downstream from some other member. This ensures that should -%% the upstream die as soon as the new member has been joined, the new -%% member is guaranteed to receive the correct state, allowing it to -%% correctly process messages inherited due to the death of its -%% upstream neighbour. -%% -%% The canonical definition of the group membership is held by a -%% distributed database. Whilst this allows the total ordering of -%% changes to be achieved, it is nevertheless undesirable to have to -%% query this database for the current view, upon receiving each -%% message. Instead, we wish for members to be able to cache a view of -%% the group membership, which then requires a cache invalidation -%% mechanism. Each member maintains its own view of the group -%% membership. Thus when the group's membership changes, members may -%% need to become aware of such changes in order to be able to -%% accurately process messages they receive. Because of the -%% requirement of a total ordering of conflicting membership changes, -%% it is not possible to use the guaranteed broadcast mechanism to -%% communicate these changes: to achieve the necessary ordering, it -%% would be necessary for such messages to be published by exactly one -%% member, which can not be guaranteed given that such a member could -%% die. -%% -%% The total ordering we enforce on membership changes gives rise to a -%% view version number: every change to the membership creates a -%% different view, and the total ordering permits a simple -%% monotonically increasing view version number. -%% -%% Lemma 3: If a message is sent from a member that holds view version -%% N, it can be correctly processed by any member receiving the -%% message with a view version >= N. -%% -%% Initially, let us suppose that each view contains the ordering of -%% every member that was ever part of the group. Dead members are -%% marked as such. Thus we have a ring of members, some of which are -%% dead, and are thus inherited by the nearest alive downstream -%% member. -%% -%% In the chain A -> B -> C, all three members initially have view -%% version 1, which reflects reality. B publishes a message, which is -%% forward by C to A. B now dies, which A notices very quickly. Thus A -%% updates the view, creating version 2. It now forwards B's -%% publication, sending that message to its new downstream neighbour, -%% C. This happens before C is aware of the death of B. 
C must become -%% aware of the view change before it interprets the message its -%% received, otherwise it will fail to learn of the death of B, and -%% thus will not realise it has inherited B's messages (and will -%% likely crash). -%% -%% Thus very simply, we have that each subsequent view contains more -%% information than the preceding view. -%% -%% However, to avoid the views growing indefinitely, we need to be -%% able to delete members which have died _and_ for which no messages -%% are in-flight. This requires that upon inheriting a dead member, we -%% know the last publication sent by the dead member (this is easy: we -%% inherit a member because we are the nearest downstream member which -%% implies that we know at least as much than everyone else about the -%% publications of the dead member), and we know the earliest message -%% for which the acknowledgement is still in flight. -%% -%% In the chain A -> B -> C, when B dies, A will send to C its state -%% (as C is the new downstream from A), allowing C to calculate which -%% messages it has missed out on (described above). At this point, C -%% also inherits B's messages. If that state from A also includes the -%% last message published by B for which an acknowledgement has been -%% seen, then C knows exactly which further acknowledgements it must -%% receive (also including issuing acknowledgements for publications -%% still in-flight that it receives), after which it is known there -%% are no more messages in flight for B, thus all evidence that B was -%% ever part of the group can be safely removed from the canonical -%% group membership. -%% -%% Thus, for every message that a member sends, it includes with that -%% message its view version. When a member receives a message it will -%% update its view from the canonical copy, should its view be older -%% than the view version included in the message it has received. -%% -%% The state held by each member therefore includes the messages from -%% each publisher pending acknowledgement, the last publication seen -%% from that publisher, and the last acknowledgement from that -%% publisher. In the case of the member's own publications or -%% inherited members, this last acknowledgement seen state indicates -%% the last acknowledgement retired, rather than sent. -%% -%% -%% Proof sketch -%% ------------ -%% -%% We need to prove that with the provided operational semantics, we -%% can never reach a state that is not well formed from a well-formed -%% starting state. -%% -%% Operational semantics (small step): straight-forward message -%% sending, process monitoring, state updates. -%% -%% Well formed state: dead members inherited by exactly one non-dead -%% member; for every entry in anyone's pending-acks, either (the -%% publication of the message is in-flight downstream from the member -%% and upstream from the publisher) or (the acknowledgement of the -%% message is in-flight downstream from the publisher and upstream -%% from the member). -%% -%% Proof by induction on the applicable operational semantics. -%% -%% -%% Related work -%% ------------ -%% -%% The ring configuration and double traversal of messages around the -%% ring is similar (though developed independently) to the LCR -%% protocol by [Levy 2008]. However, LCR differs in several -%% ways. Firstly, by using vector clocks, it enforces a total order of -%% message delivery, which is unnecessary for our purposes. 
More -%% significantly, it is built on top of a "group communication system" -%% which performs the group management functions, taking -%% responsibility away from the protocol as to how to cope with safely -%% adding and removing members. When membership changes do occur, the -%% protocol stipulates that every member must perform communication -%% with every other member of the group, to ensure all outstanding -%% deliveries complete, before the entire group transitions to the new -%% view. This, in total, requires two sets of all-to-all synchronous -%% communications. -%% -%% This is not only rather inefficient, but also does not explain what -%% happens upon the failure of a member during this process. It does -%% though entirely avoid the need for inheritance of responsibility of -%% dead members that our protocol incorporates. -%% -%% In [Marandi et al 2010], a Paxos-based protocol is described. This -%% work explicitly focuses on the efficiency of communication. LCR -%% (and our protocol too) are more efficient, but at the cost of -%% higher latency. The Ring-Paxos protocol is itself built on top of -%% IP-multicast, which rules it out for many applications where -%% point-to-point communication is all that can be required. They also -%% have an excellent related work section which I really ought to -%% read... -%% -%% -%% [Levy 2008] The Complexity of Reliable Distributed Storage, 2008. -%% [Marandi et al 2010] Ring Paxos: A High-Throughput Atomic Broadcast -%% Protocol - - --behaviour(gen_server2). - --export([create_tables/0, start_link/4, leave/1, broadcast/2, broadcast/3, - confirmed_broadcast/2, info/1, validate_members/2, forget_group/1]). - --export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3, prioritise_info/3]). - -%% For INSTR_MOD callbacks --export([call/3, cast/2, monitor/1, demonitor/1]). +%% Deprecated with CMQ. +%% This module stays here for mixed-version compatibility, because of +%% the `gm_group` table. It can be removed once the migration to Khepri +%% is finalised and Mnesia fully removed. -export([table_definitions/0]). -define(GROUP_TABLE, gm_group). --define(MAX_BUFFER_SIZE, 100000000). %% 100MB --define(BROADCAST_TIMER, 25). --define(FORCE_GC_TIMER, 250). --define(VERSION_START, 0). --define(SETS, ordsets). - --record(state, - { self, - left, - right, - group_name, - module, - view, - pub_count, - members_state, - callback_args, - confirms, - broadcast_buffer, - broadcast_buffer_sz, - broadcast_timer, - force_gc_timer, - txn_executor, - shutting_down - }). -record(gm_group, { name, version, members }). --record(view_member, { id, aliases, left, right }). - --record(member, { pending_ack, last_pub, last_ack }). - -define(TABLE, {?GROUP_TABLE, [{record_name, gm_group}, {attributes, record_info(fields, gm_group)}]}). -define(TABLE_MATCH, {match, #gm_group { _ = '_' }}). --define(TAG, '$gm'). - --export_type([group_name/0]). - --type group_name() :: any(). --type txn_fun() :: fun((fun(() -> any())) -> any()). - -%% The joined, members_changed and handle_msg callbacks can all return -%% any of the following terms: -%% -%% 'ok' - the callback function returns normally -%% -%% {'stop', Reason} - the callback indicates the member should stop -%% with reason Reason and should leave the group. -%% -%% {'become', Module, Args} - the callback indicates that the callback -%% module should be changed to Module and that the callback functions -%% should now be passed the arguments Args. 
This allows the callback -%% module to be dynamically changed. - -%% Called when we've successfully joined the group. Supplied with Args -%% provided in start_link, plus current group members. --callback joined(Args :: term(), Members :: [pid()]) -> - ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}. - -%% Supplied with Args provided in start_link, the list of new members -%% and the list of members previously known to us that have since -%% died. Note that if a member joins and dies very quickly, it's -%% possible that we will never see that member appear in either births -%% or deaths. However we are guaranteed that (1) we will see a member -%% joining either in the births here, or in the members passed to -%% joined/2 before receiving any messages from it; and (2) we will not -%% see members die that we have not seen born (or supplied in the -%% members to joined/2). --callback members_changed(Args :: term(), - Births :: [pid()], Deaths :: [pid()]) -> - ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}. - -%% Supplied with Args provided in start_link, the sender, and the -%% message. This does get called for messages injected by this member, -%% however, in such cases, there is no special significance of this -%% invocation: it does not indicate that the message has made it to -%% any other members, let alone all other members. --callback handle_msg(Args :: term(), From :: pid(), Message :: term()) -> - ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}. - -%% Called on gm member termination as per rules in gen_server, with -%% the Args provided in start_link plus the termination Reason. --callback handle_terminate(Args :: term(), Reason :: term()) -> - ok | term(). - --spec create_tables() -> 'ok' | {'aborted', any()}. - -create_tables() -> - create_tables([?TABLE]). - -create_tables([]) -> - ok; -create_tables([{Table, Attributes} | Tables]) -> - case mnesia:create_table(Table, Attributes) of - {atomic, ok} -> create_tables(Tables); - {aborted, {already_exists, Table}} -> create_tables(Tables); - Err -> Err - end. - table_definitions() -> {Name, Attributes} = ?TABLE, [{Name, [?TABLE_MATCH | Attributes]}]. - --spec start_link(group_name(), atom(), any(), txn_fun()) -> - rabbit_types:ok_pid_or_error(). - -start_link(GroupName, Module, Args, TxnFun) -> - gen_server2:start_link(?MODULE, [GroupName, Module, Args, TxnFun], - [{spawn_opt, [{fullsweep_after, 0}]}]). - --spec leave(pid()) -> 'ok'. - -leave(Server) -> - gen_server2:cast(Server, leave). - --spec broadcast(pid(), any()) -> 'ok'. - -broadcast(Server, Msg) -> broadcast(Server, Msg, 0). - -broadcast(Server, Msg, SizeHint) -> - gen_server2:cast(Server, {broadcast, Msg, SizeHint}). - --spec confirmed_broadcast(pid(), any()) -> 'ok'. - -confirmed_broadcast(Server, Msg) -> - gen_server2:call(Server, {confirmed_broadcast, Msg}, infinity). - --spec info(pid()) -> rabbit_types:infos(). - -info(Server) -> - gen_server2:call(Server, info, infinity). - --spec validate_members(pid(), [pid()]) -> 'ok'. - -validate_members(Server, Members) -> - gen_server2:cast(Server, {validate_members, Members}). - --spec forget_group(group_name()) -> 'ok'. - -forget_group(GroupName) -> - {atomic, ok} = mnesia:sync_transaction( - fun () -> - mnesia:delete({?GROUP_TABLE, GroupName}) - end), - ok. 
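Purely as historical context for the callback contract and API spelled out above, a gm user module was typically wired up along the following lines. The module name, group name and the transaction wrapper here are illustrative assumptions, not code from this repository:

-module(my_gm_user).
-behaviour(gm).

-export([start/0, joined/2, members_changed/3, handle_msg/3,
         handle_terminate/2]).

start() ->
    %% gm expects a fun that runs the supplied fun inside an Mnesia
    %% transaction and returns its result; Mnesia must already be running
    %% and gm:create_tables/0 must have been called beforehand.
    TxnFun = fun(F) -> {atomic, Res} = mnesia:sync_transaction(F), Res end,
    gm:start_link(my_group, ?MODULE, [], TxnFun).

joined(_Args, _Members)                  -> ok.
members_changed(_Args, _Births, _Deaths) -> ok.
handle_msg(_Args, _From, _Msg)           -> ok.
handle_terminate(_Args, _Reason)         -> ok.

Each callback may also return {stop, Reason} or {become, Module, Args} as documented above; returning ok everywhere keeps the member passive.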
- -init([GroupName, Module, Args, TxnFun]) -> - put(process_name, {?MODULE, GroupName}), - Self = make_member(GroupName), - gen_server2:cast(self(), join), - {ok, #state { self = Self, - left = {Self, undefined}, - right = {Self, undefined}, - group_name = GroupName, - module = Module, - view = undefined, - pub_count = -1, - members_state = undefined, - callback_args = Args, - confirms = queue:new(), - broadcast_buffer = [], - broadcast_buffer_sz = 0, - broadcast_timer = undefined, - force_gc_timer = undefined, - txn_executor = TxnFun, - shutting_down = false }}. - - -handle_call({confirmed_broadcast, _Msg}, _From, - State = #state { shutting_down = {true, _} }) -> - reply(shutting_down, State); - -handle_call({confirmed_broadcast, _Msg}, _From, - State = #state { members_state = undefined }) -> - reply(not_joined, State); - -handle_call({confirmed_broadcast, Msg}, _From, - State = #state { self = Self, - right = {Self, undefined}, - module = Module, - callback_args = Args }) -> - handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg), - ok, State}); - -handle_call({confirmed_broadcast, Msg}, From, State) -> - {Result, State1 = #state { pub_count = PubCount, confirms = Confirms }} = - internal_broadcast(Msg, 0, State), - Confirms1 = queue:in({PubCount, From}, Confirms), - handle_callback_result({Result, flush_broadcast_buffer( - State1 #state { confirms = Confirms1 })}); - -handle_call(info, _From, - State = #state { members_state = undefined }) -> - reply(not_joined, State); - -handle_call(info, _From, State = #state { group_name = GroupName, - module = Module, - view = View }) -> - reply([{group_name, GroupName}, - {module, Module}, - {group_members, get_pids(alive_view_members(View))}], State); - -handle_call({add_on_right, _NewMember}, _From, - State = #state { members_state = undefined }) -> - reply(not_ready, State); - -handle_call({add_on_right, NewMember}, _From, - State = #state { self = Self, - group_name = GroupName, - members_state = MembersState, - txn_executor = TxnFun }) -> - try - Group = record_new_member_in_group( - NewMember, Self, GroupName, TxnFun), - View1 = group_to_view(check_membership(Self, Group)), - MembersState1 = remove_erased_members(MembersState, View1), - ok = send_right(NewMember, View1, - {catchup, Self, prepare_members_state(MembersState1)}), - {Result, State1} = change_view(View1, State #state { - members_state = MembersState1 }), - handle_callback_result({Result, {ok, Group}, State1}) - catch - lost_membership -> - {stop, shutdown, State} - end. - -%% add_on_right causes a catchup to be sent immediately from the left, -%% so we can never see this from the left neighbour. However, it's -%% possible for the right neighbour to send us a check_neighbours -%% immediately before that. We can't possibly handle it, but if we're -%% in this state we know a catchup is coming imminently anyway. So -%% just ignore it. 
-handle_cast({?TAG, _ReqVer, check_neighbours}, - State = #state { members_state = undefined }) -> - noreply(State); - -handle_cast({?TAG, ReqVer, Msg}, - State = #state { view = View, - self = Self, - members_state = MembersState, - group_name = GroupName }) -> - try - {Result, State1} = - case needs_view_update(ReqVer, View) of - true -> - View1 = group_to_view( - check_membership(Self, - dirty_read_group(GroupName))), - MemberState1 = remove_erased_members(MembersState, View1), - change_view(View1, State #state { - members_state = MemberState1 }); - false -> {ok, State} - end, - handle_callback_result( - if_callback_success( - Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1)) - catch - lost_membership -> - {stop, shutdown, State} - end; - -handle_cast({broadcast, _Msg, _SizeHint}, - State = #state { shutting_down = {true, _} }) -> - noreply(State); - -handle_cast({broadcast, _Msg, _SizeHint}, - State = #state { members_state = undefined }) -> - noreply(State); - -handle_cast({broadcast, Msg, _SizeHint}, - State = #state { self = Self, - right = {Self, undefined}, - module = Module, - callback_args = Args }) -> - handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg), - State}); - -handle_cast({broadcast, Msg, SizeHint}, State) -> - {Result, State1} = internal_broadcast(Msg, SizeHint, State), - handle_callback_result({Result, maybe_flush_broadcast_buffer(State1)}); - -handle_cast(join, State = #state { self = Self, - group_name = GroupName, - members_state = undefined, - module = Module, - callback_args = Args, - txn_executor = TxnFun }) -> - try - View = join_group(Self, GroupName, TxnFun), - MembersState = - case alive_view_members(View) of - [Self] -> blank_member_state(); - _ -> undefined - end, - State1 = check_neighbours(State #state { view = View, - members_state = MembersState }), - handle_callback_result( - {Module:joined(Args, get_pids(all_known_members(View))), State1}) - catch - lost_membership -> - {stop, shutdown, State} - end; - -handle_cast({validate_members, OldMembers}, - State = #state { view = View, - module = Module, - callback_args = Args }) -> - NewMembers = get_pids(all_known_members(View)), - Births = NewMembers -- OldMembers, - Deaths = OldMembers -- NewMembers, - case {Births, Deaths} of - {[], []} -> noreply(State); - _ -> Result = Module:members_changed(Args, Births, Deaths), - handle_callback_result({Result, State}) - end; - -handle_cast(leave, State) -> - {stop, normal, State}. - - -handle_info(force_gc, State) -> - garbage_collect(), - noreply(State #state { force_gc_timer = undefined }); - -handle_info(flush, State) -> - noreply( - flush_broadcast_buffer(State #state { broadcast_timer = undefined })); - -handle_info(timeout, State) -> - noreply(flush_broadcast_buffer(State)); - -handle_info({'DOWN', _MRef, process, _Pid, _Reason}, - State = #state { shutting_down = - {true, {shutdown, ring_shutdown}} }) -> - noreply(State); -handle_info({'DOWN', MRef, process, _Pid, Reason}, - State = #state { self = Self, - left = Left, - right = Right, - group_name = GroupName, - confirms = Confirms, - txn_executor = TxnFun }) -> - try - check_membership(GroupName), - Member = case {Left, Right} of - {{Member1, MRef}, _} -> Member1; - {_, {Member1, MRef}} -> Member1; - _ -> undefined - end, - case {Member, Reason} of - {undefined, _} -> - noreply(State); - {_, {shutdown, ring_shutdown}} -> - noreply(State); - _ -> - %% In the event of a partial partition we could see another member - %% go down and then remove them from Mnesia. 
While they can - %% recover from this they'd have to restart the queue - not - %% ideal. So let's sleep here briefly just in case this was caused - %% by a partial partition; in which case by the time we record the - %% member death in Mnesia we will probably be in a full - %% partition and will not be assassinating another member. - timer:sleep(100), - View1 = group_to_view(record_dead_member_in_group(Self, - Member, GroupName, TxnFun, true)), - handle_callback_result( - case alive_view_members(View1) of - [Self] -> maybe_erase_aliases( - State #state { - members_state = blank_member_state(), - confirms = purge_confirms(Confirms) }, - View1); - _ -> change_view(View1, State) - end) - end - catch - lost_membership -> - {stop, shutdown, State} - end; -handle_info(_, State) -> - %% Discard any unexpected messages, such as late replies from neighbour_call/2 - %% TODO: For #gm_group{} related info messages, it could be worthwhile to - %% change_view/2, as this might reflect an alteration in the gm group, meaning - %% we now need to update our state. see rabbitmq-server#914. - noreply(State). - -terminate(Reason, #state { module = Module, callback_args = Args }) -> - Module:handle_terminate(Args, Reason). - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -prioritise_info(flush, _Len, _State) -> - 1; -%% DOWN messages should not overtake initial catchups; if they do we -%% will receive a DOWN we do not know what to do with. -prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len, - #state { members_state = undefined }) -> - 0; -%% We should not prioritise DOWN messages from our left since -%% otherwise the DOWN can overtake any last activity from the left, -%% causing that activity to be lost. -prioritise_info({'DOWN', _MRef, process, LeftPid, _Reason}, _Len, - #state { left = {{_LeftVer, LeftPid}, _MRef2} }) -> - 0; -%% But prioritise all other DOWNs - we want to make sure we are not -%% sending activity into the void for too long because our right is -%% down but we don't know it. -prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len, _State) -> - 1; -prioritise_info(_, _Len, _State) -> - 0. 
- - -handle_msg(check_neighbours, State) -> - %% no-op - it's already been done by the calling handle_cast - {ok, State}; - -handle_msg({catchup, Left, MembersStateLeft}, - State = #state { self = Self, - left = {Left, _MRefL}, - right = {Right, _MRefR}, - view = View, - members_state = undefined }) -> - ok = send_right(Right, View, {catchup, Self, MembersStateLeft}), - MembersStateLeft1 = build_members_state(MembersStateLeft), - {ok, State #state { members_state = MembersStateLeft1 }}; - -handle_msg({catchup, Left, MembersStateLeft}, - State = #state { self = Self, - left = {Left, _MRefL}, - view = View, - members_state = MembersState }) - when MembersState =/= undefined -> - MembersStateLeft1 = build_members_state(MembersStateLeft), - AllMembers = lists:usort(maps:keys(MembersState) ++ - maps:keys(MembersStateLeft1)), - {MembersState1, Activity} = - lists:foldl( - fun (Id, MembersStateActivity) -> - #member { pending_ack = PALeft, last_ack = LA } = - find_member_or_blank(Id, MembersStateLeft1), - with_member_acc( - fun (#member { pending_ack = PA } = Member, Activity1) -> - case is_member_alias(Id, Self, View) of - true -> - {_AcksInFlight, Pubs, _PA1} = - find_prefix_common_suffix(PALeft, PA), - {Member #member { last_ack = LA }, - activity_cons(Id, pubs_from_queue(Pubs), - [], Activity1)}; - false -> - {Acks, _Common, Pubs} = - find_prefix_common_suffix(PA, PALeft), - {Member, - activity_cons(Id, pubs_from_queue(Pubs), - acks_from_queue(Acks), - Activity1)} - end - end, Id, MembersStateActivity) - end, {MembersState, activity_nil()}, AllMembers), - handle_msg({activity, Left, activity_finalise(Activity)}, - State #state { members_state = MembersState1 }); - -handle_msg({catchup, _NotLeft, _MembersState}, State) -> - {ok, State}; - -handle_msg({activity, Left, Activity}, - State = #state { self = Self, - group_name = GroupName, - left = {Left, _MRefL}, - view = View, - members_state = MembersState, - confirms = Confirms }) - when MembersState =/= undefined -> - try - %% If we have to stop, do it asap so we avoid any ack confirmation - %% Membership must be checked again by erase_members_in_group, as the - %% node can be marked as dead on the meanwhile - check_membership(GroupName), - {MembersState1, {Confirms1, Activity1}} = - calculate_activity(MembersState, Confirms, Activity, Self, View), - State1 = State #state { members_state = MembersState1, - confirms = Confirms1 }, - Activity3 = activity_finalise(Activity1), - ok = maybe_send_activity(Activity3, State1), - {Result, State2} = maybe_erase_aliases(State1, View), - if_callback_success( - Result, fun activity_true/3, fun activity_false/3, Activity3, State2) - catch - lost_membership -> - {{stop, shutdown}, State} - end; - -handle_msg({activity, _NotLeft, _Activity}, State) -> - {ok, State}. - - -noreply(State) -> - {noreply, ensure_timers(State), flush_timeout(State)}. - -reply(Reply, State) -> - {reply, Reply, ensure_timers(State), flush_timeout(State)}. - -ensure_timers(State) -> - ensure_force_gc_timer(ensure_broadcast_timer(State)). - -flush_timeout(#state{broadcast_buffer = []}) -> infinity; -flush_timeout(_) -> 0. - -ensure_force_gc_timer(State = #state { force_gc_timer = TRef }) - when is_reference(TRef) -> - State; -ensure_force_gc_timer(State = #state { force_gc_timer = undefined }) -> - TRef = erlang:send_after(?FORCE_GC_TIMER, self(), force_gc), - State #state { force_gc_timer = TRef }. 
- -ensure_broadcast_timer(State = #state { broadcast_buffer = [], - broadcast_timer = undefined }) -> - State; -ensure_broadcast_timer(State = #state { broadcast_buffer = [], - broadcast_timer = TRef }) -> - _ = erlang:cancel_timer(TRef), - State #state { broadcast_timer = undefined }; -ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) -> - TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush), - State #state { broadcast_timer = TRef }; -ensure_broadcast_timer(State) -> - State. - -internal_broadcast(Msg, SizeHint, - State = #state { self = Self, - pub_count = PubCount, - module = Module, - callback_args = Args, - broadcast_buffer = Buffer, - broadcast_buffer_sz = BufferSize }) -> - PubCount1 = PubCount + 1, - {Module:handle_msg(Args, get_pid(Self), Msg), - State #state { pub_count = PubCount1, - broadcast_buffer = [{PubCount1, Msg} | Buffer], - broadcast_buffer_sz = BufferSize + SizeHint}}. - -%% The Erlang distribution mechanism has an interesting quirk - it -%% will kill the VM cold with "Absurdly large distribution output data -%% buffer" if you attempt to send a message which serialises out to -%% more than 2^31 bytes in size. It's therefore a very good idea to -%% make sure that we don't exceed that size! -%% -%% Now, we could figure out the size of messages as they come in using -%% size(term_to_binary(Msg)) or similar. The trouble is, that requires -%% us to serialise the message only to throw the serialised form -%% away. Hard to believe that's a sensible thing to do. So instead we -%% accept a size hint from the application, via broadcast/3. This size -%% hint can be the size of anything in the message which we expect -%% could be large, and we just ignore the size of any small bits of -%% the message term. Therefore MAX_BUFFER_SIZE is set somewhat -%% conservatively at 100MB - but the buffer is only to allow us to -%% buffer tiny messages anyway, so 100MB is plenty. - -maybe_flush_broadcast_buffer(State = #state{broadcast_buffer_sz = Size}) -> - case Size > ?MAX_BUFFER_SIZE of - true -> flush_broadcast_buffer(State); - false -> State - end. - -flush_broadcast_buffer(State = #state { broadcast_buffer = [] }) -> - State; -flush_broadcast_buffer(State = #state { self = Self, - members_state = MembersState, - broadcast_buffer = Buffer, - pub_count = PubCount }) -> - [{PubCount, _Msg}|_] = Buffer, %% ASSERTION match on PubCount - Pubs = lists:reverse(Buffer), - Activity = activity_cons(Self, Pubs, [], activity_nil()), - ok = maybe_send_activity(activity_finalise(Activity), State), - MembersState1 = with_member( - fun (Member = #member { pending_ack = PA }) -> - PA1 = queue:join(PA, queue:from_list(Pubs)), - Member #member { pending_ack = PA1, - last_pub = PubCount } - end, Self, MembersState), - State #state { members_state = MembersState1, - broadcast_buffer = [], - broadcast_buffer_sz = 0 }. - -%% --------------------------------------------------------------------------- -%% View construction and inspection -%% --------------------------------------------------------------------------- - -needs_view_update(ReqVer, {Ver, _View}) -> Ver < ReqVer. - -view_version({Ver, _View}) -> Ver. - -is_member_alive({dead, _Member}) -> false; -is_member_alive(_) -> true. - -is_member_alias(Self, Self, _View) -> - true; -is_member_alias(Member, Self, View) -> - ?SETS:is_element(Member, - ((fetch_view_member(Self, View)) #view_member.aliases)). - -dead_member_id({dead, Member}) -> Member. 
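The size-hint comment earlier in this hunk boils down to: the caller supplies an estimate of the only potentially large part of the term, so gm can bound its broadcast buffer without serialising messages just to measure them. An illustrative call (GmPid and Payload are hypothetical variables) would be:

    %% Payload is the only large part of the term, so its byte size is a
    %% good enough hint against the 100MB buffer bound.
    ok = gm:broadcast(GmPid, {deliver, Payload}, byte_size(Payload)).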
- -store_view_member(VMember = #view_member { id = Id }, {Ver, View}) -> - {Ver, maps:put(Id, VMember, View)}. - -with_view_member(Fun, View, Id) -> - store_view_member(Fun(fetch_view_member(Id, View)), View). - -fetch_view_member(Id, {_Ver, View}) -> maps:get(Id, View). - -find_view_member(Id, {_Ver, View}) -> maps:find(Id, View). - -blank_view(Ver) -> {Ver, maps:new()}. - -alive_view_members({_Ver, View}) -> maps:keys(View). - -all_known_members({_Ver, View}) -> - maps:fold( - fun (Member, #view_member { aliases = Aliases }, Acc) -> - ?SETS:to_list(Aliases) ++ [Member | Acc] - end, [], View). - -group_to_view(#gm_group { members = Members, version = Ver }) -> - Alive = lists:filter(fun is_member_alive/1, Members), - [_|_] = Alive, %% ASSERTION - can't have all dead members - add_aliases(link_view(Alive ++ Alive ++ Alive, blank_view(Ver)), Members). - -link_view([Left, Middle, Right | Rest], View) -> - case find_view_member(Middle, View) of - error -> - link_view( - [Middle, Right | Rest], - store_view_member(#view_member { id = Middle, - aliases = ?SETS:new(), - left = Left, - right = Right }, View)); - {ok, _} -> - View - end; -link_view(_, View) -> - View. - -add_aliases(View, Members) -> - Members1 = ensure_alive_suffix(Members), - {EmptyDeadSet, View1} = - lists:foldl( - fun (Member, {DeadAcc, ViewAcc}) -> - case is_member_alive(Member) of - true -> - {?SETS:new(), - with_view_member( - fun (VMember = - #view_member { aliases = Aliases }) -> - VMember #view_member { - aliases = ?SETS:union(Aliases, DeadAcc) } - end, ViewAcc, Member)}; - false -> - {?SETS:add_element(dead_member_id(Member), DeadAcc), - ViewAcc} - end - end, {?SETS:new(), View}, Members1), - 0 = ?SETS:size(EmptyDeadSet), %% ASSERTION - View1. - -ensure_alive_suffix(Members) -> - queue:to_list(ensure_alive_suffix1(queue:from_list(Members))). - -ensure_alive_suffix1(MembersQ) -> - {{value, Member}, MembersQ1} = queue:out_r(MembersQ), - case is_member_alive(Member) of - true -> MembersQ; - false -> ensure_alive_suffix1(queue:in_r(Member, MembersQ1)) - end. - - -%% --------------------------------------------------------------------------- -%% View modification -%% --------------------------------------------------------------------------- - -join_group(Self, GroupName, TxnFun) -> - join_group(Self, GroupName, dirty_read_group(GroupName), TxnFun). - -join_group(Self, GroupName, {error, not_found}, TxnFun) -> - join_group(Self, GroupName, - prune_or_create_group(Self, GroupName, TxnFun), TxnFun); -join_group(Self, _GroupName, #gm_group { members = [Self] } = Group, _TxnFun) -> - group_to_view(Group); -join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) -> - case lists:member(Self, Members) of - true -> - group_to_view(Group); - false -> - case lists:filter(fun is_member_alive/1, Members) of - [] -> - join_group(Self, GroupName, - prune_or_create_group(Self, GroupName, TxnFun), - TxnFun); - Alive -> - Left = lists:nth(rand:uniform(length(Alive)), Alive), - Handler = - fun () -> - join_group( - Self, GroupName, - record_dead_member_in_group(Self, - Left, GroupName, TxnFun, false), - TxnFun) - end, - try - case neighbour_call(Left, {add_on_right, Self}) of - {ok, Group1} -> group_to_view(Group1); - not_ready -> join_group(Self, GroupName, TxnFun) - end - catch - exit:{R, _} - when R =:= noproc; R =:= normal; R =:= shutdown -> - Handler(); - exit:{{R, _}, _} - when R =:= nodedown; R =:= shutdown -> - Handler() - end - end - end. 
- -dirty_read_group(GroupName) -> - case mnesia:dirty_read(?GROUP_TABLE, GroupName) of - [] -> {error, not_found}; - [Group] -> Group - end. - -read_group(GroupName) -> - case mnesia:read({?GROUP_TABLE, GroupName}) of - [] -> {error, not_found}; - [Group] -> Group - end. - -write_group(Group) -> mnesia:write(?GROUP_TABLE, Group, write), Group. - -prune_or_create_group(Self, GroupName, TxnFun) -> - TxnFun( - fun () -> - GroupNew = #gm_group { name = GroupName, - members = [Self], - version = get_version(Self) }, - case read_group(GroupName) of - {error, not_found} -> - write_group(GroupNew); - Group = #gm_group { members = Members } -> - case lists:any(fun is_member_alive/1, Members) of - true -> Group; - false -> write_group(GroupNew) - end - end - end). - -record_dead_member_in_group(Self, Member, GroupName, TxnFun, Verify) -> - Fun = - fun () -> - try - Group = #gm_group { members = Members, version = Ver } = - case Verify of - true -> - check_membership(Self, read_group(GroupName)); - false -> - check_group(read_group(GroupName)) - end, - case lists:splitwith( - fun (Member1) -> Member1 =/= Member end, Members) of - {_Members1, []} -> %% not found - already recorded dead - Group; - {Members1, [Member | Members2]} -> - Members3 = Members1 ++ [{dead, Member} | Members2], - write_group(Group #gm_group { members = Members3, - version = Ver + 1 }) - end - catch - lost_membership -> - %% The transaction must not be abruptly crashed, but - %% leave the gen_server to stop normally - {error, lost_membership} - end - end, - handle_lost_membership_in_txn(TxnFun, Fun). - -handle_lost_membership_in_txn(TxnFun, Fun) -> - case TxnFun(Fun) of - {error, lost_membership = T} -> - throw(T); - Any -> - Any - end. - -record_new_member_in_group(NewMember, Left, GroupName, TxnFun) -> - Fun = - fun () -> - try - Group = #gm_group { members = Members, version = Ver } = - check_membership(Left, read_group(GroupName)), - case lists:member(NewMember, Members) of - true -> - %% This avois duplicates during partial partitions, - %% as inconsistent views might happen during them - rabbit_log:warning("(~tp) GM avoiding duplicate of ~tp", - [self(), NewMember]), - Group; - false -> - {Prefix, [Left | Suffix]} = - lists:splitwith(fun (M) -> M =/= Left end, Members), - write_group(Group #gm_group { - members = Prefix ++ [Left, NewMember | Suffix], - version = Ver + 1 }) - end - catch - lost_membership -> - %% The transaction must not be abruptly crashed, but - %% leave the gen_server to stop normally - {error, lost_membership} - end - end, - handle_lost_membership_in_txn(TxnFun, Fun). - -erase_members_in_group(Self, Members, GroupName, TxnFun) -> - DeadMembers = [{dead, Id} || Id <- Members], - Fun = - fun () -> - try - Group = #gm_group { members = [_|_] = Members1, version = Ver } = - check_membership(Self, read_group(GroupName)), - case Members1 -- DeadMembers of - Members1 -> Group; - Members2 -> write_group( - Group #gm_group { members = Members2, - version = Ver + 1 }) - end - catch - lost_membership -> - %% The transaction must not be abruptly crashed, but - %% leave the gen_server to stop normally - {error, lost_membership} - end - end, - handle_lost_membership_in_txn(TxnFun, Fun). 
- -maybe_erase_aliases(State = #state { self = Self, - group_name = GroupName, - members_state = MembersState, - txn_executor = TxnFun }, View) -> - #view_member { aliases = Aliases } = fetch_view_member(Self, View), - {Erasable, MembersState1} - = ?SETS:fold( - fun (Id, {ErasableAcc, MembersStateAcc} = Acc) -> - #member { last_pub = LP, last_ack = LA } = - find_member_or_blank(Id, MembersState), - case can_erase_view_member(Self, Id, LA, LP) of - true -> {[Id | ErasableAcc], - erase_member(Id, MembersStateAcc)}; - false -> Acc - end - end, {[], MembersState}, Aliases), - View1 = case Erasable of - [] -> View; - _ -> group_to_view( - erase_members_in_group(Self, Erasable, GroupName, TxnFun)) - end, - change_view(View1, State #state { members_state = MembersState1 }). - -can_erase_view_member(Self, Self, _LA, _LP) -> false; -can_erase_view_member(_Self, _Id, N, N) -> true; -can_erase_view_member(_Self, _Id, _LA, _LP) -> false. - -neighbour_cast(N, Msg) -> ?INSTR_MOD:cast(get_pid(N), Msg). -neighbour_call(N, Msg) -> ?INSTR_MOD:call(get_pid(N), Msg, infinity). - -%% --------------------------------------------------------------------------- -%% View monitoring and maintenance -%% --------------------------------------------------------------------------- - -ensure_neighbour(_Ver, Self, {Self, undefined}, Self) -> - {Self, undefined}; -ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) -> - ok = neighbour_cast(RealNeighbour, {?TAG, Ver, check_neighbours}), - {RealNeighbour, maybe_monitor(RealNeighbour, Self)}; -ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) -> - {RealNeighbour, MRef}; -ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) -> - true = ?INSTR_MOD:demonitor(MRef), - Msg = {?TAG, Ver, check_neighbours}, - ok = neighbour_cast(RealNeighbour, Msg), - ok = case Neighbour of - Self -> ok; - _ -> neighbour_cast(Neighbour, Msg) - end, - {Neighbour, maybe_monitor(Neighbour, Self)}. - -maybe_monitor( Self, Self) -> undefined; -maybe_monitor(Other, _Self) -> ?INSTR_MOD:monitor(get_pid(Other)). - -check_neighbours(State = #state { self = Self, - left = Left, - right = Right, - view = View, - broadcast_buffer = Buffer }) -> - #view_member { left = VLeft, right = VRight } - = fetch_view_member(Self, View), - Ver = view_version(View), - Left1 = ensure_neighbour(Ver, Self, Left, VLeft), - Right1 = ensure_neighbour(Ver, Self, Right, VRight), - Buffer1 = case Right1 of - {Self, undefined} -> []; - _ -> Buffer - end, - State1 = State #state { left = Left1, right = Right1, - broadcast_buffer = Buffer1 }, - ok = maybe_send_catchup(Right, State1), - State1. - -maybe_send_catchup(Right, #state { right = Right }) -> - ok; -maybe_send_catchup(_Right, #state { self = Self, - right = {Self, undefined} }) -> - ok; -maybe_send_catchup(_Right, #state { members_state = undefined }) -> - ok; -maybe_send_catchup(_Right, #state { self = Self, - right = {Right, _MRef}, - view = View, - members_state = MembersState }) -> - send_right(Right, View, - {catchup, Self, prepare_members_state(MembersState)}). - - -%% --------------------------------------------------------------------------- -%% Catch_up delta detection -%% --------------------------------------------------------------------------- - -find_prefix_common_suffix(A, B) -> - {Prefix, A1} = find_prefix(A, B, queue:new()), - {Common, Suffix} = find_common(A1, B, queue:new()), - {Prefix, Common, Suffix}. - -%% Returns the elements of A that occur before the first element of B, -%% plus the remainder of A. 
-find_prefix(A, B, Prefix) -> - case {queue:out(A), queue:out(B)} of - {{{value, Val}, _A1}, {{value, Val}, _B1}} -> - {Prefix, A}; - {{empty, A1}, {{value, _A}, _B1}} -> - {Prefix, A1}; - {{{value, {NumA, _MsgA} = Val}, A1}, - {{value, {NumB, _MsgB}}, _B1}} when NumA < NumB -> - find_prefix(A1, B, queue:in(Val, Prefix)); - {_, {empty, _B1}} -> - {A, Prefix} %% Prefix well be empty here - end. - -%% A should be a prefix of B. Returns the commonality plus the -%% remainder of B. -find_common(A, B, Common) -> - case {queue:out(A), queue:out(B)} of - {{{value, Val}, A1}, {{value, Val}, B1}} -> - find_common(A1, B1, queue:in(Val, Common)); - {{empty, _A}, _} -> - {Common, B}; - %% Drop value from B. - %% Match value to avoid infinite loop, since {empty, B} = queue:out(B). - {_, {{value, _}, B1}} -> - find_common(A, B1, Common); - %% Drop value from A. Empty A should be matched by second close. - {{{value, _}, A1}, _} -> - find_common(A1, B, Common) - end. - - -%% --------------------------------------------------------------------------- -%% Members helpers -%% --------------------------------------------------------------------------- - -with_member(Fun, Id, MembersState) -> - store_member( - Id, Fun(find_member_or_blank(Id, MembersState)), MembersState). - -with_member_acc(Fun, Id, {MembersState, Acc}) -> - {MemberState, Acc1} = Fun(find_member_or_blank(Id, MembersState), Acc), - {store_member(Id, MemberState, MembersState), Acc1}. - -find_member_or_blank(Id, MembersState) -> - case maps:find(Id, MembersState) of - {ok, Result} -> Result; - error -> blank_member() - end. - -erase_member(Id, MembersState) -> maps:remove(Id, MembersState). - -blank_member() -> - #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }. - -blank_member_state() -> maps:new(). - -store_member(Id, MemberState, MembersState) -> - maps:put(Id, MemberState, MembersState). - -prepare_members_state(MembersState) -> maps:to_list(MembersState). - -build_members_state(MembersStateList) -> maps:from_list(MembersStateList). - -make_member(GroupName) -> - {case dirty_read_group(GroupName) of - #gm_group { version = Version } -> Version; - {error, not_found} -> ?VERSION_START - end, self()}. - -remove_erased_members(MembersState, View) -> - lists:foldl(fun (Id, MembersState1) -> - store_member(Id, find_member_or_blank(Id, MembersState), - MembersState1) - end, blank_member_state(), all_known_members(View)). - -get_version({Version, _Pid}) -> Version. - -get_pid({_Version, Pid}) -> Pid. - -get_pids(Ids) -> [Pid || {_Version, Pid} <- Ids]. - -%% --------------------------------------------------------------------------- -%% Activity assembly -%% --------------------------------------------------------------------------- - -activity_nil() -> queue:new(). - -activity_cons( _Id, [], [], Tail) -> Tail; -activity_cons(Sender, Pubs, Acks, Tail) -> queue:in({Sender, Pubs, Acks}, Tail). - -activity_finalise(Activity) -> queue:to_list(Activity). - -maybe_send_activity([], _State) -> - ok; -maybe_send_activity(Activity, #state { self = Self, - right = {Right, _MRefR}, - view = View }) -> - send_right(Right, View, {activity, Self, Activity}). - -send_right(Right, View, Msg) -> - ok = neighbour_cast(Right, {?TAG, view_version(View), Msg}). 
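As a small worked example of the catch-up delta helpers above (the values are invented): with

    A = queue:from_list([{1, x}, {2, y}, {3, z}]),
    B = queue:from_list([{2, y}, {3, z}, {4, w}]),

find_prefix_common_suffix(A, B) returns three queues whose list forms are [{1, x}] (A's extra prefix), [{2, y}, {3, z}] (the common part) and [{4, w}] (B's extra suffix). Roughly speaking, during a catch-up the prefix and suffix identify publications or acknowledgements that one member has pending which the other has not yet seen.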
- -calculate_activity(MembersState, Confirms, Activity, Self, View) -> - lists:foldl( - fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) -> - with_member_acc( - fun (Member = #member { pending_ack = PA, - last_pub = LP, - last_ack = LA }, - {Confirms2, Activity2}) -> - case is_member_alias(Id, Self, View) of - true -> - {ToAck, PA1} = - find_common(queue_from_pubs(Pubs), PA, - queue:new()), - LA1 = last_ack(Acks, LA), - AckNums = acks_from_queue(ToAck), - Confirms3 = maybe_confirm( - Self, Id, Confirms2, AckNums), - {Member #member { pending_ack = PA1, - last_ack = LA1 }, - {Confirms3, - activity_cons( - Id, [], AckNums, Activity2)}}; - false -> - PA1 = apply_acks(Acks, join_pubs(PA, Pubs)), - LA1 = last_ack(Acks, LA), - LP1 = last_pub(Pubs, LP), - {Member #member { pending_ack = PA1, - last_pub = LP1, - last_ack = LA1 }, - {Confirms2, - activity_cons(Id, Pubs, Acks, Activity2)}} - end - end, Id, MembersStateConfirmsActivity) - end, {MembersState, {Confirms, activity_nil()}}, Activity). - -callback(Args, Module, Activity) -> - Result = - lists:foldl( - fun ({Id, Pubs, _Acks}, {Args1, Module1, ok}) -> - lists:foldl(fun ({_PubNum, Pub}, Acc = {Args2, Module2, ok}) -> - case Module2:handle_msg( - Args2, get_pid(Id), Pub) of - ok -> - Acc; - {become, Module3, Args3} -> - {Args3, Module3, ok}; - {stop, _Reason} = Error -> - Error - end; - (_, Error = {stop, _Reason}) -> - Error - end, {Args1, Module1, ok}, Pubs); - (_, Error = {stop, _Reason}) -> - Error - end, {Args, Module, ok}, Activity), - case Result of - {Args, Module, ok} -> ok; - {Args1, Module1, ok} -> {become, Module1, Args1}; - {stop, _Reason} = Error -> Error - end. - -change_view(View, State = #state { view = View0, - module = Module, - callback_args = Args }) -> - OldMembers = all_known_members(View0), - NewMembers = all_known_members(View), - Births = NewMembers -- OldMembers, - Deaths = OldMembers -- NewMembers, - Result = case {Births, Deaths} of - {[], []} -> ok; - _ -> Module:members_changed( - Args, get_pids(Births), get_pids(Deaths)) - end, - {Result, check_neighbours(State #state { view = View })}. - -handle_callback_result({Result, State}) -> - if_callback_success( - Result, fun no_reply_true/3, fun no_reply_false/3, undefined, State); -handle_callback_result({Result, Reply, State}) -> - if_callback_success( - Result, fun reply_true/3, fun reply_false/3, Reply, State). - -no_reply_true (_Result, _Undefined, State) -> noreply(State). -no_reply_false({stop, Reason}, _Undefined, State) -> {stop, Reason, State}. - -reply_true (_Result, Reply, State) -> reply(Reply, State). -reply_false({stop, Reason}, Reply, State) -> {stop, Reason, Reply, State}. - -handle_msg_true (_Result, Msg, State) -> handle_msg(Msg, State). -handle_msg_false(Result, _Msg, State) -> {Result, State}. - -activity_true(_Result, Activity, State = #state { module = Module, - callback_args = Args }) -> - {callback(Args, Module, Activity), State}. -activity_false(Result, _Activity, State) -> - {Result, State}. - -if_callback_success(Result, True, False, Arg, State) -> - {NewResult, NewState} = maybe_stop(Result, State), - if_callback_success1(NewResult, True, False, Arg, NewState). - -if_callback_success1(ok, True, _False, Arg, State) -> - True(ok, Arg, State); -if_callback_success1( - {become, Module, Args} = Result, True, _False, Arg, State) -> - True(Result, Arg, State #state { module = Module, - callback_args = Args }); -if_callback_success1({stop, _Reason} = Result, _True, False, Arg, State) -> - False(Result, Arg, State). 
- -maybe_stop({stop, Reason}, #state{ shutting_down = false } = State) -> - ShuttingDown = {true, Reason}, - case has_pending_messages(State) of - true -> {ok, State #state{ shutting_down = ShuttingDown }}; - false -> {{stop, Reason}, State #state{ shutting_down = ShuttingDown }} - end; -maybe_stop(Result, #state{ shutting_down = false } = State) -> - {Result, State}; -maybe_stop(Result, #state{ shutting_down = {true, Reason} } = State) -> - case has_pending_messages(State) of - true -> {Result, State}; - false -> {{stop, Reason}, State} - end. - -has_pending_messages(#state{ broadcast_buffer = Buffer }) - when Buffer =/= [] -> - true; -has_pending_messages(#state{ members_state = MembersState }) -> - MembersWithPubAckMismatches = maps:filter(fun(_Id, #member{last_pub = LP, last_ack = LA}) -> - LP =/= LA - end, MembersState), - 0 =/= maps:size(MembersWithPubAckMismatches). - -maybe_confirm(_Self, _Id, Confirms, []) -> - Confirms; -maybe_confirm(Self, Self, Confirms, [PubNum | PubNums]) -> - case queue:out(Confirms) of - {empty, _Confirms} -> - Confirms; - {{value, {PubNum, From}}, Confirms1} -> - gen_server2:reply(From, ok), - maybe_confirm(Self, Self, Confirms1, PubNums); - {{value, {PubNum1, _From}}, _Confirms} when PubNum1 > PubNum -> - maybe_confirm(Self, Self, Confirms, PubNums) - end; -maybe_confirm(_Self, _Id, Confirms, _PubNums) -> - Confirms. - -purge_confirms(Confirms) -> - _ = [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)], - queue:new(). - - -%% --------------------------------------------------------------------------- -%% Msg transformation -%% --------------------------------------------------------------------------- - -acks_from_queue(Q) -> [PubNum || {PubNum, _Msg} <- queue:to_list(Q)]. - -pubs_from_queue(Q) -> queue:to_list(Q). - -queue_from_pubs(Pubs) -> queue:from_list(Pubs). - -apply_acks( [], Pubs) -> Pubs; -apply_acks(List, Pubs) -> {_, Pubs1} = queue:split(length(List), Pubs), - Pubs1. - -join_pubs(Q, []) -> Q; -join_pubs(Q, Pubs) -> queue:join(Q, queue_from_pubs(Pubs)). - -last_ack( [], LA) -> LA; -last_ack(List, LA) -> LA1 = lists:last(List), - true = LA1 > LA, %% ASSERTION - LA1. - -last_pub( [], LP) -> LP; -last_pub(List, LP) -> {PubNum, _Msg} = lists:last(List), - true = PubNum > LP, %% ASSERTION - PubNum. - -%% --------------------------------------------------------------------------- - -%% Uninstrumented versions - -call(Pid, Msg, Timeout) -> gen_server2:call(Pid, Msg, Timeout). -cast(Pid, Msg) -> gen_server2:cast(Pid, Msg). -monitor(Pid) -> erlang:monitor(process, Pid). -demonitor(MRef) -> erlang:demonitor(MRef). - -check_membership(Self, #gm_group{members = M} = Group) -> - case lists:member(Self, M) of - true -> - Group; - false -> - throw(lost_membership) - end; -check_membership(_Self, {error, not_found}) -> - throw(lost_membership). - -check_membership(GroupName) -> - case dirty_read_group(GroupName) of - #gm_group{members = M} -> - case lists:keymember(self(), 2, M) of - true -> - ok; - false -> - throw(lost_membership) - end; - {error, not_found} -> - throw(lost_membership) - end. - -check_group({error, not_found}) -> - throw(lost_membership); -check_group(Any) -> - Any. diff --git a/deps/rabbit/src/internal_user.erl b/deps/rabbit/src/internal_user.erl index 340c83e7636c..9678cef2ed15 100644 --- a/deps/rabbit/src/internal_user.erl +++ b/deps/rabbit/src/internal_user.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(internal_user). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([ new/0, new/1, diff --git a/deps/rabbit/src/lqueue.erl b/deps/rabbit/src/lqueue.erl index c5d1f3af863e..5241eee6e6af 100644 --- a/deps/rabbit/src/lqueue.erl +++ b/deps/rabbit/src/lqueue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(lqueue). diff --git a/deps/rabbit/src/mc.erl b/deps/rabbit/src/mc.erl index 25c2f5fa9830..465c7054f089 100644 --- a/deps/rabbit/src/mc.erl +++ b/deps/rabbit/src/mc.erl @@ -1,7 +1,15 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + -module(mc). -export([ init/3, + init/4, size/1, is/1, get_annotation/2, @@ -11,19 +19,23 @@ is_persistent/1, ttl/1, correlation_id/1, + user_id/1, message_id/1, + property/2, timestamp/1, priority/1, set_ttl/2, x_header/2, routing_headers/2, + exchange/1, + routing_keys/1, %% convert/2, + convert/3, protocol_state/1, prepare/2, - record_death/3, + record_death/4, is_death_cycle/2, - last_death/1, death_queue_names/1 ]). @@ -36,6 +48,7 @@ -type protocol() :: module(). -type annotations() :: #{internal_ann_key() => term(), x_ann_key() => x_ann_value()}. +-type environment() :: #{atom() => term()}. -type ann_key() :: internal_ann_key() | x_ann_key(). -type ann_value() :: term(). @@ -53,7 +66,8 @@ -export_type([ state/0, ann_key/0, - ann_value/0 + ann_value/0, + annotations/0 ]). -type proto_state() :: term(). @@ -107,12 +121,12 @@ %% Convert state to another protocol %% all protocols must be able to convert to mc_amqp (AMQP 1.0) --callback convert_to(Target :: protocol(), proto_state()) -> +-callback convert_to(Target :: protocol(), proto_state(), environment()) -> proto_state() | not_implemented. %% Convert from another protocol %% all protocols must be able to convert from mc_amqp (AMQP 1.0) --callback convert_from(Source :: protocol(), proto_state()) -> +-callback convert_from(Source :: protocol(), proto_state(), environment()) -> proto_state() | not_implemented. %% emit a protocol specific state package @@ -128,13 +142,21 @@ %%% API -spec init(protocol(), term(), annotations()) -> state(). -init(Proto, Data, Anns) - when is_atom(Proto) - andalso is_map(Anns) -> +init(Proto, Data, Anns) -> + init(Proto, Data, Anns, #{}). + +-spec init(protocol(), term(), annotations(), environment()) -> state(). +init(Proto, Data, Anns0, Env) -> {ProtoData, ProtoAnns} = Proto:init(Data), + Anns1 = case map_size(Env) == 0 of + true -> Anns0; + false -> Anns0#{env => Env} + end, + Anns2 = maps:merge(ProtoAnns, Anns1), + Anns = set_received_at_timestamp(Anns2), #?MODULE{protocol = Proto, data = ProtoData, - annotations = maps:merge(ProtoAnns, Anns)}. + annotations = Anns}. 
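
A minimal sketch of how the new optional environment threads through the container. It assumes AmqpEncodedBin is an AMQP 1.0 encoded message as received from a client; the annotation values are invented, while the ?ANN_* macros and the 'rabbitmq_4.0.0' env key are the ones used elsewhere in this patch:

    Anns = #{?ANN_EXCHANGE => <<"amq.topic">>,
             ?ANN_ROUTING_KEYS => [<<"orders.created">>]},
    Env = #{'rabbitmq_4.0.0' => false},   %% illustrative value
    Msg = mc:init(mc_amqp, AmqpEncodedBin, Anns, Env),
    %% The environment is stored under the 'env' annotation and merged with
    %% the target environment when converting to another protocol:
    Legacy = mc:convert(mc_amqpl, Msg, #{}).
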
-spec size(state()) -> {MetadataSize :: non_neg_integer(), @@ -171,7 +193,7 @@ take_annotation(_Key, BasicMessage) -> -spec set_annotation(ann_key(), ann_value(), state()) -> state(). set_annotation(Key, Value, #?MODULE{annotations = Anns} = State) -> - State#?MODULE{annotations = maps:put(Key, Value, Anns)}; + State#?MODULE{annotations = Anns#{Key => Value}}; set_annotation(Key, Value, BasicMessage) -> mc_compat:set_annotation(Key, Value, BasicMessage). @@ -209,9 +231,21 @@ routing_headers(#?MODULE{protocol = Proto, routing_headers(BasicMsg, Opts) -> mc_compat:routing_headers(BasicMsg, Opts). +-spec exchange(state()) -> undefined | rabbit_misc:resource_name(). +exchange(#?MODULE{annotations = Anns}) -> + maps:get(?ANN_EXCHANGE, Anns, undefined); +exchange(BasicMessage) -> + mc_compat:get_annotation(?ANN_EXCHANGE, BasicMessage). + +-spec routing_keys(state()) -> [rabbit_types:routing_key()]. +routing_keys(#?MODULE{annotations = Anns}) -> + maps:get(?ANN_ROUTING_KEYS, Anns, []); +routing_keys(BasicMessage) -> + mc_compat:get_annotation(?ANN_ROUTING_KEYS, BasicMessage). + -spec is_persistent(state()) -> boolean(). is_persistent(#?MODULE{annotations = Anns}) -> - maps:get(durable, Anns, true); + maps:get(?ANN_DURABLE, Anns, true); is_persistent(BasicMsg) -> mc_compat:is_persistent(BasicMsg). @@ -221,16 +255,15 @@ ttl(#?MODULE{annotations = Anns}) -> ttl(BasicMsg) -> mc_compat:ttl(BasicMsg). - -spec timestamp(state()) -> undefined | non_neg_integer(). timestamp(#?MODULE{annotations = Anns}) -> - maps:get(timestamp, Anns, undefined); + maps:get(?ANN_TIMESTAMP, Anns, undefined); timestamp(BasicMsg) -> mc_compat:timestamp(BasicMsg). -spec priority(state()) -> undefined | non_neg_integer(). priority(#?MODULE{annotations = Anns}) -> - maps:get(priority, Anns, undefined); + maps:get(?ANN_PRIORITY, Anns, undefined); priority(BasicMsg) -> mc_compat:priority(BasicMsg). @@ -246,6 +279,15 @@ correlation_id(#?MODULE{protocol = Proto, correlation_id(BasicMsg) -> mc_compat:correlation_id(BasicMsg). +-spec user_id(state()) -> + {binary, rabbit_types:username()} | + undefined. +user_id(#?MODULE{protocol = Proto, + data = Data}) -> + Proto:property(?FUNCTION_NAME, Data); +user_id(BasicMsg) -> + mc_compat:user_id(BasicMsg). + -spec message_id(state()) -> {uuid, binary()} | {utf8, binary()} | @@ -258,25 +300,41 @@ message_id(#?MODULE{protocol = Proto, message_id(BasicMsg) -> mc_compat:message_id(BasicMsg). +-spec property(atom(), state()) -> + {utf8, binary()} | undefined. +property(Property, #?MODULE{protocol = Proto, + data = Data}) -> + Proto:property(Property, Data); +property(_Property, _BasicMsg) -> + undefined. + -spec set_ttl(undefined | non_neg_integer(), state()) -> state(). set_ttl(Value, #?MODULE{annotations = Anns} = State) -> - State#?MODULE{annotations = maps:put(ttl, Value, Anns)}; + State#?MODULE{annotations = Anns#{ttl => Value}}; set_ttl(Value, BasicMsg) -> mc_compat:set_ttl(Value, BasicMsg). -spec convert(protocol(), state()) -> state(). -convert(Proto, #?MODULE{protocol = Proto} = State) -> +convert(Proto, State) -> + convert(Proto, State, #{}). + +-spec convert(protocol(), state(), environment()) -> state(). 
+convert(Proto, #?MODULE{protocol = Proto} = State, _Env) -> State; convert(TargetProto, #?MODULE{protocol = SourceProto, - data = Data0} = State) -> + annotations = Anns, + data = Data0} = State, + TargetEnv) -> Data = SourceProto:prepare(read, Data0), + SourceEnv = maps:get(env, Anns, #{}), + Env = maps:merge(SourceEnv, TargetEnv), TargetState = - case SourceProto:convert_to(TargetProto, Data) of + case SourceProto:convert_to(TargetProto, Data, Env) of not_implemented -> - case TargetProto:convert_from(SourceProto, Data) of + case TargetProto:convert_from(SourceProto, Data, Env) of not_implemented -> - AmqpData = SourceProto:convert_to(mc_amqp, Data), - mc_amqp:convert_to(TargetProto, AmqpData); + AmqpData = SourceProto:convert_to(mc_amqp, Data, Env), + mc_amqp:convert_to(TargetProto, AmqpData, Env); TargetState0 -> TargetState0 end; @@ -285,7 +343,7 @@ convert(TargetProto, #?MODULE{protocol = SourceProto, end, State#?MODULE{protocol = TargetProto, data = TargetState}; -convert(Proto, BasicMsg) -> +convert(Proto, BasicMsg, _Env) -> mc_compat:convert_to(Proto, BasicMsg). -spec protocol_state(state()) -> term(). @@ -297,88 +355,111 @@ protocol_state(BasicMsg) -> mc_compat:protocol_state(BasicMsg). -spec record_death(rabbit_dead_letter:reason(), - SourceQueue :: rabbit_misc:resource_name(), - state()) -> state(). + rabbit_misc:resource_name(), + state(), + environment()) -> state(). record_death(Reason, SourceQueue, - #?MODULE{protocol = _Mod, - data = _Data, - annotations = Anns0} = State) - when is_atom(Reason) andalso is_binary(SourceQueue) -> + #?MODULE{annotations = Anns0} = State, + Env) + when is_atom(Reason) andalso + is_binary(SourceQueue) -> Key = {SourceQueue, Reason}, - Exchange = maps:get(exchange, Anns0), - RoutingKeys = maps:get(routing_keys, Anns0), + #{?ANN_EXCHANGE := Exchange, + ?ANN_ROUTING_KEYS := RKeys0} = Anns0, + %% The routing keys that we record in the death history and will + %% report to the client should include CC, but exclude BCC. 
+ RKeys = case Anns0 of + #{bcc := BccKeys} -> + RKeys0 -- BccKeys; + _ -> + RKeys0 + end, Timestamp = os:system_time(millisecond), Ttl = maps:get(ttl, Anns0, undefined), - - DeathAnns = rabbit_misc:maps_put_truthy(ttl, Ttl, #{first_time => Timestamp, - last_time => Timestamp}), - case maps:get(deaths, Anns0, undefined) of - undefined -> - Ds = #deaths{last = Key, - first = Key, - records = #{Key => #death{count = 1, - exchange = Exchange, - routing_keys = RoutingKeys, - anns = DeathAnns}}}, - Anns = Anns0#{<<"x-first-death-reason">> => atom_to_binary(Reason), + DeathAnns = rabbit_misc:maps_put_truthy( + ttl, Ttl, #{first_time => Timestamp, + last_time => Timestamp}), + NewDeath = #death{exchange = Exchange, + routing_keys = RKeys, + count = 1, + anns = DeathAnns}, + ReasonBin = atom_to_binary(Reason), + Anns = case Anns0 of + #{deaths := Deaths0} -> + Deaths = case Deaths0 of + #deaths{records = Rs0} -> + Rs = maps:update_with( + Key, + fun(Death) -> + update_death(Death, Timestamp) + end, + NewDeath, + Rs0), + Deaths0#deaths{last = Key, + records = Rs}; + _ -> + %% Deaths are ordered by recency + case lists:keytake(Key, 1, Deaths0) of + {value, {Key, D0}, Deaths1} -> + D = update_death(D0, Timestamp), + [{Key, D} | Deaths1]; + false -> + [{Key, NewDeath} | Deaths0] + end + end, + Anns0#{<<"x-last-death-reason">> := ReasonBin, + <<"x-last-death-queue">> := SourceQueue, + <<"x-last-death-exchange">> := Exchange, + deaths := Deaths}; + _ -> + Deaths = case Env of + #{?FF_MC_DEATHS_V2 := false} -> + #deaths{last = Key, + first = Key, + records = #{Key => NewDeath}}; + _ -> + [{Key, NewDeath}] + end, + Anns0#{<<"x-first-death-reason">> => ReasonBin, <<"x-first-death-queue">> => SourceQueue, <<"x-first-death-exchange">> => Exchange, - <<"x-last-death-reason">> => atom_to_binary(Reason), - <<"x-last-death-queue">> => SourceQueue, - <<"x-last-death-exchange">> => Exchange - }, - - State#?MODULE{annotations = Anns#{deaths => Ds}}; - #deaths{records = Rs} = Ds0 -> - Death = #death{count = C, - anns = DA} = maps:get(Key, Rs, - #death{exchange = Exchange, - routing_keys = RoutingKeys, - anns = DeathAnns}), - Ds = Ds0#deaths{last = Key, - records = Rs#{Key => - Death#death{count = C + 1, - anns = DA#{last_time => Timestamp}}}}, - Anns = Anns0#{deaths => Ds, - <<"x-last-death-reason">> => atom_to_binary(Reason), + <<"x-last-death-reason">> => ReasonBin, <<"x-last-death-queue">> => SourceQueue, - <<"x-last-death-exchange">> => Exchange}, - State#?MODULE{annotations = Anns} - end; -record_death(Reason, SourceQueue, BasicMsg) -> - mc_compat:record_death(Reason, SourceQueue, BasicMsg). - + <<"x-last-death-exchange">> => Exchange, + deaths => Deaths} + end, + State#?MODULE{annotations = Anns}; +record_death(Reason, SourceQueue, BasicMsg, Env) -> + mc_compat:record_death(Reason, SourceQueue, BasicMsg, Env). + +update_death(#death{count = Count, + anns = DeathAnns} = Death, Timestamp) -> + Death#death{count = Count + 1, + anns = DeathAnns#{last_time := Timestamp}}. -spec is_death_cycle(rabbit_misc:resource_name(), state()) -> boolean(). +is_death_cycle(TargetQueue, #?MODULE{annotations = #{deaths := #deaths{records = Rs}}}) -> + is_cycle_v1(TargetQueue, maps:keys(Rs)); is_death_cycle(TargetQueue, #?MODULE{annotations = #{deaths := Deaths}}) -> - is_cycle(TargetQueue, maps:keys(Deaths#deaths.records)); + is_cycle_v2(TargetQueue, Deaths); is_death_cycle(_TargetQueue, #?MODULE{}) -> false; is_death_cycle(TargetQueue, BasicMsg) -> mc_compat:is_death_cycle(TargetQueue, BasicMsg). 
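
A sketch of the new death tracking under the v2 (list) representation, assuming Msg0 is an mc state whose annotations already carry an exchange and routing keys (as in the init/4 sketch above); the queue names are made up:

    Msg1 = mc:record_death(expired, <<"q1">>, Msg0, #{}),
    Msg2 = mc:record_death(expired, <<"q2">>, Msg1, #{}),
    %% Routing back towards q1 would be a fully automatic cycle:
    true  = mc:is_death_cycle(<<"q1">>, Msg2),
    %% q3 never dead-lettered this message, so no cycle:
    false = mc:is_death_cycle(<<"q3">>, Msg2),
    %% Queue names come back ordered by recency, most recent first:
    [<<"q2">>, <<"q1">>] = mc:death_queue_names(Msg2).
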
+%% Returns death queue names ordered by recency. -spec death_queue_names(state()) -> [rabbit_misc:resource_name()]. -death_queue_names(#?MODULE{annotations = Anns}) -> - case maps:get(deaths, Anns, undefined) of - undefined -> - []; - #deaths{records = Records} -> - proplists:get_keys(maps:keys(Records)) - end; +death_queue_names(#?MODULE{annotations = #{deaths := #deaths{records = Rs}}}) -> + proplists:get_keys(maps:keys(Rs)); +death_queue_names(#?MODULE{annotations = #{deaths := Deaths}}) -> + lists:map(fun({{Queue, _Reason}, _Death}) -> + Queue + end, Deaths); +death_queue_names(#?MODULE{}) -> + []; death_queue_names(BasicMsg) -> mc_compat:death_queue_names(BasicMsg). --spec last_death(state()) -> - undefined | {death_key(), #death{}}. -last_death(#?MODULE{annotations = Anns}) - when not is_map_key(deaths, Anns) -> - undefined; -last_death(#?MODULE{annotations = #{deaths := #deaths{last = Last, - records = Rs}}}) -> - {Last, maps:get(Last, Rs)}; -last_death(BasicMsg) -> - mc_compat:last_death(BasicMsg). - -spec prepare(read | store, state()) -> state(). prepare(For, #?MODULE{protocol = Proto, data = Data} = State) -> @@ -388,20 +469,38 @@ prepare(For, State) -> %% INTERNAL -%% if there is a death with a source queue that is the same as the target +is_cycle_v2(TargetQueue, Deaths) -> + case lists:splitwith(fun({{SourceQueue, _Reason}, #death{}}) -> + SourceQueue =/= TargetQueue + end, Deaths) of + {_, []} -> + false; + {L, [H | _]} -> + %% There is a cycle, but we only want to drop the message + %% if the cycle is "fully automatic", i.e. without a client + %% expliclity rejecting the message somewhere in the cycle. + lists:all(fun({{_SourceQueue, Reason}, _Death}) -> + Reason =/= rejected + end, [H | L]) + end. + +%% The desired v1 behaviour is the following: +%% "If there is a death with a source queue that is the same as the target %% queue name and there are no newer deaths with the 'rejected' reason then -%% consider this a cycle -is_cycle(_Queue, []) -> +%% consider this a cycle." +%% However, the correct death order cannot be reliably determined in v1. +%% v2 fixes this bug. +is_cycle_v1(_Queue, []) -> false; -is_cycle(_Queue, [{_Q, rejected} | _]) -> +is_cycle_v1(_Queue, [{_Q, rejected} | _]) -> %% any rejection breaks the cycle false; -is_cycle(Queue, [{Queue, Reason} | _]) +is_cycle_v1(Queue, [{Queue, Reason} | _]) when Reason =/= rejected -> true; -is_cycle(Queue, [_ | Rem]) -> - is_cycle(Queue, Rem). +is_cycle_v1(Queue, [_ | Rem]) -> + is_cycle_v1(Queue, Rem). --ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). --endif. +set_received_at_timestamp(Anns) -> + Millis = os:system_time(millisecond), + Anns#{?ANN_RECEIVED_AT_TIMESTAMP => Millis}. diff --git a/deps/rabbit/src/mc_amqp.erl b/deps/rabbit/src/mc_amqp.erl index 3b5f856a88f1..be63597c3f96 100644 --- a/deps/rabbit/src/mc_amqp.erl +++ b/deps/rabbit/src/mc_amqp.erl @@ -10,41 +10,48 @@ x_header/2, property/2, routing_headers/2, - get_property/2, - convert_to/2, - convert_from/2, + convert_to/3, + convert_from/3, protocol_state/2, - serialize/1, prepare/2 ]). -import(rabbit_misc, - [maps_put_truthy/3, - maps_put_falsy/3 - ]). - --type message_section() :: - #'v1_0.header'{} | - #'v1_0.delivery_annotations'{} | - #'v1_0.message_annotations'{} | - #'v1_0.properties'{} | - #'v1_0.application_properties'{} | - #'v1_0.data'{} | - #'v1_0.amqp_sequence'{} | - #'v1_0.amqp_value'{} | - #'v1_0.footer'{}. - --define(SIMPLE_VALUE(V), is_binary(V) orelse - is_number(V) orelse - is_boolean(V)). - + [maps_put_truthy/3]). 
+ +-define(MESSAGE_ANNOTATIONS_GUESS_SIZE, 100). + +-define(SIMPLE_VALUE(V), + is_binary(V) orelse + is_number(V) orelse + is_boolean(V)). + +%% §3.2 +-define(DESCRIPTOR_CODE_DATA, 16#75). +-define(DESCRIPTOR_CODE_AMQP_SEQUENCE, 16#76). +-define(DESCRIPTOR_CODE_AMQP_VALUE, 16#77). + +%% A section that was omitted by the AMQP sender. +%% We use an empty list as it is cheaper to serialize. +-define(OMITTED_SECTION, []). + +-type amqp10_data() :: [#'v1_0.amqp_sequence'{} | #'v1_0.data'{}] | #'v1_0.amqp_value'{}. +-type body_descriptor_code() :: ?DESCRIPTOR_CODE_DATA | + ?DESCRIPTOR_CODE_AMQP_SEQUENCE | + ?DESCRIPTOR_CODE_AMQP_VALUE. +%% §3.2.5 +-type application_properties() :: [{Key :: {utf8, binary()}, + Val :: term()}]. +%% §3.2.10 +-type amqp_annotations() :: [{Key :: {symbol, binary()} | {ulong, non_neg_integer()}, + Val :: term()}]. -type opt(T) :: T | undefined. --type amqp10_data() :: [#'v1_0.amqp_sequence'{} | #'v1_0.data'{}] | - #'v1_0.amqp_value'{}. --record(msg, + +%% This representation is used when the message was originally sent with +%% a protocol other than AMQP and the message was not read from a stream. +-record(msg_body_decoded, { header :: opt(#'v1_0.header'{}), - delivery_annotations = []:: list(), message_annotations = [] :: list(), properties :: opt(#'v1_0.properties'{}), application_properties = [] :: list(), @@ -52,51 +59,103 @@ footer = [] :: list() }). --opaque state() :: #msg{}. - --export_type([ - state/0, - message_section/0 - ]). - -%% mc implementation -init(Sections) when is_list(Sections) -> - Msg = decode(Sections, #msg{}), - init(Msg); -init(#msg{} = Msg) -> - %% TODO: as the essential annotations, durable, priority, ttl and delivery_count - %% is all we are interested in it isn't necessary to keep hold of the - %% incoming AMQP header inside the state +%% This representation is used when we received the message from +%% an AMQP client or when we read the message from a stream. +%% This message was parsed only until the start of the body. +-record(msg_body_encoded, + { + header :: opt(#'v1_0.header'{}), + message_annotations = [] :: amqp_annotations(), + properties :: opt(#'v1_0.properties'{}), + application_properties = [] :: application_properties(), + bare_and_footer = uninit :: uninit | binary(), + bare_and_footer_application_properties_pos = ?OMITTED_SECTION :: non_neg_integer() | ?OMITTED_SECTION, + bare_and_footer_body_pos = uninit :: uninit | non_neg_integer(), + body_code = uninit :: uninit | body_descriptor_code() + }). + +%% This representation is how we store the message on disk in classic queues +%% and quorum queues. For better performance and less disk usage, we omit the +%% header because the header fields we're interested in are already set as mc +%% annotations. We store the original bare message unaltered to preserve +%% message hashes on the binary encoding of the bare message [§3.2]. +%% We store positions of where the properties, application-properties and body +%% sections start to be able to parse only individual sections after reading +%% the message back from a classic or quorum queue. The record is called v1 +%% just in case we ever want to introduce a new v2 on disk representation in +%% the future. +-record(v1, + { + message_annotations = [] :: amqp_annotations(), + bare_and_footer :: binary(), + bare_and_footer_properties_pos :: 0 | ?OMITTED_SECTION, + bare_and_footer_application_properties_pos :: non_neg_integer() | ?OMITTED_SECTION, + bare_and_footer_body_pos :: non_neg_integer(), + body_code :: body_descriptor_code() + }). 
+ +-opaque state() :: #msg_body_decoded{} | #msg_body_encoded{} | #v1{}. + +-export_type([state/0]). + +init(Payload) -> + Sections = amqp10_framing:decode_bin(Payload, [server_mode]), + Msg = msg_body_encoded(Sections, Payload, #msg_body_encoded{}), Anns = essential_properties(Msg), {Msg, Anns}. -convert_from(?MODULE, Sections) -> - element(1, init(Sections)); -convert_from(_SourceProto, _) -> +convert_from(?MODULE, Sections, _Env) when is_list(Sections) -> + msg_body_decoded(Sections, #msg_body_decoded{}); +convert_from(_SourceProto, _, _Env) -> not_implemented. -size(#msg{data = Body}) -> - %% TODO how to estimate anything but data sections? - BodySize = if is_list(Body) -> - lists:foldl( - fun(#'v1_0.data'{content = Data}, Acc) -> - iolist_size(Data) + Acc; - (#'v1_0.amqp_sequence'{content = _}, Acc) -> - Acc - end, 0, Body); - is_record(Body, 'v1_0.amqp_value') -> - 0 +convert_to(?MODULE, Msg, _Env) -> + Msg; +convert_to(TargetProto, Msg, Env) -> + TargetProto:convert_from(?MODULE, msg_to_sections(Msg), Env). + +size(#v1{message_annotations = MA, + bare_and_footer = Body}) -> + MetaSize = case MA of + [] -> 0; + _ -> ?MESSAGE_ANNOTATIONS_GUESS_SIZE end, - {_MetaSize = 0, BodySize}. + {MetaSize, byte_size(Body)}. x_header(Key, Msg) -> message_annotation(Key, Msg, undefined). -property(correlation_id, #msg{properties = #'v1_0.properties'{correlation_id = Corr}}) -> +property(_Prop, #msg_body_encoded{properties = undefined}) -> + undefined; +property(Prop, #msg_body_encoded{properties = Props}) -> + property0(Prop, Props); +property(_Prop, #v1{bare_and_footer_properties_pos = ?OMITTED_SECTION}) -> + undefined; +property(Prop, #v1{bare_and_footer = Bin, + bare_and_footer_properties_pos = 0, + bare_and_footer_application_properties_pos = ApPos, + bare_and_footer_body_pos = BodyPos}) -> + PropsLen = case ApPos of + ?OMITTED_SECTION -> BodyPos; + _ -> ApPos + end, + PropsBin = binary_part(Bin, 0, PropsLen), + % assertion + {PropsDescribed, PropsLen} = amqp10_binary_parser:parse(PropsBin), + Props = amqp10_framing:decode(PropsDescribed), + property0(Prop, Props). + +property0(correlation_id, #'v1_0.properties'{correlation_id = Corr}) -> Corr; -property(message_id, #msg{properties = #'v1_0.properties'{message_id = MsgId}}) -> +property0(message_id, #'v1_0.properties'{message_id = MsgId}) -> MsgId; -property(_Prop, #msg{}) -> +property0(user_id, #'v1_0.properties'{user_id = UserId}) -> + UserId; +property0(subject, #'v1_0.properties'{subject = Subject}) -> + Subject; +property0(to, #'v1_0.properties'{to = To}) -> + To; +property0(_Prop, #'v1_0.properties'{}) -> undefined. routing_headers(Msg, Opts) -> @@ -105,51 +164,35 @@ routing_headers(Msg, Opts) -> true -> message_annotations_as_simple_map(Msg); false -> - #{} + [] end, - application_properties_as_simple_map(Msg, X). - + List = application_properties_as_simple_map(Msg, X), + maps:from_list(List). 
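
A short sketch of the per-field accessors, assuming Msg is an mc_amqp-backed container (for example the one from the init/4 sketch) whose publisher set a subject and user-id. For messages read back from disk (the #v1{} form), only the properties section is re-parsed, thanks to the stored section offsets:

    {utf8, Subject}  = mc:property(subject, Msg),
    {binary, UserId} = mc:user_id(Msg).
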
get_property(durable, Msg) -> case Msg of - #msg{header = #'v1_0.header'{durable = Durable}} - when is_atom(Durable) -> - Durable; - #msg{header = #'v1_0.header'{durable = {boolean, Durable}}} -> + #msg_body_encoded{header = #'v1_0.header'{durable = Durable}} + when is_boolean(Durable) -> Durable; _ -> %% fallback in case the source protocol was old AMQP 0.9.1 - case message_annotation(<<"x-basic-delivery-mode">>, Msg, 2) of - {ubyte, 2} -> - true; + case message_annotation(<<"x-basic-delivery-mode">>, Msg, undefined) of + {ubyte, 1} -> + false; _ -> - false + true end end; get_property(timestamp, Msg) -> case Msg of - #msg{properties = #'v1_0.properties'{creation_time = {timestamp, Ts}}} -> + #msg_body_encoded{properties = #'v1_0.properties'{creation_time = {timestamp, Ts}}} -> Ts; _ -> undefined end; -get_property(correlation_id, Msg) -> - case Msg of - #msg{properties = #'v1_0.properties'{correlation_id = {_Type, CorrId}}} -> - CorrId; - _ -> - undefined - end; -get_property(message_id, Msg) -> - case Msg of - #msg{properties = #'v1_0.properties'{message_id = {_Type, CorrId}}} -> - CorrId; - _ -> - undefined - end; get_property(ttl, Msg) -> case Msg of - #msg{header = #'v1_0.header'{ttl = {_, Ttl}}} -> + #msg_body_encoded{header = #'v1_0.header'{ttl = {uint, Ttl}}} -> Ttl; _ -> %% fallback in case the source protocol was AMQP 0.9.1 @@ -163,7 +206,7 @@ get_property(ttl, Msg) -> end; get_property(priority, Msg) -> case Msg of - #msg{header = #'v1_0.header'{priority = {ubyte, Priority}}} -> + #msg_body_encoded{header = #'v1_0.header'{priority = {ubyte, Priority}}} -> Priority; _ -> %% fallback in case the source protocol was AMQP 0.9.1 @@ -173,100 +216,203 @@ get_property(priority, Msg) -> _ -> undefined end - end; -get_property(_P, _Msg) -> - undefined. + end. -convert_to(?MODULE, Msg) -> +%% protocol_state/2 serialises the protocol state outputting an AMQP encoded message. +-spec protocol_state(state(), mc:annotations()) -> iolist(). +protocol_state(Msg0 = #msg_body_decoded{header = Header0, + message_annotations = MA0}, Anns) -> + Header = update_header_from_anns(Header0, Anns), + MA = protocol_state_message_annotations(MA0, Anns), + Msg = Msg0#msg_body_decoded{header = Header, + message_annotations = MA}, + Sections = msg_to_sections(Msg), + encode(Sections); +protocol_state(#msg_body_encoded{header = Header0, + message_annotations = MA0, + bare_and_footer = BareAndFooter}, Anns) -> + Header = update_header_from_anns(Header0, Anns), + MA = protocol_state_message_annotations(MA0, Anns), + Sections = to_sections(Header, MA, []), + [encode(Sections), BareAndFooter]; +protocol_state(#v1{message_annotations = MA0, + bare_and_footer = BareAndFooter}, Anns) -> + Durable = case Anns of + #{?ANN_DURABLE := D} -> D; + _ -> true + end, + Priority = case Anns of + #{?ANN_PRIORITY := P} + when is_integer(P) -> + {ubyte, P}; + _ -> + undefined + end, + Ttl = case Anns of + #{ttl := T} + when is_integer(T) -> + {uint, T}; + _ -> + undefined + end, + Header = update_header_from_anns(#'v1_0.header'{durable = Durable, + priority = Priority, + ttl = Ttl}, Anns), + MA = protocol_state_message_annotations(MA0, Anns), + Sections = to_sections(Header, MA, []), + [encode(Sections), BareAndFooter]. + +prepare(read, Msg) -> Msg; -convert_to(TargetProto, Msg) -> - TargetProto:convert_from(?MODULE, msg_to_sections(Msg, fun (X) -> X end)). - -serialize(Sections) -> - encode_bin(Sections). 
- -protocol_state(Msg, Anns) -> - Exchange = maps:get(exchange, Anns), - [RKey | _] = maps:get(routing_keys, Anns), - - %% any x-* annotations get added as message annotations - AnnsToAdd = maps:filter(fun (Key, _) -> mc_util:is_x_header(Key) end, Anns), - - MACFun = fun(MAC) -> - add_message_annotations( - AnnsToAdd#{<<"x-exchange">> => wrap(utf8, Exchange), - <<"x-routing-key">> => wrap(utf8, RKey)}, MAC) - end, - - msg_to_sections(Msg, MACFun). - -prepare(_For, Msg) -> - Msg. +prepare(store, Msg = #v1{}) -> + Msg; +prepare(store, #msg_body_encoded{ + message_annotations = MA, + properties = Props, + bare_and_footer = BF, + bare_and_footer_application_properties_pos = AppPropsPos, + bare_and_footer_body_pos = BodyPos, + body_code = BodyCode}) + when is_integer(BodyPos) -> + PropsPos = case Props of + undefined -> ?OMITTED_SECTION; + #'v1_0.properties'{} -> 0 + end, + #v1{message_annotations = MA, + bare_and_footer = BF, + bare_and_footer_properties_pos = PropsPos, + bare_and_footer_application_properties_pos = AppPropsPos, + bare_and_footer_body_pos = BodyPos, + body_code = BodyCode + }. %% internal -msg_to_sections(#msg{header = H, - delivery_annotations = DAC, - message_annotations = MAC0, - properties = P, - application_properties = APC, - data = Data, - footer = FC}, MacFun) -> - Tail = case FC of - [] -> []; - _ -> - [#'v1_0.footer'{content = FC}] - end, +msg_to_sections(#msg_body_decoded{header = H, + message_annotations = MAC, + properties = P, + application_properties = APC, + data = Data, + footer = FC}) -> S0 = case Data of #'v1_0.amqp_value'{} -> - [Data | Tail]; + [Data]; _ when is_list(Data) -> - Data ++ Tail - end, - S1 = case APC of - [] -> S0; - _ -> - [#'v1_0.application_properties'{content = APC} | S0] - end, - S2 = case P of - undefined -> S1; - _ -> - [P | S1] - end, - S3 = case MacFun(MAC0) of - [] -> S2; - MAC -> - [#'v1_0.message_annotations'{content = MAC} | S2] + Data end, - S4 = case DAC of - [] -> S3; + S = case FC of + [] -> + S0; + _ -> + S0 ++ [#'v1_0.footer'{content = FC}] + end, + to_sections(H, MAC, P, APC, S); +msg_to_sections(#msg_body_encoded{header = H, + message_annotations = MAC, + properties = P, + application_properties = APC, + bare_and_footer = BareAndFooter, + bare_and_footer_body_pos = BodyPos, + body_code = BodyCode}) -> + BodyAndFooterBin = binary_part(BareAndFooter, + BodyPos, + byte_size(BareAndFooter) - BodyPos), + BodyAndFooter = case BodyCode of + ?DESCRIPTOR_CODE_DATA -> + amqp10_framing:decode_bin(BodyAndFooterBin); + _ -> + [{amqp_encoded_body_and_footer, BodyAndFooterBin}] + end, + to_sections(H, MAC, P, APC, BodyAndFooter); +msg_to_sections(#v1{message_annotations = MAC, + bare_and_footer = BareAndFooterBin, + body_code = ?DESCRIPTOR_CODE_DATA}) -> + BareAndFooter = amqp10_framing:decode_bin(BareAndFooterBin), + to_sections(undefined, MAC, BareAndFooter); +msg_to_sections(#v1{message_annotations = MAC, + bare_and_footer = BareAndFooter, + bare_and_footer_body_pos = BodyPos + }) -> + Tail = case BodyPos =:= 0 of + true -> + [{amqp_encoded_body_and_footer, BareAndFooter}]; + false -> + {Bin, BodyAndFooterBin} = split_binary(BareAndFooter, BodyPos), + Sections = amqp10_framing:decode_bin(Bin), + Sections ++ [{amqp_encoded_body_and_footer, BodyAndFooterBin}] + end, + to_sections(undefined, MAC, Tail). 
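
A sketch of the store/read lifecycle this representation enables, assuming Msg is an mc_amqp-backed container parsed from a client payload (#msg_body_encoded{}):

    %% Before writing to a classic or quorum queue: drop the decoded header,
    %% keep the unaltered bare message binary plus section offsets (#v1{}).
    ToDisk = mc:prepare(store, Msg),
    %% After reading it back, prepare(read, _) is a no-op for mc_amqp:
    FromDisk = mc:prepare(read, ToDisk).
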
+ +to_sections(H, MAC, P, APC, Tail) -> + S0 = case APC of + [] -> + Tail; _ -> - [#'v1_0.delivery_annotations'{content = DAC} | S3] + [#'v1_0.application_properties'{content = APC} | Tail] end, + S = case P of + undefined -> + S0; + _ -> + [P | S0] + end, + to_sections(H, MAC, S). + +to_sections(H, MAC, Tail) -> + S = case MAC of + [] -> + Tail; + _ -> + [#'v1_0.message_annotations'{content = MAC} | Tail] + end, case H of - undefined -> S4; + undefined -> + S; _ -> - [H | S4] + [H | S] end. - - - - -encode_bin(undefined) -> - <<>>; -encode_bin(Sections) when is_list(Sections) -> +-spec protocol_state_message_annotations(amqp_annotations(), mc:annotations()) -> + amqp_annotations(). +protocol_state_message_annotations(MA, Anns) -> + maps:fold( + fun(?ANN_EXCHANGE, Exchange, L) -> + maps_upsert(<<"x-exchange">>, {utf8, Exchange}, L); + (?ANN_ROUTING_KEYS, RKeys, L) -> + RKey = hd(RKeys), + maps_upsert(<<"x-routing-key">>, {utf8, RKey}, L); + (<<"x-", _/binary>> = K, V, L) + when V =/= undefined -> + %% any x-* annotations get added as message annotations + maps_upsert(K, mc_util:infer_type(V), L); + (<<"timestamp_in_ms">>, V, L) -> + maps_upsert(<<"x-opt-rabbitmq-received-time">>, {timestamp, V}, L); + (deaths, Deaths, L) + when is_list(Deaths) -> + Maps = encode_deaths(Deaths), + maps_upsert(<<"x-opt-deaths">>, {array, map, Maps}, L); + (_, _, Acc) -> + Acc + end, MA, Anns). + +maps_upsert(Key, TaggedVal, KVList) -> + TaggedKey = {symbol, Key}, + Elem = {TaggedKey, TaggedVal}, + lists:keystore(TaggedKey, 1, KVList, Elem). + +encode(Sections) when is_list(Sections) -> [amqp10_framing:encode_bin(Section) || Section <- Sections, - not is_empty(Section)]; -encode_bin(Section) -> - case is_empty(Section) of - true -> - <<>>; - false -> - amqp10_framing:encode_bin(Section) - end. + not is_empty(Section)]. -is_empty(undefined) -> +is_empty(#'v1_0.header'{durable = undefined, + priority = undefined, + ttl = undefined, + first_acquirer = undefined, + delivery_count = undefined}) -> + true; +is_empty(#'v1_0.delivery_annotations'{content = []}) -> + true; +is_empty(#'v1_0.message_annotations'{content = []}) -> true; is_empty(#'v1_0.properties'{message_id = undefined, user_id = undefined, @@ -284,156 +430,191 @@ is_empty(#'v1_0.properties'{message_id = undefined, true; is_empty(#'v1_0.application_properties'{content = []}) -> true; -is_empty(#'v1_0.message_annotations'{content = []}) -> - true; -is_empty(#'v1_0.delivery_annotations'{content = []}) -> - true; is_empty(#'v1_0.footer'{content = []}) -> true; -is_empty(#'v1_0.header'{durable = undefined, - priority = undefined, - ttl = undefined, - first_acquirer = undefined, - delivery_count = undefined}) -> - true; is_empty(_) -> false. - -message_annotation(_Key, #msg{message_annotations = []}, - Default) -> - Default; -message_annotation(Key, #msg{message_annotations = Content}, - Default) +message_annotation(Key, State, Default) when is_binary(Key) -> - mc_util:amqp_map_get(Key, Content, Default). + case message_annotations(State) of + [] -> Default; + MA -> mc_util:amqp_map_get(Key, MA, Default) + end. -message_annotations_as_simple_map(#msg{message_annotations = []}) -> - #{}; -message_annotations_as_simple_map(#msg{message_annotations = Content}) -> - %% the section record format really is terrible - lists:foldl(fun ({{symbol, K}, {_T, V}}, Acc) - when ?SIMPLE_VALUE(V) -> - Acc#{K => V}; - (_, Acc)-> - Acc - end, #{}, Content). 
+message_annotations(#msg_body_decoded{message_annotations = L}) -> L; +message_annotations(#msg_body_encoded{message_annotations = L}) -> L; +message_annotations(#v1{message_annotations = L}) -> L. + +message_annotations_as_simple_map(#msg_body_encoded{message_annotations = Content}) -> + message_annotations_as_simple_map0(Content); +message_annotations_as_simple_map(#v1{message_annotations = Content}) -> + message_annotations_as_simple_map0(Content). -application_properties_as_simple_map(#msg{application_properties = []}, M) -> - M; -application_properties_as_simple_map(#msg{application_properties = Content}, - M) -> +message_annotations_as_simple_map0(Content) -> + %% the section record format really is terrible + lists:filtermap(fun({{symbol, K}, {_T, V}}) + when ?SIMPLE_VALUE(V) -> + {true, {K, V}}; + (_) -> + false + end, Content). + +application_properties_as_simple_map( + #msg_body_encoded{application_properties = Content}, L) -> + application_properties_as_simple_map0(Content, L); +application_properties_as_simple_map( + #v1{bare_and_footer_application_properties_pos = ?OMITTED_SECTION}, L) -> + L; +application_properties_as_simple_map( + #v1{bare_and_footer = Bin, + bare_and_footer_application_properties_pos = ApPos, + bare_and_footer_body_pos = BodyPos}, L) -> + ApLen = BodyPos - ApPos, + ApBin = binary_part(Bin, ApPos, ApLen), + % assertion + {ApDescribed, ApLen} = amqp10_binary_parser:parse(ApBin), + #'v1_0.application_properties'{content = Content} = amqp10_framing:decode(ApDescribed), + application_properties_as_simple_map0(Content, L). + +application_properties_as_simple_map0(Content, L) -> %% the section record format really is terrible - lists:foldl(fun - ({{utf8, K}, {_T, V}}, Acc) + lists:foldl(fun({{utf8, K}, {_T, V}}, Acc) when ?SIMPLE_VALUE(V) -> - Acc#{K => V}; - ({{utf8, K}, undefined}, Acc) -> - Acc#{K => undefined}; - (_, Acc)-> + [{K, V} | Acc]; + ({{utf8, K}, V}, Acc) + when V =:= undefined orelse is_boolean(V) -> + [{K, V} | Acc]; + (_, Acc)-> Acc - end, M, Content). + end, L, Content). 
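
A sketch of how these flattened headers are consumed, assuming Msg is an mc_amqp-backed container; the key names in the trailing comment are invented for illustration:

    %% Used e.g. for headers-exchange routing; with x_headers the x-*
    %% message annotations are merged in alongside application-properties.
    %% Values are plain Erlang terms (binaries, numbers, booleans), untagged.
    Headers = mc:routing_headers(Msg, [x_headers]).
    %% => e.g. #{<<"order-id">> => 42, <<"x-tenant">> => <<"acme">>}
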
-decode([], Acc) -> +msg_body_decoded([], Acc) -> Acc; -decode([#'v1_0.header'{} = H | Rem], Msg) -> - decode(Rem, Msg#msg{header = H}); -decode([#'v1_0.message_annotations'{content = MAC} | Rem], Msg) -> - decode(Rem, Msg#msg{message_annotations = MAC}); -decode([#'v1_0.properties'{} = P | Rem], Msg) -> - decode(Rem, Msg#msg{properties = P}); -decode([#'v1_0.application_properties'{content = APC} | Rem], Msg) -> - decode(Rem, Msg#msg{application_properties = APC}); -decode([#'v1_0.delivery_annotations'{content = DAC} | Rem], Msg) -> - decode(Rem, Msg#msg{delivery_annotations = DAC}); -decode([#'v1_0.data'{} = D | Rem], #msg{data = Body} = Msg) +msg_body_decoded([#'v1_0.header'{} = H | Rem], Msg) -> + msg_body_decoded(Rem, Msg#msg_body_decoded{header = H}); +msg_body_decoded([_Ignore = #'v1_0.delivery_annotations'{} | Rem], Msg) -> + msg_body_decoded(Rem, Msg); +msg_body_decoded([#'v1_0.message_annotations'{content = MAC} | Rem], Msg) -> + msg_body_decoded(Rem, Msg#msg_body_decoded{message_annotations = MAC}); +msg_body_decoded([#'v1_0.properties'{} = P | Rem], Msg) -> + msg_body_decoded(Rem, Msg#msg_body_decoded{properties = P}); +msg_body_decoded([#'v1_0.application_properties'{content = APC} | Rem], Msg) -> + msg_body_decoded(Rem, Msg#msg_body_decoded{application_properties = APC}); +msg_body_decoded([#'v1_0.data'{} = D | Rem], #msg_body_decoded{data = Body} = Msg) when is_list(Body) -> - decode(Rem, Msg#msg{data = Body ++ [D]}); -decode([#'v1_0.amqp_sequence'{} = D | Rem], #msg{data = Body} = Msg) + msg_body_decoded(Rem, Msg#msg_body_decoded{data = Body ++ [D]}); +msg_body_decoded([#'v1_0.amqp_sequence'{} = D | Rem], #msg_body_decoded{data = Body} = Msg) when is_list(Body) -> - decode(Rem, Msg#msg{data = Body ++ [D]}); -decode([#'v1_0.footer'{content = FC} | Rem], Msg) -> - decode(Rem, Msg#msg{footer = FC}); -decode([#'v1_0.amqp_value'{} = B | Rem], #msg{} = Msg) -> + msg_body_decoded(Rem, Msg#msg_body_decoded{data = Body ++ [D]}); +msg_body_decoded([#'v1_0.amqp_value'{} = B | Rem], #msg_body_decoded{} = Msg) -> %% an amqp value can only be a singleton - decode(Rem, Msg#msg{data = B}). - -add_message_annotations(Anns, MA0) -> - maps:fold(fun (K, V, Acc) -> - map_add(symbol, K, mc_util:infer_type(V), Acc) - end, MA0, Anns). - -map_add(_T, _Key, undefined, Acc) -> - Acc; -map_add(KeyType, Key, TaggedValue, Acc0) -> - TaggedKey = wrap(KeyType, Key), - lists_upsert({TaggedKey, TaggedValue}, Acc0). - -wrap(_Type, undefined) -> - undefined; -wrap(Type, Val) -> - {Type, Val}. - -key_find(K, [{{_, K}, {_, V}} | _]) -> - V; -key_find(K, [_ | Rem]) -> - key_find(K, Rem); -key_find(_K, []) -> - undefined. - -recover_deaths([], Acc) -> - Acc; -recover_deaths([{map, Kvs} | Rem], Acc) -> - Queue = key_find(<<"queue">>, Kvs), - Reason = binary_to_atom(key_find(<<"reason">>, Kvs)), - DA0 = case key_find(<<"original-expiration">>, Kvs) of - undefined -> - #{}; - Exp -> - #{ttl => binary_to_integer(Exp)} - end, - RKeys = [RK || {_, RK} <- key_find(<<"routing-keys">>, Kvs)], - Ts = key_find(<<"time">>, Kvs), - DA = DA0#{first_time => Ts, - last_time => Ts}, - recover_deaths(Rem, - Acc#{{Queue, Reason} => - #death{anns = DA, - exchange = key_find(<<"exchange">>, Kvs), - count = key_find(<<"count">>, Kvs), - routing_keys = RKeys}}). - -essential_properties(#msg{message_annotations = MA} = Msg) -> + msg_body_decoded(Rem, Msg#msg_body_decoded{data = B}); +msg_body_decoded([#'v1_0.footer'{content = FC} | Rem], Msg) -> + msg_body_decoded(Rem, Msg#msg_body_decoded{footer = FC}). 
+ +msg_body_encoded([#'v1_0.header'{} = H | Rem], Payload, Msg) -> + msg_body_encoded(Rem, Payload, Msg#msg_body_encoded{header = H}); +msg_body_encoded([_Ignore = #'v1_0.delivery_annotations'{} | Rem], Payload, Msg) -> + msg_body_encoded(Rem, Payload, Msg); +msg_body_encoded([#'v1_0.message_annotations'{content = MAC} | Rem], Payload, Msg) -> + msg_body_encoded(Rem, Payload, Msg#msg_body_encoded{message_annotations = MAC}); +msg_body_encoded([{{pos, Pos}, #'v1_0.properties'{} = Props} | Rem], Payload, Msg) -> + %% properties is the first bare message section. + Bin = binary_part_bare_and_footer(Payload, Pos), + msg_body_encoded(Rem, Pos, Msg#msg_body_encoded{properties = Props, + bare_and_footer = Bin}); +msg_body_encoded([{{pos, Pos}, #'v1_0.application_properties'{content = APC}} | Rem], Payload, Msg) + when is_binary(Payload) -> + %% AMQP sender omitted properties section. + %% application-properties is the first bare message section. + Bin = binary_part_bare_and_footer(Payload, Pos), + msg_body_encoded(Rem, Pos, Msg#msg_body_encoded{application_properties = APC, + bare_and_footer_application_properties_pos = 0, + bare_and_footer = Bin}); +msg_body_encoded([{{pos, Pos}, #'v1_0.application_properties'{content = APC}} | Rem], BarePos, Msg) + when is_integer(BarePos) -> + %% properties is the first bare message section. + %% application-properties is the second bare message section. + msg_body_encoded(Rem, BarePos, Msg#msg_body_encoded{ + application_properties = APC, + bare_and_footer_application_properties_pos = Pos - BarePos + }); +%% Base case: we assert the last part contains the mandatory body: +msg_body_encoded([{{pos, Pos}, {body, Code}}], Payload, Msg) + when is_binary(Payload) -> + %% AMQP sender omitted properties and application-properties sections. + %% The body is the first bare message section. + Bin = binary_part_bare_and_footer(Payload, Pos), + Msg#msg_body_encoded{bare_and_footer = Bin, + bare_and_footer_body_pos = 0, + body_code = Code}; +msg_body_encoded([{{pos, Pos}, {body, Code}}], BarePos, Msg) + when is_integer(BarePos) -> + Msg#msg_body_encoded{bare_and_footer_body_pos = Pos - BarePos, + body_code = Code}. + +%% We extract the binary part of the payload exactly once when the bare message starts. +binary_part_bare_and_footer(Payload, Start) -> + binary_part(Payload, Start, byte_size(Payload) - Start). + +update_header_from_anns(undefined, Anns) -> + update_header_from_anns(#'v1_0.header'{durable = true}, Anns); +update_header_from_anns(Header, Anns) -> + DeliveryCount = case Anns of + #{delivery_count := C} -> C; + _ -> 0 + end, + Redelivered = case Anns of + #{redelivered := R} -> R; + _ -> false + end, + FirstAcq = not Redelivered andalso + DeliveryCount =:= 0 andalso + not is_map_key(deaths, Anns), + Header#'v1_0.header'{first_acquirer = FirstAcq, + delivery_count = {uint, DeliveryCount}}. 
+ +encode_deaths(Deaths) -> + lists:map( + fun({{Queue, Reason}, + #death{exchange = Exchange, + routing_keys = RoutingKeys, + count = Count, + anns = Anns = #{first_time := FirstTime, + last_time := LastTime}}}) -> + RKeys = [{utf8, Rk} || Rk <- RoutingKeys], + Map0 = [ + {{symbol, <<"queue">>}, {utf8, Queue}}, + {{symbol, <<"reason">>}, {symbol, atom_to_binary(Reason)}}, + {{symbol, <<"count">>}, {ulong, Count}}, + {{symbol, <<"first-time">>}, {timestamp, FirstTime}}, + {{symbol, <<"last-time">>}, {timestamp, LastTime}}, + {{symbol, <<"exchange">>}, {utf8, Exchange}}, + {{symbol, <<"routing-keys">>}, {array, utf8, RKeys}} + ], + Map = case Anns of + #{ttl := Ttl} -> + [{{symbol, <<"ttl">>}, {uint, Ttl}} | Map0]; + _ -> + Map0 + end, + {map, Map} + end, Deaths). + +essential_properties(#msg_body_encoded{message_annotations = MA} = Msg) -> Durable = get_property(durable, Msg), Priority = get_property(priority, Msg), Timestamp = get_property(timestamp, Msg), Ttl = get_property(ttl, Msg), - - Deaths = case message_annotation(<<"x-death">>, Msg, undefined) of - {list, DeathMaps} -> - %% TODO: make more correct? - Def = {utf8, <<>>}, - {utf8, FstQ} = message_annotation(<<"x-first-death-queue">>, Msg, Def), - {utf8, FstR} = message_annotation(<<"x-first-death-reason">>, Msg, Def), - {utf8, LastQ} = message_annotation(<<"x-last-death-queue">>, Msg, Def), - {utf8, LastR} = message_annotation(<<"x-last-death-reason">>, Msg, Def), - #deaths{first = {FstQ, binary_to_atom(FstR)}, - last = {LastQ, binary_to_atom(LastR)}, - records = recover_deaths(DeathMaps, #{})}; - _ -> - undefined - end, - Anns = maps_put_falsy( - durable, Durable, + Anns0 = #{?ANN_DURABLE => Durable}, + Anns = maps_put_truthy( + ?ANN_PRIORITY, Priority, maps_put_truthy( - priority, Priority, + ?ANN_TIMESTAMP, Timestamp, maps_put_truthy( - timestamp, Timestamp, - maps_put_truthy( - ttl, Ttl, - maps_put_truthy( - deaths, Deaths, - #{}))))), + ttl, Ttl, + Anns0))), case MA of [] -> Anns; @@ -441,31 +622,21 @@ essential_properties(#msg{message_annotations = MA} = Msg) -> lists:foldl( fun ({{symbol, <<"x-routing-key">>}, {utf8, Key}}, Acc) -> - maps:update_with(routing_keys, + maps:update_with(?ANN_ROUTING_KEYS, fun(L) -> [Key | L] end, [Key], Acc); ({{symbol, <<"x-cc">>}, {list, CCs0}}, Acc) -> CCs = [CC || {_T, CC} <- CCs0], - maps:update_with(routing_keys, + maps:update_with(?ANN_ROUTING_KEYS, fun(L) -> L ++ CCs end, CCs, Acc); ({{symbol, <<"x-exchange">>}, {utf8, Exchange}}, Acc) -> - Acc#{exchange => Exchange}; + Acc#{?ANN_EXCHANGE => Exchange}; (_, Acc) -> Acc end, Anns, MA) end. - -lists_upsert(New, L) -> - lists_upsert(New, L, [], L). - -lists_upsert({Key, _} = New, [{Key, _} | Rem], Pref, _All) -> - lists:reverse(Pref, [New | Rem]); -lists_upsert(New, [Item | Rem], Pref, All) -> - lists_upsert(New, Rem, [Item | Pref], All); -lists_upsert(New, [], _Pref, All) -> - [New | All]. diff --git a/deps/rabbit/src/mc_amqpl.erl b/deps/rabbit/src/mc_amqpl.erl index 4dc96e9db2f7..8de27294723a 100644 --- a/deps/rabbit/src/mc_amqpl.erl +++ b/deps/rabbit/src/mc_amqpl.erl @@ -12,8 +12,8 @@ size/1, x_header/2, routing_headers/2, - convert_to/2, - convert_from/2, + convert_to/3, + convert_from/3, protocol_state/2, property/2, set_property/3, @@ -24,8 +24,9 @@ -export([ message/3, message/4, - message/5, - from_basic_message/1 + from_basic_message/1, + to_091/2, + from_091/2 ]). -import(rabbit_misc, @@ -38,8 +39,10 @@ -define(AMQP10_PROPERTIES_HEADER, <<"x-amqp-1.0-properties">>). 
-define(AMQP10_APP_PROPERTIES_HEADER, <<"x-amqp-1.0-app-properties">>). -define(AMQP10_MESSAGE_ANNOTATIONS_HEADER, <<"x-amqp-1.0-message-annotations">>). +-define(AMQP10_FOOTER, <<"x-amqp-1.0-footer">>). -define(PROTOMOD, rabbit_framing_amqp_0_9_1). -define(CLASS_ID, 60). +-define(LONGSTR_UTF8_LIMIT, 4096). -opaque state() :: #content{}. @@ -49,54 +52,60 @@ %% mc implementation init(#content{} = Content0) -> - Content = rabbit_binary_parser:ensure_content_decoded(Content0), + Content1 = rabbit_binary_parser:ensure_content_decoded(Content0), %% project essential properties into annotations - Anns = essential_properties(Content), - {strip_header(Content, ?DELETED_HEADER), Anns}. - -convert_from(mc_amqp, Sections) -> - {H, MAnn, Prop, AProp, BodyRev} = - lists:foldl( - fun - (#'v1_0.header'{} = S, Acc) -> - setelement(1, Acc, S); - (#'v1_0.message_annotations'{} = S, Acc) -> - setelement(2, Acc, S); - (#'v1_0.properties'{} = S, Acc) -> - setelement(3, Acc, S); - (#'v1_0.application_properties'{} = S, Acc) -> - setelement(4, Acc, S); - (#'v1_0.delivery_annotations'{}, Acc) -> - %% delivery annotations not currently used - Acc; - (#'v1_0.footer'{}, Acc) -> - %% footer not currently used - Acc; - (undefined, Acc) -> - Acc; - (BodySection, Acc) -> - Body = element(5, Acc), - setelement(5, Acc, [BodySection | Body]) - end, {undefined, undefined, undefined, undefined, []}, - Sections), - - {PayloadRev, Type0} = - case BodyRev of - [#'v1_0.data'{content = Bin}] when is_binary(Bin) -> - {[Bin], undefined}; - [#'v1_0.data'{content = Bin}] when is_list(Bin) -> - {lists:reverse(Bin), undefined}; - _ -> - %% anything else needs to be encoded - %% TODO: This is potentially inefficient, but #content.payload_fragments_rev expects - %% currently a flat list of binaries. Can we make rabbit_writer work - %% with an iolist instead? - BinsRev = [begin - IoList = amqp10_framing:encode_bin(X), - erlang:iolist_to_binary(IoList) - end || X <- BodyRev], - {BinsRev, ?AMQP10_TYPE} - end, + Anns = essential_properties(Content1), + Content = strip_header(Content1, ?DELETED_HEADER), + {Content, Anns}. + +convert_from(mc_amqp, Sections, Env) -> + {H, MAnn, Prop, AProp, BodyRev, Footer} = + lists:foldl( + fun(#'v1_0.header'{} = S, Acc) -> + setelement(1, Acc, S); + (_Ignore = #'v1_0.delivery_annotations'{}, Acc) -> + Acc; + (#'v1_0.message_annotations'{} = S, Acc) -> + setelement(2, Acc, S); + (#'v1_0.properties'{} = S, Acc) -> + setelement(3, Acc, S); + (#'v1_0.application_properties'{} = S, Acc) -> + setelement(4, Acc, S); + (BodySect, Acc) + when is_record(BodySect, 'v1_0.data') orelse + is_record(BodySect, 'v1_0.amqp_sequence') orelse + is_record(BodySect, 'v1_0.amqp_value') -> + Body = element(5, Acc), + setelement(5, Acc, [BodySect | Body]); + (Body = {amqp_encoded_body_and_footer, _}, Acc) -> + %% assertions + [] = element(5, Acc), + setelement(5, Acc, Body); + (#'v1_0.footer'{} = S, Acc) -> + setelement(6, Acc, S) + end, + {undefined, undefined, undefined, undefined, [], undefined}, + Sections), + + {PFR, Type0} = case BodyRev of + [#'v1_0.data'{} | _] -> + %% We assert that the body consists of one or more data sections. + %% If there are multiple data sections, we concatenate the binary data. + PFR0 = lists:map( + fun(#'v1_0.data'{content = Content}) -> + %% In practice, when converting from mc_amqp + %% to mc_amqpl, Content will be a single binary, + %% in which case iolist_to_binary/1 is cheap. 
+ iolist_to_binary(Content) + end, BodyRev), + {PFR0, undefined}; + {amqp_encoded_body_and_footer, BodyAndFooterBin} -> + {[BodyAndFooterBin], ?AMQP10_TYPE}; + _ -> + %% Anything else needs to be AMQP encoded. + PFR0 = lists:map(fun amqp_encoded_binary/1, BodyRev), + {PFR0, ?AMQP10_TYPE} + end, #'v1_0.properties'{message_id = MsgId, user_id = UserId0, reply_to = ReplyTo0, @@ -144,23 +153,66 @@ convert_from(mc_amqp, Sections) -> end, Headers0 = [to_091(K, V) || {{utf8, K}, V} <- AP, - byte_size(K) =< ?AMQP_LEGACY_FIELD_NAME_MAX_LEN], - %% Add remaining message annotations as headers? + ?IS_SHORTSTR_LEN(K)], + %% Add remaining x- message annotations as headers XHeaders = lists:filtermap(fun({{symbol, <<"x-cc">>}, V}) -> {true, to_091(<<"CC">>, V)}; - ({{symbol, K}, V}) - when byte_size(K) =< ?AMQP_LEGACY_FIELD_NAME_MAX_LEN -> + ({{symbol, <<"x-opt-rabbitmq-received-time">>}, {timestamp, Ts}}) -> + {true, {<<"timestamp_in_ms">>, long, Ts}}; + ({{symbol, <<"x-opt-deaths">>}, V}) -> + convert_from_amqp_deaths(V); + ({{symbol, <<"x-", _/binary>> = K}, V}) + when ?IS_SHORTSTR_LEN(K) -> case is_internal_header(K) of - false -> {true, to_091(K, V)}; - true -> false + false -> + {true, to_091(K, V)}; + true -> + false end; (_) -> false end, MA), {Headers1, MsgId091} = message_id(MsgId, <<"x-message-id">>, Headers0), - {Headers, CorrId091} = message_id(CorrId, <<"x-correlation-id">>, Headers1), + {Headers2, CorrId091} = message_id(CorrId, <<"x-correlation-id">>, Headers1), + + Headers = case Env of + #{'rabbitmq_4.0.0' := false} -> + Headers3 = case AProp of + undefined -> + Headers2; + #'v1_0.application_properties'{} -> + APropBin = amqp_encoded_binary(AProp), + [{?AMQP10_APP_PROPERTIES_HEADER, longstr, APropBin} | Headers2] + end, + Headers4 = case Prop of + undefined -> + Headers3; + #'v1_0.properties'{} -> + PropBin = amqp_encoded_binary(Prop), + [{?AMQP10_PROPERTIES_HEADER, longstr, PropBin} | Headers3] + end, + Headers5 = case MAnn of + undefined -> + Headers4; + #'v1_0.message_annotations'{} -> + MAnnBin = amqp_encoded_binary(MAnn), + [{?AMQP10_MESSAGE_ANNOTATIONS_HEADER, longstr, MAnnBin} | Headers4] + end, + Headers6 = case Footer of + undefined -> + Headers5; + #'v1_0.footer'{} -> + FootBin = amqp_encoded_binary(Footer), + [{?AMQP10_FOOTER, longstr, FootBin} | Headers5] + end, + Headers6; + _ -> + Headers2 + end, UserId1 = unwrap(UserId0), + %% user-id is a binary type so we need to validate + %% if we can use it as is UserId = case mc_util:is_valid_shortstr(UserId1) of true -> UserId1; @@ -169,7 +221,7 @@ convert_from(mc_amqp, Sections) -> undefined end, - BP = #'P_basic'{message_id = MsgId091, + BP = #'P_basic'{message_id = MsgId091, delivery_mode = DelMode, expiration = Expiration, user_id = UserId, @@ -177,9 +229,9 @@ convert_from(mc_amqp, Sections) -> [] -> undefined; AllHeaders -> AllHeaders end, - reply_to = unwrap(ReplyTo0), + reply_to = unwrap_shortstr(ReplyTo0), type = Type, - app_id = unwrap(GroupId), + app_id = unwrap_shortstr(GroupId), priority = Priority, correlation_id = CorrId091, content_type = unwrap(ContentType), @@ -189,8 +241,8 @@ convert_from(mc_amqp, Sections) -> #content{class_id = ?CLASS_ID, properties = BP, properties_bin = none, - payload_fragments_rev = PayloadRev}; -convert_from(_SourceProto, _) -> + payload_fragments_rev = PFR}; +convert_from(_SourceProto, _, _) -> not_implemented. 
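
A sketch of the conversion path for AMQP 0-9-1 consumers, assuming Msg is an mc_amqp-backed container. Per the code above, passing 'rabbitmq_4.0.0' => false (presumably while older nodes are still present) preserves the original 1.0 properties, application-properties, message-annotations and footer as x-amqp-1.0-* longstr headers instead of dropping them:

    Compat = mc:convert(mc_amqpl, Msg, #{'rabbitmq_4.0.0' => false}),
    Plain  = mc:convert(mc_amqpl, Msg, #{}).
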
size(#content{properties_bin = PropsBin, @@ -263,11 +315,11 @@ prepare(store, Content) -> rabbit_binary_parser:clear_decoded_content( rabbit_binary_generator:ensure_content_encoded(Content, ?PROTOMOD)). -convert_to(?MODULE, Content) -> +convert_to(?MODULE, Content, _Env) -> Content; -convert_to(mc_amqp, #content{payload_fragments_rev = Payload} = Content) -> +convert_to(mc_amqp, #content{payload_fragments_rev = PFR} = Content, Env) -> #content{properties = Props} = prepare(read, Content), - #'P_basic'{message_id = MsgId, + #'P_basic'{message_id = MsgId0, expiration = Expiration, delivery_mode = DelMode, headers = Headers0, @@ -276,7 +328,7 @@ convert_to(mc_amqp, #content{payload_fragments_rev = Payload} = Content) -> type = Type, priority = Priority, app_id = AppId, - correlation_id = CorrId, + correlation_id = CorrId0, content_type = ContentType, content_encoding = ContentEncoding, timestamp = Timestamp} = Props, @@ -297,6 +349,7 @@ convert_to(mc_amqp, #content{payload_fragments_rev = Payload} = Content) -> undefined -> undefined; _ -> + %% Channel already checked for valid integer. binary_to_integer(Expiration) end, @@ -304,14 +357,26 @@ convert_to(mc_amqp, #content{payload_fragments_rev = Payload} = Content) -> ttl = wrap(uint, Ttl), %% TODO: check Priority is a ubyte? priority = wrap(ubyte, Priority)}, + CorrId = case mc_util:urn_string_to_uuid(CorrId0) of + {ok, CorrUUID} -> + {uuid, CorrUUID}; + _ -> + wrap(utf8, CorrId0) + end, + MsgId = case mc_util:urn_string_to_uuid(MsgId0) of + {ok, MsgUUID} -> + {uuid, MsgUUID}; + _ -> + wrap(utf8, MsgId0) + end, P = case amqp10_section_header(?AMQP10_PROPERTIES_HEADER, Headers) of undefined -> - #'v1_0.properties'{message_id = wrap(utf8, MsgId), + #'v1_0.properties'{message_id = MsgId, user_id = wrap(binary, UserId), to = undefined, % subject = wrap(utf8, RKey), reply_to = wrap(utf8, ReplyTo), - correlation_id = wrap(utf8, CorrId), + correlation_id = CorrId, content_type = wrap(symbol, ContentType), content_encoding = wrap(symbol, ContentEncoding), creation_time = wrap(timestamp, ConvertedTs), @@ -357,21 +422,28 @@ convert_to(mc_amqp, #content{payload_fragments_rev = Payload} = Content) -> Section -> Section end, - BodySections = case Type of ?AMQP10_TYPE -> amqp10_framing:decode_bin( - iolist_to_binary(lists:reverse(Payload))); + iolist_to_binary(lists:reverse(PFR))); _ -> - [#'v1_0.data'{content = lists:reverse(Payload)}] + [#'v1_0.data'{content = lists:reverse(PFR)}] end, + Tail = case amqp10_section_header(?AMQP10_FOOTER, Headers) of + undefined -> + BodySections; + #'v1_0.footer'{} = Footer -> + BodySections ++ [Footer] + end, - Sections = [H, MA, P, AP | BodySections], - mc_amqp:convert_from(mc_amqp, Sections); -convert_to(_TargetProto, _Content) -> + Sections = [H, MA, P, AP | Tail], + mc_amqp:convert_from(mc_amqp, Sections, Env); +convert_to(_TargetProto, _Content, _Env) -> not_implemented. 
-protocol_state(#content{properties = #'P_basic'{headers = H00} = B0} = C, +protocol_state(#content{properties = #'P_basic'{headers = H00, + priority = Priority0, + delivery_mode = DeliveryMode0} = B0} = C, Anns) -> %% Add any x- annotations as headers H0 = case H00 of @@ -379,8 +451,12 @@ protocol_state(#content{properties = #'P_basic'{headers = H00} = B0} = C, _ -> H00 end, - Deaths = maps:get(deaths, Anns, undefined), - Headers0 = deaths_to_headers(Deaths, H0), + Headers0 = case Anns of + #{deaths := Deaths} -> + deaths_to_headers(Deaths, H0); + _ -> + H0 + end, Headers1 = maps:fold( fun (<<"x-", _/binary>> = Key, Val, H) when is_integer(Val) -> [{Key, long, Val} | H]; @@ -395,7 +471,7 @@ protocol_state(#content{properties = #'P_basic'{headers = H00} = B0} = C, end, Headers0, Anns), Headers = case Headers1 of [] -> - undefined; + H00; _ -> %% Dedup lists:usort(fun({Key1, _, _}, {Key2, _, _}) -> @@ -403,7 +479,7 @@ protocol_state(#content{properties = #'P_basic'{headers = H00} = B0} = C, end, Headers1) end, Timestamp = case Anns of - #{timestamp := Ts} -> + #{?ANN_TIMESTAMP := Ts} -> Ts div 1000; _ -> undefined @@ -414,16 +490,44 @@ protocol_state(#content{properties = #'P_basic'{headers = H00} = B0} = C, %% publishes undefined; #{ttl := Ttl} -> - %% not sure this will ever happen - %% as we only ever unset the expiry integer_to_binary(Ttl); _ -> B0#'P_basic'.expiration end, - - B = B0#'P_basic'{timestamp = Timestamp, + Priority = case Priority0 of + undefined -> + case Anns of + #{?ANN_PRIORITY := P} -> + %% This branch is hit when a message with priority was originally + %% published with AMQP to a classic or quorum queue because the + %% AMQP header isn't stored on disk. + P; + _ -> + undefined + end; + _ -> + Priority0 + end, + DelMode = case DeliveryMode0 of + undefined -> + case Anns of + #{?ANN_DURABLE := false} -> + %% Leave it undefined which is equivalent to 1. + undefined; + _ -> + %% This branch is hit when a durable message was originally published + %% with AMQP to a classic or quorum queue because the AMQP header isn't + %% stored on disk. + 2 + end; + _ -> + DeliveryMode0 + end, + B = B0#'P_basic'{headers = Headers, + delivery_mode = DelMode, + priority = Priority, expiration = Expiration, - headers = Headers}, + timestamp = Timestamp}, C#content{properties = B, properties_bin = none}; @@ -433,35 +537,28 @@ protocol_state(Content0, Anns) -> %% changed protocol_state(prepare(read, Content0), Anns). --spec message(rabbit_types:exchange_name(), rabbit_types:routing_key(), #content{}) -> mc:state(). +-spec message(rabbit_types:exchange_name(), rabbit_types:routing_key(), #content{}) -> + {ok, mc:state()} | {error, Reason :: any()}. message(ExchangeName, RoutingKey, Content) -> message(ExchangeName, RoutingKey, Content, #{}). +%% helper for creating message container from messages received from AMQP legacy -spec message(rabbit_types:exchange_name(), rabbit_types:routing_key(), #content{}, map()) -> - mc:state(). -message(XName, RoutingKey, Content, Anns) -> - message(XName, RoutingKey, Content, Anns, - rabbit_feature_flags:is_enabled(message_containers)). - -%% helper for creating message container from messages received from -%% AMQP legacy -message(#resource{name = ExchangeNameBin}, RoutingKey, - #content{properties = Props} = Content, Anns, true) + {ok, mc:state()} | {error, Reason :: any()}. 
+message(#resource{name = ExchangeNameBin}, + RoutingKey, + #content{properties = Props} = Content, + Anns) when is_binary(RoutingKey) andalso is_map(Anns) -> - HeaderRoutes = rabbit_basic:header_routes(Props#'P_basic'.headers), - mc:init(?MODULE, - rabbit_basic:strip_bcc_header(Content), - Anns#{routing_keys => [RoutingKey | HeaderRoutes], - exchange => ExchangeNameBin}); -message(#resource{} = XName, RoutingKey, - #content{} = Content, Anns, false) -> - {ok, Msg} = rabbit_basic:message(XName, RoutingKey, Content), - case Anns of - #{id := Id} -> - Msg#basic_message{id = Id}; - _ -> - Msg + case rabbit_basic:header_routes(Props#'P_basic'.headers) of + {error, _} = Error -> + Error; + HeaderRoutes -> + {ok, mc:init(?MODULE, + Content, + Anns#{?ANN_ROUTING_KEYS => [RoutingKey | HeaderRoutes], + ?ANN_EXCHANGE => ExchangeNameBin})} end. from_basic_message(#basic_message{content = Content, @@ -474,48 +571,79 @@ from_basic_message(#basic_message{content = Content, _ -> #{id => Id} end, - message(Ex, RKey, prepare(read, Content), Anns, true). + {ok, Msg} = message(Ex, RKey, prepare(read, Content), Anns), + Msg. %% Internal -deaths_to_headers(undefined, Headers) -> - Headers; -deaths_to_headers(#deaths{records = Records}, Headers0) -> - %% sort records by the last timestamp - List = lists:sort( - fun({_, #death{anns = #{last_time := L1}}}, - {_, #death{anns = #{last_time := L2}}}) -> - L1 < L2 - end, maps:to_list(Records)), - Infos = lists:foldl( - fun ({{QName, Reason}, #death{anns = #{first_time := Ts} = DA, - exchange = Ex, - count = Count, - routing_keys = RoutingKeys}}, - Acc) -> - %% The first routing key is the one specified in the - %% basic.publish; all others are CC or BCC keys. - RKs = [hd(RoutingKeys) | rabbit_basic:header_routes(Headers0)], - RKeys = [{longstr, Key} || Key <- RKs], - ReasonBin = atom_to_binary(Reason, utf8), - PerMsgTTL = case maps:get(ttl, DA, undefined) of - undefined -> []; - Ttl when is_integer(Ttl) -> - Expiration = integer_to_binary(Ttl), - [{<<"original-expiration">>, longstr, - Expiration}] - end, - [{table, [{<<"count">>, long, Count}, - {<<"reason">>, longstr, ReasonBin}, - {<<"queue">>, longstr, QName}, - {<<"time">>, timestamp, Ts div 1000}, - {<<"exchange">>, longstr, Ex}, - {<<"routing-keys">>, array, RKeys}] ++ PerMsgTTL} - | Acc] - end, [], List), +deaths_to_headers(Deaths, Headers0) -> + Infos = case Deaths of + #deaths{records = Records} -> + %% sort records by the last timestamp + List = lists:sort( + fun({_, #death{anns = #{last_time := L1}}}, + {_, #death{anns = #{last_time := L2}}}) -> + L1 =< L2 + end, maps:to_list(Records)), + lists:foldl(fun(Record, Acc) -> + Table = death_table(Record), + [Table | Acc] + end, [], List); + _ -> + lists:map(fun death_table/1, Deaths) + end, rabbit_misc:set_table_value(Headers0, <<"x-death">>, array, Infos). 
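For reference, a single entry produced by death_table/7 below has the following 0.9.1 shape (illustrative values; an original-expiration field is only added when the message carried a per-message TTL):

%% {table, [{<<"count">>, long, 1},
%%          {<<"reason">>, longstr, <<"expired">>},
%%          {<<"queue">>, longstr, <<"q1">>},
%%          {<<"time">>, timestamp, 1700000000},
%%          {<<"exchange">>, longstr, <<"amq.direct">>},
%%          {<<"routing-keys">>, array, [{longstr, <<"rk1">>}]}]}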
+convert_from_amqp_deaths({array, map, Maps}) -> + L = lists:map( + fun({map, KvList}) -> + {Ttl, KvList1} = case KvList of + [{{symbol, <<"ttl">>}, {uint, Ttl0}} | Tail] -> + {Ttl0, Tail}; + _ -> + {undefined, KvList} + end, + [ + {{symbol, <<"queue">>}, {utf8, Queue}}, + {{symbol, <<"reason">>}, {symbol, Reason}}, + {{symbol, <<"count">>}, {ulong, Count}}, + {{symbol, <<"first-time">>}, {timestamp, FirstTime}}, + {{symbol, <<"last-time">>}, {timestamp, _LastTime}}, + {{symbol, <<"exchange">>}, {utf8, Exchange}}, + {{symbol, <<"routing-keys">>}, {array, utf8, RKeys0}} + ] = KvList1, + RKeys = [Key || {utf8, Key} <- RKeys0], + death_table(Queue, Reason, Exchange, RKeys, Count, FirstTime, Ttl) + end, Maps), + {true, {<<"x-death">>, array, L}}; +convert_from_amqp_deaths(_IgnoreUnknownValue) -> + false. +death_table({{QName, Reason}, + #death{exchange = Exchange, + routing_keys = RoutingKeys, + count = Count, + anns = DeathAnns = #{first_time := FirstTime}}}) -> + death_table(QName, Reason, Exchange, RoutingKeys, Count, FirstTime, + maps:get(ttl, DeathAnns, undefined)). + +death_table(QName, Reason, Exchange, RoutingKeys, Count, FirstTime, Ttl) -> + L0 = [ + {<<"count">>, long, Count}, + {<<"reason">>, longstr, rabbit_data_coercion:to_binary(Reason)}, + {<<"queue">>, longstr, QName}, + {<<"time">>, timestamp, FirstTime div 1000}, + {<<"exchange">>, longstr, Exchange}, + {<<"routing-keys">>, array, [{longstr, Key} || Key <- RoutingKeys]} + ], + L = case Ttl of + undefined -> + L0; + _ -> + Expiration = integer_to_binary(Ttl), + [{<<"original-expiration">>, longstr, Expiration} | L0] + end, + {table, L}. strip_header(#content{properties = #'P_basic'{headers = undefined}} = DecodedContent, _Key) -> @@ -536,16 +664,19 @@ wrap(_Type, undefined) -> wrap(Type, Val) -> {Type, Val}. -from_091(longstr, V) -> - case mc_util:is_valid_shortstr(V) of +from_091(longstr, V) + when is_binary(V) andalso + byte_size(V) =< ?LONGSTR_UTF8_LIMIT -> + %% if a longstr is longer than 4096 bytes we just assume it is binary + %% it _may_ still be valid utf8 but checking this for every longstr header + %% value is going to be excessively slow + case mc_util:is_utf8_no_null(V) of true -> {utf8, V}; false -> - %% if a string is longer than 255 bytes we just assume it is binary - %% it _may_ still be valid utf8 but checking this is going to be - %% excessively slow {binary, V} end; +from_091(longstr, V) -> {binary, V}; from_091(long, V) -> {long, V}; from_091(unsignedbyte, V) -> {ubyte, V}; from_091(short, V) -> {short, V}; @@ -596,7 +727,15 @@ unwrap({timestamp, V}) -> unwrap({_Type, V}) -> V. -to_091(Key, {utf8, V}) when is_binary(V) -> {Key, longstr, V}; +unwrap_shortstr({utf8, V}) + when is_binary(V) andalso + ?IS_SHORTSTR_LEN(V) -> + V; +unwrap_shortstr(_) -> + undefined. 
+ +to_091(Key, {utf8, V}) -> {Key, longstr, V}; +to_091(Key, {symbol, V}) -> {Key, longstr, V}; to_091(Key, {long, V}) -> {Key, long, V}; to_091(Key, {ulong, V}) -> {Key, long, V}; %% TODO: we could try to constrain this to_091(Key, {byte, V}) -> {Key, byte, V}; @@ -608,7 +747,7 @@ to_091(Key, {int, V}) -> {Key, signedint, V}; to_091(Key, {double, V}) -> {Key, double, V}; to_091(Key, {float, V}) -> {Key, float, V}; to_091(Key, {timestamp, V}) -> {Key, timestamp, V div 1000}; -to_091(Key, {binary, V}) -> {Key, binary, V}; +to_091(Key, {binary, V}) -> {Key, longstr, V}; to_091(Key, {boolean, V}) -> {Key, bool, V}; to_091(Key, true) -> {Key, bool, true}; to_091(Key, false) -> {Key, bool, false}; @@ -620,6 +759,7 @@ to_091(Key, {map, M}) -> {Key, table, [to_091(unwrap(K), V) || {K, V} <- M]}. to_091({utf8, V}) -> {longstr, V}; +to_091({symbol, V}) -> {longstr, V}; to_091({long, V}) -> {long, V}; to_091({byte, V}) -> {byte, V}; to_091({ubyte, V}) -> {unsignedbyte, V}; @@ -630,7 +770,7 @@ to_091({int, V}) -> {signedint, V}; to_091({double, V}) -> {double, V}; to_091({float, V}) -> {float, V}; to_091({timestamp, V}) -> {timestamp, V div 1000}; -to_091({binary, V}) -> {binary, V}; +to_091({binary, V}) -> {longstr, V}; to_091({boolean, V}) -> {bool, V}; to_091(true) -> {bool, true}; to_091(false) -> {bool, false}; @@ -642,17 +782,17 @@ to_091({map, M}) -> {table, [to_091(unwrap(K), V) || {K, V} <- M]}. message_id({uuid, UUID}, _HKey, H0) -> - {H0, mc_util:uuid_to_string(UUID)}; + {H0, mc_util:uuid_to_urn_string(UUID)}; message_id({ulong, N}, _HKey, H0) -> {H0, erlang:integer_to_binary(N)}; message_id({binary, B}, HKey, H0) -> - {[{HKey, longstr, B} | H0], undefined}; + {[{HKey, binary, B} | H0], undefined}; message_id({utf8, S}, HKey, H0) -> - case byte_size(S) > 255 of + case ?IS_SHORTSTR_LEN(S) of true -> - {[{HKey, longstr, S} | H0], undefined}; + {H0, S}; false -> - {H0, S} + {[{HKey, longstr, S} | H0], undefined} end; message_id(undefined, _HKey, H) -> {H, undefined}. @@ -660,7 +800,8 @@ message_id(undefined, _HKey, H) -> essential_properties(#content{} = C) -> #'P_basic'{delivery_mode = Mode, priority = Priority, - timestamp = TimestampRaw} = Props = C#content.properties, + timestamp = TimestampRaw, + headers = Headers} = Props = C#content.properties, {ok, MsgTTL} = rabbit_basic:parse_expiration(Props), Timestamp = case TimestampRaw of undefined -> @@ -670,15 +811,23 @@ essential_properties(#content{} = C) -> TimestampRaw * 1000 end, Durable = Mode == 2, + BccKeys = case rabbit_basic:header(<<"BCC">>, Headers) of + {<<"BCC">>, array, Routes} -> + [Route || {longstr, Route} <- Routes]; + _ -> + undefined + end, maps_put_truthy( - priority, Priority, + ?ANN_PRIORITY, Priority, maps_put_truthy( ttl, MsgTTL, maps_put_truthy( - timestamp, Timestamp, + ?ANN_TIMESTAMP, Timestamp, maps_put_falsy( - durable, Durable, - #{})))). + ?ANN_DURABLE, Durable, + maps_put_truthy( + bcc, BccKeys, + #{}))))). %% headers that are added as annotations during conversions is_internal_header(<<"x-basic-", _/binary>>) -> @@ -700,3 +849,6 @@ amqp10_section_header(Header, Headers) -> _ -> undefined end. + +amqp_encoded_binary(Section) -> + iolist_to_binary(amqp10_framing:encode_bin(Section)). 
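A few hedged examples of the 0.9.1/1.0 type mapping above (illustrative values; note that with this change both utf8/symbol and binary 1.0 values map back to 0.9.1 longstr):

%% mc_amqpl:from_091(longstr, <<"hello">>)      %% => {utf8, <<"hello">>} (short, valid UTF-8)
%% mc_amqpl:from_091(longstr, VeryLongValue)    %% => {binary, VeryLongValue} above ?LONGSTR_UTF8_LIMIT
%% mc_amqpl:to_091(<<"k">>, {symbol, <<"v">>})  %% => {<<"k">>, longstr, <<"v">>}
%% mc_amqpl:to_091(<<"k">>, {binary, <<1,2>>})  %% => {<<"k">>, longstr, <<1,2>>}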
diff --git a/deps/rabbit/src/mc_compat.erl b/deps/rabbit/src/mc_compat.erl index 571acc4a8675..289a5332cd58 100644 --- a/deps/rabbit/src/mc_compat.erl +++ b/deps/rabbit/src/mc_compat.erl @@ -14,6 +14,7 @@ is_persistent/1, ttl/1, correlation_id/1, + user_id/1, message_id/1, timestamp/1, priority/1, @@ -25,10 +26,9 @@ protocol_state/1, %serialize/1, prepare/2, - record_death/3, + record_death/4, is_death_cycle/2, %deaths/1, - last_death/1, death_queue_names/1 ]). @@ -49,18 +49,20 @@ is(_) -> false. -spec get_annotation(mc:ann_key(), state()) -> mc:ann_value() | undefined. -get_annotation(routing_keys, #basic_message{routing_keys = RKeys}) -> +get_annotation(?ANN_ROUTING_KEYS, #basic_message{routing_keys = RKeys}) -> RKeys; -get_annotation(exchange, #basic_message{exchange_name = Ex}) -> +get_annotation(?ANN_EXCHANGE, #basic_message{exchange_name = Ex}) -> Ex#resource.name; get_annotation(id, #basic_message{id = Id}) -> - Id. + Id; +get_annotation(_Key, #basic_message{}) -> + undefined. set_annotation(id, Value, #basic_message{} = Msg) -> Msg#basic_message{id = Value}; -set_annotation(routing_keys, Value, #basic_message{} = Msg) -> +set_annotation(?ANN_ROUTING_KEYS, Value, #basic_message{} = Msg) -> Msg#basic_message{routing_keys = Value}; -set_annotation(exchange, Value, #basic_message{exchange_name = Ex} = Msg) -> +set_annotation(?ANN_EXCHANGE, Value, #basic_message{exchange_name = Ex} = Msg) -> Msg#basic_message{exchange_name = Ex#resource{name = Value}}; set_annotation(<<"x-", _/binary>> = Key, Value, #basic_message{content = Content0} = Msg) -> @@ -88,7 +90,7 @@ set_annotation(<<"x-", _/binary>> = Key, Value, Msg#basic_message{content = C}; set_annotation(<<"timestamp_in_ms">> = Name, Value, #basic_message{} = Msg) -> rabbit_basic:add_header(Name, long, Value, Msg); -set_annotation(timestamp, Millis, +set_annotation(?ANN_TIMESTAMP, Millis, #basic_message{content = #content{properties = B} = C0} = Msg) -> C = C0#content{properties = B#'P_basic'{timestamp = Millis div 1000}, properties_bin = none}, @@ -106,6 +108,9 @@ timestamp(#basic_message{content = Content}) -> priority(#basic_message{content = Content}) -> get_property(?FUNCTION_NAME, Content). +user_id(#basic_message{content = Content}) -> + get_property(?FUNCTION_NAME, Content). + correlation_id(#basic_message{content = Content}) -> case get_property(?FUNCTION_NAME, Content) of undefined -> @@ -152,7 +157,7 @@ prepare(store, Msg) -> record_death(Reason, SourceQueue, #basic_message{content = Content, exchange_name = Exchange, - routing_keys = RoutingKeys} = Msg) -> + routing_keys = RoutingKeys} = Msg, _Env) -> % HeadersFun1 = fun (H) -> lists:keydelete(<<"CC">>, 1, H) end, ReasonBin = atom_to_binary(Reason), TimeSec = os:system_time(seconds), @@ -357,26 +362,6 @@ death_queue_names(#basic_message{content = Content}) -> [] end. -last_death(#basic_message{content = Content}) -> - #content{properties = #'P_basic'{headers = Headers}} = - rabbit_binary_parser:ensure_content_decoded(Content), - %% TODO: review this conversion and/or change the API - case rabbit_misc:table_lookup(Headers, <<"x-death">>) of - {array, [{table, Info} | _]} -> - X = x_death_event_key(Info, <<"exchange">>), - Q = x_death_event_key(Info, <<"queue">>), - T = x_death_event_key(Info, <<"time">>, 0), - Keys = x_death_event_key(Info, <<"routing_keys">>), - Count = x_death_event_key(Info, <<"count">>), - {Q, #death{exchange = X, - anns = #{first_time => T * 1000, - last_time => T * 1000}, - routing_keys = Keys, - count = Count}}; - _ -> - undefined - end. 
- get_property(P, #content{properties = none} = Content) -> %% this is inefficient but will only apply to old messages that are %% not containerized @@ -384,6 +369,13 @@ get_property(durable, #content{properties = #'P_basic'{delivery_mode = Mode}}) -> Mode == 2; +get_property(user_id, + #content{properties = #'P_basic'{user_id = UserId}}) -> + if UserId =:= undefined -> + undefined; + is_binary(UserId) -> + {binary, UserId} + end; get_property(ttl, #content{properties = Props}) -> {ok, MsgTTL} = rabbit_basic:parse_expiration(Props), MsgTTL; diff --git a/deps/rabbit/src/mc_util.erl b/deps/rabbit/src/mc_util.erl index 6c73d6019b15..669dace41f45 100644 --- a/deps/rabbit/src/mc_util.erl +++ b/deps/rabbit/src/mc_util.erl @@ -1,8 +1,11 @@ -module(mc_util). +-include("mc.hrl"). + -export([is_valid_shortstr/1, is_utf8_no_null/1, - uuid_to_string/1, + uuid_to_urn_string/1, + urn_string_to_uuid/1, infer_type/1, utf8_string_is_ascii/1, amqp_map_get/3, @@ -10,25 +13,35 @@ ]). -spec is_valid_shortstr(term()) -> boolean(). -is_valid_shortstr(Bin) when byte_size(Bin) < 256 -> +is_valid_shortstr(Bin) when ?IS_SHORTSTR_LEN(Bin) -> is_utf8_no_null(Bin); is_valid_shortstr(_) -> false. -is_utf8_no_null(<<>>) -> - true; -is_utf8_no_null(<<0, _/binary>>) -> - false; -is_utf8_no_null(<<_/utf8, Rem/binary>>) -> - is_utf8_no_null(Rem); -is_utf8_no_null(_) -> - false. +-spec is_utf8_no_null(term()) -> boolean(). +is_utf8_no_null(Term) -> + utf8_scan(Term, fun (C) -> C > 0 end). --spec uuid_to_string(binary()) -> binary(). -uuid_to_string(<<TL:32, TM:16, THV:16, CSR:8, CSL:8, N:48>>) -> - list_to_binary( - io_lib:format(<<"urn:uuid:~8.16.0b-~4.16.0b-~4.16.0b-~2.16.0b~2.16.0b-~12.16.0b">>, - [TL, TM, THV, CSR, CSL, N])). +-spec uuid_to_urn_string(binary()) -> binary(). +uuid_to_urn_string(<<TL:4/binary, TM:2/binary, THV:2/binary, CSR:1/binary, CSL:1/binary, N:6/binary>>) -> + Delim = <<"-">>, + iolist_to_binary( + [<<"urn:uuid:">>, + binary:encode_hex(TL, lowercase), Delim, + binary:encode_hex(TM, lowercase), Delim, + binary:encode_hex(THV, lowercase), Delim, + binary:encode_hex(CSR, lowercase), + binary:encode_hex(CSL, lowercase), Delim, + binary:encode_hex(N, lowercase)]). + +-spec urn_string_to_uuid(binary()) -> + {ok, binary()} | {error, not_urn_string}. +urn_string_to_uuid(<<"urn:uuid:", UuidStr:36/binary>>) -> + Parts = binary:split(UuidStr, <<"-">>, [global]), + {ok, iolist_to_binary([binary:decode_hex(Part) || Part <- Parts])}; +urn_string_to_uuid(_) -> + {error, not_urn_string}. infer_type(undefined) -> @@ -43,13 +56,8 @@ infer_type({T, _} = V) when is_atom(T) -> %% looks like a pre-tagged type V. -utf8_string_is_ascii(UTF8String) - when is_binary(UTF8String) -> - List = unicode:characters_to_list(UTF8String), - lists:all(fun(Char) -> - Char >= 0 andalso - Char < 128 - end, List). +utf8_string_is_ascii(UTF8String) -> + utf8_scan(UTF8String, fun(Char) -> Char >= 0 andalso Char < 128 end). amqp_map_get(Key, {map, List}, Default) -> amqp_map_get(Key, List, Default); @@ -68,3 +76,17 @@ is_x_header(<<"x-", _/binary>>) -> true; is_x_header(_) -> false. + +%% INTERNAL + +utf8_scan(<<>>, _Pred) -> + true; +utf8_scan(<<C/utf8, Rem/binary>>, Pred) -> + case Pred(C) of + true -> + utf8_scan(Rem, Pred); + false -> + false + end; +utf8_scan(_, _Pred) -> + false. diff --git a/deps/rabbit/src/mirrored_supervisor.erl b/deps/rabbit/src/mirrored_supervisor.erl index b1660d67d259..1aa33413cbf5 100644 --- a/deps/rabbit/src/mirrored_supervisor.erl +++ b/deps/rabbit/src/mirrored_supervisor.erl @@ -2,7 +2,7 @@ %% License, v. 2.0.
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(mirrored_supervisor). @@ -137,7 +137,11 @@ -type startlink_err() :: {'already_started', pid()} | 'shutdown' | term(). -type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}. --type group_name() :: any(). +-type group_name() :: module(). +-type child_id() :: term(). %% supervisor:child_id() is not exported. + +-export_type([group_name/0, + child_id/0]). -spec start_link(GroupName, Module, Args) -> startlink_ret() when GroupName :: group_name(), diff --git a/deps/rabbit/src/mirrored_supervisor.hrl b/deps/rabbit/src/mirrored_supervisor.hrl new file mode 100644 index 000000000000..5287d1508e2a --- /dev/null +++ b/deps/rabbit/src/mirrored_supervisor.hrl @@ -0,0 +1 @@ +-record(mirrored_sup_childspec, {key, mirroring_pid, childspec}). diff --git a/deps/rabbit/src/mirrored_supervisor_sups.erl b/deps/rabbit/src/mirrored_supervisor_sups.erl index 06650ebb27a8..f953e74bb6bd 100644 --- a/deps/rabbit/src/mirrored_supervisor_sups.erl +++ b/deps/rabbit/src/mirrored_supervisor_sups.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(mirrored_supervisor_sups). diff --git a/deps/rabbit/src/pg_local.erl b/deps/rabbit/src/pg_local.erl index 3fc2220acd80..df43c0dcd8f0 100644 --- a/deps/rabbit/src/pg_local.erl +++ b/deps/rabbit/src/pg_local.erl @@ -13,7 +13,7 @@ %% versions of Erlang/OTP. The remaining type specs have been %% removed. -%% All modifications are (C) 2010-2023 VMware, Inc. or its affiliates. +%% All modifications are (C) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %CopyrightBegin% %% diff --git a/deps/rabbit/src/pid_recomposition.erl b/deps/rabbit/src/pid_recomposition.erl index a0bbc322fc4a..491c6027e55a 100644 --- a/deps/rabbit/src/pid_recomposition.erl +++ b/deps/rabbit/src/pid_recomposition.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(pid_recomposition). diff --git a/deps/rabbit/src/rabbit.erl b/deps/rabbit/src/rabbit.erl index a5d8061828d7..b164dd0a23a0 100644 --- a/deps/rabbit/src/rabbit.erl +++ b/deps/rabbit/src/rabbit.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit). 
@@ -29,14 +29,17 @@ base_product_name/0, base_product_version/0, motd_file/0, - motd/0]). + motd/0, + pg_local_scope/1]). %% For CLI, testing and mgmt-agent. -export([set_log_level/1, log_locations/0, config_files/0]). -export([is_booted/1, is_booted/0, is_booting/1, is_booting/0]). %%--------------------------------------------------------------------------- %% Boot steps. --export([maybe_insert_default_data/0, boot_delegate/0, recover/0]). +-export([maybe_insert_default_data/0, boot_delegate/0, recover/0, + pg_local_amqp_session/0, + pg_local_amqp_connection/0]). %% for tests -export([validate_msg_store_io_batch_size_and_credit_disc_bound/2]). @@ -148,13 +151,6 @@ [{description, "kernel ready"}, {requires, external_infrastructure}]}). --rabbit_boot_step({rabbit_memory_monitor, - [{description, "memory monitor"}, - {mfa, {rabbit_sup, start_restartable_child, - [rabbit_memory_monitor]}}, - {requires, rabbit_alarm}, - {enables, core_initialized}]}). - -rabbit_boot_step({guid_generator, [{description, "guid generator"}, {mfa, {rabbit_sup, start_restartable_child, @@ -229,12 +225,6 @@ {requires, [core_initialized, recovery]}, {enables, routing_ready}]}). --rabbit_boot_step({rabbit_looking_glass, - [{description, "Looking Glass tracer and profiler"}, - {mfa, {rabbit_looking_glass, boot, []}}, - {requires, [core_initialized, recovery]}, - {enables, routing_ready}]}). - -rabbit_boot_step({rabbit_observer_cli, [{description, "Observer CLI configuration"}, {mfa, {rabbit_observer_cli, init, []}}, @@ -267,9 +257,29 @@ {mfa, {logger, debug, ["'networking' boot step skipped and moved to end of startup", [], #{domain => ?RMQLOG_DOMAIN_GLOBAL}]}}, {requires, notify_cluster}]}). +%% This mechanism is necessary in environments where a cluster is formed in parallel, +%% which is the case with many container orchestration tools. +%% In such scenarios, a virtual host can be declared before the cluster is formed and all +%% cluster members are known, e.g. via definition import. +-rabbit_boot_step({virtual_host_reconciliation, + [{description, "makes sure all virtual host have running processes on all nodes"}, + {mfa, {rabbit_vhosts, boot, []}}, + {requires, notify_cluster}]}). + +-rabbit_boot_step({pg_local_amqp_session, + [{description, "local-only pg scope for AMQP sessions"}, + {mfa, {rabbit, pg_local_amqp_session, []}}, + {requires, kernel_ready}, + {enables, core_initialized}]}). + +-rabbit_boot_step({pg_local_amqp_connection, + [{description, "local-only pg scope for AMQP connections"}, + {mfa, {rabbit, pg_local_amqp_connection, []}}, + {requires, kernel_ready}, + {enables, core_initialized}]}). + %%--------------------------------------------------------------------------- --include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -define(APPS, [os_mon, mnesia, rabbit_common, rabbitmq_prelaunch, ra, sysmon_handler, rabbit, osiris]). @@ -356,14 +366,40 @@ run_prelaunch_second_phase() -> %% 3. Logging. ok = rabbit_prelaunch_logging:setup(Context), - %% 4. Clustering. - ok = rabbit_prelaunch_cluster:setup(Context), + %% The clustering steps requires Khepri to be started to check for + %% consistency. This is the opposite compared to Mnesia which must be + %% stopped. That's why we setup Khepri and the coordination Ra system it + %% depends on before, but only handle Mnesia after. + %% + %% We also always set it up, even when using Mnesia, to ensure it is ready + %% if/when the migration begins. 
+ %% + %% Note that this is only the Khepri store which is started here. We + %% perform additional initialization steps in `rabbit_db:init()' which is + %% triggered from a boot step. This boot step handles both Mnesia and + %% Khepri and synchronizes the feature flags. + %% + %% To sum up: + %% 1. We start the Khepri store (always) + %% 2. We verify the cluster, including the feature flags compatibility + %% 3. We start Mnesia (if Khepri is unused) + %% 4. We synchronize feature flags in `rabbit_db:init()' + %% 4. We finish to initialize either Mnesia or Khepri in `rabbit_db:init()' + ok = rabbit_ra_systems:setup(Context), + ok = rabbit_khepri:setup(Context), - %% Start Mnesia now that everything is ready. - ?LOG_DEBUG("Starting Mnesia"), - ok = mnesia:start(), + %% 4. Clustering checks. This covers the compatibility between nodes, + %% feature-flags-wise. + ok = rabbit_prelaunch_cluster:setup(Context), - ok = rabbit_ra_systems:setup(Context), + case rabbit_khepri:is_enabled() of + true -> + ok; + false -> + %% Start Mnesia now that everything is ready. + ?LOG_DEBUG("Starting Mnesia"), + ok = mnesia:start() + end, ?LOG_DEBUG(""), ?LOG_DEBUG("== Prelaunch DONE =="), @@ -377,24 +413,32 @@ run_prelaunch_second_phase() -> start_it(StartType) -> case spawn_boot_marker() of {ok, Marker} -> - T0 = erlang:timestamp(), ?LOG_INFO("RabbitMQ is asked to start...", [], #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), try - {ok, _} = application:ensure_all_started(rabbitmq_prelaunch, - StartType), - {ok, _} = application:ensure_all_started(rabbit, - StartType), - ok = wait_for_ready_or_stopped(), - - T1 = erlang:timestamp(), - ?LOG_DEBUG( - "Time to start RabbitMQ: ~tp us", - [timer:now_diff(T1, T0)]), + {Millis, ok} = timer:tc( + fun() -> + {ok, _} = application:ensure_all_started( + rabbitmq_prelaunch, StartType), + {ok, _} = application:ensure_all_started( + rabbit, StartType), + wait_for_ready_or_stopped() + end, millisecond), + ?LOG_INFO("Time to start RabbitMQ: ~b ms", [Millis]), stop_boot_marker(Marker), ok catch error:{badmatch, Error}:_ -> + %% `rabbitmq_prelaunch' was started before `rabbit' above. + %% If the latter fails to start, we must stop the former as + %% well. + %% + %% This is important if the environment changes between + %% that error and the next attempt to start `rabbit': the + %% environment is only read during the start of + %% `rabbitmq_prelaunch' (and the cached context is cleaned + %% on stop). + _ = application:stop(rabbitmq_prelaunch), stop_boot_marker(Marker), case StartType of temporary -> throw(Error); @@ -681,7 +725,6 @@ maybe_print_boot_progress(true, IterationsLeft) -> status() -> Version = base_product_version(), [CryptoLibInfo] = crypto:info_lib(), - SeriesSupportStatus = rabbit_release_series:readable_support_status(), S1 = [{pid, list_to_integer(os:getpid())}, %% The timeout value used is twice that of gen_server:call/2. 
{running_applications, rabbit_misc:which_applications()}, @@ -689,7 +732,6 @@ status() -> {rabbitmq_version, Version}, {crypto_lib_info, CryptoLibInfo}, {erlang_version, erlang:system_info(system_version)}, - {release_series_support_status, SeriesSupportStatus}, {memory, rabbit_vm:memory()}, {alarms, alarms()}, {is_under_maintenance, rabbit_maintenance:is_being_drained_local_read(node())}, @@ -726,7 +768,7 @@ status() -> true -> [{virtual_host_count, rabbit_vhost:count()}, {connection_count, - length(rabbit_networking:connections_local()) + + length(rabbit_networking:local_connections()) + length(rabbit_networking:local_non_amqp_connections())}, {queue_count, total_queue_count()}]; false -> @@ -888,7 +930,6 @@ start(normal, []) -> ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE], #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}) end, - maybe_warn_about_release_series_eol(), log_motd(), {ok, SupPid} = rabbit_sup:start_link(), @@ -984,6 +1025,20 @@ do_run_postlaunch_phase(Plugins) -> ?LOG_DEBUG(""), ?LOG_DEBUG("== Plugins (postlaunch phase) =="), + %% Before loading plugins, set the prometheus collectors and + %% instrumenters to the empty list. By default, prometheus will attempt + %% to find all implementers of its collector and instrumenter + %% behaviours by scanning all available modules during application + %% start. This can take significant time (on the order of seconds) due + %% to the large number of modules available. + %% + %% * Collectors: the `rabbitmq_prometheus' plugin explicitly registers + %% all collectors. + %% * Instrumenters: no instrumenters are used. + _ = application:load(prometheus), + ok = application:set_env(prometheus, collectors, [default]), + ok = application:set_env(prometheus, instrumenters, []), + %% However, we want to run their boot steps and actually start %% them one by one, to ensure a dependency is fully started %% before a plugin which depends on it gets a chance to start. @@ -1015,7 +1070,13 @@ do_run_postlaunch_phase(Plugins) -> ok = log_broker_started(StrictlyPlugins), ?LOG_DEBUG("Marking ~ts as running", [product_name()]), - rabbit_boot_state:set(ready) + rabbit_boot_state:set(ready), + + %% Now that everything is ready, trigger the garbage collector. With + %% Khepri enabled, it seems to be more important than before; see #5515 + %% for context. + _ = rabbit_runtime:gc_all_processes(), + ok catch throw:{error, _} = Error -> rabbit_prelaunch_errors:log_error(Error), @@ -1047,6 +1108,7 @@ stop(State) -> [] -> rabbit_prelaunch:set_stop_reason(normal); _ -> rabbit_prelaunch:set_stop_reason(State) end, + rabbit_db:clear_init_finished(), rabbit_boot_state:set(stopped), ok. @@ -1062,8 +1124,18 @@ boot_delegate() -> -spec recover() -> 'ok'. recover() -> - ok = rabbit_vhost:recover(), - ok. + ok = rabbit_vhost:recover(). + +pg_local_amqp_session() -> + PgScope = pg_local_scope(amqp_session), + rabbit_sup:start_child(pg_amqp_session, pg, [PgScope]). + +pg_local_amqp_connection() -> + PgScope = pg_local_scope(amqp_connection), + rabbit_sup:start_child(pg_amqp_connection, pg, [PgScope]). + +pg_local_scope(Prefix) -> + list_to_atom(io_lib:format("~s_~s", [Prefix, node()])). -spec maybe_insert_default_data() -> 'ok'. 
@@ -1238,7 +1310,6 @@ print_banner() -> %% padded list lines {LogFmt, LogLocations} = LineListFormatter("~n ~ts", log_locations()), {CfgFmt, CfgLocations} = LineListFormatter("~n ~ts", config_locations()), - SeriesSupportStatus = rabbit_release_series:readable_support_status(), {MOTDFormat, MOTDArgs} = case motd() of undefined -> {"", []}; @@ -1256,34 +1327,23 @@ print_banner() -> MOTDFormat ++ "~n Erlang: ~ts [~ts]" "~n TLS Library: ~ts" - "~n Release series support status: ~ts" + "~n Release series support status: see https://www.rabbitmq.com/release-information" "~n" - "~n Doc guides: https://rabbitmq.com/documentation.html" - "~n Support: https://rabbitmq.com/contact.html" - "~n Tutorials: https://rabbitmq.com/getstarted.html" - "~n Monitoring: https://rabbitmq.com/monitoring.html" + "~n Doc guides: https://www.rabbitmq.com/docs" + "~n Support: https://www.rabbitmq.com/docs/contact" + "~n Tutorials: https://www.rabbitmq.com/tutorials" + "~n Monitoring: https://www.rabbitmq.com/docs/monitoring" + "~n Upgrading: https://www.rabbitmq.com/docs/upgrade" "~n" "~n Logs: ~ts" ++ LogFmt ++ "~n" "~n Config file(s): ~ts" ++ CfgFmt ++ "~n" "~n Starting broker...", [Product, Version, ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE] ++ - [rabbit_misc:otp_release(), emu_flavor(), crypto_version(), - SeriesSupportStatus] ++ + [rabbit_misc:otp_release(), emu_flavor(), crypto_version()] ++ MOTDArgs ++ LogLocations ++ CfgLocations). -maybe_warn_about_release_series_eol() -> - case rabbit_release_series:is_currently_supported() of - false -> - %% we intentionally log this as an error for increased visibiity - ?LOG_ERROR("This release series has reached end of life " - "and is no longer supported. " - "Please visit https://rabbitmq.com/versions.html " - "to learn more and upgrade"); - _ -> ok - end. - emu_flavor() -> %% emu_flavor was introduced in Erlang 24 so we need to catch the error on Erlang 23 case catch(erlang:system_info(emu_flavor)) of @@ -1606,8 +1666,9 @@ config_files() -> start_fhc() -> ok = rabbit_sup:start_restartable_child( file_handle_cache, - [fun rabbit_alarm:set_alarm/1, fun rabbit_alarm:clear_alarm/1]), - ensure_working_fhc(). + [fun(_) -> ok end, fun(_) -> ok end]), + ensure_working_fhc(), + maybe_warn_low_fd_limit(). ensure_working_fhc() -> %% To test the file handle cache, we simply read a file we know it @@ -1629,7 +1690,7 @@ ensure_working_fhc() -> #{domain => ?RMQLOG_DOMAIN_GLOBAL}), ?LOG_INFO("FHC write buffering: ~ts", [WriteBuf], #{domain => ?RMQLOG_DOMAIN_GLOBAL}), - Filename = filename:join(code:lib_dir(kernel, ebin), "kernel.app"), + Filename = filename:join(code:lib_dir(kernel), "ebin/kernel.app"), {ok, Fd} = file_handle_cache:open(Filename, [raw, binary, read], []), {ok, _} = file_handle_cache:read(Fd, 1), ok = file_handle_cache:close(Fd), @@ -1647,6 +1708,16 @@ ensure_working_fhc() -> throw({ensure_working_fhc, {timeout, TestPid}}) end. +maybe_warn_low_fd_limit() -> + case file_handle_cache:ulimit() of + %% unknown is included as atom() > integer(). + L when L > 1024 -> + ok; + L -> + rabbit_log:warning("Available file handles: ~tp. " + "Please consider increasing system limits", [L]) + end. + %% Any configuration that %% 1. is not allowed to change while RabbitMQ is running, and %% 2. is read often @@ -1656,16 +1727,28 @@ persist_static_configuration() -> [classic_queue_index_v2_segment_entry_count, classic_queue_store_v2_max_cache_size, classic_queue_store_v2_check_crc32, - incoming_message_interceptors - ]). 
+ incoming_message_interceptors, + credit_flow_default_credit + ]), + + %% Disallow 0 as it means unlimited: + %% "If this field is zero or unset, there is no maximum + %% size imposed by the link endpoint." [AMQP 1.0 §2.7.3] + MaxMsgSize = case application:get_env(?MODULE, max_message_size) of + {ok, Size} + when is_integer(Size) andalso Size > 0 -> + erlang:min(Size, ?MAX_MSG_SIZE); + _ -> + ?MAX_MSG_SIZE + end, + ok = persistent_term:put(max_message_size, MaxMsgSize). persist_static_configuration(Params) -> - App = ?MODULE, lists:foreach( fun(Param) -> - case application:get_env(App, Param) of + case application:get_env(?MODULE, Param) of {ok, Value} -> - ok = persistent_term:put({App, Param}, Value); + ok = persistent_term:put(Param, Value); undefined -> ok end diff --git a/deps/rabbit/src/rabbit_access_control.erl b/deps/rabbit/src/rabbit_access_control.erl index 163f310d3e3e..cfc8b591eb3f 100644 --- a/deps/rabbit/src/rabbit_access_control.erl +++ b/deps/rabbit/src/rabbit_access_control.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_access_control). @@ -10,9 +10,10 @@ -include_lib("rabbit_common/include/rabbit.hrl"). -export([check_user_pass_login/2, check_user_login/2, check_user_loopback/2, - check_vhost_access/4, check_resource_access/4, check_topic_access/4]). + check_vhost_access/4, check_resource_access/4, check_topic_access/4, + check_user_id/2]). --export([permission_cache_can_expire/1, update_state/2]). +-export([permission_cache_can_expire/1, update_state/2, expiry_timestamp/1]). %%---------------------------------------------------------------------------- @@ -188,7 +189,7 @@ check_resource_access(User = #user{username = Username, check_access( fun() -> Module:check_resource_access( auth_user(User, Impl), Resource, Permission, Context) end, - Module, "~s access to ~s refused for user '~s'", + Module, "~s access to ~ts refused for user '~ts'", [Permission, rabbit_misc:rs(Resource), Username]); (_, Else) -> Else end, ok, Modules). @@ -201,7 +202,7 @@ check_topic_access(User = #user{username = Username, check_access( fun() -> Module:check_topic_access( auth_user(User, Impl), Resource, Permission, Context) end, - Module, "~s access to topic '~s' in exchange ~s refused for user '~s'", + Module, "~s access to topic '~ts' in exchange ~ts refused for user '~ts'", [Permission, maps:get(routing_key, Context), rabbit_misc:rs(Resource), Username]); (_, Else) -> Else end, ok, Modules). @@ -222,6 +223,31 @@ check_access(Fun, Module, ErrStr, ErrArgs, ErrName) -> rabbit_misc:protocol_error(ErrName, FullErrStr, FullErrArgs) end. +-spec check_user_id(mc:state(), rabbit_types:user()) -> + ok | {refused, string(), [term()]}. +check_user_id(Message, ActualUser) -> + case mc:user_id(Message) of + undefined -> + ok; + {binary, ClaimedUserName} -> + check_user_id0(ClaimedUserName, ActualUser) + end. 
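A brief sketch of the validation above (names are hypothetical): a user-id property, when present, must equal the authenticated username unless the user carries the impersonator tag or the dummy authn backend is in use, as handled by the check_user_id0/2 clauses that follow.

%% check_user_id0(<<"bob">>, #user{username = <<"alice">>, tags = []})
%%   => {refused, "user_id property set to '~ts' but authenticated user was '~ts'",
%%       [<<"bob">>, <<"alice">>]}
%% check_user_id0(<<"bob">>, #user{username = <<"alice">>, tags = [impersonator]})
%%   => ok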
+ +check_user_id0(Username, #user{username = Username}) -> + ok; +check_user_id0(_, #user{authz_backends = [{rabbit_auth_backend_dummy, _}]}) -> + ok; +check_user_id0(ClaimedUserName, #user{username = ActualUserName, + tags = Tags}) -> + case lists:member(impersonator, Tags) of + true -> + ok; + false -> + {refused, + "user_id property set to '~ts' but authenticated user was '~ts'", + [ClaimedUserName, ActualUserName]} + end. + -spec update_state(User :: rabbit_types:user(), NewState :: term()) -> {'ok', rabbit_types:auth_user()} | {'refused', string()} | @@ -232,15 +258,16 @@ update_state(User = #user{authz_backends = Backends0}, NewState) -> %% backends is in reverse order from the original list. Backends = lists:foldl( fun({Module, Impl}, {ok, Acc}) -> - case Module:state_can_expire() of - true -> - case Module:update_state(auth_user(User, Impl), NewState) of + AuthUser = auth_user(User, Impl), + case Module:expiry_timestamp(AuthUser) of + never -> + {ok, [{Module, Impl} | Acc]}; + _ -> + case Module:update_state(AuthUser, NewState) of {ok, #auth_user{impl = Impl1}} -> {ok, [{Module, Impl1} | Acc]}; Else -> Else - end; - false -> - {ok, [{Module, Impl} | Acc]} + end end; (_, {error, _} = Err) -> Err; (_, {refused, _, _} = Err) -> Err @@ -254,5 +281,19 @@ update_state(User = #user{authz_backends = Backends0}, NewState) -> %% Returns true if any of the backends support credential expiration, %% otherwise returns false. -permission_cache_can_expire(#user{authz_backends = Backends}) -> - lists:any(fun ({Module, _State}) -> Module:state_can_expire() end, Backends). +permission_cache_can_expire(User) -> + expiry_timestamp(User) =/= never. + +-spec expiry_timestamp(User :: rabbit_types:user()) -> integer() | never. +expiry_timestamp(User = #user{authz_backends = Modules}) -> + lists:foldl(fun({Module, Impl}, Ts0) -> + case Module:expiry_timestamp(auth_user(User, Impl)) of + Ts1 when is_integer(Ts0) andalso is_integer(Ts1) + andalso Ts1 > Ts0 -> + Ts0; + Ts1 when is_integer(Ts1) -> + Ts1; + _ -> + Ts0 + end + end, never, Modules). diff --git a/deps/rabbit/src/rabbit_alarm.erl b/deps/rabbit/src/rabbit_alarm.erl index 750b3f222d2b..532a72f19b58 100644 --- a/deps/rabbit/src/rabbit_alarm.erl +++ b/deps/rabbit/src/rabbit_alarm.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% There are two types of alarms handled by this module: %% @@ -50,7 +50,7 @@ -type resource_alarm() :: {resource_limit, resource_alarm_source(), node()}. -type alarm() :: local_alarm() | resource_alarm(). -type resource_alert() :: {WasAlarmSetForNode :: boolean(), - IsThereAnyAlarmsWithSameSourceInTheCluster :: boolean(), + IsThereAnyAlarmWithSameSourceInTheCluster :: boolean(), NodeForWhichAlarmWasSetOrCleared :: node()}. %%---------------------------------------------------------------------------- diff --git a/deps/rabbit/src/rabbit_amqp1_0.erl b/deps/rabbit/src/rabbit_amqp1_0.erl new file mode 100644 index 000000000000..c63f471919c7 --- /dev/null +++ b/deps/rabbit/src/rabbit_amqp1_0.erl @@ -0,0 +1,44 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. 
If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_amqp1_0). + +-export([list_local/0, + register_connection/1]). + +%% Below 2 functions are deprecated. +%% They could be called in 3.13 / 4.0 mixed version clusters by the old 3.13 CLI command +%% rabbitmqctl list_amqp10_connections +-export([emit_connection_info_local/3, + emit_connection_info_all/4]). + +emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) -> + Pids = [spawn_link(Node, rabbit_amqp1_0, emit_connection_info_local, + [Items, Ref, AggregatorPid]) + || Node <- Nodes], + rabbit_control_misc:await_emitters_termination(Pids), + ok. + +emit_connection_info_local(Items, Ref, AggregatorPid) -> + ConnectionPids = list_local(), + rabbit_control_misc:emitting_map_with_exit_handler( + AggregatorPid, + Ref, + fun(Pid) -> + rabbit_amqp_reader:info(Pid, Items) + end, + ConnectionPids). + +-spec list_local() -> [pid()]. +list_local() -> + pg:which_groups(pg_scope()). + +-spec register_connection(pid()) -> ok. +register_connection(Pid) -> + ok = pg:join(pg_scope(), Pid, Pid). + +pg_scope() -> + rabbit:pg_local_scope(amqp_connection). diff --git a/deps/rabbit/src/rabbit_amqp_management.erl b/deps/rabbit/src/rabbit_amqp_management.erl new file mode 100644 index 000000000000..e4555e806033 --- /dev/null +++ b/deps/rabbit/src/rabbit_amqp_management.erl @@ -0,0 +1,734 @@ +-module(rabbit_amqp_management). + +-include("rabbit_amqp.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-export([handle_request/5]). + +-import(rabbit_amqp_session, + [check_resource_access/4, + check_read_permitted_on_topic/4]). +-import(rabbit_misc, + [queue_resource/2, + exchange_resource/2]). + +-type permission_caches() :: {rabbit_amqp_session:permission_cache(), + rabbit_amqp_session:topic_permission_cache()}. + +-define(DEAD_LETTER_EXCHANGE_KEY, <<"x-dead-letter-exchange">>). + +-spec handle_request(binary(), + rabbit_types:vhost(), + rabbit_types:user(), + pid(), + permission_caches()) -> + {iolist(), permission_caches()}. +handle_request(Request, Vhost, User, ConnectionPid, PermCaches0) -> + ReqSections = amqp10_framing:decode_bin(Request), + + {#'v1_0.properties'{ + message_id = MessageId, + to = {utf8, HttpRequestTarget}, + subject = {utf8, HttpMethod}, + %% see Link Pair CS 01 §2.1 + %% https://docs.oasis-open.org/amqp/linkpair/v1.0/cs01/linkpair-v1.0-cs01.html#_Toc51331305 + reply_to = {utf8, <<"$me">>}}, + ReqBody + } = decode_req(ReqSections, {undefined, undefined}), + + {StatusCode, + RespBody, + PermCaches + } = try {PathSegments, QueryMap} = parse_uri(HttpRequestTarget), + handle_http_req(HttpMethod, + PathSegments, + QueryMap, + ReqBody, + Vhost, + User, + ConnectionPid, + PermCaches0) + catch throw:{?MODULE, StatusCode0, Explanation} -> + rabbit_log:warning("request ~ts ~ts failed: ~ts", + [HttpMethod, HttpRequestTarget, Explanation]), + {StatusCode0, {utf8, Explanation}, PermCaches0} + end, + + RespProps = #'v1_0.properties'{ + subject = {utf8, StatusCode}, + %% "To associate a response with a request, the correlation-id value of the response + %% properties MUST be set to the message-id value of the request properties." 
+ %% [HTTP over AMQP WD 06 §5.1] + correlation_id = MessageId}, + RespAppProps = #'v1_0.application_properties'{ + content = [ + {{utf8, <<"http:response">>}, {utf8, <<"1.1">>}} + ]}, + RespDataSect = #'v1_0.amqp_value'{content = RespBody}, + RespSections = [RespProps, RespAppProps, RespDataSect], + ?TRACE("HTTP over AMQP request:~n ~tp~nHTTP over AMQP response:~n ~tp", + [[amqp10_framing:pprint(Sect) || Sect <- ReqSections], + [amqp10_framing:pprint(Sect) || Sect <- RespSections]]), + IoList = [amqp10_framing:encode_bin(Sect) || Sect <- RespSections], + {IoList, PermCaches}. + +handle_http_req(<<"GET">>, + [<<"queues">>, QNameBinQuoted], + _Query, + null, + Vhost, + _User, + _ConnPid, + PermCaches) -> + QNameBin = rabbit_uri:urldecode(QNameBinQuoted), + QName = queue_resource(Vhost, QNameBin), + case rabbit_amqqueue:with( + QName, + fun(Q) -> + {ok, NumMsgs, NumConsumers} = rabbit_amqqueue:stat(Q), + RespPayload = encode_queue(Q, NumMsgs, NumConsumers), + {ok, {<<"200">>, RespPayload, PermCaches}} + end) of + {ok, Result} -> + Result; + {error, not_found} -> + throw(<<"404">>, "~ts not found", [rabbit_misc:rs(QName)]); + {error, {absent, Q, Reason}} -> + absent(Q, Reason) + end; + +handle_http_req(HttpMethod = <<"PUT">>, + PathSegments = [<<"queues">>, QNameBinQuoted], + Query, + ReqPayload, + Vhost, + User = #user{username = Username}, + ConnPid, + {PermCache0, TopicPermCache}) -> + #{durable := Durable, + auto_delete := AutoDelete, + exclusive := Exclusive, + arguments := QArgs0 + } = decode_queue(ReqPayload), + QNameBin = rabbit_uri:urldecode(QNameBinQuoted), + Owner = case Exclusive of + true -> ConnPid; + false -> none + end, + QArgs = rabbit_amqqueue:augment_declare_args( + Vhost, Durable, Exclusive, AutoDelete, QArgs0), + case QNameBin of + <<>> -> throw(<<"400">>, "declare queue with empty name not allowed", []); + _ -> ok + end, + ok = prohibit_cr_lf(QNameBin), + QName = queue_resource(Vhost, QNameBin), + ok = prohibit_reserved_amq(QName), + PermCache1 = check_resource_access(QName, configure, User, PermCache0), + rabbit_core_metrics:queue_declared(QName), + + {Q1, NumMsgs, NumConsumers, StatusCode, PermCache} = + case rabbit_amqqueue:with( + QName, + fun(Q) -> + try rabbit_amqqueue:assert_equivalence( + Q, Durable, AutoDelete, QArgs, Owner) of + ok -> + {ok, Msgs, Consumers} = rabbit_amqqueue:stat(Q), + {ok, {Q, Msgs, Consumers, <<"200">>, PermCache1}} + catch exit:#amqp_error{name = precondition_failed, + explanation = Expl} -> + throw(<<"409">>, Expl, []); + exit:#amqp_error{explanation = Expl} -> + throw(<<"400">>, Expl, []) + end + end) of + {ok, Result} -> + Result; + {error, not_found} -> + PermCache2 = check_dead_letter_exchange(QName, QArgs, User, PermCache1), + try rabbit_amqqueue:declare( + QName, Durable, AutoDelete, QArgs, Owner, Username) of + {new, Q} -> + rabbit_core_metrics:queue_created(QName), + {Q, 0, 0, <<"201">>, PermCache2}; + {owner_died, Q} -> + %% Presumably our own days are numbered since the + %% connection has died. Pretend the queue exists though, + %% just so nothing fails. + {Q, 0, 0, <<"201">>, PermCache2}; + {absent, Q, Reason} -> + absent(Q, Reason); + {existing, _Q} -> + %% Must have been created in the meantime. Loop around again. 
+ handle_http_req(HttpMethod, PathSegments, Query, ReqPayload, + Vhost, User, ConnPid, {PermCache2, TopicPermCache}); + {error, queue_limit_exceeded, Reason, ReasonArgs} -> + throw(<<"403">>, + Reason, + ReasonArgs); + {protocol_error, _ErrorType, Reason, ReasonArgs} -> + throw(<<"400">>, Reason, ReasonArgs) + catch exit:#amqp_error{name = precondition_failed, + explanation = Expl} -> + throw(<<"409">>, Expl, []); + exit:#amqp_error{explanation = Expl} -> + throw(<<"400">>, Expl, []) + end; + {error, {absent, Q, Reason}} -> + absent(Q, Reason) + end, + + RespPayload = encode_queue(Q1, NumMsgs, NumConsumers), + {StatusCode, RespPayload, {PermCache, TopicPermCache}}; + +handle_http_req(<<"PUT">>, + [<<"exchanges">>, XNameBinQuoted], + _Query, + ReqPayload, + Vhost, + User = #user{username = Username}, + _ConnPid, + {PermCache0, TopicPermCache}) -> + XNameBin = rabbit_uri:urldecode(XNameBinQuoted), + #{type := XTypeBin, + durable := Durable, + auto_delete := AutoDelete, + internal := Internal, + arguments := XArgs + } = decode_exchange(ReqPayload), + XTypeAtom = try rabbit_exchange:check_type(XTypeBin) + catch exit:#amqp_error{explanation = Explanation} -> + throw(<<"400">>, Explanation, []) + end, + XName = exchange_resource(Vhost, XNameBin), + ok = prohibit_default_exchange(XName), + PermCache = check_resource_access(XName, configure, User, PermCache0), + X = case rabbit_exchange:lookup(XName) of + {ok, FoundX} -> + FoundX; + {error, not_found} -> + ok = prohibit_cr_lf(XNameBin), + ok = prohibit_reserved_amq(XName), + case rabbit_exchange:declare( + XName, XTypeAtom, Durable, AutoDelete, + Internal, XArgs, Username) of + {ok, DeclaredX} -> + DeclaredX; + {error, timeout} -> + throw( + <<"503">>, + "Could not create ~ts because the operation " + "timed out", + [rabbit_misc:rs(XName)]) + end + end, + try rabbit_exchange:assert_equivalence( + X, XTypeAtom, Durable, AutoDelete, Internal, XArgs) of + ok -> + {<<"204">>, null, {PermCache, TopicPermCache}} + catch exit:#amqp_error{name = precondition_failed, + explanation = Expl} -> + throw(<<"409">>, Expl, []) + end; + +handle_http_req(<<"DELETE">>, + [<<"queues">>, QNameBinQuoted, <<"messages">>], + _Query, + null, + Vhost, + User, + ConnPid, + {PermCache0, TopicPermCache}) -> + QNameBin = rabbit_uri:urldecode(QNameBinQuoted), + QName = queue_resource(Vhost, QNameBin), + PermCache = check_resource_access(QName, read, User, PermCache0), + try rabbit_amqqueue:with_exclusive_access_or_die( + QName, ConnPid, + fun (Q) -> + case rabbit_queue_type:purge(Q) of + {ok, NumMsgs} -> + RespPayload = purge_or_delete_queue_response(NumMsgs), + {<<"200">>, RespPayload, {PermCache, TopicPermCache}}; + {error, not_supported} -> + throw(<<"400">>, + "purge not supported by ~ts", + [rabbit_misc:rs(QName)]) + end + end) + catch exit:#amqp_error{explanation = Explanation} -> + throw(<<"400">>, Explanation, []) + end; + +handle_http_req(<<"DELETE">>, + [<<"queues">>, QNameBinQuoted], + _Query, + null, + Vhost, + User = #user{username = Username}, + ConnPid, + {PermCache0, TopicPermCache}) -> + QNameBin = rabbit_uri:urldecode(QNameBinQuoted), + QName = queue_resource(Vhost, QNameBin), + ok = prohibit_cr_lf(QNameBin), + PermCache = check_resource_access(QName, configure, User, PermCache0), + try rabbit_amqqueue:delete_with(QName, ConnPid, false, false, Username, true) of + {ok, NumMsgs} -> + RespPayload = purge_or_delete_queue_response(NumMsgs), + {<<"200">>, RespPayload, {PermCache, TopicPermCache}} + catch exit:#amqp_error{explanation = Explanation} -> + 
throw(<<"400">>, Explanation, []) + end; + +handle_http_req(<<"DELETE">>, + [<<"exchanges">>, XNameBinQuoted], + _Query, + null, + Vhost, + User = #user{username = Username}, + _ConnPid, + {PermCache0, TopicPermCache}) -> + XNameBin = rabbit_uri:urldecode(XNameBinQuoted), + XName = exchange_resource(Vhost, XNameBin), + ok = prohibit_cr_lf(XNameBin), + ok = prohibit_default_exchange(XName), + ok = prohibit_reserved_amq(XName), + PermCache = check_resource_access(XName, configure, User, PermCache0), + case rabbit_exchange:ensure_deleted(XName, false, Username) of + ok -> + {<<"204">>, null, {PermCache, TopicPermCache}}; + {error, timeout} -> + throw( + <<"503">>, + "failed to delete ~ts due to a timeout", + [rabbit_misc:rs(XName)]) + end; + +handle_http_req(<<"POST">>, + [<<"bindings">>], + _Query, + ReqPayload, + Vhost, + User = #user{username = Username}, + ConnPid, + PermCaches0) -> + #{source := SrcXNameBin, + binding_key := BindingKey, + arguments := Args} = BindingMap = decode_binding(ReqPayload), + {DstKind, DstNameBin} = case BindingMap of + #{destination_queue := Bin} -> + {queue, Bin}; + #{destination_exchange := Bin} -> + {exchange, Bin} + end, + SrcXName = exchange_resource(Vhost, SrcXNameBin), + DstName = rabbit_misc:r(Vhost, DstKind, DstNameBin), + PermCaches = binding_checks(SrcXName, DstName, BindingKey, User, PermCaches0), + Binding = #binding{source = SrcXName, + destination = DstName, + key = BindingKey, + args = Args}, + ok = binding_action(add, Binding, Username, ConnPid), + {<<"204">>, null, PermCaches}; + +handle_http_req(<<"DELETE">>, + [<<"bindings">>, BindingSegment], + _Query, + null, + Vhost, + User = #user{username = Username}, + ConnPid, + PermCaches0) -> + {SrcXNameBin, + DstKind, + DstNameBin, + BindingKey, + ArgsHash} = decode_binding_path_segment(BindingSegment), + SrcXName = exchange_resource(Vhost, SrcXNameBin), + DstName = rabbit_misc:r(Vhost, DstKind, DstNameBin), + PermCaches = binding_checks(SrcXName, DstName, BindingKey, User, PermCaches0), + Bindings = rabbit_binding:list_for_source_and_destination(SrcXName, DstName), + case search_binding(BindingKey, ArgsHash, Bindings) of + {value, Binding} -> + ok = binding_action(remove, Binding, Username, ConnPid); + false -> + ok + end, + {<<"204">>, null, PermCaches}; + +handle_http_req(<<"GET">>, + [<<"bindings">>], + QueryMap = #{<<"src">> := SrcXNameBin, + <<"key">> := Key}, + null, + Vhost, + _User, + _ConnPid, + PermCaches) -> + {DstKind, + DstNameBin} = case QueryMap of + #{<<"dste">> := DstX} -> + {exchange, DstX}; + #{<<"dstq">> := DstQ} -> + {queue, DstQ}; + _ -> + throw(<<"400">>, + "missing 'dste' or 'dstq' in query: ~tp", + QueryMap) + end, + SrcXName = exchange_resource(Vhost, SrcXNameBin), + DstName = rabbit_misc:r(Vhost, DstKind, DstNameBin), + Bindings0 = rabbit_binding:list_for_source_and_destination(SrcXName, DstName), + Bindings = [B || B = #binding{key = K} <- Bindings0, K =:= Key], + RespPayload = encode_bindings(Bindings), + {<<"200">>, RespPayload, PermCaches}. 
+ +decode_queue({map, KVList}) -> + M = lists:foldl( + fun({{utf8, <<"durable">>}, V}, Acc) + when is_boolean(V) -> + Acc#{durable => V}; + ({{utf8, <<"exclusive">>}, V}, Acc) + when is_boolean(V) -> + Acc#{exclusive => V}; + ({{utf8, <<"auto_delete">>}, V}, Acc) + when is_boolean(V) -> + Acc#{auto_delete => V}; + ({{utf8, <<"arguments">>}, Args}, Acc) -> + Acc#{arguments => args_amqp_to_amqpl(Args)}; + (Prop, _Acc) -> + throw(<<"400">>, "bad queue property ~tp", [Prop]) + end, #{}, KVList), + Defaults = #{durable => true, + exclusive => false, + auto_delete => false, + arguments => []}, + maps:merge(Defaults, M). + +encode_queue(Q, NumMsgs, NumConsumers) -> + #resource{name = QNameBin} = amqqueue:get_name(Q), + Vhost = amqqueue:get_vhost(Q), + Durable = amqqueue:is_durable(Q), + AutoDelete = amqqueue:is_auto_delete(Q), + Exclusive = amqqueue:is_exclusive(Q), + QType = amqqueue:get_type(Q), + QArgs091 = amqqueue:get_arguments(Q), + QArgs = args_amqpl_to_amqp(QArgs091), + {Leader, Replicas} = queue_topology(Q), + KVList0 = [ + {{utf8, <<"message_count">>}, {ulong, NumMsgs}}, + {{utf8, <<"consumer_count">>}, {uint, NumConsumers}}, + {{utf8, <<"name">>}, {utf8, QNameBin}}, + {{utf8, <<"vhost">>}, {utf8, Vhost}}, + {{utf8, <<"durable">>}, {boolean, Durable}}, + {{utf8, <<"auto_delete">>}, {boolean, AutoDelete}}, + {{utf8, <<"exclusive">>}, {boolean, Exclusive}}, + {{utf8, <<"type">>}, {utf8, rabbit_queue_type:to_binary(QType)}}, + {{utf8, <<"arguments">>}, QArgs} + ], + KVList1 = if is_list(Replicas) -> + [{{utf8, <<"replicas">>}, + {array, utf8, [{utf8, atom_to_binary(R)} || R <- Replicas]} + } | KVList0]; + Replicas =:= undefined -> + KVList0 + end, + KVList = case Leader of + undefined -> + KVList1; + _ -> + [{{utf8, <<"leader">>}, + {utf8, atom_to_binary(Leader)} + } | KVList1] + end, + {map, KVList}. + +%% The returned Replicas contain both online and offline replicas. +-spec queue_topology(amqqueue:amqqueue()) -> + {Leader :: undefined | node(), Replicas :: undefined | [node(),...]}. +queue_topology(Q) -> + case amqqueue:get_type(Q) of + rabbit_quorum_queue -> + [{leader, Leader0}, + {members, Members}] = rabbit_queue_type:info(Q, [leader, members]), + Leader = case Leader0 of + '' -> undefined; + _ -> Leader0 + end, + {Leader, Members}; + rabbit_stream_queue -> + #{name := StreamId} = amqqueue:get_type_state(Q), + case rabbit_stream_coordinator:members(StreamId) of + {ok, Members} -> + maps:fold(fun(Node, {_Pid, writer}, {_, Replicas}) -> + {Node, [Node | Replicas]}; + (Node, {_Pid, replica}, {Writer, Replicas}) -> + {Writer, [Node | Replicas]} + end, {undefined, []}, Members); + {error, _} -> + {undefined, undefined} + end; + _ -> + Pid = amqqueue:get_pid(Q), + Node = node(Pid), + {Node, [Node]} + end. + +decode_exchange({map, KVList}) -> + M = lists:foldl( + fun({{utf8, <<"durable">>}, V}, Acc) + when is_boolean(V) -> + Acc#{durable => V}; + ({{utf8, <<"auto_delete">>}, V}, Acc) + when is_boolean(V) -> + Acc#{auto_delete => V}; + ({{utf8, <<"type">>}, {utf8, V}}, Acc) -> + Acc#{type => V}; + ({{utf8, <<"internal">>}, V}, Acc) + when is_boolean(V) -> + Acc#{internal => V}; + ({{utf8, <<"arguments">>}, Args}, Acc) -> + Acc#{arguments => args_amqp_to_amqpl(Args)}; + (Prop, _Acc) -> + throw(<<"400">>, "bad exchange property ~tp", [Prop]) + end, #{}, KVList), + Defaults = #{durable => true, + auto_delete => false, + type => <<"direct">>, + internal => false, + arguments => []}, + maps:merge(Defaults, M). 
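To make the request body format more concrete, a PUT /queues/:name declaring a durable quorum queue could carry an AMQP map like the following (a sketch with hypothetical values, matching what decode_queue/1 above accepts):

%% {map, [{{utf8, <<"durable">>}, true},
%%        {{utf8, <<"exclusive">>}, false},
%%        {{utf8, <<"auto_delete">>}, false},
%%        {{utf8, <<"arguments">>},
%%         {map, [{{utf8, <<"x-queue-type">>}, {utf8, <<"quorum">>}}]}}]}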
+ +decode_binding({map, KVList}) -> + lists:foldl( + fun({{utf8, <<"source">>}, {utf8, V}}, Acc) -> + Acc#{source => V}; + ({{utf8, <<"destination_queue">>}, {utf8, V}}, Acc) -> + Acc#{destination_queue => V}; + ({{utf8, <<"destination_exchange">>}, {utf8, V}}, Acc) -> + Acc#{destination_exchange => V}; + ({{utf8, <<"binding_key">>}, {utf8, V}}, Acc) -> + Acc#{binding_key => V}; + ({{utf8, <<"arguments">>}, Args}, Acc) -> + Acc#{arguments => args_amqp_to_amqpl(Args)}; + (Field, _Acc) -> + throw(<<"400">>, "bad binding field ~tp", [Field]) + end, #{}, KVList). + +encode_bindings(Bindings) -> + Bs = lists:map( + fun(#binding{source = #resource{name = SrcName}, + key = BindingKey, + destination = #resource{kind = DstKind, + name = DstName}, + args = Args091}) -> + DstKindBin = case DstKind of + queue -> <<"queue">>; + exchange -> <<"exchange">> + end, + Args = args_amqpl_to_amqp(Args091), + Location = compose_binding_uri( + SrcName, DstKind, DstName, BindingKey, Args091), + KVList = [ + {{utf8, <<"source">>}, {utf8, SrcName}}, + {{utf8, <<"destination_", DstKindBin/binary>>}, {utf8, DstName}}, + {{utf8, <<"binding_key">>}, {utf8, BindingKey}}, + {{utf8, <<"arguments">>}, Args}, + {{utf8, <<"location">>}, {utf8, Location}} + ], + {map, KVList} + end, Bindings), + {list, Bs}. + +args_amqp_to_amqpl({map, KVList}) -> + lists:map(fun({{T, Key}, TypeVal}) + when T =:= utf8 orelse + T =:= symbol -> + mc_amqpl:to_091(Key, TypeVal); + (Arg) -> + throw(<<"400">>, + "unsupported argument ~tp", + [Arg]) + end, KVList). + +args_amqpl_to_amqp(Args) -> + {map, [{{utf8, K}, mc_amqpl:from_091(T, V)} || {K, T, V} <- Args]}. + +decode_req([], Acc) -> + Acc; +decode_req([#'v1_0.properties'{} = P | Rem], Acc) -> + decode_req(Rem, setelement(1, Acc, P)); +decode_req([#'v1_0.amqp_value'{content = C} | Rem], Acc) -> + decode_req(Rem, setelement(2, Acc, C)); +decode_req([_IgnoreSection | Rem], Acc) -> + decode_req(Rem, Acc). + +parse_uri(Uri) -> + case uri_string:normalize(Uri, [return_map]) of + UriMap = #{path := Path} -> + [<<>> | Segments] = binary:split(Path, <<"/">>, [global]), + QueryMap = case maps:find(query, UriMap) of + {ok, Query} -> + case uri_string:dissect_query(Query) of + QueryList + when is_list(QueryList) -> + maps:from_list(QueryList); + {error, Atom, Term} -> + throw(<<"400">>, + "failed to dissect query '~ts': ~s ~tp", + [Query, Atom, Term]) + end; + error -> + #{} + end, + {Segments, QueryMap}; + {error, Atom, Term} -> + throw(<<"400">>, + "failed to normalize URI '~ts': ~s ~tp", + [Uri, Atom, Term]) + end. + +compose_binding_uri(Src, DstKind, Dst, Key, Args) -> + SrcQ = uri_string:quote(Src), + DstQ = uri_string:quote(Dst), + KeyQ = uri_string:quote(Key), + ArgsHash = args_hash(Args), + DstChar = destination_kind_to_char(DstKind), + <<"/bindings/src=", SrcQ/binary, + ";dst", DstChar, $=, DstQ/binary, + ";key=", KeyQ/binary, + ";args=", ArgsHash/binary>>. + +decode_binding_path_segment(Segment) -> + PersistentTermKey = mp_binding_uri_path_segment, + MP = try persistent_term:get(PersistentTermKey) + catch error:badarg -> + %% This regex matches for example binding: + %% src=e1;dstq=q2;key=my-key;args= + %% Source, destination, and binding key values must be percent encoded. 
+                 %% Binding args use the URL safe Base 64 Alphabet:
+                 %% https://datatracker.ietf.org/doc/html/rfc4648#section-5
+                 {ok, MP0} = re:compile(
+                               <<"^src=([0-9A-Za-z\-.\_\~%]+);dst([eq])=([0-9A-Za-z\-.\_\~%]+);",
+                                 "key=([0-9A-Za-z\-.\_\~%]*);args=([0-9A-Za-z\-\_]*)$">>),
+                 ok = persistent_term:put(PersistentTermKey, MP0),
+                 MP0
+         end,
+    case re:run(Segment, MP, [{capture, all_but_first, binary}]) of
+        {match, [SrcQ, <<DstKindChar>>, DstQ, KeyQ, ArgsHash]} ->
+            Src = rabbit_uri:urldecode(SrcQ),
+            Dst = rabbit_uri:urldecode(DstQ),
+            Key = rabbit_uri:urldecode(KeyQ),
+            DstKind = destination_char_to_kind(DstKindChar),
+            {Src, DstKind, Dst, Key, ArgsHash};
+        nomatch ->
+            throw(<<"400">>, "bad binding path segment '~s'", [Segment])
+    end.
+
+destination_kind_to_char(exchange) -> $e;
+destination_kind_to_char(queue) -> $q.
+
+destination_char_to_kind($e) -> exchange;
+destination_char_to_kind($q) -> queue.
+
+search_binding(BindingKey, ArgsHash, Bindings) ->
+    lists:search(fun(#binding{key = Key,
+                              args = Args})
+                       when Key =:= BindingKey ->
+                         args_hash(Args) =:= ArgsHash;
+                    (_) ->
+                         false
+                 end, Bindings).
+
+-spec args_hash(rabbit_framing:amqp_table()) -> binary().
+args_hash([]) ->
+    <<>>;
+args_hash(Args)
+  when is_list(Args) ->
+    %% Args is already sorted.
+    Bin = <<(erlang:phash2(Args, 1 bsl 32)):32>>,
+    base64:encode(Bin, #{mode => urlsafe,
+                         padding => false}).
+
+-spec binding_checks(rabbit_types:exchange_name(),
+                     rabbit_types:r(exchange | queue),
+                     rabbit_types:binding_key(),
+                     rabbit_types:user(),
+                     permission_caches()) ->
+    permission_caches().
+binding_checks(SrcXName, DstName, BindingKey, User, {PermCache0, TopicPermCache0}) ->
+    lists:foreach(fun(#resource{name = NameBin} = Name) ->
+                          ok = prohibit_default_exchange(Name),
+                          ok = prohibit_cr_lf(NameBin)
+                  end, [SrcXName, DstName]),
+    PermCache1 = check_resource_access(DstName, write, User, PermCache0),
+    PermCache = check_resource_access(SrcXName, read, User, PermCache1),
+    TopicPermCache = case rabbit_exchange:lookup(SrcXName) of
+                         {ok, SrcX} ->
+                             check_read_permitted_on_topic(
+                               SrcX, User, BindingKey, TopicPermCache0);
+                         {error, not_found} ->
+                             TopicPermCache0
+                     end,
+    {PermCache, TopicPermCache}.
+
+binding_action(Action, Binding, Username, ConnPid) ->
+    try rabbit_channel:binding_action(Action, Binding, Username, ConnPid)
+    catch exit:#amqp_error{explanation = Explanation} ->
+              throw(<<"400">>, Explanation, [])
+    end.
+
+purge_or_delete_queue_response(NumMsgs) ->
+    {map, [{{utf8, <<"message_count">>}, {ulong, NumMsgs}}]}.
+
+prohibit_cr_lf(NameBin) ->
+    case binary:match(NameBin, [<<"\n">>, <<"\r">>]) of
+        nomatch ->
+            ok;
+        _Found ->
+            throw(<<"400">>,
+                  <<"Bad name '~ts': line feed and carriage return characters not allowed">>,
+                  [NameBin])
+    end.
+
+prohibit_default_exchange(#resource{kind = exchange,
+                                    name = <<"">>}) ->
+    throw(<<"403">>, <<"operation not permitted on the default exchange">>, []);
+prohibit_default_exchange(_) ->
+    ok.
+
+-spec prohibit_reserved_amq(rabbit_types:r(exchange | queue)) -> ok.
+prohibit_reserved_amq(Res = #resource{name = <<"amq.", _/binary>>}) ->
+    throw(<<"403">>,
+          "~ts starts with reserved prefix 'amq.'",
+          [rabbit_misc:rs(Res)]);
+prohibit_reserved_amq(#resource{}) ->
+    ok.
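An editor-added round-trip example (not part of the diff) for the two URI helpers above, using the same sample values as the regex comment. The helpers are module-internal, so this is only illustrative; with an empty argument list, args_hash/1 returns the empty binary and the args= part stays empty.

    compose_binding_uri(<<"e1">>, queue, <<"q2">>, <<"my-key">>, []).
    %% => <<"/bindings/src=e1;dstq=q2;key=my-key;args=">>

    decode_binding_path_segment(<<"src=e1;dstq=q2;key=my-key;args=">>).
    %% => {<<"e1">>, queue, <<"q2">>, <<"my-key">>, <<>>}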
+ +check_dead_letter_exchange(QName = #resource{virtual_host = Vhost}, QArgs, User, PermCache0) -> + case rabbit_misc:r_arg(Vhost, exchange, QArgs, ?DEAD_LETTER_EXCHANGE_KEY) of + undefined -> + PermCache0; + {error, {invalid_type, Type}} -> + throw(<<"400">>, + "invalid type '~ts' for arg '~s'", + [Type, ?DEAD_LETTER_EXCHANGE_KEY]); + DLX -> + PermCache = check_resource_access(QName, read, User, PermCache0), + check_resource_access(DLX, write, User, PermCache) + end. + +-spec absent(amqqueue:amqqueue(), + rabbit_amqqueue:absent_reason()) -> + no_return(). +absent(Queue, Reason) -> + {'EXIT', + #amqp_error{explanation = Explanation} + } = catch rabbit_amqqueue:absent(Queue, Reason), + throw(<<"400">>, Explanation, []). + +-spec throw(binary(), io:format(), [term()]) -> no_return(). +throw(StatusCode, Format, Data) -> + Reason0 = lists:flatten(io_lib:format(Format, Data)), + Reason = unicode:characters_to_binary(Reason0), + throw({?MODULE, StatusCode, Reason}). diff --git a/deps/rabbit/src/rabbit_amqp_reader.erl b/deps/rabbit/src/rabbit_amqp_reader.erl new file mode 100644 index 000000000000..2903e7d654c5 --- /dev/null +++ b/deps/rabbit/src/rabbit_amqp_reader.erl @@ -0,0 +1,1014 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_amqp_reader). + +-include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("amqp10_common/include/amqp10_types.hrl"). +-include("rabbit_amqp.hrl"). + +-export([init/1, + info/2, + mainloop/2]). + +-export([system_continue/3, + system_terminate/4, + system_code_change/4]). + +-import(rabbit_amqp_util, [protocol_error/3]). + +%% same values as in rabbit_reader +-define(NORMAL_TIMEOUT, 3_000). +-define(CLOSING_TIMEOUT, 30_000). +-define(SILENT_CLOSE_DELAY, 3_000). + +%% Allow for potentially large sets of tokens during the SASL exchange. +%% https://docs.oasis-open.org/amqp/amqp-cbs/v1.0/csd01/amqp-cbs-v1.0-csd01.html#_Toc67999915 +-define(INITIAL_MAX_FRAME_SIZE, 8192). + +-type protocol() :: amqp | sasl. +-type channel_number() :: non_neg_integer(). + +-record(v1_connection, + {name :: binary(), + vhost :: none | rabbit_types:vhost(), + %% server host + host :: inet:ip_address() | inet:hostname(), + %% client host + peer_host :: inet:ip_address() | inet:hostname(), + %% server port + port :: inet:port_number(), + %% client port + peer_port :: inet:port_number(), + connected_at :: integer(), + user :: unauthenticated | rabbit_types:user(), + timeout :: non_neg_integer(), + incoming_max_frame_size :: pos_integer(), + outgoing_max_frame_size :: unlimited | pos_integer(), + channel_max :: non_neg_integer(), + auth_mechanism :: sasl_init_unprocessed | {binary(), module()}, + auth_state :: term(), + properties :: undefined | {map, list(tuple())} + }). 
+ +-record(v1, + { + parent :: pid(), + helper_sup :: pid(), + writer :: none | pid(), + heartbeater :: none | rabbit_heartbeat:heartbeaters(), + session_sup :: rabbit_types:option(pid()), + sock :: rabbit_net:socket(), + proxy_socket :: undefined | {rabbit_proxy_socket, any(), any()}, + connection :: #v1_connection{}, + connection_state :: received_amqp3100 | waiting_sasl_init | securing | + waiting_amqp0100 | waiting_open | running | + closing | closed, + callback :: handshake | + {frame_header, protocol()} | + {frame_body, protocol(), DataOffset :: pos_integer(), channel_number()}, + recv_len :: non_neg_integer(), + pending_recv :: boolean(), + buf :: list(), + buf_len :: non_neg_integer(), + tracked_channels :: #{channel_number() => Session :: pid()} + }). + +-type state() :: #v1{}. + +-define(IS_RUNNING(State), State#v1.connection_state =:= running). + +%%-------------------------------------------------------------------------- + +unpack_from_0_9_1( + {Sock, PendingRecv, SupPid, Buf, BufLen, ProxySocket, + ConnectionName, Host, PeerHost, Port, PeerPort, ConnectedAt}, + Parent) -> + logger:update_process_metadata(#{connection => ConnectionName}), + #v1{parent = Parent, + sock = Sock, + callback = {frame_header, sasl}, + recv_len = 8, + pending_recv = PendingRecv, + heartbeater = none, + helper_sup = SupPid, + buf = Buf, + buf_len = BufLen, + proxy_socket = ProxySocket, + tracked_channels = maps:new(), + writer = none, + connection_state = received_amqp3100, + connection = #v1_connection{ + name = ConnectionName, + vhost = none, + host = Host, + peer_host = PeerHost, + port = Port, + peer_port = PeerPort, + connected_at = ConnectedAt, + user = unauthenticated, + timeout = ?NORMAL_TIMEOUT, + incoming_max_frame_size = ?INITIAL_MAX_FRAME_SIZE, + outgoing_max_frame_size = ?INITIAL_MAX_FRAME_SIZE, + channel_max = 0, + auth_mechanism = sasl_init_unprocessed, + auth_state = unauthenticated}}. + +-spec system_continue(pid(), [sys:dbg_opt()], state()) -> no_return() | ok. +system_continue(Parent, Deb, State) -> + ?MODULE:mainloop(Deb, State#v1{parent = Parent}). + +-spec system_terminate(term(), pid(), [sys:dbg_opt()], term()) -> no_return(). +system_terminate(Reason, _Parent, _Deb, _State) -> + exit(Reason). + +-spec system_code_change(term(), module(), undefined | term(), term()) -> {ok, term()}. +system_code_change(Misc, _Module, _OldVsn, _Extra) -> + {ok, Misc}. + +server_properties() -> + Props0 = rabbit_reader:server_properties(amqp_1_0), + Props1 = [{{symbol, K}, {utf8, V}} || {K, longstr, V} <- Props0], + Props = [{{symbol, <<"node">>}, {utf8, atom_to_binary(node())}} | Props1], + {map, Props}. + +%%-------------------------------------------------------------------------- + +inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F). + +recvloop(Deb, State = #v1{pending_recv = true}) -> + mainloop(Deb, State); +recvloop(Deb, State = #v1{sock = Sock, + recv_len = RecvLen, + buf_len = BufLen}) + when BufLen < RecvLen -> + case rabbit_net:setopts(Sock, [{active, once}]) of + ok -> + mainloop(Deb, State#v1{pending_recv = true}); + {error, Reason} -> + throw({inet_error, Reason}) + end; +recvloop(Deb, State0 = #v1{callback = Callback, + recv_len = RecvLen, + buf = Buf, + buf_len = BufLen}) -> + Bin = case Buf of + [B] -> B; + _ -> list_to_binary(lists:reverse(Buf)) + end, + {Data, Rest} = split_binary(Bin, RecvLen), + State1 = State0#v1{buf = [Rest], + buf_len = BufLen - RecvLen}, + State = handle_input(Callback, Data, State1), + recvloop(Deb, State). 
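An editor-added sketch (not part of the diff) of the buffer arithmetic recvloop/2 above relies on: newly received data is prepended to Buf, and once BufLen reaches the RecvLen requested by the current callback, exactly RecvLen bytes are split off and the remainder is carried into the next iteration.

    1> Buf = [<<5,6,7>>, <<1,2,3,4>>], RecvLen = 4.    % <<1,2,3,4>> arrived first
    2> Bin = list_to_binary(lists:reverse(Buf)).
    <<1,2,3,4,5,6,7>>
    3> {Data, Rest} = split_binary(Bin, RecvLen).
    {<<1,2,3,4>>,<<5,6,7>>}
    %% Data goes to handle_input/3; Rest becomes the new single-element Buf
    %% with BufLen - RecvLen = 3 bytes still pending.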
+ +-spec mainloop([sys:dbg_opt()], state()) -> + no_return() | ok. +mainloop(Deb, State = #v1{sock = Sock, buf = Buf, buf_len = BufLen}) -> + case rabbit_net:recv(Sock) of + {data, Data} -> + recvloop(Deb, State#v1{buf = [Data | Buf], + buf_len = BufLen + size(Data), + pending_recv = false}); + closed when State#v1.connection_state =:= closed -> + ok; + closed -> + throw(connection_closed_abruptly); + {error, Reason} -> + throw({inet_error, Reason}); + {other, {system, From, Request}} -> + sys:handle_system_msg(Request, From, State#v1.parent, + ?MODULE, Deb, State); + {other, Other} -> + case handle_other(Other, State) of + stop -> ok; + NewState -> recvloop(Deb, NewState) + end + end. + +handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) -> + ReasonString = rabbit_misc:format("broker forced connection closure with reason '~w'", + [Reason]), + _ = terminate(ReasonString, State), + %% this is what we are expected to do according to + %% http://www.erlang.org/doc/man/sys.html + %% + %% If we wanted to be *really* nice we should wait for a while for + %% clients to close the socket at their end, just as we do in the + %% ordinary error case. However, since this termination is + %% initiated by our parent it is probably more important to exit + %% quickly. + exit(Reason); +handle_other({{'DOWN', ChannelNum}, _MRef, process, SessionPid, Reason}, State) -> + handle_session_exit(ChannelNum, SessionPid, Reason, State); +handle_other(handshake_timeout, State = #v1{connection_state = ConnState}) + when ConnState =:= running orelse + ConnState =:= closing orelse + ConnState =:= closed -> + State; +handle_other(handshake_timeout, State) -> + throw({handshake_timeout, State#v1.callback}); +handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) -> + State; +handle_other(heartbeat_timeout, State) -> + Error = error_frame(?V_1_0_AMQP_ERROR_RESOURCE_LIMIT_EXCEEDED, + "no frame received from client within idle timeout threshold", []), + handle_exception(State, 0, Error); +handle_other({'$gen_call', From, {shutdown, Explanation}}, + State = #v1{connection = #v1_connection{properties = Properties}}) -> + Ret = case Explanation =:= "Node was put into maintenance mode" andalso + ignore_maintenance(Properties) of + true -> State; + false -> terminate(Explanation, State) + end, + gen_server:reply(From, ok), + Ret; +handle_other({'$gen_call', From, {info, Items}}, State) -> + Reply = try infos(Items, State) of + Infos -> + {ok, Infos} + catch Error -> + {error, Error} + end, + gen_server:reply(From, Reply), + State; +handle_other({'$gen_cast', {force_event_refresh, _Ref}}, State) -> + State; +handle_other(terminate_connection, _State) -> + stop; +handle_other(credential_expired, State) -> + Error = error_frame(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, "credential expired", []), + handle_exception(State, 0, Error); +handle_other(Other, _State) -> + %% internal error -> something worth dying for + exit({unexpected_message, Other}). + +switch_callback(State, Callback, Length) -> + State#v1{callback = Callback, + recv_len = Length}. + +terminate(Reason, State) + when ?IS_RUNNING(State) -> + handle_exception(State, 0, + error_frame(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Connection forced: ~tp", [Reason])); +terminate(_, _) -> + stop. 
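A brief editor-added note (not part of the diff): the {'DOWN', ChannelNum} tuple matched in handle_other/2 above comes from a tagged monitor; track_channel/3 further below creates it with erlang:monitor(process, SessionPid, [{tag, {'DOWN', ChannelNum}}]), so the channel number travels inside the 'DOWN' message and no reverse lookup is needed. A minimal shell sketch of the mechanism:

    1> Pid = spawn(fun() -> receive stop -> ok end end).
    2> erlang:monitor(process, Pid, [{tag, {'DOWN', 7}}]).
    3> Pid ! stop.
    4> flush().
    %% Shell got {{'DOWN',7}, MonitorRef, process, Pid, normal}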
+ +%%-------------------------------------------------------------------------- +%% error handling / termination + +close(Error, State = #v1{sock = Sock, + connection = #v1_connection{timeout = Timeout}}) -> + %% Client properties will be emitted in the connection_closed event by rabbit_reader. + ClientProperties = i(client_properties, State), + put(client_properties, ClientProperties), + Time = case Timeout > 0 andalso + Timeout < ?CLOSING_TIMEOUT of + true -> Timeout; + false -> ?CLOSING_TIMEOUT + end, + _TRef = erlang:send_after(Time, self(), terminate_connection), + ok = send_on_channel0(Sock, #'v1_0.close'{error = Error}), + State#v1{connection_state = closed}. + +handle_session_exit(ChannelNum, SessionPid, Reason, State0) -> + State = untrack_channel(ChannelNum, SessionPid, State0), + S = case terminated_normally(Reason) of + true -> + State; + false -> + R = case Reason of + {RealReason, Trace} -> + error_frame(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Session error: ~tp~n~tp", + [RealReason, Trace]); + _ -> + error_frame(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Session error: ~tp", + [Reason]) + end, + handle_exception(State, SessionPid, R) + end, + maybe_close(S). + +terminated_normally(normal) -> + true; +terminated_normally(shutdown) -> + true; +terminated_normally({shutdown, _Term}) -> + true; +terminated_normally(_Reason) -> + false. + +maybe_close(State = #v1{connection_state = closing}) -> + close(undefined, State); +maybe_close(State) -> + State. + +error_frame(Condition, Fmt, Args) -> + Description = list_to_binary(rabbit_misc:format(Fmt, Args)), + #'v1_0.error'{condition = Condition, + description = {utf8, Description}}. + +handle_exception(State = #v1{connection_state = closed}, Channel, + #'v1_0.error'{description = {utf8, Desc}}) -> + rabbit_log_connection:error( + "Error on AMQP 1.0 connection ~tp (~tp), channel ~tp:~n~tp", + [self(), closed, Channel, Desc]), + State; +handle_exception(State = #v1{connection_state = CS}, Channel, + Error = #'v1_0.error'{description = {utf8, Desc}}) + when ?IS_RUNNING(State) orelse CS =:= closing -> + rabbit_log_connection:error( + "Error on AMQP 1.0 connection ~tp (~tp), channel ~tp:~n~tp", + [self(), CS, Channel, Desc]), + close(Error, State); +handle_exception(State, Channel, Error) -> + silent_close_delay(), + throw({handshake_error, State#v1.connection_state, Channel, Error}). + +is_connection_frame(#'v1_0.open'{}) -> true; +is_connection_frame(#'v1_0.close'{}) -> true; +is_connection_frame(_) -> false. + +handle_frame(Mode, Channel, Body, State) -> + try + handle_frame0(Mode, Channel, Body, State) + catch + _:#'v1_0.error'{} = Reason -> + handle_exception(State, 0, Reason); + _:{error, {not_allowed, Username}} -> + %% section 2.8.15 in http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-complete-v1.0-os.pdf + handle_exception(State, 0, error_frame( + ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + "Access for user '~ts' was refused: insufficient permissions", + [Username])); + _:Reason:Trace -> + handle_exception(State, 0, error_frame( + ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Reader error: ~tp~n~tp", + [Reason, Trace])) + end. + +%% Nothing specifies that connection methods have to be on a particular channel. 
+handle_frame0(_Mode, Channel, Body, + State = #v1{connection_state = CS}) + when CS =:= closing orelse + CS =:= closed -> + Performative = parse_frame_body(Body, Channel), + case is_connection_frame(Performative) of + true -> handle_connection_frame(Performative, State); + false -> State + end; +handle_frame0(Mode, Channel, Body, State) -> + Performative = parse_frame_body(Body, Channel), + case {Mode, is_connection_frame(Performative)} of + {amqp, true} -> handle_connection_frame(Performative, State); + {amqp, false} -> handle_session_frame(Channel, Performative, State); + {sasl, false} -> handle_sasl_frame(Performative, State) + end. + +%% "The frame body is defined as a performative followed by an opaque payload." [2.3.2] +parse_frame_body(Body, _Channel) -> + BytesBody = size(Body), + {DescribedPerformative, BytesParsed} = amqp10_binary_parser:parse(Body), + Performative = amqp10_framing:decode(DescribedPerformative), + if BytesParsed < BytesBody -> + Payload = binary_part(Body, BytesParsed, BytesBody - BytesParsed), + ?TRACE("channel ~b ->~n ~tp~n followed by ~tb bytes of payload", + [_Channel, amqp10_framing:pprint(Performative), iolist_size(Payload)]), + {Performative, Payload}; + BytesParsed =:= BytesBody -> + ?TRACE("channel ~b ->~n ~tp", + [_Channel, amqp10_framing:pprint(Performative)]), + Performative + end. + +handle_connection_frame( + #'v1_0.open'{container_id = {utf8, ContainerId}, + max_frame_size = ClientMaxFrame, + channel_max = ClientChannelMax, + idle_time_out = IdleTimeout, + hostname = Hostname, + properties = Properties}, + #v1{connection_state = waiting_open, + connection = Connection = #v1_connection{ + name = ConnectionName, + user = User = #user{username = Username}, + auth_mechanism = {Mechanism, _Mod} + }, + helper_sup = HelperSupPid, + sock = Sock} = State0) -> + logger:update_process_metadata(#{amqp_container => ContainerId}), + Vhost = vhost(Hostname), + ok = check_user_loopback(State0), + ok = check_vhost_exists(Vhost, State0), + ok = check_vhost_alive(Vhost), + ok = rabbit_access_control:check_vhost_access(User, Vhost, {socket, Sock}, #{}), + ok = check_vhost_connection_limit(Vhost, Username), + ok = check_user_connection_limit(Username), + ok = ensure_credential_expiry_timer(User), + rabbit_core_metrics:auth_attempt_succeeded(<<>>, Username, amqp10), + notify_auth(user_authentication_success, Username, State0), + rabbit_log_connection:info( + "Connection from AMQP 1.0 container '~ts': user '~ts' authenticated " + "using SASL mechanism ~s and granted access to vhost '~ts'", + [ContainerId, Username, Mechanism, Vhost]), + + OutgoingMaxFrameSize = case ClientMaxFrame of + undefined -> + unlimited; + {uint, Bytes} + when Bytes >= ?MIN_MAX_FRAME_1_0_SIZE -> + Bytes; + {uint, Bytes} -> + protocol_error( + ?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL, + "max_frame_size (~w) < minimum maximum frame size (~w)", + [Bytes, ?MIN_MAX_FRAME_1_0_SIZE]) + end, + SendTimeoutSec = + case IdleTimeout of + undefined -> + 0; + {uint, Interval} -> + if Interval =:= 0 -> + 0; + Interval < 1000 -> + %% "If a peer can not, for any reason support a proposed idle timeout, then it SHOULD + %% close the connection using a close frame with an error explaining why. There is no + %% requirement for peers to support arbitrarily short or long idle timeouts." [2.4.5] + %% rabbit_heartbeat does not want to support sub-second timeouts. 
+ protocol_error( + ?V_1_0_AMQP_ERROR_NOT_ALLOWED, + "idle-time-out (~b ms) < minimum idle-time-out (1000 ms)", + [Interval]); + Interval >= 1000 -> + Interval div 1000 + end + end, + {ok, ReceiveTimeoutSec} = application:get_env(rabbit, heartbeat), + ReceiveTimeoutMillis = ReceiveTimeoutSec * 1000, + SendFun = fun() -> + Frame = amqp10_binary_generator:build_heartbeat_frame(), + catch rabbit_net:send(Sock, Frame) + end, + Parent = self(), + ReceiveFun = fun() -> Parent ! heartbeat_timeout end, + %% TODO: only start heartbeat receive timer at next next frame + Heartbeater = rabbit_heartbeat:start( + HelperSupPid, Sock, ConnectionName, + SendTimeoutSec, SendFun, + ReceiveTimeoutSec, ReceiveFun), + {ok, IncomingMaxFrameSize} = application:get_env(rabbit, frame_max), + %% TODO enforce channel_max + ChannelMax = case ClientChannelMax of + undefined -> + %% default as per 2.7.1 + 16#ff_ff; + {ushort, N} -> + N + end, + State1 = State0#v1{connection_state = running, + connection = Connection#v1_connection{ + vhost = Vhost, + incoming_max_frame_size = IncomingMaxFrameSize, + outgoing_max_frame_size = OutgoingMaxFrameSize, + channel_max = ChannelMax, + properties = Properties, + timeout = ReceiveTimeoutMillis}, + heartbeater = Heartbeater}, + State = start_writer(State1), + HostnameVal = case Hostname of + undefined -> undefined; + null -> undefined; + {utf8, Val} -> Val + end, + rabbit_log:debug( + "AMQP 1.0 connection.open frame: hostname = ~ts, extracted vhost = ~ts, idle-time-out = ~p", + [HostnameVal, Vhost, IdleTimeout]), + + Infos = infos(?CONNECTION_EVENT_KEYS, State), + ok = rabbit_core_metrics:connection_created( + proplists:get_value(pid, Infos), + Infos), + ok = rabbit_event:notify(connection_created, Infos), + ok = rabbit_amqp1_0:register_connection(self()), + Caps = [%% https://docs.oasis-open.org/amqp/linkpair/v1.0/cs01/linkpair-v1.0-cs01.html#_Toc51331306 + {symbol, <<"LINK_PAIR_V1_0">>}, + %% https://docs.oasis-open.org/amqp/anonterm/v1.0/cs01/anonterm-v1.0-cs01.html#doc-anonymous-relay + {symbol, <<"ANONYMOUS-RELAY">>}], + Open = #'v1_0.open'{ + channel_max = ClientChannelMax, + max_frame_size = {uint, IncomingMaxFrameSize}, + %% "the value in idle-time-out SHOULD be half the peer's actual timeout threshold" [2.4.5] + idle_time_out = {uint, ReceiveTimeoutMillis div 2}, + container_id = {utf8, rabbit_nodes:cluster_name()}, + offered_capabilities = {array, symbol, Caps}, + properties = server_properties()}, + ok = send_on_channel0(Sock, Open), + State; +handle_connection_frame(#'v1_0.close'{}, State0) -> + State = State0#v1{connection_state = closing}, + close(undefined, State). + +start_writer(#v1{helper_sup = SupPid, + sock = Sock} = State) -> + ChildSpec = #{id => writer, + start => {rabbit_amqp_writer, start_link, [Sock, self()]}, + restart => transient, + significant => true, + shutdown => ?WORKER_WAIT, + type => worker + }, + {ok, Pid} = supervisor:start_child(SupPid, ChildSpec), + State#v1{writer = Pid}. + +handle_session_frame(Channel, Body, #v1{tracked_channels = Channels} = State) -> + case Channels of + #{Channel := SessionPid} -> + rabbit_amqp_session:process_frame(SessionPid, Body), + State; + _ -> + case ?IS_RUNNING(State) of + true -> + case Body of + #'v1_0.begin'{} -> + send_to_new_session(Channel, Body, State); + _ -> + State + end; + false -> + throw({channel_frame_while_connection_not_running, + Channel, + State#v1.connection_state, + Body}) + end + end. 
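An editor-added worked example (not part of the diff) of the idle-timeout negotiation in handle_connection_frame/2 above, assuming the client proposes 60000 ms and the broker's rabbit heartbeat setting is at its usual 60 second default:

    %% SendTimeoutSec       = 60000 div 1000 = 60      (drives our heartbeat sender)
    %% ReceiveTimeoutSec    = 60, ReceiveTimeoutMillis = 60000
    %% idle_time_out advertised back in the open reply = 60000 div 2 = 30000 ms
    %% A proposal below 1000 ms is rejected with ?V_1_0_AMQP_ERROR_NOT_ALLOWED, as coded above.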
+
+handle_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, Mechanism},
+                                    initial_response = Response,
+                                    hostname = _},
+                  State0 = #v1{connection_state = waiting_sasl_init,
+                               connection = Connection,
+                               sock = Sock}) ->
+    ResponseBin = case Response of
+                      undefined -> <<>>;
+                      {binary, Bin} -> Bin
+                  end,
+    AuthMechanism = auth_mechanism_to_module(Mechanism, Sock),
+    AuthState = AuthMechanism:init(Sock),
+    State = State0#v1{
+              connection = Connection#v1_connection{
+                             auth_mechanism = {Mechanism, AuthMechanism},
+                             auth_state = AuthState},
+              connection_state = securing},
+    auth_phase(ResponseBin, State);
+handle_sasl_frame(#'v1_0.sasl_response'{response = {binary, Response}},
+                  State = #v1{connection_state = securing}) ->
+    auth_phase(Response, State);
+handle_sasl_frame(Performative, State) ->
+    throw({unexpected_1_0_sasl_frame, Performative, State}).
+
+handle_input(handshake,
+             <<"AMQP",0,1,0,0>>,
+             #v1{connection_state = waiting_amqp0100,
+                 sock = Sock,
+                 connection = #v1_connection{user = #user{}},
+                 helper_sup = HelperSup
+                } = State0) ->
+    %% At this point, client already got successfully authenticated by SASL.
+    send_handshake(Sock, <<"AMQP",0,1,0,0>>),
+    ChildSpec = #{id => session_sup,
+                  start => {rabbit_amqp_session_sup, start_link, [self()]},
+                  restart => transient,
+                  significant => true,
+                  shutdown => infinity,
+                  type => supervisor},
+    {ok, SessionSupPid} = supervisor:start_child(HelperSup, ChildSpec),
+    State = State0#v1{
+              session_sup = SessionSupPid,
+              %% "After establishing or accepting a TCP connection and sending
+              %% the protocol header, each peer MUST send an open frame before
+              %% sending any other frames." [2.4.1]
+              connection_state = waiting_open},
+    switch_callback(State, {frame_header, amqp}, 8);
+handle_input({frame_header, Mode},
+             Header = <<Size:32, DOff:8, Type:8, Channel:16>>,
+             State) when DOff >= 2 ->
+    case {Mode, Type} of
+        {amqp, 0} -> ok;
+        {sasl, 1} -> ok;
+        _ -> throw({bad_1_0_header_type, Header, Mode})
+    end,
+    MaxFrameSize = State#v1.connection#v1_connection.incoming_max_frame_size,
+    if Size =:= 8 ->
+           %% heartbeat
+           State;
+       Size > MaxFrameSize ->
+           handle_exception(
+             State, Channel, error_frame(
+                               ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR,
+                               "frame size (~b bytes) > maximum frame size (~b bytes)",
+                               [Size, MaxFrameSize]));
+       true ->
+           switch_callback(State, {frame_body, Mode, DOff, Channel}, Size - 8)
+    end;
+handle_input({frame_header, _Mode}, Malformed, _State) ->
+    throw({bad_1_0_header, Malformed});
+handle_input({frame_body, Mode, DOff, Channel},
+             FrameBin,
+             State) ->
+    %% Figure 2.16
+    %% DOff = 4-byte words minus 8 bytes we've already read
+    ExtendedHeaderSize = (DOff * 32 - 64),
+    <<_IgnoreExtendedHeader:ExtendedHeaderSize, FrameBody/binary>> = FrameBin,
+    handle_frame(Mode, Channel, FrameBody,
+                 switch_callback(State, {frame_header, Mode}, 8));
+
+handle_input(Callback, Data, _State) ->
+    throw({bad_input, Callback, Data}).
+
+-spec init(tuple()) -> no_return().
+init(PackedState) ->
+    {parent, Parent} = erlang:process_info(self(), parent),
+    ok = rabbit_connection_sup:remove_connection_helper_sup(Parent, helper_sup_amqp_091),
+    State0 = unpack_from_0_9_1(PackedState, Parent),
+    State = advertise_sasl_mechanism(State0),
+    %% By invoking recvloop here we become 1.0.
+    recvloop(sys:debug_options([]), State).
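An editor-added worked example (not part of the diff) of the 8-byte frame header matched above; the values are invented. DOff counts 4-byte words, so the extended header skipped by the frame_body clause is DOff * 4 - 8 bytes, and a Size of exactly 8 is treated as a heartbeat.

    1> Header = <<16:32, 2:8, 0:8, 5:16>>.   % 16-byte AMQP frame, doff 2, channel 5
    2> <<Size:32, DOff:8, Type:8, Channel:16>> = Header.
    3> {Size, DOff, Type, Channel}.
    {16,2,0,5}
    %% Extended header size: DOff * 4 - 8 = 0 bytes; the remaining Size - 8 = 8 bytes are the frame body.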
+
+advertise_sasl_mechanism(State0 = #v1{connection_state = received_amqp3100,
+                                      sock = Sock}) ->
+    send_handshake(Sock, <<"AMQP",3,1,0,0>>),
+    Ms0 = [{symbol, atom_to_binary(M)} || M <- auth_mechanisms(Sock)],
+    Ms1 = {array, symbol, Ms0},
+    Ms = #'v1_0.sasl_mechanisms'{sasl_server_mechanisms = Ms1},
+    ok = send_on_channel0(Sock, Ms, rabbit_amqp_sasl),
+    State = State0#v1{connection_state = waiting_sasl_init},
+    switch_callback(State, {frame_header, sasl}, 8).
+
+send_handshake(Sock, Handshake) ->
+    ok = inet_op(fun () -> rabbit_net:send(Sock, Handshake) end).
+
+send_on_channel0(Sock, Method) ->
+    send_on_channel0(Sock, Method, amqp10_framing).
+
+send_on_channel0(Sock, Method, Framing) ->
+    ok = rabbit_amqp_writer:internal_send_command(Sock, Method, Framing).
+
+%% End 1-0
+
+auth_mechanism_to_module(TypeBin, Sock) ->
+    case rabbit_registry:binary_to_type(TypeBin) of
+        {error, not_found} ->
+            protocol_error(?V_1_0_AMQP_ERROR_NOT_FOUND,
+                           "unknown authentication mechanism '~ts'", [TypeBin]);
+        T ->
+            case {lists:member(T, auth_mechanisms(Sock)),
+                  rabbit_registry:lookup_module(auth_mechanism, T)} of
+                {true, {ok, Module}} ->
+                    Module;
+                _ ->
+                    protocol_error(?V_1_0_AMQP_ERROR_NOT_FOUND,
+                                   "invalid authentication mechanism '~ts'", [T])
+            end
+    end.
+
+%% Returns mechanisms ordered in decreasing level of preference (as configured).
+auth_mechanisms(Sock) ->
+    {ok, ConfiguredMechs} = application:get_env(rabbit, auth_mechanisms),
+    RegisteredMechs = rabbit_registry:lookup_all(auth_mechanism),
+    lists:filter(
+      fun(Mech) ->
+              case proplists:lookup(Mech, RegisteredMechs) of
+                  {Mech, Mod} ->
+                      Mod:should_offer(Sock);
+                  none ->
+                      false
+              end
+      end, ConfiguredMechs).
+
+auth_phase(
+  Response,
+  State = #v1{sock = Sock,
+              connection = Conn = #v1_connection{auth_mechanism = {Name, AuthMechanism},
+                                                 auth_state = AuthState}}) ->
+    case AuthMechanism:handle_response(Response, AuthState) of
+        {refused, Username, Msg, Args} ->
+            %% We don't trust the client at this point - force them to wait
+            %% for a bit before sending the sasl outcome frame
+            %% so they can't DOS us with repeated failed logins etc.
+            auth_fail(Username, State),
+            silent_close_delay(),
+            Outcome = #'v1_0.sasl_outcome'{code = ?V_1_0_SASL_CODE_AUTH},
+            ok = send_on_channel0(Sock, Outcome, rabbit_amqp_sasl),
+            protocol_error(
+              ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, "~ts login refused: ~ts",
+              [Name, io_lib:format(Msg, Args)]);
+        {protocol_error, Msg, Args} ->
+            auth_fail(none, State),
+            protocol_error(?V_1_0_AMQP_ERROR_DECODE_ERROR, Msg, Args);
+        {challenge, Challenge, AuthState1} ->
+            Rsp = #'v1_0.sasl_challenge'{challenge = {binary, Challenge}},
+            ok = send_on_channel0(Sock, Rsp, rabbit_amqp_sasl),
+            State1 = State#v1{connection = Conn#v1_connection{auth_state = AuthState1}},
+            switch_callback(State1, {frame_header, sasl}, 8);
+        {ok, User} ->
+            Outcome = #'v1_0.sasl_outcome'{code = ?V_1_0_SASL_CODE_OK},
+            ok = send_on_channel0(Sock, Outcome, rabbit_amqp_sasl),
+            State1 = State#v1{connection_state = waiting_amqp0100,
+                              connection = Conn#v1_connection{user = User,
+                                                              auth_state = authenticated}},
+            switch_callback(State1, handshake, 8)
+    end.
+
+auth_fail(Username, State) ->
+    rabbit_core_metrics:auth_attempt_failed(<<>>, Username, amqp10),
+    notify_auth(user_authentication_failure, Username, State).
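An editor-added summary (not part of the diff) of the normal, error-free connection_state progression implemented by the functions above and by handle_connection_frame/2 further up:

    %% received_amqp3100 --advertise_sasl_mechanism/1-------------------> waiting_sasl_init
    %% waiting_sasl_init --handle_sasl_frame(#'v1_0.sasl_init'{})-------> securing
    %% securing          --auth_phase/2 returning {ok, User}------------> waiting_amqp0100
    %% waiting_amqp0100  --handle_input(handshake, <<"AMQP",0,1,0,0>>)--> waiting_open
    %% waiting_open      --handle_connection_frame(#'v1_0.open'{})------> running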
+ +notify_auth(EventType, Username, State) -> + Name = case Username of + none -> []; + _ -> [{name, Username}] + end, + AuthEventItems = lists:filtermap( + fun(Item = name) -> + {true, {connection_name, i(Item, State)}}; + (Item) -> + case i(Item, State) of + '' -> false; + Val -> {true, {Item, Val}} + end + end, ?AUTH_EVENT_KEYS), + EventProps = Name ++ AuthEventItems, + rabbit_event:notify(EventType, EventProps). + +track_channel(ChannelNum, SessionPid, #v1{tracked_channels = Channels} = State) -> + rabbit_log:debug("AMQP 1.0 created session process ~p for channel number ~b", + [SessionPid, ChannelNum]), + _Ref = erlang:monitor(process, SessionPid, [{tag, {'DOWN', ChannelNum}}]), + State#v1{tracked_channels = maps:put(ChannelNum, SessionPid, Channels)}. + +untrack_channel(ChannelNum, SessionPid, #v1{tracked_channels = Channels0} = State) -> + case maps:take(ChannelNum, Channels0) of + {SessionPid, Channels} -> + rabbit_log:debug("AMQP 1.0 closed session process ~p with channel number ~b", + [SessionPid, ChannelNum]), + State#v1{tracked_channels = Channels}; + _ -> + State + end. + +send_to_new_session( + ChannelNum, BeginFrame, + #v1{session_sup = SessionSup, + connection = #v1_connection{outgoing_max_frame_size = MaxFrame, + vhost = Vhost, + user = User, + name = ConnName}, + writer = WriterPid} = State) -> + %% Subtract fixed frame header size. + OutgoingMaxFrameSize = case MaxFrame of + unlimited -> unlimited; + _ -> MaxFrame - 8 + end, + ChildArgs = [WriterPid, + ChannelNum, + OutgoingMaxFrameSize, + User, + Vhost, + ConnName, + BeginFrame], + case rabbit_amqp_session_sup:start_session(SessionSup, ChildArgs) of + {ok, SessionPid} -> + track_channel(ChannelNum, SessionPid, State); + {error, _} = E -> + throw(E) + end. + +vhost({utf8, <<"vhost:", VHost/binary>>}) -> + VHost; +vhost(_) -> + application:get_env(rabbit, default_vhost, <<"/">>). + +check_user_loopback(#v1{connection = #v1_connection{user = #user{username = Username}}, + sock = Socket} = State) -> + case rabbit_access_control:check_user_loopback(Username, Socket) of + ok -> + ok; + not_allowed -> + auth_fail(Username, State), + protocol_error(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + "user '~ts' can only connect via localhost", + [Username]) + end. + +check_vhost_exists(Vhost, State) -> + case rabbit_vhost:exists(Vhost) of + true -> + ok; + false -> + auth_fail(State#v1.connection#v1_connection.user#user.username, State), + protocol_error(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + "AMQP 1.0 connection failed: virtual host '~s' does not exist", + [Vhost]) + end. + +check_vhost_alive(Vhost) -> + case rabbit_vhost_sup_sup:is_vhost_alive(Vhost) of + true -> + ok; + false -> + protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "AMQP 1.0 connection failed: virtual host '~s' is down", + [Vhost]) + end. + +check_vhost_connection_limit(Vhost, Username) -> + case rabbit_vhost_limit:is_over_connection_limit(Vhost) of + false -> + ok; + {true, Limit} -> + protocol_error( + ?V_1_0_AMQP_ERROR_RESOURCE_LIMIT_EXCEEDED, + "access to vhost '~ts' refused for user '~ts': vhost connection limit (~p) is reached", + [Vhost, Username, Limit]) + end. + +check_user_connection_limit(Username) -> + case rabbit_auth_backend_internal:is_over_connection_limit(Username) of + false -> + ok; + {true, Limit} -> + protocol_error( + ?V_1_0_AMQP_ERROR_RESOURCE_LIMIT_EXCEEDED, + "connection refused for user '~ts': user connection limit (~p) is reached", + [Username, Limit]) + end. + + +%% TODO Provide a means for the client to refresh the credential. 
+%% This could be either via: +%% 1. SASL (if multiple authentications are allowed on the same AMQP 1.0 connection), see +%% https://datatracker.ietf.org/doc/html/rfc4422#section-3.8 , or +%% 2. Claims Based Security (CBS) extension, see https://docs.oasis-open.org/amqp/amqp-cbs/v1.0/csd01/amqp-cbs-v1.0-csd01.html +%% and https://github.com/rabbitmq/rabbitmq-server/issues/9259 +%% 3. Simpler variation of 2. where a token is put to a special /token node. +%% +%% If the user does not refresh their credential on time (the only implementation currently), +%% close the entire connection as we must assume that vhost access could have been revoked. +%% +%% If the user refreshes their credential on time (to be implemented), the AMQP reader should +%% 1. rabbit_access_control:check_vhost_access/4 +%% 2. send a message to all its sessions which should then erase the permission caches and +%% re-check all link permissions (i.e. whether reading / writing to exchanges / queues is still allowed). +%% 3. cancel the current timer, and set a new timer +%% similary as done for Stream connections, see https://github.com/rabbitmq/rabbitmq-server/issues/10292 +ensure_credential_expiry_timer(User) -> + case rabbit_access_control:expiry_timestamp(User) of + never -> + ok; + Ts when is_integer(Ts) -> + Time = (Ts - os:system_time(second)) * 1000, + rabbit_log:debug( + "Credential expires in ~b ms frow now (absolute timestamp = ~b seconds since epoch)", + [Time, Ts]), + case Time > 0 of + true -> + _TimerRef = erlang:send_after(Time, self(), credential_expired), + ok; + false -> + protocol_error(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + "Credential expired ~b ms ago", [abs(Time)]) + end + end. + +%% We don't trust the client at this point - force them to wait +%% for a bit so they can't DOS us with repeated failed logins etc. +silent_close_delay() -> + timer:sleep(?SILENT_CLOSE_DELAY). + +%% This function is deprecated. +%% It could be called in 3.13 / 4.0 mixed version clusters by the old 3.13 CLI command +%% rabbitmqctl list_amqp10_connections +%% +%% rabbitmqctl list_connections +%% listing AMQP 1.0 connections in 4.0 uses rabbit_reader:info/2 instead. +-spec info(rabbit_types:connection(), rabbit_types:info_keys()) -> + rabbit_types:infos(). +info(Pid, InfoItems) -> + case InfoItems -- ?INFO_ITEMS of + [] -> + case gen_server:call(Pid, {info, InfoItems}, infinity) of + {ok, InfoList} -> + InfoList; + {error, Error} -> + throw(Error) + end; + UnknownItems -> + throw({bad_argument, UnknownItems}) + end. + +infos(Items, State) -> + [{Item, i(Item, State)} || Item <- Items]. + +i(pid, #v1{}) -> + self(); +i(type, #v1{}) -> + network; +i(protocol, #v1{}) -> + {1, 0}; +i(connection, #v1{connection = Val}) -> + Val; +i(node, #v1{}) -> + node(); +i(auth_mechanism, #v1{connection = #v1_connection{auth_mechanism = Val}}) -> + case Val of + {Name, _Mod} -> Name; + _ -> Val + end; +i(frame_max, #v1{connection = #v1_connection{outgoing_max_frame_size = Val}}) -> + %% Some HTTP API clients expect an integer to be reported. 
+ %% https://github.com/rabbitmq/rabbitmq-server/issues/11838 + if Val =:= unlimited -> ?UINT_MAX; + is_integer(Val) -> Val + end; +i(timeout, #v1{connection = #v1_connection{timeout = Millis}}) -> + Millis div 1000; +i(user, #v1{connection = #v1_connection{user = User}}) -> + case User of + #user{username = Val} -> Val; + unauthenticated -> '' + end; +i(state, S) -> + i(connection_state, S); +i(connection_state, #v1{connection_state = Val}) -> + Val; +i(connected_at, #v1{connection = #v1_connection{connected_at = Val}}) -> + Val; +i(name, #v1{connection = #v1_connection{name = Val}}) -> + Val; +i(vhost, #v1{connection = #v1_connection{vhost = Val}}) -> + Val; +i(host, #v1{connection = #v1_connection{host = Val}}) -> + Val; +i(port, #v1{connection = #v1_connection{port = Val}}) -> + Val; +i(peer_host, #v1{connection = #v1_connection{peer_host = Val}}) -> + Val; +i(peer_port, #v1{connection = #v1_connection{peer_port = Val}}) -> + Val; +i(SockStat, S) when SockStat =:= recv_oct; + SockStat =:= recv_cnt; + SockStat =:= send_oct; + SockStat =:= send_cnt; + SockStat =:= send_pend -> + socket_info(fun (Sock) -> rabbit_net:getstat(Sock, [SockStat]) end, + fun ([{_, I}]) -> I end, S); +i(ssl, #v1{sock = Sock}) -> rabbit_net:is_ssl(Sock); +i(SSL, #v1{sock = Sock, proxy_socket = ProxySock}) + when SSL =:= ssl_protocol; + SSL =:= ssl_key_exchange; + SSL =:= ssl_cipher; + SSL =:= ssl_hash -> + rabbit_ssl:info(SSL, {Sock, ProxySock}); +i(Cert, #v1{sock = Sock}) + when Cert =:= peer_cert_issuer; + Cert =:= peer_cert_subject; + Cert =:= peer_cert_validity -> + rabbit_ssl:cert_info(Cert, Sock); +i(client_properties, #v1{connection = #v1_connection{properties = Props}}) -> + %% Connection properties sent by the client. + %% Displayed in rabbitmq_management/priv/www/js/tmpl/connection.ejs + case Props of + undefined -> + []; + {map, Fields} -> + [mc_amqpl:to_091(Key, TypeVal) || {{symbol, Key}, TypeVal} <- Fields] + end; +i(channels, #v1{tracked_channels = Channels}) -> + maps:size(Channels); +i(channel_max, #v1{connection = #v1_connection{channel_max = Max}}) -> + Max; +i(Item, #v1{}) -> + throw({bad_argument, Item}). + +%% From rabbit_reader +socket_info(Get, Select, #v1{sock = Sock}) -> + case Get(Sock) of + {ok, T} -> Select(T); + {error, _} -> '' + end. + +ignore_maintenance({map, Properties}) -> + lists:member( + {{symbol, <<"ignore-maintenance">>}, true}, + Properties); +ignore_maintenance(_) -> + false. diff --git a/deps/rabbit/src/rabbit_amqp_session.erl b/deps/rabbit/src/rabbit_amqp_session.erl new file mode 100644 index 000000000000..99baaa2b9ac9 --- /dev/null +++ b/deps/rabbit/src/rabbit_amqp_session.erl @@ -0,0 +1,3526 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_amqp_session). + +-compile({inline, [maps_update_with/4]}). + +-behaviour(gen_server). + +-include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("amqp10_common/include/amqp10_types.hrl"). +-include("rabbit_amqp.hrl"). +-include("mc.hrl"). + +-rabbit_deprecated_feature( + {amqp_address_v1, + #{deprecation_phase => permitted_by_default, + doc_url => "https://www.rabbitmq.com/docs/next/amqp#address", + messages => + #{when_permitted => + "RabbitMQ AMQP address version 1 is deprecated. 
" + "Clients should use RabbitMQ AMQP address version 2.", + when_denied => + "RabbitMQ AMQP address version 1 is unsupported. " + "Clients must use RabbitMQ AMQP address version 2." + }} + }). + +%% This is the link credit that we grant to sending clients. +%% We are free to choose whatever we want, sending clients must obey. +%% Default soft limits / credits in deps/rabbit/Makefile are: +%% 32 for quorum queues +%% 256 for streams +%% 400 for classic queues +%% If link target is a queue (rather than an exchange), we could use one of these depending +%% on target queue type. For the time being just use a static value that's something in between. +%% In future, we could dynamically grow (or shrink) the link credit we grant depending on how fast +%% target queue(s) actually confirm messages: see paper "Credit-Based Flow Control for ATM Networks" +%% from 1995, section 4.2 "Static vs. adaptive credit control" for pros and cons. +-define(DEFAULT_MAX_LINK_CREDIT, 128). +%% Initial and maximum link credit that we grant to a sending queue. +%% Only when we sent sufficient messages to the writer proc, we will again grant +%% credits to the sending queue. We have this limit in place to ensure that our +%% session proc won't be flooded with messages by the sending queue, especially +%% if we are throttled sending messages to the client either by the writer proc +%% or by remote-incoming window (i.e. session flow control). +-define(DEFAULT_MAX_QUEUE_CREDIT, 256). +-define(DEFAULT_MAX_INCOMING_WINDOW, 400). +-define(MAX_MANAGEMENT_LINK_CREDIT, 8). +-define(MANAGEMENT_NODE_ADDRESS, <<"/management">>). +-define(UINT_OUTGOING_WINDOW, {uint, ?UINT_MAX}). +%% "The next-outgoing-id MAY be initialized to an arbitrary value" [2.5.6] +-define(INITIAL_OUTGOING_TRANSFER_ID, ?UINT_MAX - 3). +%% "Note that, despite its name, the delivery-count is not a count but a +%% sequence number initialized at an arbitrary point by the sender." [2.6.7] +-define(INITIAL_DELIVERY_COUNT, ?UINT_MAX - 4). +-define(INITIAL_OUTGOING_DELIVERY_ID, 0). +-define(DEFAULT_MAX_HANDLE, ?UINT_MAX). +-define(UINT(N), {uint, N}). +%% [3.4] +-define(OUTCOMES, [?V_1_0_SYMBOL_ACCEPTED, + ?V_1_0_SYMBOL_REJECTED, + ?V_1_0_SYMBOL_RELEASED, + ?V_1_0_SYMBOL_MODIFIED]). +-define(DEFAULT_EXCHANGE_NAME, <<>>). +-define(PROTOCOL, amqp10). +-define(MAX_PERMISSION_CACHE_SIZE, 12). +-define(HIBERNATE_AFTER, 6_000). +-define(CREDIT_REPLY_TIMEOUT, 30_000). + +-export([start_link/8, + process_frame/2, + list_local/0, + conserve_resources/3, + check_resource_access/4, + check_read_permitted_on_topic/4 + ]). + +-export([init/1, + terminate/2, + handle_call/3, + handle_cast/2, + handle_info/2, + format_status/1]). + +-import(rabbit_amqp_util, + [protocol_error/3]). +-import(serial_number, + [add/2, + diff/2, + compare/2]). +-import(rabbit_misc, + [queue_resource/2, + exchange_resource/2]). + +-type permission_cache() :: [{rabbit_types:r(exchange | queue), + rabbit_types:permission_atom()}]. +-type topic_permission_cache() :: [{rabbit_types:r(topic), + rabbit_types:routing_key(), + rabbit_types:permission_atom()}]. + +-type transfer_frame_body() :: [Performative :: #'v1_0.transfer'{} | + Payload :: iolist()]. + +-export_type([permission_cache/0, + topic_permission_cache/0]). + +%% incoming multi transfer delivery [2.6.14] +-record(multi_transfer_msg, { + payload_fragments_rev :: [binary(),...], + delivery_id :: delivery_number(), + settled :: boolean() + }). 
+ +%% For AMQP management operations, we require a link pair as described in +%% https://docs.oasis-open.org/amqp/linkpair/v1.0/cs01/linkpair-v1.0-cs01.html +-record(management_link_pair, { + client_terminus_address, + incoming_half :: unattached | link_handle(), + outgoing_half :: unattached | link_handle() + }). + +%% Incoming or outgoing half of the link pair. +-record(management_link, { + name :: binary(), + delivery_count :: sequence_no(), + credit :: rabbit_queue_type:credit(), + max_message_size :: unlimited | pos_integer() + }). + +-record(incoming_link, { + %% The exchange is either defined in the ATTACH frame and static for + %% the life time of the link or dynamically provided in each message's + %% "to" field (address v2). + exchange :: rabbit_types:exchange() | rabbit_exchange:name() | to, + %% The routing key is either defined in the ATTACH frame and static for + %% the life time of the link or dynamically provided in each message's + %% "to" field (address v2) or "subject" field (address v1). + routing_key :: rabbit_types:routing_key() | to | subject, + %% queue_name_bin is only set if the link target address refers to a queue. + queue_name_bin :: undefined | rabbit_misc:resource_name(), + max_message_size :: pos_integer(), + delivery_count :: sequence_no(), + credit :: rabbit_queue_type:credit(), + %% TRANSFER delivery IDs published to queues but not yet confirmed by queues + incoming_unconfirmed_map = #{} :: #{delivery_number() => + {#{rabbit_amqqueue:name() := ok}, + IsTransferSettled :: boolean(), + AtLeastOneQueueConfirmed :: boolean()}}, + multi_transfer_msg :: undefined | #multi_transfer_msg{} + }). + +%% A credit request from the client (receiver) as sent in the FLOW frame. +-record(credit_req, { + delivery_count :: sequence_no(), + credit :: rabbit_queue_type:credit(), + drain :: boolean(), + echo :: boolean() + }). + +%% Link flow control state for link between client (receiver) and us (sender). +-record(client_flow_ctl, { + delivery_count :: sequence_no(), + credit :: rabbit_queue_type:credit(), + echo :: boolean() + }). + +%% Link flow control state for link between us (receiver) and queue (sender). +-record(queue_flow_ctl, { + delivery_count :: sequence_no(), + %% We cap the actual credit we grant to the sending queue. + %% If client_flow_ctl.credit is larger than max_queue_credit, + %% we will top up in batches to the sending queue. + credit :: rabbit_queue_type:credit(), + drain :: boolean() + }). + +-record(outgoing_link, { + %% Although the source address of a link might be an exchange name and binding key + %% or a topic filter, an outgoing link will always consume from a queue. + queue_name :: rabbit_amqqueue:name(), + queue_type :: rabbit_queue_type:queue_type(), + send_settled :: boolean(), + max_message_size :: unlimited | pos_integer(), + + %% When feature flag rabbitmq_4.0.0 becomes required, + %% the following 2 fields should be deleted. + credit_api_version :: 1 | 2, + %% When credit API v1 is used, our session process holds the delivery-count + delivery_count :: sequence_no() | credit_api_v2, + %% We use a dual link approach for messages we send to the client. + %% We hold link flow control state for the link to the receiving + %% client and for the link to the sending queue. + client_flow_ctl :: #client_flow_ctl{} | credit_api_v1, + queue_flow_ctl :: #queue_flow_ctl{} | credit_api_v1, + %% 'true' means: + %% * we haven't processed a credit reply yet since we last sent + %% a credit request to the sending queue. 
+ %% * a credit request is certainly in flight + %% * possibly multiple credit requests are in flight (e.g. rabbit_fifo_client + %% will re-send credit requests on our behalf on quorum queue leader changes) + %% 'false' means: + %% * we processed a credit reply since we last sent a credit request to the sending queue + %% * probably no credit request is in flight, but there might be + %% (we aren't sure since we don't use correlations for credit requests) + at_least_one_credit_req_in_flight :: boolean() | credit_api_v1, + %% While at_least_one_credit_req_in_flight is true, we stash the + %% latest credit request from the receiving client. + stashed_credit_req :: none | #credit_req{} | credit_api_v1 + }). + +-record(outgoing_unsettled, { + %% The queue sent us this consumer scoped sequence number. + msg_id :: rabbit_amqqueue:msg_id(), + consumer_tag :: rabbit_types:ctag(), + queue_name :: rabbit_amqqueue:name() + }). + +-record(pending_delivery, { + %% A large message can be split into multiple transfer frames. + frames :: [transfer_frame_body(), ...], + queue_ack_required :: boolean(), + %% Queue that sent us this message. + %% When feature flag rabbitmq_4.0.0 becomes required, this field should be deleted. + queue_pid :: pid() | credit_api_v2, + delivery_id :: delivery_number(), + outgoing_unsettled :: #outgoing_unsettled{} + }). + +-record(pending_management_delivery, { + %% A large message can be split into multiple transfer frames. + frames :: [transfer_frame_body(), ...] + }). + +-record(cfg, { + outgoing_max_frame_size :: unlimited | pos_integer(), + reader_pid :: rabbit_types:connection(), + writer_pid :: pid(), + user :: rabbit_types:user(), + vhost :: rabbit_types:vhost(), + %% We just use the incoming (AMQP 1.0) channel number. + channel_num :: non_neg_integer(), + %% We tolerate our incoming_window to be violated by up to this number of + %% excess TRANSFERs. If the client sends us even more TRANSFERs, we will + %% close the session with session error window-violation. + %% Unless we decrease our incoming_window dynamically, we are strict by + %% default and don't allow for any excess TRANSFERs. + incoming_window_margin = 0 :: non_neg_integer(), + resource_alarms :: sets:set(rabbit_alarm:resource_alarm_source()), + trace_state :: rabbit_trace:state(), + conn_name :: binary(), + max_incoming_window :: pos_integer(), + max_link_credit :: pos_integer(), + max_queue_credit :: pos_integer() + }). + +-record(state, { + cfg :: #cfg{}, + + %% The following 5 fields are state for session flow control. + %% See section 2.5.6. + %% + %% We omit outgoing-window. We keep the outgoing-window always large and don't + %% restrict ourselves delivering messages fast to AMQP clients because keeping an + %% #outgoing_unsettled{} entry in the outgoing_unsettled_map requires far less + %% memory than holding the message payload in the outgoing_pending queue. + %% + %% expected implicit transfer-id of next incoming TRANSFER + next_incoming_id :: transfer_number(), + %% Defines the maximum number of incoming transfer frames that we can currently receive. + %% This value is chosen by us. + %% Purpose: + %% 1. It protects our session process from being overloaded, and + %% 2. Since frames have a maximum size for a given connection, this provides flow control based + %% on the number of bytes transmitted, and therefore protects our platform, i.e. RabbitMQ as a + %% whole. 
We will set this window to 0 if a cluster wide memory or disk alarm occurs (see module + %% rabbit_alarm) to stop receiving incoming TRANSFERs. + %% (It's an optional feature: If we wanted we could always keep that window huge, i.e. not + %% shrinking the window when we receive a TRANSFER. However, we do want to use that feature + %% due to aforementioned purposes.) + %% Can become negative up to -incoming_window_margin when client overshoots our window. + incoming_window :: integer(), + %% implicit transfer-id of our next outgoing TRANSFER + next_outgoing_id :: transfer_number(), + %% Defines the maximum number of outgoing transfer frames that we are + %% currently allowed to send. This value is chosen by the AMQP client. + remote_incoming_window :: non_neg_integer(), + %% This field is informational. + %% It reflects the maximum number of incoming TRANSFERs that may arrive without exceeding + %% the AMQP client's own outgoing-window. + %% When this window shrinks, it is an indication of outstanding transfers (from AMQP client + %% to us) which we need to settle (after receiving confirmations from target queues) for + %% the window to grow again. + remote_outgoing_window :: non_neg_integer(), + + %% These messages were received from queues thanks to sufficient link credit. + %% However, they are buffered here due to session flow control + %% (when remote_incoming_window <= 0) before being sent to the AMQP client. + %% + %% FLOW frames (and credit reply actions) are stored here as well because for a specific outgoing link + %% the order in which we send TRANSFER and FLOW frames is important. An outgoing FLOW frame with link + %% flow control information must not overtake a TRANSFER frame for the same link just because + %% we are throttled by session flow control. (However, we can still send outgoing FLOW frames + %% that contain only session flow control information, i.e. where the FLOW's 'handle' field is not set.) + %% Example: + %% A receiver grants our queue 2 credits with drain=true and the queue only has 1 message available. + %% Even when we are limited by session flow control, we must make sure to first send the TRANSFER to the + %% client (once the remote_incoming_window got opened) followed by the FLOW with drain=true and credit=0 + %% and advanced delivery count. Otherwise, we would violate the AMQP protocol spec. + outgoing_pending = queue:new() :: queue:queue(#pending_delivery{} | + rabbit_queue_type:credit_reply_action() | + #pending_management_delivery{} | + #'v1_0.flow'{}), + + %% The link or session endpoint assigns each message a unique delivery-id + %% from a session scoped sequence number. + %% + %% Do not confuse this field with next_outgoing_id: + %% Both are session scoped sequence numbers, but initialised at different arbitrary values. + %% + %% next_outgoing_id is an implicit ID, i.e. not sent in the TRANSFER frame. + %% outgoing_delivery_id is an explicit ID, i.e. sent in the TRANSFER frame. + %% + %% next_outgoing_id is incremented per TRANSFER frame. + %% outgoing_delivery_id is incremented per message. + %% Remember that a large message can be split up into multiple TRANSFER frames. + outgoing_delivery_id :: delivery_number(), + + %% Links are unidirectional. + %% We receive messages from clients on incoming links. + incoming_links = #{} :: #{link_handle() => #incoming_link{}}, + %% We send messages to clients on outgoing links. 
+ outgoing_links = #{} :: #{link_handle() => #outgoing_link{}}, + + management_link_pairs = #{} :: #{LinkName :: binary() => #management_link_pair{}}, + incoming_management_links = #{} :: #{link_handle() => #management_link{}}, + outgoing_management_links = #{} :: #{link_handle() => #management_link{}}, + + %% TRANSFER delivery IDs published to consuming clients but not yet acknowledged by clients. + outgoing_unsettled_map = #{} :: #{delivery_number() => #outgoing_unsettled{}}, + + %% Queue actions that we will process later such that we can confirm and reject + %% delivery IDs in ranges to reduce the number of DISPOSITION frames sent to the client. + stashed_rejected = [] :: [{rejected, rabbit_amqqueue:name(), [delivery_number(),...]}], + stashed_settled = [] :: [{settled, rabbit_amqqueue:name(), [delivery_number(),...]}], + %% Classic queues that are down. + stashed_down = []:: [rabbit_amqqueue:name()], + %% Queues that got deleted. + stashed_eol = [] :: [rabbit_amqqueue:name()], + + queue_states = rabbit_queue_type:init() :: rabbit_queue_type:state(), + permission_cache = [] :: permission_cache(), + topic_permission_cache = [] :: topic_permission_cache() + }). + +-type state() :: #state{}. + +start_link(ReaderPid, WriterPid, ChannelNum, FrameMax, User, Vhost, ConnName, BeginFrame) -> + Args = {ReaderPid, WriterPid, ChannelNum, FrameMax, User, Vhost, ConnName, BeginFrame}, + Opts = [{hibernate_after, ?HIBERNATE_AFTER}], + gen_server:start_link(?MODULE, Args, Opts). + +process_frame(Pid, FrameBody) -> + gen_server:cast(Pid, {frame_body, FrameBody}). + +init({ReaderPid, WriterPid, ChannelNum, MaxFrameSize, User, Vhost, ConnName, + #'v1_0.begin'{next_outgoing_id = ?UINT(RemoteNextOutgoingId), + incoming_window = ?UINT(RemoteIncomingWindow), + outgoing_window = ?UINT(RemoteOutgoingWindow), + handle_max = HandleMax0}}) -> + process_flag(trap_exit, true), + process_flag(message_queue_data, off_heap), + + ok = pg:join(pg_scope(), self(), self()), + Alarms0 = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), + Alarms = sets:from_list(Alarms0, [{version, 2}]), + + MaxLinkCredit = application:get_env( + rabbit, max_link_credit, ?DEFAULT_MAX_LINK_CREDIT), + MaxQueueCredit = application:get_env( + rabbit, max_queue_credit, ?DEFAULT_MAX_QUEUE_CREDIT), + MaxIncomingWindow = application:get_env( + rabbit, max_incoming_window, ?DEFAULT_MAX_INCOMING_WINDOW), + true = is_valid_max(MaxLinkCredit), + true = is_valid_max(MaxQueueCredit), + true = is_valid_max(MaxIncomingWindow), + IncomingWindow = case sets:is_empty(Alarms) of + true -> MaxIncomingWindow; + false -> 0 + end, + NextOutgoingId = ?INITIAL_OUTGOING_TRANSFER_ID, + + HandleMax = case HandleMax0 of + ?UINT(Max) -> Max; + _ -> ?DEFAULT_MAX_HANDLE + end, + Reply = #'v1_0.begin'{remote_channel = {ushort, ChannelNum}, + handle_max = ?UINT(HandleMax), + next_outgoing_id = ?UINT(NextOutgoingId), + incoming_window = ?UINT(IncomingWindow), + outgoing_window = ?UINT_OUTGOING_WINDOW}, + rabbit_amqp_writer:send_command(WriterPid, ChannelNum, Reply), + + {ok, #state{next_incoming_id = RemoteNextOutgoingId, + next_outgoing_id = NextOutgoingId, + incoming_window = IncomingWindow, + remote_incoming_window = RemoteIncomingWindow, + remote_outgoing_window = RemoteOutgoingWindow, + outgoing_delivery_id = ?INITIAL_OUTGOING_DELIVERY_ID, + cfg = #cfg{reader_pid = ReaderPid, + writer_pid = WriterPid, + outgoing_max_frame_size = MaxFrameSize, + user = User, + vhost = Vhost, + channel_num = ChannelNum, + resource_alarms = Alarms, + trace_state = 
rabbit_trace:init(Vhost), + conn_name = ConnName, + max_incoming_window = MaxIncomingWindow, + max_link_credit = MaxLinkCredit, + max_queue_credit = MaxQueueCredit + }}}. + +terminate(_Reason, #state{incoming_links = IncomingLinks, + outgoing_links = OutgoingLinks, + queue_states = QStates}) -> + maps:foreach( + fun (_, _) -> + rabbit_global_counters:publisher_deleted(?PROTOCOL) + end, IncomingLinks), + maps:foreach( + fun (_, _) -> + rabbit_global_counters:consumer_deleted(?PROTOCOL) + end, OutgoingLinks), + ok = rabbit_queue_type:close(QStates). + +-spec list_local() -> [pid()]. +list_local() -> + pg:which_groups(pg_scope()). + +-spec conserve_resources(pid(), + rabbit_alarm:resource_alarm_source(), + rabbit_alarm:resource_alert()) -> ok. +conserve_resources(Pid, Source, {_, Conserve, _}) -> + gen_server:cast(Pid, {conserve_resources, Source, Conserve}). + +handle_call(Msg, _From, State) -> + Reply = {error, {not_understood, Msg}}, + reply(Reply, State). + +handle_info(timeout, State) -> + noreply(State); +handle_info({bump_credit, Msg}, State) -> + %% We are receiving credit from the writer proc. + credit_flow:handle_bump_msg(Msg), + noreply(State); +handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason}, + #state{queue_states = QStates0, + stashed_eol = Eol} = State0) -> + case rabbit_queue_type:handle_down(QPid, QName, Reason, QStates0) of + {ok, QStates, Actions} -> + State1 = State0#state{queue_states = QStates}, + State = handle_queue_actions(Actions, State1), + noreply(State); + {eol, QStates, QRef} -> + State = State0#state{queue_states = QStates, + stashed_eol = [QRef | Eol]}, + noreply(State) + end. + +handle_cast({frame_body, FrameBody}, + #state{cfg = #cfg{writer_pid = WriterPid, + channel_num = Ch}} = State0) -> + try handle_control(FrameBody, State0) of + {reply, Replies, State} when is_list(Replies) -> + lists:foreach(fun (Reply) -> + rabbit_amqp_writer:send_command(WriterPid, Ch, Reply) + end, Replies), + noreply(State); + {reply, Reply, State} -> + rabbit_amqp_writer:send_command(WriterPid, Ch, Reply), + noreply(State); + {noreply, State} -> + noreply(State); + {stop, _, _} = Stop -> + Stop + catch exit:#'v1_0.error'{} = Error -> + log_error_and_close_session(Error, State0); + exit:normal -> + {stop, normal, State0}; + _:Reason:Stacktrace -> + {stop, {Reason, Stacktrace}, State0} + end; +handle_cast({queue_event, _, _} = QEvent, State0) -> + try handle_queue_event(QEvent, State0) of + State -> + noreply_coalesce(State) + catch exit:#'v1_0.error'{} = Error -> + log_error_and_close_session(Error, State0) + end; +handle_cast({conserve_resources, Alarm, Conserve}, + #state{incoming_window = IncomingWindow0, + cfg = #cfg{resource_alarms = Alarms0, + incoming_window_margin = Margin0, + writer_pid = WriterPid, + channel_num = Ch, + max_incoming_window = MaxIncomingWindow + } = Cfg + } = State0) -> + Alarms = case Conserve of + true -> sets:add_element(Alarm, Alarms0); + false -> sets:del_element(Alarm, Alarms0) + end, + {SendFlow, IncomingWindow, Margin} = + case {sets:is_empty(Alarms0), sets:is_empty(Alarms)} of + {true, false} -> + %% Alarm kicked in. + %% Notify the client to not send us any more TRANSFERs. Since we decrase + %% our incoming window dynamically, there might be incoming in-flight + %% TRANSFERs. So, let's be lax and allow for some excess TRANSFERs. + {true, 0, MaxIncomingWindow}; + {false, true} -> + %% All alarms cleared. + %% Notify the client that it can resume sending us TRANSFERs. 
+ {true, MaxIncomingWindow, 0}; + _ -> + {false, IncomingWindow0, Margin0} + end, + State = State0#state{incoming_window = IncomingWindow, + cfg = Cfg#cfg{resource_alarms = Alarms, + incoming_window_margin = Margin}}, + case SendFlow of + true -> + Flow = session_flow_fields(#'v1_0.flow'{}, State), + rabbit_amqp_writer:send_command(WriterPid, Ch, Flow); + false -> + ok + end, + noreply(State); +handle_cast(refresh_config, #state{cfg = #cfg{vhost = Vhost} = Cfg} = State0) -> + State = State0#state{cfg = Cfg#cfg{trace_state = rabbit_trace:init(Vhost)}}, + noreply(State). + +log_error_and_close_session( + Error, State = #state{cfg = #cfg{reader_pid = ReaderPid, + writer_pid = WriterPid, + channel_num = Ch}}) -> + End = #'v1_0.end'{error = Error}, + rabbit_log:warning("Closing session for connection ~p: ~tp", + [ReaderPid, Error]), + ok = rabbit_amqp_writer:send_command_sync(WriterPid, Ch, End), + {stop, {shutdown, Error}, State}. + +%% Batch confirms / rejects to publishers. +noreply_coalesce(#state{stashed_rejected = [], + stashed_settled = [], + stashed_down = [], + stashed_eol = []} = State) -> + noreply(State); +noreply_coalesce(State) -> + Timeout = 0, + {noreply, State, Timeout}. + +noreply(State0) -> + State = send_buffered(State0), + {noreply, State}. + +reply(Reply, State0) -> + State = send_buffered(State0), + {reply, Reply, State}. + +send_buffered(State0) -> + State = send_delivery_state_changes(State0), + send_pending(State). + +%% Send confirms / rejects to publishers. +send_delivery_state_changes(#state{stashed_rejected = [], + stashed_settled = [], + stashed_down = [], + stashed_eol = []} = State) -> + State; +send_delivery_state_changes(State0 = #state{cfg = #cfg{writer_pid = Writer, + channel_num = ChannelNum, + max_link_credit = MaxLinkCredit}}) -> + %% Order is important: + %% 1. Process queue rejections. + {RejectedIds, GrantCredits0, State1} = handle_stashed_rejected(State0), + send_dispositions(RejectedIds, #'v1_0.rejected'{}, Writer, ChannelNum), + %% 2. Process queue confirmations. + {AcceptedIds0, GrantCredits1, State2} = handle_stashed_settled(GrantCredits0, State1), + %% 3. Process unavailable classic queues. + {DetachFrames0, State3} = handle_stashed_down(State2), + %% 4. Process queue deletions. + {ReleasedIds, AcceptedIds1, DetachFrames, GrantCredits, State} = handle_stashed_eol(DetachFrames0, GrantCredits1, State3), + send_dispositions(ReleasedIds, #'v1_0.released'{}, Writer, ChannelNum), + AcceptedIds = AcceptedIds1 ++ AcceptedIds0, + send_dispositions(AcceptedIds, #'v1_0.accepted'{}, Writer, ChannelNum), + rabbit_global_counters:messages_confirmed(?PROTOCOL, length(AcceptedIds)), + %% Send DETACH frames after DISPOSITION frames such that + %% clients can handle DISPOSITIONs before closing their links. + lists:foreach(fun(Frame) -> + rabbit_amqp_writer:send_command(Writer, ChannelNum, Frame) + end, DetachFrames), + maps:foreach(fun(HandleInt, DeliveryCount) -> + F0 = flow(?UINT(HandleInt), DeliveryCount, MaxLinkCredit), + F = session_flow_fields(F0, State), + rabbit_amqp_writer:send_command(Writer, ChannelNum, F) + end, GrantCredits), + State. 
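+%% For illustration (the exact grant condition lives in maybe_grant_link_credit): a publishing
+%% client is never granted more than max_link_credit credits on an incoming link. As target
+%% queues confirm messages and entries leave the link's unconfirmed map, the link may be topped
+%% up again; the FLOW sent above then carries the link's current delivery-count and
+%% link-credit = max_link_credit.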
+ +handle_stashed_rejected(#state{stashed_rejected = []} = State) -> + {[], #{}, State}; +handle_stashed_rejected(#state{cfg = #cfg{max_link_credit = MaxLinkCredit}, + stashed_rejected = Actions, + incoming_links = Links} = State0) -> + {Ids, GrantCredits, Ls} = + lists:foldl( + fun({rejected, _QName, Correlations}, Accum) -> + lists:foldl( + fun({HandleInt, DeliveryId}, {Ids0, GrantCreds0, Links0} = Acc) -> + case Links0 of + #{HandleInt := Link0 = #incoming_link{incoming_unconfirmed_map = U0}} -> + case maps:take(DeliveryId, U0) of + {{_, Settled, _}, U} -> + Ids1 = case Settled of + true -> Ids0; + false -> [DeliveryId | Ids0] + end, + Link1 = Link0#incoming_link{incoming_unconfirmed_map = U}, + {Link, GrantCreds} = maybe_grant_link_credit( + MaxLinkCredit, HandleInt, + Link1, GrantCreds0), + {Ids1, GrantCreds, maps:update(HandleInt, Link, Links0)}; + error -> + Acc + end; + _ -> + Acc + end + end, Accum, Correlations) + end, {[], #{}, Links}, Actions), + + State = State0#state{stashed_rejected = [], + incoming_links = Ls}, + {Ids, GrantCredits, State}. + +handle_stashed_settled(GrantCredits, #state{stashed_settled = []} = State) -> + {[], GrantCredits, State}; +handle_stashed_settled(GrantCredits0, #state{cfg = #cfg{max_link_credit = MaxLinkCredit}, + stashed_settled = Actions, + incoming_links = Links} = State0) -> + {Ids, GrantCredits, Ls} = + lists:foldl( + fun({settled, QName, Correlations}, Accum) -> + lists:foldl( + fun({HandleInt, DeliveryId}, {Ids0, GrantCreds0, Links0} = Acc) -> + case Links0 of + #{HandleInt := Link0 = #incoming_link{incoming_unconfirmed_map = U0}} -> + case maps:take(DeliveryId, U0) of + {{#{QName := _} = Qs, Settled, _}, U1} -> + UnconfirmedQs = map_size(Qs), + {Ids2, U} = + if UnconfirmedQs =:= 1 -> + %% last queue confirmed + Ids1 = case Settled of + true -> Ids0; + false -> [DeliveryId | Ids0] + end, + {Ids1, U1}; + UnconfirmedQs > 1 -> + U2 = maps:update( + DeliveryId, + {maps:remove(QName, Qs), Settled, true}, + U0), + {Ids0, U2} + end, + Link1 = Link0#incoming_link{incoming_unconfirmed_map = U}, + {Link, GrantCreds} = maybe_grant_link_credit( + MaxLinkCredit, HandleInt, + Link1, GrantCreds0), + {Ids2, GrantCreds, maps:update(HandleInt, Link, Links0)}; + _ -> + Acc + end; + _ -> + Acc + end + end, Accum, Correlations) + end, {[], GrantCredits0, Links}, Actions), + + State = State0#state{stashed_settled = [], + incoming_links = Ls}, + {Ids, GrantCredits, State}. + +handle_stashed_down(#state{stashed_down = []} = State) -> + {[], State}; +handle_stashed_down(#state{stashed_down = QNames, + outgoing_links = OutgoingLinks0} = State0) -> + %% We already processed queue actions settled and rejected for classic queues that are down. + %% Here, we destroy any outgoing links that consume from unavailable classic queues. + %% (This roughly corresponds to consumer_cancel_notify sent from server to client in AMQP 0.9.1.) + {DetachFrames, OutgoingLinks} = + lists:foldl(fun(#resource{name = QNameBinDown}, Acc = {_, OutgoingLinks1}) -> + maps:fold(fun(Handle, Link = #outgoing_link{queue_name = #resource{name = QNameBin}}, {Frames0, Links0}) + when QNameBin =:= QNameBinDown -> + Detach = detach(Handle, Link, ?V_1_0_AMQP_ERROR_ILLEGAL_STATE), + Frames = [Detach | Frames0], + Links = maps:remove(Handle, Links0), + {Frames, Links}; + (_, _, Accum) -> + Accum + end, Acc, OutgoingLinks1) + end, {[], OutgoingLinks0}, QNames), + State = State0#state{stashed_down = [], + outgoing_links = OutgoingLinks}, + {DetachFrames, State}. 
+ +handle_stashed_eol(DetachFrames, GrantCredits, #state{stashed_eol = []} = State) -> + {[], [], DetachFrames, GrantCredits, State}; +handle_stashed_eol(DetachFrames0, GrantCredits0, #state{cfg = #cfg{max_link_credit = MaxLinkCredit}, + stashed_eol = Eols} = State0) -> + {ReleasedIs, AcceptedIds, DetachFrames, GrantCredits, State1} = + lists:foldl(fun(QName, {RIds0, AIds0, DetachFrames1, GrantCreds0, S0 = #state{incoming_links = Links0, + queue_states = QStates0}}) -> + {RIds, AIds, GrantCreds1, Links} = settle_eol( + QName, MaxLinkCredit, + {RIds0, AIds0, GrantCreds0, Links0}), + QStates = rabbit_queue_type:remove(QName, QStates0), + S1 = S0#state{incoming_links = Links, + queue_states = QStates}, + {DetachFrames2, GrantCreds, S} = destroy_links(QName, DetachFrames1, GrantCreds1, S1), + {RIds, AIds, DetachFrames2, GrantCreds, S} + end, {[], [], DetachFrames0, GrantCredits0, State0}, Eols), + + State = State1#state{stashed_eol = []}, + {ReleasedIs, AcceptedIds, DetachFrames, GrantCredits, State}. + +settle_eol(QName, MaxLinkCredit, {_ReleasedIds, _AcceptedIds, _GrantCredits, Links} = Acc) -> + maps:fold(fun(HandleInt, + #incoming_link{incoming_unconfirmed_map = U0} = Link0, + {RelIds0, AcceptIds0, GrantCreds0, Links0}) -> + {RelIds, AcceptIds, U} = settle_eol0(QName, {RelIds0, AcceptIds0, U0}), + Link1 = Link0#incoming_link{incoming_unconfirmed_map = U}, + {Link, GrantCreds} = maybe_grant_link_credit( + MaxLinkCredit, HandleInt, Link1, GrantCreds0), + Links1 = maps:update(HandleInt, + Link, + Links0), + {RelIds, AcceptIds, GrantCreds, Links1} + end, Acc, Links). + +settle_eol0(QName, {_ReleasedIds, _AcceptedIds, UnconfirmedMap} = Acc) -> + maps:fold( + fun(DeliveryId, + {#{QName := _} = Qs, Settled, AtLeastOneQueueConfirmed}, + {RelIds, AcceptIds, U0}) -> + UnconfirmedQs = map_size(Qs), + if UnconfirmedQs =:= 1 -> + %% The last queue that this delivery ID was waiting a confirm for got deleted. + U = maps:remove(DeliveryId, U0), + case Settled of + true -> + {RelIds, AcceptIds, U}; + false -> + case AtLeastOneQueueConfirmed of + true -> + %% Since at least one queue confirmed this message, we reply to + %% the client with ACCEPTED. This allows e.g. for large fanout + %% scenarios where temporary target queues are deleted + %% (think about an MQTT subscriber disconnects). + {RelIds, [DeliveryId | AcceptIds], U}; + false -> + %% Since no queue confirmed this message, we reply to the client + %% with RELEASED. (The client can then re-publish this message.) + {[DeliveryId | RelIds], AcceptIds, U} + end + end; + UnconfirmedQs > 1 -> + U = maps:update(DeliveryId, + {maps:remove(QName, Qs), Settled, AtLeastOneQueueConfirmed}, + U0), + {RelIds, AcceptIds, U} + end; + (_, _, A) -> + A + end, Acc, UnconfirmedMap). 
+ +destroy_links(#resource{kind = queue, + name = QNameBin}, + Frames0, + GrantCredits0, + #state{incoming_links = IncomingLinks0, + outgoing_links = OutgoingLinks0, + outgoing_unsettled_map = Unsettled0, + outgoing_pending = Pending0} = State0) -> + {Frames1, + GrantCredits, + IncomingLinks} = maps:fold(fun(Handle, Link, Acc) -> + destroy_incoming_link(Handle, Link, QNameBin, Acc) + end, {Frames0, GrantCredits0, IncomingLinks0}, IncomingLinks0), + {Frames, + Unsettled, + Pending, + OutgoingLinks} = maps:fold(fun(Handle, Link, Acc) -> + destroy_outgoing_link(Handle, Link, QNameBin, Acc) + end, {Frames1, Unsettled0, Pending0, OutgoingLinks0}, OutgoingLinks0), + State = State0#state{incoming_links = IncomingLinks, + outgoing_links = OutgoingLinks, + outgoing_unsettled_map = Unsettled, + outgoing_pending = Pending}, + {Frames, GrantCredits, State}. + +destroy_incoming_link(Handle, + Link = #incoming_link{queue_name_bin = QNameBin}, + QNameBin, + {Frames, GrantCreds, Links}) -> + {[detach(Handle, Link, ?V_1_0_AMQP_ERROR_RESOURCE_DELETED) | Frames], + %% Don't grant credits for a link that we destroy. + maps:remove(Handle, GrantCreds), + maps:remove(Handle, Links)}; +destroy_incoming_link(_, _, _, Acc) -> + Acc. + +destroy_outgoing_link(Handle, + Link = #outgoing_link{queue_name = #resource{name = QNameBin}}, + QNameBin, + {Frames, Unsettled0, Pending0, Links}) -> + {Unsettled, Pending} = remove_outgoing_link(Handle, Unsettled0, Pending0), + {[detach(Handle, Link, ?V_1_0_AMQP_ERROR_RESOURCE_DELETED) | Frames], + Unsettled, + Pending, + maps:remove(Handle, Links)}; +destroy_outgoing_link(_, _, _, Acc) -> + Acc. + +detach(Handle, Link, Error = #'v1_0.error'{}) -> + rabbit_log:warning("Detaching link handle ~b due to error: ~tp", + [Handle, Error]), + publisher_or_consumer_deleted(Link), + #'v1_0.detach'{handle = ?UINT(Handle), + closed = true, + error = Error}; +detach(Handle, Link, ErrorCondition) -> + detach(Handle, Link, #'v1_0.error'{condition = ErrorCondition}). + +send_dispositions(Ids, DeliveryState, Writer, ChannelNum) -> + Ranges = serial_number:ranges(Ids), + lists:foreach(fun({First, Last}) -> + Disposition = disposition(DeliveryState, First, Last), + rabbit_amqp_writer:send_command(Writer, ChannelNum, Disposition) + end, Ranges). + +disposition(DeliveryState, First, Last) -> + Last1 = case First of + Last -> + %% "If not set, this is taken to be the same as first." [2.7.6] + %% Save a few bytes. + undefined; + _ -> + ?UINT(Last) + end, + #'v1_0.disposition'{ + role = ?AMQP_ROLE_RECEIVER, + settled = true, + state = DeliveryState, + first = ?UINT(First), + last = Last1}. 
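+%% For illustration: settling delivery-ids 1, 2, 3 and 5 yields the ranges 1..3 and 5..5, i.e.
+%% two DISPOSITION frames: one with first=1 and last=3, and one with first=5 where last is
+%% omitted because it would equal first.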
+ +handle_control(#'v1_0.attach'{ + role = ?AMQP_ROLE_SENDER, + snd_settle_mode = ?V_1_0_SENDER_SETTLE_MODE_SETTLED, + name = Name = {utf8, LinkName}, + handle = Handle = ?UINT(HandleInt), + source = Source = #'v1_0.source'{address = ClientTerminusAddress}, + target = Target = #'v1_0.target'{address = {utf8, ?MANAGEMENT_NODE_ADDRESS}}, + initial_delivery_count = DeliveryCount = ?UINT(DeliveryCountInt), + properties = Properties + } = Attach, + #state{management_link_pairs = Pairs0, + incoming_management_links = Links + } = State0) -> + ok = validate_attach(Attach), + ok = check_paired(Properties), + Pairs = case Pairs0 of + #{LinkName := #management_link_pair{ + client_terminus_address = ClientTerminusAddress, + incoming_half = unattached, + outgoing_half = H} = Pair} + when is_integer(H) -> + maps:update(LinkName, + Pair#management_link_pair{incoming_half = HandleInt}, + Pairs0); + #{LinkName := Other} -> + protocol_error(?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + "received invalid attach ~p for management link pair ~p", + [Attach, Other]); + _ -> + maps:put(LinkName, + #management_link_pair{client_terminus_address = ClientTerminusAddress, + incoming_half = HandleInt, + outgoing_half = unattached}, + Pairs0) + end, + MaxMessageSize = persistent_term:get(max_message_size), + Link = #management_link{name = LinkName, + delivery_count = DeliveryCountInt, + credit = ?MAX_MANAGEMENT_LINK_CREDIT, + max_message_size = MaxMessageSize}, + State = State0#state{management_link_pairs = Pairs, + incoming_management_links = maps:put(HandleInt, Link, Links)}, + Reply = #'v1_0.attach'{ + name = Name, + handle = Handle, + %% We are the receiver. + role = ?AMQP_ROLE_RECEIVER, + snd_settle_mode = ?V_1_0_SENDER_SETTLE_MODE_SETTLED, + rcv_settle_mode = ?V_1_0_RECEIVER_SETTLE_MODE_FIRST, + source = Source, + target = Target, + max_message_size = {ulong, MaxMessageSize}, + properties = Properties}, + Flow = #'v1_0.flow'{handle = Handle, + delivery_count = DeliveryCount, + link_credit = ?UINT(?MAX_MANAGEMENT_LINK_CREDIT)}, + reply0([Reply, Flow], State); + +handle_control(#'v1_0.attach'{ + role = ?AMQP_ROLE_RECEIVER, + name = Name = {utf8, LinkName}, + handle = Handle = ?UINT(HandleInt), + source = Source = #'v1_0.source'{address = {utf8, ?MANAGEMENT_NODE_ADDRESS}}, + target = Target = #'v1_0.target'{address = ClientTerminusAddress}, + rcv_settle_mode = RcvSettleMode, + max_message_size = MaybeMaxMessageSize, + properties = Properties + } = Attach, + #state{management_link_pairs = Pairs0, + outgoing_management_links = Links + } = State0) -> + ok = validate_attach(Attach), + ok = check_paired(Properties), + Pairs = case Pairs0 of + #{LinkName := #management_link_pair{ + client_terminus_address = ClientTerminusAddress, + incoming_half = H, + outgoing_half = unattached} = Pair} + when is_integer(H) -> + maps:update(LinkName, + Pair#management_link_pair{outgoing_half = HandleInt}, + Pairs0); + #{LinkName := Other} -> + protocol_error(?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + "received invalid attach ~p for management link pair ~p", + [Attach, Other]); + _ -> + maps:put(LinkName, + #management_link_pair{client_terminus_address = ClientTerminusAddress, + incoming_half = unattached, + outgoing_half = HandleInt}, + Pairs0) + end, + MaxMessageSize = max_message_size(MaybeMaxMessageSize), + Link = #management_link{name = LinkName, + delivery_count = ?INITIAL_DELIVERY_COUNT, + credit = 0, + max_message_size = MaxMessageSize}, + State = State0#state{management_link_pairs = Pairs, + outgoing_management_links = 
maps:put(HandleInt, Link, Links)}, + Reply = #'v1_0.attach'{ + name = Name, + handle = Handle, + role = ?AMQP_ROLE_SENDER, + snd_settle_mode = ?V_1_0_SENDER_SETTLE_MODE_SETTLED, + rcv_settle_mode = RcvSettleMode, + source = Source, + target = Target, + initial_delivery_count = ?UINT(?INITIAL_DELIVERY_COUNT), + %% Echo back that we will respect the client's requested max-message-size. + max_message_size = MaybeMaxMessageSize, + properties = Properties}, + reply0(Reply, State); + +handle_control(#'v1_0.attach'{role = ?AMQP_ROLE_SENDER, + name = LinkName, + handle = Handle = ?UINT(HandleInt), + source = Source, + snd_settle_mode = SndSettleMode, + target = Target, + initial_delivery_count = DeliveryCount = ?UINT(DeliveryCountInt) + } = Attach, + State0 = #state{incoming_links = IncomingLinks0, + permission_cache = PermCache0, + cfg = #cfg{max_link_credit = MaxLinkCredit, + vhost = Vhost, + user = User}}) -> + ok = validate_attach(Attach), + case ensure_target(Target, Vhost, User, PermCache0) of + {ok, Exchange, RoutingKey, QNameBin, PermCache} -> + MaxMessageSize = persistent_term:get(max_message_size), + IncomingLink = #incoming_link{ + exchange = Exchange, + routing_key = RoutingKey, + queue_name_bin = QNameBin, + max_message_size = MaxMessageSize, + delivery_count = DeliveryCountInt, + credit = MaxLinkCredit}, + _Outcomes = outcomes(Source), + Reply = #'v1_0.attach'{ + name = LinkName, + handle = Handle, + source = Source, + snd_settle_mode = SndSettleMode, + rcv_settle_mode = ?V_1_0_RECEIVER_SETTLE_MODE_FIRST, + target = Target, + %% We are the receiver. + role = ?AMQP_ROLE_RECEIVER, + max_message_size = {ulong, MaxMessageSize}}, + Flow = #'v1_0.flow'{handle = Handle, + delivery_count = DeliveryCount, + link_credit = ?UINT(MaxLinkCredit)}, + %%TODO check that handle is not in use for any other open links. + %%"The handle MUST NOT be used for other open links. An attempt to attach + %% using a handle which is already associated with a link MUST be responded to + %% with an immediate close carrying a handle-in-use session-error." + IncomingLinks = IncomingLinks0#{HandleInt => IncomingLink}, + State = State0#state{incoming_links = IncomingLinks, + permission_cache = PermCache}, + rabbit_global_counters:publisher_created(?PROTOCOL), + reply0([Reply, Flow], State); + {error, Reason} -> + protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD, + "Attach rejected: ~tp", + [Reason]) + end; + +handle_control(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, + name = LinkName, + handle = Handle = ?UINT(HandleInt), + source = Source, + snd_settle_mode = SndSettleMode, + rcv_settle_mode = RcvSettleMode, + max_message_size = MaybeMaxMessageSize} = Attach, + State0 = #state{queue_states = QStates0, + outgoing_links = OutgoingLinks0, + permission_cache = PermCache0, + topic_permission_cache = TopicPermCache0, + cfg = #cfg{vhost = Vhost, + user = User = #user{username = Username}, + reader_pid = ReaderPid}}) -> + ok = validate_attach(Attach), + {SndSettled, EffectiveSndSettleMode} = + case SndSettleMode of + ?V_1_0_SENDER_SETTLE_MODE_SETTLED -> + {true, SndSettleMode}; + _ -> + %% In the future, we might want to support sender settle + %% mode mixed where we would expect a settlement from the + %% client only for durable messages. 
+ {false, ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED} + end, + case ensure_source(Source, Vhost, User, PermCache0, TopicPermCache0) of + {error, Reason} -> + protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD, "Attach rejected: ~tp", [Reason]); + {ok, QName = #resource{name = QNameBin}, PermCache1, TopicPermCache} -> + PermCache = check_resource_access(QName, read, User, PermCache1), + case rabbit_amqqueue:with( + QName, + fun(Q) -> + try rabbit_amqqueue:check_exclusive_access(Q, ReaderPid) + catch exit:#amqp_error{name = resource_locked} -> + %% An exclusive queue can only be consumed from by its declaring connection. + protocol_error( + ?V_1_0_AMQP_ERROR_RESOURCE_LOCKED, + "cannot obtain exclusive access to locked ~s", + [rabbit_misc:rs(QName)]) + end, + QType = amqqueue:get_type(Q), + %% Whether credit API v1 or v2 is used is decided only here at link attachment time. + %% This decision applies to the whole life time of the link. + %% This means even when feature flag rabbitmq_4.0.0 will be enabled later, this consumer will + %% continue to use credit API v1. This is the safest and easiest solution avoiding + %% transferring link flow control state (the delivery-count) at runtime from this session + %% process to the queue process. + %% Eventually, after feature flag rabbitmq_4.0.0 gets enabled and a subsequent rolling upgrade, + %% all consumers will use credit API v2. + %% Streams always use credit API v2 since the stream client (rabbit_stream_queue) holds the link + %% flow control state. Hence, credit API mixed version isn't an issue for streams. + {CreditApiVsn, Mode, DeliveryCount, ClientFlowCtl, + QueueFlowCtl, CreditReqInFlight, StashedCreditReq} = + case rabbit_feature_flags:is_enabled('rabbitmq_4.0.0') orelse + QType =:= rabbit_stream_queue of + true -> + {2, + {credited, ?INITIAL_DELIVERY_COUNT}, + credit_api_v2, + #client_flow_ctl{delivery_count = ?INITIAL_DELIVERY_COUNT, + credit = 0, + echo = false}, + #queue_flow_ctl{delivery_count = ?INITIAL_DELIVERY_COUNT, + credit = 0, + drain = false}, + false, + none}; + false -> + {1, + {credited, credit_api_v1}, + ?INITIAL_DELIVERY_COUNT, + credit_api_v1, + credit_api_v1, + credit_api_v1, + credit_api_v1} + end, + Spec = #{no_ack => SndSettled, + channel_pid => self(), + limiter_pid => none, + limiter_active => false, + mode => Mode, + consumer_tag => handle_to_ctag(HandleInt), + exclusive_consume => false, + args => consumer_arguments(Attach), + ok_msg => undefined, + acting_user => Username}, + case rabbit_queue_type:consume(Q, Spec, QStates0) of + {ok, QStates} -> + A = #'v1_0.attach'{ + name = LinkName, + handle = Handle, + initial_delivery_count = ?UINT(?INITIAL_DELIVERY_COUNT), + snd_settle_mode = EffectiveSndSettleMode, + rcv_settle_mode = RcvSettleMode, + %% The queue process monitors our session process. When our session process + %% terminates (abnormally) any messages checked out to our session process + %% will be requeued. That's why the we only support RELEASED as the default outcome. + source = Source#'v1_0.source'{ + default_outcome = #'v1_0.released'{}, + outcomes = outcomes(Source)}, + role = ?AMQP_ROLE_SENDER, + %% Echo back that we will respect the client's requested max-message-size. 
+ max_message_size = MaybeMaxMessageSize}, + MaxMessageSize = max_message_size(MaybeMaxMessageSize), + Link = #outgoing_link{ + queue_name = queue_resource(Vhost, QNameBin), + queue_type = QType, + send_settled = SndSettled, + max_message_size = MaxMessageSize, + credit_api_version = CreditApiVsn, + delivery_count = DeliveryCount, + client_flow_ctl = ClientFlowCtl, + queue_flow_ctl = QueueFlowCtl, + at_least_one_credit_req_in_flight = CreditReqInFlight, + stashed_credit_req = StashedCreditReq}, + OutgoingLinks = OutgoingLinks0#{HandleInt => Link}, + State1 = State0#state{queue_states = QStates, + outgoing_links = OutgoingLinks, + permission_cache = PermCache, + topic_permission_cache = TopicPermCache}, + rabbit_global_counters:consumer_created(?PROTOCOL), + {ok, [A], State1}; + {error, Reason} -> + protocol_error( + ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Consuming from ~s failed: ~tp", + [rabbit_misc:rs(QName), Reason]); + {protocol_error, _Type, Reason, Args} -> + protocol_error( + ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + Reason, Args) + end + end) of + {ok, Reply, State} -> + reply0(Reply, State); + {error, Reason} -> + protocol_error( + ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Could not operate on ~s: ~tp", + [rabbit_misc:rs(QName), Reason]) + end + end; + +handle_control({Performative = #'v1_0.transfer'{handle = ?UINT(Handle)}, Payload}, + State0 = #state{incoming_links = IncomingLinks}) -> + {Flows, State1} = session_flow_control_received_transfer(State0), + + {Reply, State} = + case IncomingLinks of + #{Handle := Link0} -> + case incoming_link_transfer(Performative, Payload, Link0, State1) of + {ok, Reply0, Link, State2} -> + {Reply0, State2#state{incoming_links = IncomingLinks#{Handle := Link}}}; + {error, Reply0} -> + %% "When an error occurs at a link endpoint, the endpoint MUST be detached + %% with appropriate error information supplied in the error field of the + %% detach frame. The link endpoint MUST then be destroyed." [2.6.5] + {Reply0, State1#state{incoming_links = maps:remove(Handle, IncomingLinks)}} + end; + _ -> + incoming_mgmt_link_transfer(Performative, Payload, State1) + end, + reply0(Reply ++ Flows, State); + + +%% Although the AMQP message format [3.2] requires a body, it is valid to send a transfer frame without payload. +%% For example, when a large multi-transfer message is streamed using the ProtonJ2 client, the client could send +%% a final #'v1_0.transfer'{more=false} frame without a payload. +handle_control(Performative = #'v1_0.transfer'{}, State) -> + handle_control({Performative, <<>>}, State); + +%% Flow control. These frames come with two pieces of information: +%% the session window, and optionally, credit for a particular link. +%% We'll deal with each of them separately. +handle_control(#'v1_0.flow'{handle = Handle} = Flow, + #state{incoming_links = IncomingLinks, + outgoing_links = OutgoingLinks, + incoming_management_links = IncomingMgmtLinks, + outgoing_management_links = OutgoingMgmtLinks + } = State0) -> + State = session_flow_control_received_flow(Flow, State0), + S = case Handle of + undefined -> + %% "If not set, the flow frame is carrying only information + %% pertaining to the session endpoint." [2.7.4] + State; + ?UINT(HandleInt) -> + %% "If set, indicates that the flow frame carries flow state information + %% for the local link endpoint associated with the given handle."
[2.7.4] + case OutgoingLinks of + #{HandleInt := OutgoingLink} -> + handle_outgoing_link_flow_control(OutgoingLink, Flow, State); + _ -> + case OutgoingMgmtLinks of + #{HandleInt := OutgoingMgmtLink} -> + handle_outgoing_mgmt_link_flow_control(OutgoingMgmtLink, Flow, State); + _ when is_map_key(HandleInt, IncomingLinks) orelse + is_map_key(HandleInt, IncomingMgmtLinks) -> + %% We're being told about available messages at the sender. + State; + _ -> + %% "If set to a handle that is not currently associated with + %% an attached link, the recipient MUST respond by ending the + %% session with an unattached-handle session error." [2.7.4] + rabbit_log:warning( + "Received Flow frame for unknown link handle: ~tp", [Flow]), + protocol_error( + ?V_1_0_SESSION_ERROR_UNATTACHED_HANDLE, + "Unattached link handle: ~b", [HandleInt]) + end + end + end, + {noreply, S}; + +handle_control(Detach = #'v1_0.detach'{handle = ?UINT(HandleInt)}, + State0 = #state{incoming_links = IncomingLinks, + outgoing_links = OutgoingLinks0, + outgoing_unsettled_map = Unsettled0, + outgoing_pending = Pending0, + queue_states = QStates0, + cfg = #cfg{user = #user{username = Username}}}) -> + {OutgoingLinks, Unsettled, Pending, QStates} = + case maps:take(HandleInt, OutgoingLinks0) of + {#outgoing_link{queue_name = QName}, OutgoingLinks1} -> + Ctag = handle_to_ctag(HandleInt), + {Unsettled1, Pending1} = remove_outgoing_link(Ctag, Unsettled0, Pending0), + case rabbit_amqqueue:lookup(QName) of + {ok, Q} -> + Spec = #{consumer_tag => Ctag, + reason => remove, + user => Username}, + case rabbit_queue_type:cancel(Q, Spec, QStates0) of + {ok, QStates1} -> + {OutgoingLinks1, Unsettled1, Pending1, QStates1}; + {error, Reason} -> + protocol_error( + ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Failed to remove consumer from ~s: ~tp", + [rabbit_misc:rs(amqqueue:get_name(Q)), Reason]) + end; + {error, not_found} -> + {OutgoingLinks1, Unsettled1, Pending1, QStates0} + end; + error -> + {OutgoingLinks0, Unsettled0, Pending0, QStates0} + end, + + State1 = State0#state{incoming_links = maps:remove(HandleInt, IncomingLinks), + outgoing_links = OutgoingLinks, + outgoing_unsettled_map = Unsettled, + outgoing_pending = Pending, + queue_states = QStates}, + State = maybe_detach_mgmt_link(HandleInt, State1), + maybe_detach_reply(Detach, State, State0), + publisher_or_consumer_deleted(State, State0), + {noreply, State}; + +handle_control(#'v1_0.end'{}, + State0 = #state{cfg = #cfg{writer_pid = WriterPid, + channel_num = Ch}}) -> + State = send_delivery_state_changes(State0), + ok = try rabbit_amqp_writer:send_command_sync(WriterPid, Ch, #'v1_0.end'{}) + catch exit:{Reason, {gen_server, call, _ArgList}} + when Reason =:= shutdown orelse + Reason =:= noproc -> + %% AMQP connection and therefore the writer process got already terminated + %% before we had the chance to synchronously end the session. + ok + end, + {stop, normal, State}; + +handle_control(#'v1_0.disposition'{role = ?AMQP_ROLE_RECEIVER, + first = ?UINT(First), + last = Last0, + state = Outcome, + settled = DispositionSettled} = Disposition, + #state{outgoing_unsettled_map = UnsettledMap0, + queue_states = QStates0} = State0) -> + Last = case Last0 of + ?UINT(L) -> + L; + undefined -> + %% "If not set, this is taken to be the same as first." 
[2.7.6] + First + end, + UnsettledMapSize = map_size(UnsettledMap0), + case UnsettledMapSize of + 0 -> + {noreply, State0}; + _ -> + DispositionRangeSize = diff(Last, First) + 1, + {Settled, UnsettledMap} = + case DispositionRangeSize =< UnsettledMapSize of + true -> + %% It is cheaper to iterate over the range of settled delivery IDs. + serial_number:foldl(fun settle_delivery_id/2, + {#{}, UnsettledMap0}, + First, Last); + false -> + %% It is cheaper to iterate over the outgoing unsettled map. + Iter = maps:iterator(UnsettledMap0, + fun(D1, D2) -> compare(D1, D2) =/= greater end), + {Settled0, UnsettledList} = + maps:fold( + fun (DeliveryId, + #outgoing_unsettled{queue_name = QName, + consumer_tag = Ctag, + msg_id = MsgId} = Unsettled, + {SettledAcc, UnsettledAcc}) -> + case serial_number:in_range(DeliveryId, First, Last) of + true -> + SettledAcc1 = maps_update_with( + {QName, Ctag}, + fun(MsgIds) -> [MsgId | MsgIds] end, + [MsgId], + SettledAcc), + {SettledAcc1, UnsettledAcc}; + false -> + {SettledAcc, [{DeliveryId, Unsettled} | UnsettledAcc]} + end + end, + {#{}, []}, Iter), + {Settled0, maps:from_list(UnsettledList)} + end, + + SettleOp = settle_op_from_outcome(Outcome), + {QStates, Actions} = + maps:fold( + fun({QName, Ctag}, MsgIdsRev, {QS0, ActionsAcc}) -> + MsgIds = lists:reverse(MsgIdsRev), + case rabbit_queue_type:settle(QName, SettleOp, Ctag, MsgIds, QS0) of + {ok, QS, Actions0} -> + messages_acknowledged(SettleOp, QName, QS, MsgIds), + {QS, ActionsAcc ++ Actions0}; + {protocol_error, _ErrorType, Reason, ReasonArgs} -> + protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + Reason, ReasonArgs) + end + end, {QStates0, []}, Settled), + + State1 = State0#state{outgoing_unsettled_map = UnsettledMap, + queue_states = QStates}, + Reply = case DispositionSettled of + true -> []; + false -> [Disposition#'v1_0.disposition'{settled = true, + role = ?AMQP_ROLE_SENDER}] + end, + State = handle_queue_actions(Actions, State1), + reply0(Reply, State) + end; + +handle_control(Frame, _State) -> + protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Unexpected frame ~tp", + [amqp10_framing:pprint(Frame)]). + +send_pending(#state{remote_incoming_window = RemoteIncomingWindow, + outgoing_pending = Buf0 + } = State) -> + case queue:out(Buf0) of + {empty, _} -> + State; + {{value, CreditReply}, Buf} + when element(1, CreditReply) =:= credit_reply -> + State1 = State#state{outgoing_pending = Buf}, + State2 = handle_credit_reply(CreditReply, State1), + send_pending(State2); + {{value, #'v1_0.flow'{} = Flow0}, Buf} -> + #cfg{writer_pid = WriterPid, + channel_num = Ch} = State#state.cfg, + State1 = State#state{outgoing_pending = Buf}, + Flow = session_flow_fields(Flow0, State1), + rabbit_amqp_writer:send_command(WriterPid, Ch, Flow), + send_pending(State1); + {{value, Delivery}, Buf} -> + case RemoteIncomingWindow =:= 0 orelse + credit_flow:blocked() of + true -> + State; + false -> + {NewRemoteIncomingWindow, State1} = + send_pending_delivery(Delivery, Buf, State), + NumTransfersSent = RemoteIncomingWindow - NewRemoteIncomingWindow, + State2 = session_flow_control_sent_transfers(NumTransfersSent, State1), + %% Recurse to possibly send FLOW frames. + send_pending(State2) + end + end. 
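+%% For illustration: with remote_incoming_window = 3 and five transfer frames buffered in
+%% outgoing_pending, send_pending/1 sends three of them, the remote window drops to 0, and the
+%% remaining two frames stay buffered until the client reopens its window with a new FLOW
+%% (processed in session_flow_control_received_flow/2).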
+ +handle_credit_reply(Action = {credit_reply, Ctag, _DeliveryCount, _Credit, _Available, _Drain}, + State = #state{outgoing_links = OutgoingLinks}) -> + Handle = ctag_to_handle(Ctag), + case OutgoingLinks of + #{Handle := Link} -> + handle_credit_reply0(Action, Handle, Link, State); + _ -> + %% Ignore credit reply for a detached link. + State + end. + +handle_credit_reply0( + {credit_reply, Ctag, DeliveryCount, Credit, Available, _Drain = false}, + Handle, + #outgoing_link{ + client_flow_ctl = #client_flow_ctl{ + delivery_count = CDeliveryCount, + credit = CCredit, + echo = CEcho + }, + queue_flow_ctl = #queue_flow_ctl{ + delivery_count = QDeliveryCount + } = QFC0, + stashed_credit_req = StashedCreditReq + } = Link0, + #state{outgoing_links = OutgoingLinks, + queue_states = QStates0 + } = S0) -> + + %% Assertion: Our (receiver) delivery-count should be always + %% in sync with the delivery-count of the sending queue. + QDeliveryCount = DeliveryCount, + + case StashedCreditReq of + #credit_req{} -> + %% We prioritise the stashed client request over finishing the current + %% top-up rounds because the latest link state from the client applies. + S = pop_credit_req(Handle, Ctag, Link0, S0), + echo(CEcho, Handle, CDeliveryCount, CCredit, Available, S), + S; + none when Credit =:= 0 andalso + CCredit > 0 -> + QName = Link0#outgoing_link.queue_name, + %% Provide queue next batch of credits. + CappedCredit = cap_credit(CCredit, S0#state.cfg#cfg.max_queue_credit), + {ok, QStates, Actions} = + rabbit_queue_type:credit( + QName, Ctag, DeliveryCount, CappedCredit, false, QStates0), + Link = Link0#outgoing_link{ + queue_flow_ctl = QFC0#queue_flow_ctl{credit = CappedCredit}, + at_least_one_credit_req_in_flight = true}, + S = S0#state{queue_states = QStates, + outgoing_links = OutgoingLinks#{Handle := Link}}, + handle_queue_actions(Actions, S); + none -> + %% Although we (the receiver) usually determine link credit, we set here + %% our link credit to what the queue says our link credit is (which is safer + %% in case credit requests got applied out of order in quorum queues). + %% This should be fine given that we asserted earlier that our delivery-count is + %% in sync with the delivery-count of the sending queue. + QFC = QFC0#queue_flow_ctl{credit = Credit}, + Link = Link0#outgoing_link{ + queue_flow_ctl = QFC, + at_least_one_credit_req_in_flight = false}, + S = S0#state{outgoing_links = OutgoingLinks#{Handle := Link}}, + echo(CEcho, Handle, CDeliveryCount, CCredit, Available, S), + S + end; +handle_credit_reply0( + {credit_reply, Ctag, DeliveryCount, Credit, Available, _Drain = true}, + Handle, + Link0 = #outgoing_link{ + queue_name = QName, + client_flow_ctl = #client_flow_ctl{ + delivery_count = CDeliveryCount0, + credit = CCredit + } = CFC, + queue_flow_ctl = #queue_flow_ctl{ + delivery_count = QDeliveryCount0 + } = QFC, + stashed_credit_req = StashedCreditReq}, + S0 = #state{cfg = #cfg{writer_pid = Writer, + channel_num = ChanNum, + max_queue_credit = MaxQueueCredit}, + outgoing_links = OutgoingLinks, + queue_states = QStates0}) -> + %% If the queue sent us a drain credit_reply, + %% the queue must have consumed all our granted credit. + 0 = Credit, + + case DeliveryCount =:= QDeliveryCount0 andalso + CCredit > 0 of + true -> + %% We're in drain mode. The queue did not advance its delivery-count which means + %% it might still have messages available for us. The client also desires more messages. + %% Therefore, we do the next round of credit top-up. 
We prioritise finishing + %% the current drain credit top-up rounds over a stashed credit request because + %% this is easier to reason about and the queue will reply promptly meaning + %% the stashed request will be processed soon enough. + CappedCredit = cap_credit(CCredit, MaxQueueCredit), + {ok, QStates, Actions} = rabbit_queue_type:credit( + QName, Ctag, DeliveryCount, + CappedCredit, true, QStates0), + Link = Link0#outgoing_link{ + queue_flow_ctl = QFC#queue_flow_ctl{credit = CappedCredit}, + at_least_one_credit_req_in_flight = true}, + S = S0#state{queue_states = QStates, + outgoing_links = OutgoingLinks#{Handle := Link}}, + handle_queue_actions(Actions, S); + false -> + case compare(DeliveryCount, QDeliveryCount0) of + equal -> ok; + greater -> ok; %% the sending queue advanced its delivery-count + less -> error({unexpected_delivery_count, DeliveryCount, QDeliveryCount0}) + end, + + %% We're in drain mode. + %% The queue either advanced its delivery-count which means it has + %% no more messages available for us, or the client does not desire more messages. + %% Therefore, we're done with draining and we "the sender will (after sending + %% all available messages) advance the delivery-count as much as possible, + %% consuming all link-credit, and send the flow state to the receiver." + CDeliveryCount = add(CDeliveryCount0, CCredit), + Flow0 = #'v1_0.flow'{handle = ?UINT(Handle), + delivery_count = ?UINT(CDeliveryCount), + link_credit = ?UINT(0), + drain = true, + available = ?UINT(Available)}, + Flow = session_flow_fields(Flow0, S0), + rabbit_amqp_writer:send_command(Writer, ChanNum, Flow), + Link = Link0#outgoing_link{ + client_flow_ctl = CFC#client_flow_ctl{ + delivery_count = CDeliveryCount, + credit = 0}, + queue_flow_ctl = QFC#queue_flow_ctl{ + delivery_count = DeliveryCount, + credit = 0, + drain = false}, + at_least_one_credit_req_in_flight = false + }, + S = S0#state{outgoing_links = OutgoingLinks#{Handle := Link}}, + case StashedCreditReq of + none -> + S; + #credit_req{} -> + pop_credit_req(Handle, Ctag, Link, S) + end + end. + +pop_credit_req( + Handle, Ctag, + Link0 = #outgoing_link{ + queue_name = QName, + client_flow_ctl = #client_flow_ctl{ + delivery_count = CDeliveryCount + } = CFC, + queue_flow_ctl = #queue_flow_ctl{ + delivery_count = QDeliveryCount + } = QFC, + stashed_credit_req = #credit_req{ + delivery_count = DeliveryCountRcv, + credit = LinkCreditRcv, + drain = Drain, + echo = Echo + }}, + S0 = #state{cfg = #cfg{max_queue_credit = MaxQueueCredit}, + outgoing_links = OutgoingLinks, + queue_states = QStates0}) -> + LinkCreditSnd = amqp10_util:link_credit_snd( + DeliveryCountRcv, LinkCreditRcv, CDeliveryCount), + CappedCredit = cap_credit(LinkCreditSnd, MaxQueueCredit), + {ok, QStates, Actions} = rabbit_queue_type:credit( + QName, Ctag, QDeliveryCount, + CappedCredit, Drain, QStates0), + Link = Link0#outgoing_link{ + client_flow_ctl = CFC#client_flow_ctl{ + credit = LinkCreditSnd, + echo = Echo}, + queue_flow_ctl = QFC#queue_flow_ctl{ + credit = CappedCredit, + drain = Drain}, + at_least_one_credit_req_in_flight = true, + stashed_credit_req = none + }, + S = S0#state{queue_states = QStates, + outgoing_links = OutgoingLinks#{Handle := Link}}, + handle_queue_actions(Actions, S). 
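+%% For illustration of the link-credit arithmetic above: if the receiver sent a FLOW with
+%% delivery-count = 5 and link-credit = 10 while our delivery-count has meanwhile advanced to 8,
+%% then link_credit_snd = 5 + 10 - 8 = 7, i.e. we may send 7 more messages on this link
+%% (section 2.6.7). That value is capped by max_queue_credit before being forwarded to the
+%% queue via rabbit_queue_type:credit/6.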
+ +echo(Echo, HandleInt, DeliveryCount, LinkCredit, Available, State) -> + case Echo of + true -> + Flow0 = #'v1_0.flow'{handle = ?UINT(HandleInt), + delivery_count = ?UINT(DeliveryCount), + link_credit = ?UINT(LinkCredit), + available = ?UINT(Available)}, + Flow = session_flow_fields(Flow0, State), + #cfg{writer_pid = Writer, + channel_num = Channel} = State#state.cfg, + rabbit_amqp_writer:send_command(Writer, Channel, Flow); + false -> + ok + end. + +send_pending_delivery(#pending_delivery{ + frames = Frames, + queue_pid = QPid, + outgoing_unsettled = #outgoing_unsettled{consumer_tag = Ctag, + queue_name = QName} + } = Pending, + Buf0, + #state{remote_incoming_window = Space, + outgoing_links = OutgoingLinks, + queue_states = QStates, + cfg = #cfg{writer_pid = WriterPid, + channel_num = Ch} + } = State0) -> + Handle = ctag_to_handle(Ctag), + case is_map_key(Handle, OutgoingLinks) of + true -> + SendFun = case QPid of + credit_api_v2 -> + send_fun(WriterPid, Ch); + _ -> + case rabbit_queue_type:module(QName, QStates) of + {ok, rabbit_classic_queue} -> + %% Classic queue client and classic queue process that + %% communicate via credit API v1 use RabbitMQ internal + %% credit flow control. + fun(Transfer, Sections) -> + rabbit_amqp_writer:send_command_and_notify( + WriterPid, QPid, Ch, Transfer, Sections) + end; + {ok, _QType} -> + send_fun(WriterPid, Ch) + end + end, + case send_frames(SendFun, Frames, Space) of + {sent_all, SpaceLeft} -> + State1 = State0#state{outgoing_pending = Buf0}, + State = sent_pending_delivery(Pending, Handle, State1), + {SpaceLeft, State}; + {sent_some, SpaceLeft, Rest} -> + Buf = queue:in_r(Pending#pending_delivery{frames = Rest}, Buf0), + State = State0#state{outgoing_pending = Buf}, + {SpaceLeft, State} + end; + false -> + %% Link got detached. Either the client closed the link in which case the queue + %% already requeued all checked out messages or the queue doesn't exist anymore + %% in which case there is no point in requeuing this message. + %% Therefore, ignore (drop) this delivery. + State = State0#state{outgoing_pending = Buf0}, + {Space, State} + end; +send_pending_delivery(#pending_management_delivery{frames = Frames} = Pending, + Buf0, + #state{remote_incoming_window = Space, + cfg = #cfg{writer_pid = WriterPid, + channel_num = Ch} + } = State0) -> + SendFun = send_fun(WriterPid, Ch), + case send_frames(SendFun, Frames, Space) of + {sent_all, SpaceLeft} -> + State = State0#state{outgoing_pending = Buf0}, + {SpaceLeft, State}; + {sent_some, SpaceLeft, Rest} -> + Buf = queue:in_r(Pending#pending_management_delivery{frames = Rest}, Buf0), + State = State0#state{outgoing_pending = Buf}, + {SpaceLeft, State} + end. + +send_frames(_, [], SpaceLeft) -> + {sent_all, SpaceLeft}; +send_frames(_, Rest, SpaceLeft = 0) -> + {sent_some, SpaceLeft, Rest}; +send_frames(SendFun, [[Transfer, Sections] | Rest] = Frames, SpaceLeft) -> + case SendFun(Transfer, Sections) of + ok -> + send_frames(SendFun, Rest, SpaceLeft - 1); + {error, blocked} -> + {sent_some, SpaceLeft, Frames} + end. + +send_fun(WriterPid, Ch) -> + fun(Transfer, Sections) -> + rabbit_amqp_writer:send_command(WriterPid, Ch, Transfer, Sections) + end. 
+ +sent_pending_delivery( + Pending = #pending_delivery{ + outgoing_unsettled = #outgoing_unsettled{ + consumer_tag = Ctag, + queue_name = QName}}, + Handle, + S0 = #state{outgoing_links = OutgoingLinks0, + queue_states = QStates0}) -> + + #outgoing_link{ + credit_api_version = CreditApiVsn, + client_flow_ctl = CFC0, + queue_flow_ctl = QFC0, + at_least_one_credit_req_in_flight = CreditReqInFlight0 + } = Link0 = maps:get(Handle, OutgoingLinks0), + + S = case CreditApiVsn of + 1 -> + S0; + 2 -> + #client_flow_ctl{ + delivery_count = CDeliveryCount0, + credit = CCredit0 + } = CFC0, + #queue_flow_ctl{ + delivery_count = QDeliveryCount0, + credit = QCredit0 + } = QFC0, + + CDeliveryCount = add(CDeliveryCount0, 1), + %% Even though the spec mandates + %% "If the link-credit is less than or equal to zero, i.e., + %% the delivery-count is the same as or greater than the + %% delivery-limit, a sender MUST NOT send more messages." + %% we forced the message through to be sent to the client. + %% Due to our dual link approach, we don't want to buffer any + %% messages in the session if the receiving client dynamically + %% decreased link credit. The alternative is to requeue messages. + %% "the receiver MAY either handle the excess messages normally + %% or detach the link with a transfer-limit-exceeded error code." + CCredit = max(0, CCredit0 - 1), + + QDeliveryCount = add(QDeliveryCount0, 1), + QCredit1 = max(0, QCredit0 - 1), + + {QCredit, CreditReqInFlight, QStates, Actions} = + case QCredit1 =:= 0 andalso + CCredit > 0 andalso + not CreditReqInFlight0 of + true -> + %% assertion + none = Link0#outgoing_link.stashed_credit_req, + %% Provide queue next batch of credits. + CappedCredit = cap_credit(CCredit, + S0#state.cfg#cfg.max_queue_credit), + {ok, QStates1, Actions0} = + rabbit_queue_type:credit( + QName, Ctag, QDeliveryCount, CappedCredit, + QFC0#queue_flow_ctl.drain, QStates0), + {CappedCredit, true, QStates1, Actions0}; + false -> + {QCredit1, CreditReqInFlight0, QStates0, []} + end, + + CFC = CFC0#client_flow_ctl{ + delivery_count = CDeliveryCount, + credit = CCredit}, + QFC = QFC0#queue_flow_ctl{ + delivery_count = QDeliveryCount, + credit = QCredit}, + Link = Link0#outgoing_link{ + client_flow_ctl = CFC, + queue_flow_ctl = QFC, + at_least_one_credit_req_in_flight = CreditReqInFlight}, + OutgoingLinks = OutgoingLinks0#{Handle := Link}, + S1 = S0#state{outgoing_links = OutgoingLinks, + queue_states = QStates}, + handle_queue_actions(Actions, S1) + end, + record_outgoing_unsettled(Pending, S). + +record_outgoing_unsettled(#pending_delivery{queue_ack_required = true, + delivery_id = DeliveryId, + outgoing_unsettled = Unsettled}, + #state{outgoing_unsettled_map = Map0} = State) -> + %% Record by DeliveryId such that we will ack this message to the queue + %% once we receive the DISPOSITION from the AMQP client. + Map = Map0#{DeliveryId => Unsettled}, + State#state{outgoing_unsettled_map = Map}; +record_outgoing_unsettled(#pending_delivery{queue_ack_required = false}, State) -> + %% => 'snd-settle-mode' at attachment must have been 'settled'. + %% => 'settled' field in TRANSFER must have been 'true'. + %% => AMQP client won't ack this message. + %% Also, queue client already acked to queue on behalf of us. + State. + +reply0([], State) -> + {noreply, State}; +reply0(Reply, State) -> + {reply, session_flow_fields(Reply, State), State}. 
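+%% For illustration of sent_pending_delivery/3: credit granted by the receiving client
+%% (client_flow_ctl) and credit we grant to the sending queue (queue_flow_ctl) are tracked
+%% separately. E.g. if the client granted 5,000 credits and max_queue_credit is 256, we only
+%% ever ask the queue for up to 256 messages at a time; once the queue's credit hits 0 and the
+%% client still has credit left (and no credit request is already in flight), we request the
+%% next capped batch.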
+ +%% Implements section "receiving a transfer" in 2.5.6 +session_flow_control_received_transfer( + #state{next_incoming_id = NextIncomingId, + incoming_window = InWindow0, + remote_outgoing_window = RemoteOutgoingWindow, + cfg = #cfg{incoming_window_margin = Margin, + resource_alarms = Alarms, + max_incoming_window = MaxIncomingWindow} + } = State) -> + InWindow1 = InWindow0 - 1, + case InWindow1 < -Margin of + true -> + protocol_error( + ?V_1_0_SESSION_ERROR_WINDOW_VIOLATION, + "incoming window violation (tolerated excess transfers: ~b)", + [Margin]); + false -> + ok + end, + {Flows, InWindow} = case InWindow1 =< (MaxIncomingWindow div 2) andalso + sets:is_empty(Alarms) of + true -> + %% We've reached the halfway point and there is no + %% disk or memory alarm, so open the window. + {[#'v1_0.flow'{}], MaxIncomingWindow}; + false -> + {[], InWindow1} + end, + {Flows, State#state{incoming_window = InWindow, + next_incoming_id = add(NextIncomingId, 1), + remote_outgoing_window = RemoteOutgoingWindow - 1}}. + +%% Implements section "sending a transfer" in 2.5.6 +session_flow_control_sent_transfers( + NumTransfers, + #state{remote_incoming_window = RemoteIncomingWindow, + next_outgoing_id = NextOutgoingId} = State) -> + State#state{remote_incoming_window = RemoteIncomingWindow - NumTransfers, + next_outgoing_id = add(NextOutgoingId, NumTransfers)}. + +settle_delivery_id(Current, {Settled, Unsettled} = Acc) -> + case maps:take(Current, Unsettled) of + {#outgoing_unsettled{queue_name = QName, + consumer_tag = Ctag, + msg_id = MsgId}, Unsettled1} -> + Settled1 = maps_update_with( + {QName, Ctag}, + fun(MsgIds) -> [MsgId | MsgIds] end, + [MsgId], + Settled), + {Settled1, Unsettled1}; + error -> + Acc + end. + +settle_op_from_outcome(#'v1_0.accepted'{}) -> + complete; +settle_op_from_outcome(#'v1_0.rejected'{}) -> + discard; +settle_op_from_outcome(#'v1_0.released'{}) -> + requeue; + +%% Not all queue types support the modified outcome fields correctly. +%% However, we still allow the client to settle with the modified outcome +%% because some client libraries such as Apache QPid make use of it: +%% https://github.com/apache/qpid-jms/blob/90eb60f59cb59b7b9ad8363ee8a843d6903b8e77/qpid-jms-client/src/main/java/org/apache/qpid/jms/JmsMessageConsumer.java#L464 +%% In such cases, it's better when RabbitMQ does not end the session. +%% See https://github.com/rabbitmq/rabbitmq-server/issues/6121 +settle_op_from_outcome(#'v1_0.modified'{delivery_failed = DelFailed, + undeliverable_here = UndelHere, + message_annotations = Anns0}) -> + Anns = case Anns0 of + #'v1_0.message_annotations'{content = C} -> + Anns1 = lists:map(fun({{symbol, K}, V}) -> + {K, unwrap(V)} + end, C), + maps:from_list(Anns1); + _ -> + #{} + end, + {modify, + default(DelFailed, false), + default(UndelHere, false), + Anns}; +settle_op_from_outcome(Outcome) -> + protocol_error( + ?V_1_0_AMQP_ERROR_INVALID_FIELD, + "Unrecognised state: ~tp in DISPOSITION", + [Outcome]). + +-spec flow({uint, link_handle()}, sequence_no(), rabbit_queue_type:credit()) -> + #'v1_0.flow'{}. +flow(Handle, DeliveryCount, LinkCredit) -> + #'v1_0.flow'{handle = Handle, + delivery_count = ?UINT(DeliveryCount), + link_credit = ?UINT(LinkCredit)}.
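+%% For illustration of session_flow_control_received_transfer/1 above: assuming
+%% max_incoming_window = 400 and no resource alarm, incoming_window shrinks by 1 per received
+%% TRANSFER; once it drops to 200 (half of the maximum) we send a FLOW that resets it to 400,
+%% so a well-behaved sender is never starved of session window.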
+ +session_flow_fields(Frames, State) + when is_list(Frames) -> + [session_flow_fields(F, State) || F <- Frames]; +session_flow_fields(Flow = #'v1_0.flow'{}, + #state{next_outgoing_id = NextOutgoingId, + next_incoming_id = NextIncomingId, + incoming_window = IncomingWindow}) -> + Flow#'v1_0.flow'{ + next_outgoing_id = ?UINT(NextOutgoingId), + outgoing_window = ?UINT_OUTGOING_WINDOW, + next_incoming_id = ?UINT(NextIncomingId), + incoming_window = ?UINT(IncomingWindow)}; +session_flow_fields(Frame, _State) -> + Frame. + +%% Implements section "receiving a flow" in 2.5.6 +session_flow_control_received_flow( + #'v1_0.flow'{next_incoming_id = FlowNextIncomingId, + incoming_window = ?UINT(FlowIncomingWindow), + next_outgoing_id = ?UINT(FlowNextOutgoingId), + outgoing_window = ?UINT(FlowOutgoingWindow)}, + #state{next_outgoing_id = NextOutgoingId} = State) -> + + Seq = case FlowNextIncomingId of + ?UINT(Id) -> + case compare(Id, NextOutgoingId) of + greater -> + protocol_error( + ?V_1_0_SESSION_ERROR_WINDOW_VIOLATION, + "next-incoming-id from FLOW (~b) leads next-outgoing-id (~b)", + [Id, NextOutgoingId]); + _ -> + Id + end; + undefined -> + %% The AMQP client might not have yet received our #begin.next_outgoing_id + ?INITIAL_OUTGOING_TRANSFER_ID + end, + + RemoteIncomingWindow0 = diff(add(Seq, FlowIncomingWindow), NextOutgoingId), + %% RemoteIncomingWindow0 can be negative, for example if we sent a TRANSFER to the + %% client between the point in time the client sent us a FLOW with updated + %% incoming_window=0 and we received that FLOW. Whether 0 or negative doesn't matter: + %% In both cases we're blocked sending more TRANSFERs to the client until it sends us + %% a new FLOW with a positive incoming_window. For better understandibility + %% across the code base, we ensure a floor of 0 here. + RemoteIncomingWindow = max(0, RemoteIncomingWindow0), + + State#state{next_incoming_id = FlowNextOutgoingId, + remote_outgoing_window = FlowOutgoingWindow, + remote_incoming_window = RemoteIncomingWindow}. + +% TODO: validate effective settle modes against +% those declared during attach + +% TODO: handle aborted transfers + +handle_queue_event({queue_event, QRef, Evt}, + #state{queue_states = QStates0} = S0) -> + case rabbit_queue_type:handle_event(QRef, Evt, QStates0) of + {ok, QStates1, Actions} -> + S = S0#state{queue_states = QStates1}, + handle_queue_actions(Actions, S); + {eol, Actions} -> + S = handle_queue_actions(Actions, S0), + S#state{stashed_eol = [QRef | S#state.stashed_eol]}; + {protocol_error, _Type, Reason, ReasonArgs} -> + protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, Reason, ReasonArgs) + end. + +handle_queue_actions(Actions, State) -> + lists:foldl( + fun ({settled, _QName, _DelIds} = Action, S = #state{stashed_settled = As}) -> + S#state{stashed_settled = [Action | As]}; + ({rejected, _QName, _DelIds} = Action, S = #state{stashed_rejected = As}) -> + S#state{stashed_rejected = [Action | As]}; + ({deliver, CTag, AckRequired, Msgs}, S0) -> + lists:foldl(fun(Msg, S) -> + handle_deliver(CTag, AckRequired, Msg, S) + end, S0, Msgs); + ({credit_reply, _Ctag, _DeliveryCount, _Credit, _Available, _Drain} = Action, + S = #state{outgoing_pending = Pending}) -> + %% credit API v2 + S#state{outgoing_pending = queue:in(Action, Pending)}; + ({credit_reply_v1, Ctag, Credit0, Available, Drain}, + S0 = #state{outgoing_links = OutgoingLinks0, + outgoing_pending = Pending}) -> + %% credit API v1 + %% Delete this branch when feature flag rabbitmq_4.0.0 becomes required. 
+              Handle = ctag_to_handle(Ctag),
+              Link = #outgoing_link{delivery_count = Count0} = maps:get(Handle, OutgoingLinks0),
+              {Count, Credit, S} = case Drain of
+                                       true ->
+                                           Count1 = add(Count0, Credit0),
+                                           OutgoingLinks = maps:update(
+                                                             Handle,
+                                                             Link#outgoing_link{delivery_count = Count1},
+                                                             OutgoingLinks0),
+                                           S1 = S0#state{outgoing_links = OutgoingLinks},
+                                           {Count1, 0, S1};
+                                       false ->
+                                           {Count0, Credit0, S0}
+                                   end,
+              Flow = #'v1_0.flow'{
+                        handle = ?UINT(Handle),
+                        delivery_count = ?UINT(Count),
+                        link_credit = ?UINT(Credit),
+                        available = ?UINT(Available),
+                        drain = Drain},
+              S#state{outgoing_pending = queue:in(Flow, Pending)};
+          ({queue_down, QName}, S = #state{stashed_down = L}) ->
+              S#state{stashed_down = [QName | L]};
+          ({Action, _QName}, S)
+            when Action =:= block orelse
+                 Action =:= unblock ->
+              %% Ignore since we rely on our own mechanism to detect if a client sends too fast
+              %% into a link: If the number of outstanding queue confirmations grows,
+              %% we won't grant new credits to publishers.
+              S
+      end, State, Actions).
+
+handle_deliver(ConsumerTag, AckRequired,
+               Msg = {QName, QPid0, MsgId, Redelivered, Mc0},
+               State = #state{outgoing_pending = Pending,
+                              outgoing_delivery_id = DeliveryId,
+                              outgoing_links = OutgoingLinks0,
+                              cfg = #cfg{outgoing_max_frame_size = MaxFrameSize,
+                                         conn_name = ConnName,
+                                         channel_num = ChannelNum,
+                                         user = #user{username = Username},
+                                         trace_state = Trace}}) ->
+    Handle = ctag_to_handle(ConsumerTag),
+    case OutgoingLinks0 of
+        #{Handle := #outgoing_link{queue_type = QType,
+                                   send_settled = SendSettled,
+                                   max_message_size = MaxMessageSize,
+                                   credit_api_version = CreditApiVsn,
+                                   delivery_count = DelCount} = Link0} ->
+            Dtag = delivery_tag(MsgId, SendSettled),
+            Transfer = #'v1_0.transfer'{
+                          handle = ?UINT(Handle),
+                          delivery_id = ?UINT(DeliveryId),
+                          delivery_tag = {binary, Dtag},
+                          message_format = ?UINT(?MESSAGE_FORMAT),
+                          settled = SendSettled},
+            Mc1 = mc:convert(mc_amqp, Mc0),
+            Mc = mc:set_annotation(redelivered, Redelivered, Mc1),
+            Sections = mc:protocol_state(Mc),
+            validate_message_size(Sections, MaxMessageSize),
+            Frames = transfer_frames(Transfer, Sections, MaxFrameSize),
+            messages_delivered(Redelivered, QType),
+            rabbit_trace:tap_out(Msg, ConnName, ChannelNum, Username, Trace),
+            {OutgoingLinks, QPid
+            } = case CreditApiVsn of
+                    2 ->
+                        {OutgoingLinks0, credit_api_v2};
+                    1 ->
+                        DelCount = Link0#outgoing_link.delivery_count,
+                        Link = Link0#outgoing_link{delivery_count = add(DelCount, 1)},
+                        OutgoingLinks1 = maps:update(Handle, Link, OutgoingLinks0),
+                        {OutgoingLinks1, QPid0}
+                end,
+            Del = #outgoing_unsettled{
+                     msg_id = MsgId,
+                     consumer_tag = ConsumerTag,
+                     queue_name = QName},
+            PendingDelivery = #pending_delivery{
+                                 frames = Frames,
+                                 queue_ack_required = AckRequired,
+                                 queue_pid = QPid,
+                                 delivery_id = DeliveryId,
+                                 outgoing_unsettled = Del},
+            State#state{outgoing_pending = queue:in(PendingDelivery, Pending),
+                        outgoing_delivery_id = add(DeliveryId, 1),
+                        outgoing_links = OutgoingLinks};
+        _ ->
+            %% TODO handle missing link -- why does the queue think it's there?
+            rabbit_log:warning(
+              "No link handle ~b exists for delivery with consumer tag ~p from queue ~tp",
+              [Handle, ConsumerTag, QName]),
+            State
+    end.
+
+%% "The delivery-tag MUST be unique amongst all deliveries that could be
+%% considered unsettled by either end of the link." [2.6.12]
+delivery_tag(MsgId, _)
+  when is_integer(MsgId) ->
+    %% We use MsgId (the consumer scoped sequence number from the queue) as
+    %% delivery-tag since delivery-tag must be unique only per link (not per session).
+    %% "A delivery-tag can be up to 32 octets of binary data." [2.8.7]
+    case MsgId =< ?UINT_MAX of
+        true -> <<MsgId:32>>;
+        false -> <<MsgId:64>>
+    end;
+delivery_tag(undefined, true) ->
+    %% Both ends of the link will always consider this message settled because
+    %% "the sender will send all deliveries settled to the receiver" [3.8.2].
+    %% Hence, the delivery tag does not have to be unique on this link.
+    %% However, the spec still mandates to send a delivery tag.
+    <<>>;
+%% Message comes from a (classic) priority queue.
+delivery_tag({Priority, undefined}, true)
+  when is_integer(Priority) ->
+    <<>>;
+delivery_tag(MsgId = {Priority, Seq}, _)
+  when is_integer(Priority) andalso
+       is_integer(Seq) ->
+    term_to_binary(MsgId).
+
+%%%%%%%%%%%%%%%%%%%%%
+%%% Incoming Link %%%
+%%%%%%%%%%%%%%%%%%%%%
+
+incoming_mgmt_link_transfer(
+  #'v1_0.transfer'{
+     settled = Settled,
+     more = More,
+     handle = IncomingHandle = ?UINT(IncomingHandleInt)},
+  Request,
+  #state{management_link_pairs = LinkPairs,
+         incoming_management_links = IncomingLinks,
+         outgoing_management_links = OutgoingLinks,
+         outgoing_pending = Pending,
+         outgoing_delivery_id = OutgoingDeliveryId,
+         permission_cache = PermCache0,
+         topic_permission_cache = TopicPermCache0,
+         cfg = #cfg{outgoing_max_frame_size = MaxFrameSize,
+                    vhost = Vhost,
+                    user = User,
+                    reader_pid = ReaderPid}
+        } = State0) ->
+    IncomingLink0 = case maps:find(IncomingHandleInt, IncomingLinks) of
+                        {ok, Link} ->
+                            Link;
+                        error ->
+                            protocol_error(
+                              ?V_1_0_SESSION_ERROR_UNATTACHED_HANDLE,
+                              "Unknown link handle: ~p", [IncomingHandleInt])
+                    end,
+    %% We only allow settled management requests
+    %% given that we are going to send a response anyway.
+    true = Settled,
+    %% In the current implementation, we disallow large incoming management request messages.
+ false = default(More, false), + #management_link{name = Name, + delivery_count = IncomingDeliveryCount0, + credit = IncomingCredit0, + max_message_size = IncomingMaxMessageSize + } = IncomingLink0, + case IncomingCredit0 > 0 of + true -> + ok; + false -> + protocol_error( + ?V_1_0_LINK_ERROR_TRANSFER_LIMIT_EXCEEDED, + "insufficient credit (~b) for management link from client to RabbitMQ", + [IncomingCredit0]) + end, + #management_link_pair{ + incoming_half = IncomingHandleInt, + outgoing_half = OutgoingHandleInt + } = maps:get(Name, LinkPairs), + OutgoingLink0 = case OutgoingHandleInt of + unattached -> + protocol_error( + ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + "received transfer on half open management link pair", []); + _ -> + maps:get(OutgoingHandleInt, OutgoingLinks) + end, + #management_link{name = Name, + delivery_count = OutgoingDeliveryCount, + credit = OutgoingCredit, + max_message_size = OutgoingMaxMessageSize} = OutgoingLink0, + case OutgoingCredit > 0 of + true -> + ok; + false -> + protocol_error( + ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + "insufficient credit (~b) for management link from RabbitMQ to client", + [OutgoingCredit]) + end, + validate_message_size(Request, IncomingMaxMessageSize), + {Response, + {PermCache, TopicPermCache}} = rabbit_amqp_management:handle_request( + Request, Vhost, User, ReaderPid, {PermCache0, TopicPermCache0}), + + Transfer = #'v1_0.transfer'{ + handle = ?UINT(OutgoingHandleInt), + delivery_id = ?UINT(OutgoingDeliveryId), + delivery_tag = {binary, <<>>}, + message_format = ?UINT(?MESSAGE_FORMAT), + settled = true}, + validate_message_size(Response, OutgoingMaxMessageSize), + Frames = transfer_frames(Transfer, Response, MaxFrameSize), + PendingDelivery = #pending_management_delivery{frames = Frames}, + IncomingDeliveryCount = add(IncomingDeliveryCount0, 1), + IncomingCredit1 = IncomingCredit0 - 1, + {IncomingCredit, Reply} = maybe_grant_mgmt_link_credit( + IncomingCredit1, IncomingDeliveryCount, IncomingHandle), + IncomingLink = IncomingLink0#management_link{delivery_count = IncomingDeliveryCount, + credit = IncomingCredit}, + OutgoingLink = OutgoingLink0#management_link{delivery_count = add(OutgoingDeliveryCount, 1), + credit = OutgoingCredit - 1}, + State = State0#state{ + outgoing_delivery_id = add(OutgoingDeliveryId, 1), + outgoing_pending = queue:in(PendingDelivery, Pending), + incoming_management_links = maps:update(IncomingHandleInt, IncomingLink, IncomingLinks), + outgoing_management_links = maps:update(OutgoingHandleInt, OutgoingLink, OutgoingLinks), + permission_cache = PermCache, + topic_permission_cache = TopicPermCache}, + {Reply, State}. + +incoming_link_transfer( + #'v1_0.transfer'{more = true, + %% "The delivery-id MUST be supplied on the first transfer of a multi-transfer delivery." + delivery_id = ?UINT(DeliveryId), + settled = Settled}, + MsgPart, + Link0 = #incoming_link{multi_transfer_msg = undefined}, + State) -> + %% This is the first transfer of a multi-transfer message. + Link = Link0#incoming_link{ + multi_transfer_msg = #multi_transfer_msg{ + payload_fragments_rev = [MsgPart], + delivery_id = DeliveryId, + %% "If not set on the first (or only) transfer for a (multi-transfer) + %% delivery, then the settled flag MUST be interpreted as being false." 
+ settled = default(Settled, false)}}, + {ok, [], Link, State}; +incoming_link_transfer( + #'v1_0.transfer'{more = true, + delivery_id = DeliveryId, + settled = Settled}, + MsgPart, + Link0 = #incoming_link{ + max_message_size = MaxMessageSize, + multi_transfer_msg = Multi = #multi_transfer_msg{ + payload_fragments_rev = PFR0, + delivery_id = FirstDeliveryId, + settled = FirstSettled}}, + State) -> + %% This is a continuation transfer with even more transfers to come. + validate_multi_transfer_delivery_id(DeliveryId, FirstDeliveryId), + validate_multi_transfer_settled(Settled, FirstSettled), + PFR = [MsgPart | PFR0], + validate_message_size(PFR, MaxMessageSize), + Link = Link0#incoming_link{multi_transfer_msg = Multi#multi_transfer_msg{payload_fragments_rev = PFR}}, + {ok, [], Link, State}; +incoming_link_transfer( + #'v1_0.transfer'{handle = ?UINT(HandleInt)}, + _, + #incoming_link{credit = Credit} = Link, + _) + when Credit =< 0 -> + Detach = detach(HandleInt, Link, ?V_1_0_LINK_ERROR_TRANSFER_LIMIT_EXCEEDED), + {error, [Detach]}; +incoming_link_transfer( + #'v1_0.transfer'{delivery_id = MaybeDeliveryId, + delivery_tag = DeliveryTag, + settled = MaybeSettled, + rcv_settle_mode = RcvSettleMode, + handle = Handle = ?UINT(HandleInt)}, + MsgPart, + #incoming_link{exchange = LinkExchange, + routing_key = LinkRKey, + max_message_size = MaxMessageSize, + delivery_count = DeliveryCount0, + incoming_unconfirmed_map = U0, + credit = Credit0, + multi_transfer_msg = MultiTransfer + } = Link0, + State0 = #state{queue_states = QStates0, + permission_cache = PermCache0, + topic_permission_cache = TopicPermCache0, + cfg = #cfg{user = User = #user{username = Username}, + vhost = Vhost, + trace_state = Trace, + conn_name = ConnName, + channel_num = ChannelNum, + max_link_credit = MaxLinkCredit}}) -> + + {PayloadBin, DeliveryId, Settled} = + case MultiTransfer of + undefined -> + ?UINT(DeliveryId0) = MaybeDeliveryId, + {MsgPart, DeliveryId0, default(MaybeSettled, false)}; + #multi_transfer_msg{payload_fragments_rev = PFR, + delivery_id = FirstDeliveryId, + settled = FirstSettled} -> + MsgBin0 = list_to_binary(lists:reverse([MsgPart | PFR])), + ok = validate_multi_transfer_delivery_id(MaybeDeliveryId, FirstDeliveryId), + ok = validate_multi_transfer_settled(MaybeSettled, FirstSettled), + {MsgBin0, FirstDeliveryId, FirstSettled} + end, + validate_transfer_rcv_settle_mode(RcvSettleMode, Settled), + validate_message_size(PayloadBin, MaxMessageSize), + + Mc0 = mc:init(mc_amqp, PayloadBin, #{}), + case lookup_target(LinkExchange, LinkRKey, Mc0, Vhost, User, PermCache0) of + {ok, X, RoutingKey, Mc1, PermCache} -> + Mc2 = rabbit_message_interceptor:intercept(Mc1), + check_user_id(Mc2, User), + TopicPermCache = check_write_permitted_on_topic( + X, User, RoutingKey, TopicPermCache0), + messages_received(Settled), + QNames = rabbit_exchange:route(X, Mc2, #{return_binding_keys => true}), + rabbit_trace:tap_in(Mc2, QNames, ConnName, ChannelNum, Username, Trace), + Opts = #{correlation => {HandleInt, DeliveryId}}, + Qs0 = rabbit_amqqueue:lookup_many(QNames), + Qs = rabbit_amqqueue:prepend_extra_bcc(Qs0), + Mc = ensure_mc_cluster_compat(Mc2), + case rabbit_queue_type:deliver(Qs, Mc, Opts, QStates0) of + {ok, QStates, Actions} -> + State1 = State0#state{queue_states = QStates, + permission_cache = PermCache, + topic_permission_cache = TopicPermCache}, + %% Confirms must be registered before processing actions + %% because actions may contain rejections of publishes. 
+ {U, Reply0} = process_routing_confirm( + Qs, Settled, DeliveryId, U0), + State = handle_queue_actions(Actions, State1), + DeliveryCount = add(DeliveryCount0, 1), + Credit1 = Credit0 - 1, + {Credit, Reply1} = maybe_grant_link_credit( + Credit1, MaxLinkCredit, + DeliveryCount, map_size(U), Handle), + Reply = Reply0 ++ Reply1, + Link = Link0#incoming_link{ + delivery_count = DeliveryCount, + credit = Credit, + incoming_unconfirmed_map = U, + multi_transfer_msg = undefined}, + {ok, Reply, Link, State}; + {error, Reason} -> + protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Failed to deliver message to queues, " + "delivery_tag=~p, delivery_id=~p, reason=~p", + [DeliveryTag, DeliveryId, Reason]) + end; + {error, #'v1_0.error'{} = Err} -> + Disposition = released(DeliveryId), + Detach = detach(HandleInt, Link0, Err), + {error, [Disposition, Detach]} + end. + +lookup_target(#exchange{} = X, LinkRKey, Mc, _, _, PermCache) -> + lookup_routing_key(X, LinkRKey, Mc, PermCache); +lookup_target(#resource{} = XName, LinkRKey, Mc, _, _, PermCache) -> + case rabbit_exchange:lookup(XName) of + {ok, X} -> + lookup_routing_key(X, LinkRKey, Mc, PermCache); + {error, not_found} -> + {error, error_not_found(XName)} + end; +lookup_target(to, to, Mc, Vhost, User, PermCache0) -> + case mc:property(to, Mc) of + {utf8, String} -> + case parse_target_v2_string(String) of + {ok, XNameBin, RKey, _} -> + XName = exchange_resource(Vhost, XNameBin), + PermCache = check_resource_access(XName, write, User, PermCache0), + case rabbit_exchange:lookup(XName) of + {ok, X} -> + check_internal_exchange(X), + lookup_routing_key(X, RKey, Mc, PermCache); + {error, not_found} -> + {error, error_not_found(XName)} + end; + {error, bad_address} -> + {error, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + description = {utf8, <<"bad 'to' address string: ", String/binary>>}}} + end; + undefined -> + {error, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + description = {utf8, <<"anonymous terminus requires 'to' address to be set">>}}} + end. + +lookup_routing_key(X = #exchange{name = #resource{name = XNameBin}}, + RKey0, Mc0, PermCache) -> + RKey = case RKey0 of + subject -> + case mc:property(subject, Mc0) of + {utf8, Subject} -> + Subject; + undefined -> + <<>> + end; + _ when is_binary(RKey0) -> + RKey0 + end, + Mc1 = mc:set_annotation(?ANN_EXCHANGE, XNameBin, Mc0), + Mc = mc:set_annotation(?ANN_ROUTING_KEYS, [RKey], Mc1), + {ok, X, RKey, Mc, PermCache}. + +process_routing_confirm([], _SenderSettles = true, _, U) -> + rabbit_global_counters:messages_unroutable_dropped(?PROTOCOL, 1), + {U, []}; +process_routing_confirm([], _SenderSettles = false, DeliveryId, U) -> + rabbit_global_counters:messages_unroutable_returned(?PROTOCOL, 1), + Disposition = released(DeliveryId), + {U, [Disposition]}; +process_routing_confirm([_|_] = Qs, SenderSettles, DeliveryId, U0) -> + QNames = rabbit_amqqueue:queue_names(Qs), + false = maps:is_key(DeliveryId, U0), + Map = maps:from_keys(QNames, ok), + U = U0#{DeliveryId => {Map, SenderSettles, false}}, + rabbit_global_counters:messages_routed(?PROTOCOL, map_size(Map)), + {U, []}. + +released(DeliveryId) -> + #'v1_0.disposition'{role = ?AMQP_ROLE_RECEIVER, + first = ?UINT(DeliveryId), + settled = true, + state = #'v1_0.released'{}}. 
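%% Hedged illustration of the routing-confirm bookkeeping above (Q1/Q2 and
%% QName1/QName2 are placeholders for amqqueue records and their resource names):
%%
%% No queue matched and the sender did not settle -> reply immediately with RELEASED:
%%   process_routing_confirm([], false, 7, #{})
%%     => {#{}, [#'v1_0.disposition'{role = ?AMQP_ROLE_RECEIVER,
%%                                   first = ?UINT(7),
%%                                   settled = true,
%%                                   state = #'v1_0.released'{}}]}
%%
%% Routed to two queues -> track both under the delivery-id until each confirms:
%%   process_routing_confirm([Q1, Q2], false, 8, #{})
%%     => {#{8 => {#{QName1 => ok, QName2 => ok}, false, false}}, []}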
+ +maybe_grant_link_credit(Credit, MaxLinkCredit, DeliveryCount, NumUnconfirmed, Handle) -> + case grant_link_credit(Credit, MaxLinkCredit, NumUnconfirmed) of + true -> + {MaxLinkCredit, [flow(Handle, DeliveryCount, MaxLinkCredit)]}; + false -> + {Credit, []} + end. + +maybe_grant_link_credit( + MaxLinkCredit, + HandleInt, + Link = #incoming_link{credit = Credit, + incoming_unconfirmed_map = U, + delivery_count = DeliveryCount}, + AccMap) -> + case grant_link_credit(Credit, MaxLinkCredit, map_size(U)) of + true -> + {Link#incoming_link{credit = MaxLinkCredit}, + AccMap#{HandleInt => DeliveryCount}}; + false -> + {Link, AccMap} + end. + +grant_link_credit(Credit, MaxLinkCredit, NumUnconfirmed) -> + Credit =< MaxLinkCredit div 2 andalso + NumUnconfirmed < MaxLinkCredit. + +maybe_grant_mgmt_link_credit(Credit, DeliveryCount, Handle) + when Credit =< ?MAX_MANAGEMENT_LINK_CREDIT div 2 -> + {?MAX_MANAGEMENT_LINK_CREDIT, + [flow(Handle, DeliveryCount, ?MAX_MANAGEMENT_LINK_CREDIT)]}; +maybe_grant_mgmt_link_credit(Credit, _, _) -> + {Credit, []}. + +-spec ensure_source(#'v1_0.source'{}, + rabbit_types:vhost(), + rabbit_types:user(), + permission_cache(), + topic_permission_cache()) -> + {ok, rabbit_amqqueue:name(), permission_cache(), topic_permission_cache()} | + {error, term()}. +ensure_source(#'v1_0.source'{dynamic = true}, _, _, _, _) -> + exit_not_implemented("Dynamic sources not supported"); +ensure_source(#'v1_0.source'{address = Address, + durable = Durable}, + Vhost, User, PermCache, TopicPermCache) -> + case Address of + {utf8, <<"/queues/", QNameBinQuoted/binary>>} -> + %% The only possible v2 source address format is: + %% /queues/:queue + try rabbit_uri:urldecode(QNameBinQuoted) of + QNameBin -> + QName = queue_resource(Vhost, QNameBin), + ok = exit_if_absent(QName), + {ok, QName, PermCache, TopicPermCache} + catch error:_ -> + {error, {bad_address, Address}} + end; + {utf8, SourceAddr} -> + case address_v1_permitted() of + true -> + ensure_source_v1(SourceAddr, Vhost, User, Durable, + PermCache, TopicPermCache); + false -> + {error, {amqp_address_v1_not_permitted, Address}} + end; + _ -> + {error, {bad_address, Address}} + end. + +ensure_source_v1(Address, + Vhost, + User = #user{username = Username}, + Durable, + PermCache0, + TopicPermCache0) -> + case rabbit_routing_parser:parse_endpoint(Address, false) of + {ok, Src} -> + {QNameBin, PermCache1} = ensure_terminus(source, Src, Vhost, User, Durable, PermCache0), + case rabbit_routing_parser:parse_routing(Src) of + {"", QNameList} -> + true = string:equal(QNameList, QNameBin), + QName = queue_resource(Vhost, QNameBin), + {ok, QName, PermCache1, TopicPermCache0}; + {XNameList, RoutingKeyList} -> + RoutingKey = unicode:characters_to_binary(RoutingKeyList), + XNameBin = unicode:characters_to_binary(XNameList), + XName = exchange_resource(Vhost, XNameBin), + QName = queue_resource(Vhost, QNameBin), + Binding = #binding{source = XName, + destination = QName, + key = RoutingKey}, + PermCache2 = check_resource_access(QName, write, User, PermCache1), + PermCache = check_resource_access(XName, read, User, PermCache2), + {ok, X} = rabbit_exchange:lookup(XName), + TopicPermCache = check_read_permitted_on_topic( + X, User, RoutingKey, TopicPermCache0), + case rabbit_binding:add(Binding, Username) of + ok -> + {ok, QName, PermCache, TopicPermCache}; + {error, _} = Err -> + Err + end + end; + {error, _} = Err -> + Err + end. 
+ +-spec ensure_target(#'v1_0.target'{}, + rabbit_types:vhost(), + rabbit_types:user(), + permission_cache()) -> + {ok, + rabbit_types:exchange() | rabbit_exchange:name() | to, + rabbit_types:routing_key() | to | subject, + rabbit_misc:resource_name() | undefined, + permission_cache()} | + {error, term()}. +ensure_target(#'v1_0.target'{dynamic = true}, _, _, _) -> + exit_not_implemented("Dynamic targets not supported"); +ensure_target(#'v1_0.target'{address = Address, + durable = Durable}, + Vhost, User, PermCache) -> + case target_address_version(Address) of + 2 -> + case ensure_target_v2(Address, Vhost) of + {ok, to, RKey, QNameBin} -> + {ok, to, RKey, QNameBin, PermCache}; + {ok, XNameBin, RKey, QNameBin} -> + check_exchange(XNameBin, RKey, QNameBin, User, Vhost, PermCache); + {error, _} = Err -> + Err + end; + 1 -> + case address_v1_permitted() of + true -> + case ensure_target_v1(Address, Vhost, User, Durable, PermCache) of + {ok, XNameBin, RKey, QNameBin, PermCache1} -> + check_exchange(XNameBin, RKey, QNameBin, User, Vhost, PermCache1); + {error, _} = Err -> + Err + end; + false -> + {error, {amqp_address_v1_not_permitted, Address}} + end + end. + +check_exchange(XNameBin, RKey, QNameBin, User, Vhost, PermCache0) -> + XName = exchange_resource(Vhost, XNameBin), + PermCache = check_resource_access(XName, write, User, PermCache0), + case rabbit_exchange:lookup(XName) of + {ok, X} -> + check_internal_exchange(X), + %% Pre-declared exchanges are protected against deletion and modification. + %% Let's cache the whole #exchange{} record to save a + %% rabbit_exchange:lookup(XName) call each time we receive a message. + Exchange = case XNameBin of + ?DEFAULT_EXCHANGE_NAME -> X; + <<"amq.", _/binary>> -> X; + _ -> XName + end, + {ok, Exchange, RKey, QNameBin, PermCache}; + {error, not_found} -> + exit_not_found(XName) + end. + +address_v1_permitted() -> + rabbit_deprecated_features:is_permitted(amqp_address_v1). + +target_address_version({utf8, <<"/exchanges/", _/binary>>}) -> + 2; +target_address_version({utf8, <<"/queues/", _/binary>>}) -> + 2; +target_address_version(undefined) -> + %% anonymous terminus + %% https://docs.oasis-open.org/amqp/anonterm/v1.0/cs01/anonterm-v1.0-cs01.html#doc-anonymous-relay + 2; +target_address_version(_Address) -> + 1. + +%% The possible v2 target address formats are: +%% /exchanges/:exchange/:routing-key +%% /exchanges/:exchange +%% /queues/:queue +%% +ensure_target_v2({utf8, String}, Vhost) -> + case parse_target_v2_string(String) of + {ok, _XNameBin, _RKey, undefined} = Ok -> + Ok; + {ok, _XNameBin, _RKey, QNameBin} = Ok -> + ok = exit_if_absent(queue, Vhost, QNameBin), + Ok; + {error, bad_address} -> + {error, {bad_address_string, String}} + end; +ensure_target_v2(undefined, _) -> + %% anonymous terminus + %% https://docs.oasis-open.org/amqp/anonterm/v1.0/cs01/anonterm-v1.0-cs01.html#doc-anonymous-relay + {ok, to, to, undefined}. + +parse_target_v2_string(String) -> + try parse_target_v2_string0(String) + catch error:_ -> + {error, bad_address} + end. 
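%% Hedged examples of the v2 target addresses accepted above and the tuples the
%% parser returns (each path segment is percent-decoded; ?DEFAULT_EXCHANGE_NAME
%% denotes the default exchange, i.e. the empty exchange name):
%%   parse_target_v2_string(<<"/exchanges/amq.direct/orders.created">>)
%%     => {ok, <<"amq.direct">>, <<"orders.created">>, undefined}
%%   parse_target_v2_string(<<"/exchanges/amq.fanout">>)
%%     => {ok, <<"amq.fanout">>, <<>>, undefined}
%%   parse_target_v2_string(<<"/queues/my%2Fqueue">>)
%%     => {ok, ?DEFAULT_EXCHANGE_NAME, <<"my/queue">>, <<"my/queue">>}
%%   parse_target_v2_string(<<"/queues/">>)
%%     => {error, bad_address}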
+ +parse_target_v2_string0(<<"/exchanges/", Rest/binary>>) -> + Key = cp_slash, + Pattern = try persistent_term:get(Key) + catch error:badarg -> + Cp = binary:compile_pattern(<<"/">>), + ok = persistent_term:put(Key, Cp), + Cp + end, + case binary:split(Rest, Pattern, [global]) of + [?DEFAULT_EXCHANGE_NAME | _] -> + {error, bad_address}; + [<<"amq.default">> | _] -> + {error, bad_address}; + [XNameBinQuoted] -> + XNameBin = rabbit_uri:urldecode(XNameBinQuoted), + {ok, XNameBin, <<>>, undefined}; + [XNameBinQuoted, RKeyQuoted] -> + XNameBin = rabbit_uri:urldecode(XNameBinQuoted), + RKey = rabbit_uri:urldecode(RKeyQuoted), + {ok, XNameBin, RKey, undefined}; + _ -> + {error, bad_address} + end; +parse_target_v2_string0(<<"/queues/">>) -> + %% empty queue name is invalid + {error, bad_address}; +parse_target_v2_string0(<<"/queues/", QNameBinQuoted/binary>>) -> + QNameBin = rabbit_uri:urldecode(QNameBinQuoted), + {ok, ?DEFAULT_EXCHANGE_NAME, QNameBin, QNameBin}; +parse_target_v2_string0(_) -> + {error, bad_address}. + +ensure_target_v1({utf8, Address}, Vhost, User, Durable, PermCache0) -> + case rabbit_routing_parser:parse_endpoint(Address, true) of + {ok, Dest} -> + {QNameBin, PermCache} = ensure_terminus( + target, Dest, Vhost, User, Durable, PermCache0), + {XNameList1, RK} = rabbit_routing_parser:parse_routing(Dest), + XNameBin = unicode:characters_to_binary(XNameList1), + RoutingKey = case RK of + undefined -> subject; + [] -> subject; + _ -> unicode:characters_to_binary(RK) + end, + {ok, XNameBin, RoutingKey, QNameBin, PermCache}; + {error, _} = Err -> + Err + end; +ensure_target_v1(Address, _, _, _, _) -> + {error, {bad_address, Address}}. + +handle_outgoing_mgmt_link_flow_control( + #management_link{delivery_count = DeliveryCountSnd} = Link0, + #'v1_0.flow'{handle = Handle = ?UINT(HandleInt), + delivery_count = MaybeDeliveryCountRcv, + link_credit = ?UINT(LinkCreditRcv), + drain = Drain0, + echo = Echo0}, + #state{outgoing_management_links = Links0, + outgoing_pending = Pending + } = State0) -> + Drain = default(Drain0, false), + Echo = default(Echo0, false), + DeliveryCountRcv = delivery_count_rcv(MaybeDeliveryCountRcv), + LinkCreditSnd = amqp10_util:link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd), + {Count, Credit} = case Drain of + true -> {add(DeliveryCountSnd, LinkCreditSnd), 0}; + false -> {DeliveryCountSnd, LinkCreditSnd} + end, + State = case Echo orelse Drain of + true -> + Flow = #'v1_0.flow'{ + handle = Handle, + delivery_count = ?UINT(Count), + link_credit = ?UINT(Credit), + available = ?UINT(0), + drain = Drain}, + State0#state{outgoing_pending = queue:in(Flow, Pending)}; + false -> + State0 + end, + Link = Link0#management_link{delivery_count = Count, + credit = Credit}, + Links = maps:update(HandleInt, Link, Links0), + State#state{outgoing_management_links = Links}. 
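%% The drain handling above follows the usual AMQP 1.0 rule: when the receiver drains,
%% the sender consumes all remaining credit by advancing its delivery-count and reports
%% zero credit back. A shell-evaluable sketch with plain integers (the real code uses
%% serial-number addition via add/2):
ApplyDrain = fun(true,  DeliveryCountSnd, CreditSnd) -> {DeliveryCountSnd + CreditSnd, 0};
                (false, DeliveryCountSnd, CreditSnd) -> {DeliveryCountSnd, CreditSnd}
             end.
%% ApplyDrain(true, 10, 5)  => {15, 0}
%% ApplyDrain(false, 10, 5) => {10, 5}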
+ +handle_outgoing_link_flow_control( + #outgoing_link{queue_name = QName, + credit_api_version = CreditApiVsn, + client_flow_ctl = CFC, + queue_flow_ctl = QFC, + at_least_one_credit_req_in_flight = CreditReqInFlight + } = Link0, + #'v1_0.flow'{handle = ?UINT(HandleInt), + delivery_count = MaybeDeliveryCountRcv, + link_credit = ?UINT(LinkCreditRcv), + drain = Drain0, + echo = Echo0}, + #state{outgoing_links = OutgoingLinks, + queue_states = QStates0 + } = State0) -> + Ctag = handle_to_ctag(HandleInt), + DeliveryCountRcv = delivery_count_rcv(MaybeDeliveryCountRcv), + Drain = default(Drain0, false), + Echo = default(Echo0, false), + case CreditApiVsn of + 2 -> + case CreditReqInFlight of + false -> + LinkCreditSnd = amqp10_util:link_credit_snd( + DeliveryCountRcv, + LinkCreditRcv, + CFC#client_flow_ctl.delivery_count), + CappedCredit = cap_credit(LinkCreditSnd, + State0#state.cfg#cfg.max_queue_credit), + Link = Link0#outgoing_link{ + client_flow_ctl = CFC#client_flow_ctl{ + credit = LinkCreditSnd, + echo = Echo}, + queue_flow_ctl = QFC#queue_flow_ctl{ + credit = CappedCredit, + drain = Drain}, + at_least_one_credit_req_in_flight = true}, + {ok, QStates, Actions} = rabbit_queue_type:credit( + QName, Ctag, + QFC#queue_flow_ctl.delivery_count, + CappedCredit, Drain, QStates0), + State = State0#state{ + queue_states = QStates, + outgoing_links = OutgoingLinks#{HandleInt := Link}}, + handle_queue_actions(Actions, State); + true -> + %% A credit request is currently in-flight. Let's first process its reply + %% before sending the next request. This ensures our outgoing_pending + %% queue won't contain lots of credit replies for the same consumer + %% when the client floods us with credit requests, but closed its incoming-window. + %% Processing one credit top up at a time between us and the queue is also easier + %% to reason about. Therefore, we stash the new request. If there is already a + %% stashed request, we replace it because the latest flow control state from the + %% client applies. + Link = Link0#outgoing_link{ + stashed_credit_req = #credit_req{ + delivery_count = DeliveryCountRcv, + credit = LinkCreditRcv, + drain = Drain, + echo = Echo}}, + State0#state{outgoing_links = OutgoingLinks#{HandleInt := Link}} + end; + 1 -> + DeliveryCountSnd = Link0#outgoing_link.delivery_count, + LinkCreditSnd = amqp10_util:link_credit_snd( + DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd), + {ok, QStates, Actions} = rabbit_queue_type:credit_v1( + QName, Ctag, LinkCreditSnd, Drain, QStates0), + State1 = State0#state{queue_states = QStates}, + State = handle_queue_actions(Actions, State1), + process_credit_reply_sync(Ctag, QName, LinkCreditSnd, State) + end. + +delivery_count_rcv(?UINT(DeliveryCount)) -> + DeliveryCount; +delivery_count_rcv(undefined) -> + %% "In the event that the receiver does not yet know the delivery-count, + %% i.e., delivery-countrcv is unspecified, the sender MUST assume that the + %% delivery-countrcv is the first delivery-countsnd sent from sender to + %% receiver, i.e., the delivery-countsnd specified in the flow state carried + %% by the initial attach frame from the sender to the receiver." [2.6.7] + ?INITIAL_DELIVERY_COUNT. 
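%% For orientation, the credit that amqp10_util:link_credit_snd/3 computes for the
%% server's sending end follows section 2.6.7. Ignoring 32-bit serial wrap-around,
%% a plain-integer sketch of that formula is:
LinkCreditSnd = fun(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd) ->
                        DeliveryCountRcv + LinkCreditRcv - DeliveryCountSnd
                end.
%% Example: the client last saw delivery-count 10 and granted 50 credits, while the
%% server has meanwhile advanced to 14: LinkCreditSnd(10, 50, 14) => 46.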
+
+%% The AMQP 0.9.1 credit extension was poorly designed because a consumer granting
+%% credits to a queue has to synchronously wait for a credit reply from the queue:
+%% https://github.com/rabbitmq/rabbitmq-server/blob/b9566f4d02f7ceddd2f267a92d46affd30fb16c8/deps/rabbitmq_codegen/credit_extension.json#L43
+%% This blocks our entire AMQP 1.0 session process. Since the credit reply from the
+%% queue did not contain the consumer tag prior to feature flag rabbitmq_4.0.0, we
+%% must behave here the same way as non-native AMQP 1.0: We wait until the queue
+%% sends us a credit reply such that we can correlate that reply with our consumer tag.
+process_credit_reply_sync(
+  Ctag, QName, Credit, State = #state{queue_states = QStates}) ->
+    case rabbit_queue_type:module(QName, QStates) of
+        {ok, rabbit_classic_queue} ->
+            receive {'$gen_cast',
+                     {queue_event,
+                      QName,
+                      {send_credit_reply, Avail}}} ->
+                        Action = {credit_reply_v1, Ctag, Credit, Avail, false},
+                        handle_queue_actions([Action], State)
+            after ?CREDIT_REPLY_TIMEOUT ->
+                      credit_reply_timeout(classic, QName)
+            end;
+        {ok, rabbit_quorum_queue} ->
+            process_credit_reply_sync_quorum_queue(Ctag, QName, Credit, State);
+        {error, not_found} ->
+            State
+    end.
+
+process_credit_reply_sync_quorum_queue(Ctag, QName, Credit, State0) ->
+    receive {'$gen_cast',
+             {queue_event,
+              QName,
+              {QuorumQueue,
+               {applied,
+                Applied0}}}} ->
+
+                {Applied, ReceivedCreditReply}
+                = lists:mapfoldl(
+                    %% Convert v1 send_credit_reply to credit_reply_v1 action.
+                    %% Available refers to *after* and Credit refers to *before*
+                    %% quorum queue sends messages.
+                    %% We therefore keep the same wrong behaviour of RabbitMQ 3.x.
+                    fun({RaIdx, {send_credit_reply, Available}}, _) ->
+                            Action = {credit_reply_v1, Ctag, Credit, Available, false},
+                            {{RaIdx, Action}, true};
+                       ({RaIdx, {multi, [{send_credit_reply, Available},
+                                         {send_drained, _} = SendDrained]}}, _) ->
+                            Action = {credit_reply_v1, Ctag, Credit, Available, false},
+                            {{RaIdx, {multi, [Action, SendDrained]}}, true};
+                       (E, Acc) ->
+                            {E, Acc}
+                    end, false, Applied0),
+
+                Evt = {queue_event, QName, {QuorumQueue, {applied, Applied}}},
+                %% send_drained action must be processed by
+                %% rabbit_fifo_client to advance the delivery count.
+                State = handle_queue_event(Evt, State0),
+                case ReceivedCreditReply of
+                    true ->
+                        State;
+                    false ->
+                        process_credit_reply_sync_quorum_queue(Ctag, QName, Credit, State)
+                end
+    after ?CREDIT_REPLY_TIMEOUT ->
+              credit_reply_timeout(quorum, QName)
+    end.
+
+-spec credit_reply_timeout(atom(), rabbit_types:rabbit_amqqueue_name()) ->
+    no_return().
+credit_reply_timeout(QType, QName) ->
+    Fmt = "Timed out waiting for credit reply from ~s ~s. "
+          "Hint: Enable feature flag rabbitmq_4.0.0",
+    Args = [QType, rabbit_misc:rs(QName)],
+    rabbit_log:error(Fmt, Args),
+    protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, Fmt, Args).
+
+default(undefined, Default) -> Default;
+default(Thing, _Default) -> Thing.
+
+transfer_frames(Transfer, Sections, unlimited) ->
+    [[Transfer, Sections]];
+transfer_frames(Transfer, Sections, MaxFrameSize) ->
+    PerformativeSize = iolist_size(amqp10_framing:encode_bin(Transfer)),
+    encode_frames(Transfer, Sections, MaxFrameSize - PerformativeSize, []).
+ +encode_frames(_T, _Msg, MaxPayloadSize, _Transfers) when MaxPayloadSize =< 0 -> + protocol_error(?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL, + "Frame size is too small by ~b bytes", + [-MaxPayloadSize]); +encode_frames(T, Msg, MaxPayloadSize, Transfers) -> + case iolist_size(Msg) > MaxPayloadSize of + true -> + MsgBin = iolist_to_binary(Msg), + {Chunk, Rest} = split_binary(MsgBin, MaxPayloadSize), + T1 = T#'v1_0.transfer'{more = true}, + encode_frames(T, Rest, MaxPayloadSize, [[T1, Chunk] | Transfers]); + false -> + lists:reverse([[T, Msg] | Transfers]) + end. + +consumer_arguments(#'v1_0.attach'{ + source = #'v1_0.source'{filter = Filter}, + properties = Properties}) -> + properties_to_consumer_args(Properties) ++ + filter_to_consumer_args(Filter). + +properties_to_consumer_args({map, KVList}) -> + Key = {symbol, <<"rabbitmq:priority">>}, + case proplists:lookup(Key, KVList) of + {Key, Val = {int, _Prio}} -> + [mc_amqpl:to_091(<<"x-priority">>, Val)]; + _ -> + [] + end; +properties_to_consumer_args(_) -> + []. + +filter_to_consumer_args({map, KVList}) -> + filter_to_consumer_args( + [<<"rabbitmq:stream-offset-spec">>, + <<"rabbitmq:stream-filter">>, + <<"rabbitmq:stream-match-unfiltered">>], + KVList, + []); +filter_to_consumer_args(_) -> + []. + +filter_to_consumer_args([], _KVList, Acc) -> + Acc; +filter_to_consumer_args([<<"rabbitmq:stream-offset-spec">> = H | T], KVList, Acc) -> + Key = {symbol, H}, + Arg = case keyfind_unpack_described(Key, KVList) of + {_, {timestamp, Ts}} -> + [{<<"x-stream-offset">>, timestamp, Ts div 1000}]; %% 0.9.1 uses second based timestamps + {_, {utf8, Spec}} -> + [{<<"x-stream-offset">>, longstr, Spec}]; %% next, last, first and "10m" etc + {_, {_, Offset}} when is_integer(Offset) -> + [{<<"x-stream-offset">>, long, Offset}]; %% integer offset + _ -> + [] + end, + filter_to_consumer_args(T, KVList, Arg ++ Acc); +filter_to_consumer_args([<<"rabbitmq:stream-filter">> = H | T], KVList, Acc) -> + Key = {symbol, H}, + Arg = case keyfind_unpack_described(Key, KVList) of + {_, {list, Filters0}} when is_list(Filters0) -> + Filters = lists:foldl(fun({utf8, Filter}, L) -> + [{longstr, Filter} | L]; + (_, L) -> + L + end, [], Filters0), + [{<<"x-stream-filter">>, array, Filters}]; + {_, {utf8, Filter}} -> + [{<<"x-stream-filter">>, longstr, Filter}]; + _ -> + [] + end, + filter_to_consumer_args(T, KVList, Arg ++ Acc); +filter_to_consumer_args([<<"rabbitmq:stream-match-unfiltered">> = H | T], KVList, Acc) -> + Key = {symbol, H}, + Arg = case keyfind_unpack_described(Key, KVList) of + {_, MU} when is_boolean(MU) -> + [{<<"x-stream-match-unfiltered">>, bool, MU}]; + _ -> + [] + end, + filter_to_consumer_args(T, KVList, Arg ++ Acc); +filter_to_consumer_args([_ | T], KVList, Acc) -> + filter_to_consumer_args(T, KVList, Acc). + +keyfind_unpack_described(Key, KvList) -> + %% filterset values _should_ be described values + %% they aren't always however for historical reasons so we need this bit of + %% code to return a plain value for the given filter key + case lists:keyfind(Key, 1, KvList) of + {Key, {described, Key, Value}} -> + {Key, Value}; + {Key, _} = Kv -> + Kv; + false -> + false + end. 
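%% Hedged examples of attach filters a stream consumer might send and the AMQP 0.9.1
%% consumer arguments the conversion above produces (timestamps arrive in milliseconds
%% and are converted to seconds):
%%   {map, [{{symbol, <<"rabbitmq:stream-offset-spec">>}, {utf8, <<"last">>}}]}
%%     => [{<<"x-stream-offset">>, longstr, <<"last">>}]
%%   {map, [{{symbol, <<"rabbitmq:stream-offset-spec">>}, {timestamp, 1700000000000}}]}
%%     => [{<<"x-stream-offset">>, timestamp, 1700000000}]
%%   {map, [{{symbol, <<"rabbitmq:stream-filter">>}, {utf8, <<"region-eu">>}}]}
%%     => [{<<"x-stream-filter">>, longstr, <<"region-eu">>}]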
+ +validate_attach(#'v1_0.attach'{target = #'v1_0.coordinator'{}}) -> + exit_not_implemented("Transactions not supported"); +validate_attach(#'v1_0.attach'{unsettled = {map, [_|_]}}) -> + exit_not_implemented("Link recovery not supported"); +validate_attach(#'v1_0.attach'{incomplete_unsettled = true}) -> + exit_not_implemented("Link recovery not supported"); +validate_attach( + #'v1_0.attach'{snd_settle_mode = SndSettleMode, + rcv_settle_mode = ?V_1_0_RECEIVER_SETTLE_MODE_SECOND}) + when SndSettleMode =/= ?V_1_0_SENDER_SETTLE_MODE_SETTLED -> + exit_not_implemented("rcv-settle-mode second not supported"); +validate_attach(#'v1_0.attach'{}) -> + ok. + +validate_multi_transfer_delivery_id(?UINT(Id), Id) -> + ok; +validate_multi_transfer_delivery_id(undefined, _FirstDeliveryId) -> + %% "On continuation transfers the delivery-id MAY be omitted." + ok; +validate_multi_transfer_delivery_id(OtherId, FirstDeliveryId) -> + %% "It is an error if the delivery-id on a continuation transfer + %% differs from the delivery-id on the first transfer of a delivery." + protocol_error( + ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR, + "delivery-id of continuation transfer (~p) differs from delivery-id on first transfer (~p)", + [OtherId, FirstDeliveryId]). + +validate_multi_transfer_settled(Settled, Settled) + when is_boolean(Settled) -> + ok; +validate_multi_transfer_settled(undefined, Settled) + when is_boolean(Settled) -> + ok; +validate_multi_transfer_settled(Other, First) + when is_boolean(First) -> + protocol_error( + ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR, + "field 'settled' of continuation transfer (~p) differs from " + "(interpreted) field 'settled' on first transfer (~p)", + [Other, First]). + +%% "If the message is being sent settled by the sender, +%% the value of this field [rcv-settle-mode] is ignored." [2.7.5] +validate_transfer_rcv_settle_mode(?V_1_0_RECEIVER_SETTLE_MODE_SECOND, _Settled = false) -> + exit_not_implemented("rcv-settle-mode second not supported"); +validate_transfer_rcv_settle_mode(_, _) -> + ok. + +validate_message_size(_, unlimited) -> + ok; +validate_message_size(Message, MaxMsgSize) + when is_integer(MaxMsgSize) -> + MsgSize = iolist_size(Message), + case MsgSize =< MaxMsgSize of + true -> + ok; + false -> + %% "Any attempt to deliver a message larger than this results in a message-size-exceeded link-error." [2.7.3] + %% We apply that sentence to both incoming messages that are too large for us and outgoing messages that are + %% too large for the client. + %% This is an interesting protocol difference to MQTT where we instead discard outgoing messages that are too + %% large to send and then behave as if we had completed sending that message [MQTT 5.0, MQTT-3.1.2-25]. + protocol_error( + ?V_1_0_LINK_ERROR_MESSAGE_SIZE_EXCEEDED, + "message size (~b bytes) > maximum message size (~b bytes)", + [MsgSize, MaxMsgSize]) + end. + +-spec ensure_terminus(source | target, + term(), + rabbit_types:vhost(), + rabbit_types:user(), + {uint, 0..2}, + permission_cache()) -> + {undefined | rabbit_misc:resource_name(), + permission_cache()}. 
+ensure_terminus(Type, {exchange, {XNameList, _RoutingKey}}, Vhost, User, Durability, PermCache) -> + ok = exit_if_absent(exchange, Vhost, XNameList), + case Type of + target -> {undefined, PermCache}; + source -> declare_queue(generate_queue_name(), Vhost, User, Durability, PermCache) + end; +ensure_terminus(target, {topic, _bindingkey}, _, _, _, PermCache) -> + %% exchange amq.topic exists + {undefined, PermCache}; +ensure_terminus(source, {topic, _BindingKey}, Vhost, User, Durability, PermCache) -> + %% exchange amq.topic exists + declare_queue(generate_queue_name(), Vhost, User, Durability, PermCache); +ensure_terminus(target, {queue, undefined}, _, _, _, PermCache) -> + %% Target "/queue" means publish to default exchange with message subject as routing key. + %% Default exchange exists. + {undefined, PermCache}; +ensure_terminus(_, {queue, QNameList}, Vhost, User, Durability, PermCache) -> + declare_queue(unicode:characters_to_binary(QNameList), Vhost, User, Durability, PermCache); +ensure_terminus(_, {amqqueue, QNameList}, Vhost, _, _, PermCache) -> + %% Target "/amq/queue/" is handled specially due to AMQP legacy: + %% "Queue names starting with "amq." are reserved for pre-declared and + %% standardised queues. The client MAY declare a queue starting with "amq." + %% if the passive option is set, or the queue already exists." + QNameBin = unicode:characters_to_binary(QNameList), + ok = exit_if_absent(queue, Vhost, QNameBin), + {QNameBin, PermCache}. + +exit_if_absent(Kind, Vhost, Name) when is_list(Name) -> + exit_if_absent(Kind, Vhost, unicode:characters_to_binary(Name)); +exit_if_absent(Kind, Vhost, Name) when is_binary(Name) -> + exit_if_absent(rabbit_misc:r(Vhost, Kind, Name)). + +exit_if_absent(ResourceName = #resource{kind = Kind}) -> + Mod = case Kind of + exchange -> rabbit_exchange; + queue -> rabbit_amqqueue + end, + case Mod:exists(ResourceName) of + true -> ok; + false -> exit_not_found(ResourceName) + end. + +generate_queue_name() -> + rabbit_guid:binary(rabbit_guid:gen_secure(), "amq.gen"). + +declare_queue(QNameBin, + Vhost, + User = #user{username = Username}, + TerminusDurability, + PermCache0) -> + QName = queue_resource(Vhost, QNameBin), + PermCache = check_resource_access(QName, configure, User, PermCache0), + rabbit_core_metrics:queue_declared(QName), + Q0 = amqqueue:new(QName, + _Pid = none, + queue_is_durable(TerminusDurability), + _AutoDelete = false, + _QOwner = none, + _QArgs = [], + Vhost, + #{user => Username}, + rabbit_classic_queue), + case rabbit_queue_type:declare(Q0, node()) of + {new, _Q} -> + rabbit_core_metrics:queue_created(QName); + {existing, _Q} -> + ok; + {error, queue_limit_exceeded, Reason, ReasonArgs} -> + protocol_error( + ?V_1_0_AMQP_ERROR_RESOURCE_LIMIT_EXCEEDED, + Reason, + ReasonArgs); + Other -> + protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Failed to declare ~s: ~p", + [rabbit_misc:rs(QName), Other]) + end, + {QNameBin, PermCache}. + +outcomes(#'v1_0.source'{outcomes = undefined}) -> + {array, symbol, ?OUTCOMES}; +outcomes(#'v1_0.source'{outcomes = {array, symbol, Syms} = Outcomes}) -> + case lists:filter(fun(O) -> not lists:member(O, ?OUTCOMES) end, Syms) of + [] -> + Outcomes; + Unsupported -> + exit_not_implemented("Outcomes not supported: ~tp", [Unsupported]) + end; +outcomes(#'v1_0.source'{outcomes = Unsupported}) -> + exit_not_implemented("Outcomes not supported: ~tp", [Unsupported]); +outcomes(_) -> + {array, symbol, ?OUTCOMES}. + +-spec handle_to_ctag(link_handle()) -> rabbit_types:ctag(). 
+handle_to_ctag(Handle) -> + integer_to_binary(Handle). + +-spec ctag_to_handle(rabbit_types:ctag()) -> link_handle(). +ctag_to_handle(Ctag) -> + binary_to_integer(Ctag). + +queue_is_durable(?V_1_0_TERMINUS_DURABILITY_NONE) -> + false; +queue_is_durable(?V_1_0_TERMINUS_DURABILITY_CONFIGURATION) -> + true; +queue_is_durable(?V_1_0_TERMINUS_DURABILITY_UNSETTLED_STATE) -> + true; +queue_is_durable(undefined) -> + %% + %% [3.5.3] + queue_is_durable(?V_1_0_TERMINUS_DURABILITY_NONE). + +-spec remove_outgoing_link(link_handle() | rabbit_types:ctag(), Map, queue:queue()) -> + {Map, queue:queue()} + when Map :: #{delivery_number() => #outgoing_unsettled{}}. +remove_outgoing_link(Handle, Map, Queue) + when is_integer(Handle) -> + Ctag = handle_to_ctag(Handle), + remove_outgoing_link(Ctag, Map, Queue); +remove_outgoing_link(Ctag, OutgoingUnsettledMap0, OutgoingPending0) + when is_binary(Ctag) -> + OutgoingUnsettledMap = maps:filter( + fun(_DeliveryId, #outgoing_unsettled{consumer_tag = Tag}) -> + Tag =/= Ctag + end, OutgoingUnsettledMap0), + OutgoingPending = queue:filter( + fun(#pending_delivery{outgoing_unsettled = #outgoing_unsettled{consumer_tag = Tag}}) -> + Tag =/= Ctag; + ({credit_reply, Tag, _DeliveryCount, _Credit, _Available, _Drain}) -> + Tag =/= Ctag; + (#pending_management_delivery{}) -> + true; + (#'v1_0.flow'{}) -> + true + end, OutgoingPending0), + {OutgoingUnsettledMap, OutgoingPending}. + +messages_received(Settled) -> + rabbit_global_counters:messages_received(?PROTOCOL, 1), + case Settled of + true -> ok; + false -> rabbit_global_counters:messages_received_confirm(?PROTOCOL, 1) + end. + +messages_delivered(Redelivered, QueueType) -> + rabbit_global_counters:messages_delivered(?PROTOCOL, QueueType, 1), + case Redelivered of + true -> rabbit_global_counters:messages_redelivered(?PROTOCOL, QueueType, 1); + false -> ok + end. + +messages_acknowledged(complete, QName, QS, MsgIds) -> + case rabbit_queue_type:module(QName, QS) of + {ok, QType} -> + rabbit_global_counters:messages_acknowledged(?PROTOCOL, QType, length(MsgIds)); + _ -> + ok + end; +messages_acknowledged(_, _, _, _) -> + ok. + +publisher_or_consumer_deleted(#incoming_link{}) -> + rabbit_global_counters:publisher_deleted(?PROTOCOL); +publisher_or_consumer_deleted(#outgoing_link{}) -> + rabbit_global_counters:consumer_deleted(?PROTOCOL). + +publisher_or_consumer_deleted( + #state{incoming_links = NewIncomingLinks, + outgoing_links = NewOutgoingLinks}, + #state{incoming_links = OldIncomingLinks, + outgoing_links = OldOutgoingLinks}) -> + if map_size(NewIncomingLinks) < map_size(OldIncomingLinks) -> + rabbit_global_counters:publisher_deleted(?PROTOCOL); + map_size(NewOutgoingLinks) < map_size(OldOutgoingLinks) -> + rabbit_global_counters:consumer_deleted(?PROTOCOL); + true -> + ok + end. + +%% If we previously already sent a detach with an error condition, and the Detach we +%% receive here is therefore the client's reply, do not reply again with a 3rd detach. 
+maybe_detach_reply( + Detach, + #state{incoming_links = NewIncomingLinks, + outgoing_links = NewOutgoingLinks, + incoming_management_links = NewIncomingMgmtLinks, + outgoing_management_links = NewOutgoingMgmtLinks, + cfg = #cfg{writer_pid = WriterPid, + channel_num = Ch}}, + #state{incoming_links = OldIncomingLinks, + outgoing_links = OldOutgoingLinks, + incoming_management_links = OldIncomingMgmtLinks, + outgoing_management_links = OldOutgoingMgmtLinks}) + when map_size(NewIncomingLinks) < map_size(OldIncomingLinks) orelse + map_size(NewOutgoingLinks) < map_size(OldOutgoingLinks) orelse + map_size(NewIncomingMgmtLinks) < map_size(OldIncomingMgmtLinks) orelse + map_size(NewOutgoingMgmtLinks) < map_size(OldOutgoingMgmtLinks) -> + Reply = Detach#'v1_0.detach'{error = undefined}, + rabbit_amqp_writer:send_command(WriterPid, Ch, Reply); +maybe_detach_reply(_, _, _) -> + ok. + +-spec maybe_detach_mgmt_link(link_handle(), state()) -> state(). +maybe_detach_mgmt_link( + HandleInt, + State = #state{management_link_pairs = LinkPairs0, + incoming_management_links = IncomingLinks0, + outgoing_management_links = OutgoingLinks0}) -> + case maps:take(HandleInt, IncomingLinks0) of + {#management_link{name = Name}, IncomingLinks} -> + Pair = #management_link_pair{outgoing_half = OutgoingHalf} = maps:get(Name, LinkPairs0), + LinkPairs = case OutgoingHalf of + unattached -> + maps:remove(Name, LinkPairs0); + _ -> + maps:update(Name, + Pair#management_link_pair{incoming_half = unattached}, + LinkPairs0) + end, + State#state{incoming_management_links = IncomingLinks, + management_link_pairs = LinkPairs}; + error -> + case maps:take(HandleInt, OutgoingLinks0) of + {#management_link{name = Name}, OutgoingLinks} -> + Pair = #management_link_pair{incoming_half = IncomingHalf} = maps:get(Name, LinkPairs0), + LinkPairs = case IncomingHalf of + unattached -> + maps:remove(Name, LinkPairs0); + _ -> + maps:update(Name, + Pair#management_link_pair{outgoing_half = unattached}, + LinkPairs0) + end, + State#state{outgoing_management_links = OutgoingLinks, + management_link_pairs = LinkPairs}; + error -> + State + end + end. + +check_internal_exchange(#exchange{internal = true, + name = XName}) -> + protocol_error(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + "forbidden to publish to internal ~ts", + [rabbit_misc:rs(XName)]); +check_internal_exchange(_) -> + ok. + +-spec check_resource_access(rabbit_types:r(exchange | queue), + rabbit_types:permission_atom(), + rabbit_types:user(), + permission_cache()) -> + permission_cache(). +check_resource_access(Resource, Perm, User, Cache) -> + CacheElem = {Resource, Perm}, + case lists:member(CacheElem, Cache) of + true -> + Cache; + false -> + Context = #{}, + try rabbit_access_control:check_resource_access(User, Resource, Perm, Context) of + ok -> + CacheTail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE - 1), + [CacheElem | CacheTail] + catch + exit:#amqp_error{name = access_refused, + explanation = Msg} -> + protocol_error(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, Msg, []) + end + end. + +-spec check_write_permitted_on_topic( + rabbit_types:exchange(), + rabbit_types:user(), + rabbit_types:routing_key(), + topic_permission_cache()) -> + topic_permission_cache(). +check_write_permitted_on_topic(Resource, User, RoutingKey, TopicPermCache) -> + check_topic_authorisation(Resource, User, RoutingKey, write, TopicPermCache). 
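%% The authorisation checks above cache successful decisions in a small list: a new
%% entry is prepended and the tail is truncated to ?MAX_PERMISSION_CACHE_SIZE entries,
%% so the cache acts as a bounded most-recently-added list. A shell-evaluable sketch
%% of that idiom:
CacheAuthorisation =
    fun(Elem, Cache, MaxSize) ->
            case lists:member(Elem, Cache) of
                true  -> Cache;                                      %% already authorised
                false -> [Elem | lists:sublist(Cache, MaxSize - 1)]  %% prepend, drop oldest tail
            end
    end.
%% CacheAuthorisation({<<"q1">>, read}, [{<<"q2">>, write}], 12)
%%   => [{<<"q1">>, read}, {<<"q2">>, write}]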
+ +-spec check_read_permitted_on_topic( + rabbit_types:exchange(), + rabbit_types:user(), + rabbit_types:routing_key(), + topic_permission_cache()) -> + topic_permission_cache(). +check_read_permitted_on_topic(Resource, User, RoutingKey, TopicPermCache) -> + check_topic_authorisation(Resource, User, RoutingKey, read, TopicPermCache). + +check_topic_authorisation(#exchange{type = topic, + name = XName = #resource{virtual_host = VHost}}, + User = #user{username = Username}, + RoutingKey, + Permission, + Cache) -> + Resource = XName#resource{kind = topic}, + CacheElem = {Resource, RoutingKey, Permission}, + case lists:member(CacheElem, Cache) of + true -> + Cache; + false -> + VariableMap = #{<<"vhost">> => VHost, + <<"username">> => Username}, + Context = #{routing_key => RoutingKey, + variable_map => VariableMap}, + try rabbit_access_control:check_topic_access(User, Resource, Permission, Context) of + ok -> + CacheTail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE - 1), + [CacheElem | CacheTail] + catch + exit:#amqp_error{name = access_refused, + explanation = Msg} -> + protocol_error(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, Msg, []) + end + end; +check_topic_authorisation(_, _, _, _, Cache) -> + Cache. + +check_user_id(Mc, User) -> + case rabbit_access_control:check_user_id(Mc, User) of + ok -> + ok; + {refused, Reason, Args} -> + protocol_error(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, Reason, Args) + end. + +maps_update_with(Key, Fun, Init, Map) -> + case Map of + #{Key := Value} -> + Map#{Key := Fun(Value)}; + _ -> + Map#{Key => Init} + end. + +max_message_size({ulong, Size}) + when Size > 0 -> + Size; +max_message_size(_) -> + %% "If this field is zero or unset, there is no + %% maximum size imposed by the link endpoint." + unlimited. + +check_paired({map, Properties}) -> + case lists:any(fun({{symbol, <<"paired">>}, true}) -> + true; + (_) -> + false + end, Properties) of + true -> + ok; + false -> + exit_property_paired_not_set() + end; +check_paired(_) -> + exit_property_paired_not_set(). + +-spec exit_property_paired_not_set() -> no_return(). +exit_property_paired_not_set() -> + protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD, + "Link property 'paired' is not set to boolean value 'true'", []). + +-spec exit_not_implemented(io:format()) -> no_return(). +exit_not_implemented(Format) -> + exit_not_implemented(Format, []). + +-spec exit_not_implemented(io:format(), [term()]) -> no_return(). +exit_not_implemented(Format, Args) -> + protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED, Format, Args). + +-spec exit_not_found(rabbit_types:r(exchange | queue)) -> no_return(). +exit_not_found(Resource) -> + protocol_error(?V_1_0_AMQP_ERROR_NOT_FOUND, + "no ~ts", + [rabbit_misc:rs(Resource)]). + +-spec error_not_found(rabbit_types:r(exchange | queue)) -> #'v1_0.error'{}. +error_not_found(Resource) -> + Description = unicode:characters_to_binary("no " ++ rabbit_misc:rs(Resource)), + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_NOT_FOUND, + description = {utf8, Description}}. + +is_valid_max(Val) -> + is_integer(Val) andalso + Val > 0 andalso + Val =< ?UINT_MAX. + +pg_scope() -> + rabbit:pg_local_scope(amqp_session). + +-spec cap_credit(rabbit_queue_type:credit(), pos_integer()) -> + rabbit_queue_type:credit(). +cap_credit(DesiredCredit, MaxCredit) -> + min(DesiredCredit, MaxCredit). 
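%% Hedged examples of the link properties check_paired/1 above accepts and rejects
%% when a client attaches a management (request/response) link pair:
%%   {map, [{{symbol, <<"paired">>}, true}]}    %% accepted
%%   {map, [{{symbol, <<"paired">>}, false}]}   %% rejected: 'paired' must be true
%%   undefined                                  %% rejected: properties missing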
+ +ensure_mc_cluster_compat(Mc) -> + Feature = 'rabbitmq_4.0.0', + IsEnabled = rabbit_feature_flags:is_enabled(Feature), + case IsEnabled of + true -> + Mc; + false -> + McEnv = #{Feature => IsEnabled}, + %% other nodes in the cluster may not understand the new internal + %% amqp mc format - in this case we convert to AMQP legacy format + %% for compatibility + mc:convert(mc_amqpl, Mc, McEnv) + end. + +format_status( + #{state := #state{cfg = Cfg, + outgoing_pending = OutgoingPending, + remote_incoming_window = RemoteIncomingWindow, + remote_outgoing_window = RemoteOutgoingWindow, + next_incoming_id = NextIncomingId, + incoming_window = IncomingWindow, + next_outgoing_id = NextOutgoingId, + outgoing_delivery_id = OutgoingDeliveryId, + incoming_links = IncomingLinks, + outgoing_links = OutgoingLinks, + management_link_pairs = ManagementLinks, + incoming_management_links = IncomingManagementLinks, + outgoing_management_links = OutgoingManagementLinks, + outgoing_unsettled_map = OutgoingUnsettledMap, + stashed_rejected = StashedRejected, + stashed_settled = StashedSettled, + stashed_down = StashedDown, + stashed_eol = StashedEol, + queue_states = QueueStates, + permission_cache = PermissionCache, + topic_permission_cache = TopicPermissionCache}} = Status) -> + State = #{cfg => Cfg, + outgoing_pending => queue:len(OutgoingPending), + remote_incoming_window => RemoteIncomingWindow, + remote_outgoing_window => RemoteOutgoingWindow, + next_incoming_id => NextIncomingId, + incoming_window => IncomingWindow, + next_outgoing_id => NextOutgoingId, + outgoing_delivery_id => OutgoingDeliveryId, + incoming_links => IncomingLinks, + outgoing_links => OutgoingLinks, + management_link_pairs => ManagementLinks, + incoming_management_links => IncomingManagementLinks, + outgoing_management_links => OutgoingManagementLinks, + outgoing_unsettled_map => OutgoingUnsettledMap, + stashed_rejected => StashedRejected, + stashed_settled => StashedSettled, + stashed_down => StashedDown, + stashed_eol => StashedEol, + queue_states => rabbit_queue_type:format_status(QueueStates), + permission_cache => PermissionCache, + topic_permission_cache => TopicPermissionCache}, + maps:update(state, State, Status). + +unwrap({_Tag, V}) -> + V; +unwrap(V) -> + V. diff --git a/deps/rabbit/src/rabbit_amqp_session_sup.erl b/deps/rabbit/src/rabbit_amqp_session_sup.erl new file mode 100644 index 000000000000..1c1af2784cfc --- /dev/null +++ b/deps/rabbit/src/rabbit_amqp_session_sup.erl @@ -0,0 +1,39 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_amqp_session_sup). +-behaviour(supervisor). + +-include_lib("rabbit_common/include/rabbit.hrl"). + +%% client API +-export([start_link/1, + start_session/2]). + +%% supervisor callback +-export([init/1]). + +-spec start_link(Reader :: pid()) -> + supervisor:startlink_ret(). +start_link(ReaderPid) -> + supervisor:start_link(?MODULE, ReaderPid). + +init(ReaderPid) -> + SupFlags = #{strategy => simple_one_for_one, + intensity => 0, + period => 1}, + ChildSpec = #{id => amqp1_0_session, + start => {rabbit_amqp_session, start_link, [ReaderPid]}, + restart => temporary, + shutdown => ?WORKER_WAIT, + type => worker}, + {ok, {SupFlags, [ChildSpec]}}. 
+ +-spec start_session(pid(), list()) -> + supervisor:startchild_ret(). +start_session(SessionSupPid, Args) -> + supervisor:start_child(SessionSupPid, Args). diff --git a/deps/rabbit/src/rabbit_amqp_util.erl b/deps/rabbit/src/rabbit_amqp_util.erl new file mode 100644 index 000000000000..3257cef93704 --- /dev/null +++ b/deps/rabbit/src/rabbit_amqp_util.erl @@ -0,0 +1,19 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_amqp_util). +-include("rabbit_amqp.hrl"). + +-export([protocol_error/3]). + +-spec protocol_error(term(), io:format(), [term()]) -> + no_return(). +protocol_error(Condition, Msg, Args) -> + Description = unicode:characters_to_binary(lists:flatten(io_lib:format(Msg, Args))), + Reason = #'v1_0.error'{condition = Condition, + description = {utf8, Description}}, + exit(Reason). diff --git a/deps/rabbit/src/rabbit_amqp_writer.erl b/deps/rabbit/src/rabbit_amqp_writer.erl new file mode 100644 index 000000000000..7b239a10a107 --- /dev/null +++ b/deps/rabbit/src/rabbit_amqp_writer.erl @@ -0,0 +1,240 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_amqp_writer). +-behaviour(gen_server). + +-include("rabbit_amqp.hrl"). + +%% client API +-export([start_link/2, + send_command/3, + send_command/4, + send_command_sync/3, + send_command_and_notify/5, + internal_send_command/3]). + +%% gen_server callbacks +-export([init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + format_status/1]). + +-record(state, { + sock :: rabbit_net:socket(), + reader :: rabbit_types:connection(), + pending :: iolist(), + %% This field is just an optimisation to minimize the cost of erlang:iolist_size/1 + pending_size :: non_neg_integer(), + monitored_sessions :: #{pid() => true} + }). + +-define(HIBERNATE_AFTER, 6_000). +-define(CALL_TIMEOUT, 300_000). +-define(AMQP_SASL_FRAME_TYPE, 1). + +-type performative() :: tuple(). +-type payload() :: iodata(). + +%%%%%%%%%%%%%%%%%% +%%% client API %%% +%%%%%%%%%%%%%%%%%% + +-spec start_link (rabbit_net:socket(), pid()) -> + rabbit_types:ok(pid()). +start_link(Sock, ReaderPid) -> + Args = {Sock, ReaderPid}, + Opts = [{hibernate_after, ?HIBERNATE_AFTER}], + gen_server:start_link(?MODULE, Args, Opts). + +-spec send_command(pid(), + rabbit_types:channel_number(), + performative()) -> ok. +send_command(Writer, ChannelNum, Performative) -> + Request = {send_command, ChannelNum, Performative}, + gen_server:cast(Writer, Request). + +-spec send_command(pid(), + rabbit_types:channel_number(), + performative(), + payload()) -> ok | {error, blocked}. +send_command(Writer, ChannelNum, Performative, Payload) -> + Request = {send_command, self(), ChannelNum, Performative, Payload}, + maybe_send(Writer, Request). + +-spec send_command_sync(pid(), + rabbit_types:channel_number(), + performative()) -> ok. 
+send_command_sync(Writer, ChannelNum, Performative) -> + Request = {send_command, ChannelNum, Performative}, + gen_server:call(Writer, Request, ?CALL_TIMEOUT). + +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. +-spec send_command_and_notify(pid(), + pid(), + rabbit_types:channel_number(), + performative(), + payload()) -> ok | {error, blocked}. +send_command_and_notify(Writer, QueuePid, ChannelNum, Performative, Payload) -> + Request = {send_command_and_notify, QueuePid, self(), ChannelNum, Performative, Payload}, + maybe_send(Writer, Request). + +-spec internal_send_command(rabbit_net:socket(), + performative(), + amqp10_framing | rabbit_amqp_sasl) -> ok. +internal_send_command(Sock, Performative, Protocol) -> + Data = assemble_frame(0, Performative, Protocol), + ok = tcp_send(Sock, Data). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% gen_server callbacks %%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +init({Sock, ReaderPid}) -> + State = #state{sock = Sock, + reader = ReaderPid, + pending = [], + pending_size = 0, + monitored_sessions = #{}}, + process_flag(message_queue_data, off_heap), + {ok, State}. + +handle_cast({send_command, ChannelNum, Performative}, State0) -> + State = internal_send_command_async(ChannelNum, Performative, State0), + no_reply(State); +handle_cast({send_command, SessionPid, ChannelNum, Performative, Payload}, State0) -> + State1 = internal_send_command_async(ChannelNum, Performative, Payload, State0), + State = credit_flow_ack(SessionPid, State1), + no_reply(State); +%% Delete below function clause when feature flag rabbitmq_4.0.0 becomes required. +handle_cast({send_command_and_notify, QueuePid, SessionPid, ChannelNum, Performative, Payload}, State0) -> + State1 = internal_send_command_async(ChannelNum, Performative, Payload, State0), + State = credit_flow_ack(SessionPid, State1), + rabbit_amqqueue:notify_sent(QueuePid, SessionPid), + no_reply(State). + +handle_call({send_command, ChannelNum, Performative}, _From, State0) -> + State1 = internal_send_command_async(ChannelNum, Performative, State0), + State = flush(State1), + {reply, ok, State}. + +handle_info(timeout, State0) -> + State = flush(State0), + {noreply, State}; +handle_info({{'DOWN', session}, _MRef, process, SessionPid, _Reason}, + State0 = #state{monitored_sessions = Sessions}) -> + credit_flow:peer_down(SessionPid), + State = State0#state{monitored_sessions = maps:remove(SessionPid, Sessions)}, + no_reply(State); +%% Delete below function clause when feature flag rabbitmq_4.0.0 becomes required. +handle_info({'DOWN', _MRef, process, QueuePid, _Reason}, State) -> + rabbit_amqqueue:notify_sent_queue_down(QueuePid), + no_reply(State). + +format_status(Status) -> + maps:update_with( + state, + fun(#state{sock = Sock, + reader = Reader, + pending = Pending, + pending_size = PendingSize}) -> + #{socket => Sock, + reader => Reader, + %% Below 2 fields should always have the same value. + pending => iolist_size(Pending), + pending_size => PendingSize} + end, + Status). + +%%%%%%%%%%%%%%% +%%% Helpers %%% +%%%%%%%%%%%%%%% + +no_reply(State) -> + {noreply, State, 0}. + +maybe_send(Writer, Request) -> + case credit_flow:blocked() of + false -> + credit_flow:send(Writer), + gen_server:cast(Writer, Request); + true -> + {error, blocked} + end. 
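Editor's note: maybe_send/2 above turns credit-flow back-pressure into an explicit {error, blocked} return instead of blocking the caller. A sketch of how a caller of rabbit_amqp_writer:send_command/4 might react to it; try_send_transfer/4 is a hypothetical session-side helper, not part of the patch:

    %% Sketch only: 'ok' means the frame was handed to the writer,
    %% {error, blocked} means credit_flow is blocking this process and the
    %% caller should keep the delivery queued until it is unblocked.
    try_send_transfer(Writer, ChannelNum, Transfer, Payload) ->
        case rabbit_amqp_writer:send_command(Writer, ChannelNum, Transfer, Payload) of
            ok ->
                sent;
            {error, blocked} ->
                blocked
        end.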
+ +credit_flow_ack(SessionPid, State = #state{monitored_sessions = Sessions}) -> + credit_flow:ack(SessionPid), + case is_map_key(SessionPid, Sessions) of + true -> + State; + false -> + _MonitorRef = monitor(process, SessionPid, [{tag, {'DOWN', session}}]), + State#state{monitored_sessions = maps:put(SessionPid, true, Sessions)} + end. + +internal_send_command_async(Channel, Performative, + State = #state{pending = Pending, + pending_size = PendingSize}) -> + Frame = assemble_frame(Channel, Performative), + maybe_flush(State#state{pending = [Frame | Pending], + pending_size = PendingSize + iolist_size(Frame)}). + +internal_send_command_async(Channel, Performative, Payload, + State = #state{pending = Pending, + pending_size = PendingSize}) -> + Frame = assemble_frame_with_payload(Channel, Performative, Payload), + maybe_flush(State#state{pending = [Frame | Pending], + pending_size = PendingSize + iolist_size(Frame)}). + +assemble_frame(Channel, Performative) -> + assemble_frame(Channel, Performative, amqp10_framing). + +assemble_frame(Channel, Performative, amqp10_framing) -> + ?TRACE("channel ~b <-~n ~tp", + [Channel, amqp10_framing:pprint(Performative)]), + PerfBin = amqp10_framing:encode_bin(Performative), + amqp10_binary_generator:build_frame(Channel, PerfBin); +assemble_frame(Channel, Performative, rabbit_amqp_sasl) -> + ?TRACE("channel ~b <-~n ~tp", + [Channel, amqp10_framing:pprint(Performative)]), + PerfBin = amqp10_framing:encode_bin(Performative), + amqp10_binary_generator:build_frame(Channel, ?AMQP_SASL_FRAME_TYPE, PerfBin). + +assemble_frame_with_payload(Channel, Performative, Payload) -> + ?TRACE("channel ~b <-~n ~tp~n followed by ~tb bytes of payload", + [Channel, amqp10_framing:pprint(Performative), iolist_size(Payload)]), + PerfIoData = amqp10_framing:encode_bin(Performative), + amqp10_binary_generator:build_frame(Channel, [PerfIoData, Payload]). + +tcp_send(Sock, Data) -> + rabbit_misc:throw_on_error( + inet_error, + fun() -> rabbit_net:send(Sock, Data) end). + +%% Flush when more than 2.5 * 1460 bytes (TCP over Ethernet MSS) = 3650 bytes of data +%% has accumulated. The idea is to get the TCP data sections full (i.e. fill 1460 bytes) +%% as often as possible to reduce the overhead of TCP/IP headers. +-define(FLUSH_THRESHOLD, 3650). + +maybe_flush(State = #state{pending_size = PendingSize}) -> + case PendingSize > ?FLUSH_THRESHOLD of + true -> flush(State); + false -> State + end. + +flush(State = #state{pending = []}) -> + State; +flush(State = #state{sock = Sock, + pending = Pending}) -> + case rabbit_net:send(Sock, lists:reverse(Pending)) of + ok -> + State#state{pending = [], + pending_size = 0}; + {error, Reason} -> + exit({writer, send_failed, Reason}) + end. diff --git a/deps/rabbit/src/rabbit_amqqueue.erl b/deps/rabbit/src/rabbit_amqqueue.erl index f2df0d8695e9..b3cb051b5430 100644 --- a/deps/rabbit/src/rabbit_amqqueue.erl +++ b/deps/rabbit/src/rabbit_amqqueue.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_amqqueue). --export([warn_file_limit/0]). -export([recover/1, stop/1, start/1, declare/6, declare/7, delete_immediately/1, delete_exclusive/2, delete/4, purge/1, forget_all_durable/1]). 
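Editor's note: the FLUSH_THRESHOLD logic in rabbit_amqp_writer above batches frames until roughly 2.5 TCP MSS worth of bytes have accumulated, keeping a running byte count so iolist_size/1 is computed once per frame rather than over the whole backlog. A reduced, standalone sketch of that accumulator (the function names are illustrative only):

    %% Sketch only: frames are prepended (cheap), the size is tracked
    %% incrementally, and the batch is reversed into send order on flush.
    append_frame(Frame, {Pending, PendingSize}) ->
        maybe_flush_acc({[Frame | Pending], PendingSize + iolist_size(Frame)}).

    maybe_flush_acc({Pending, PendingSize}) when PendingSize > 3650 ->
        %% flush: hand back the data in send order and reset the accumulator
        {flush, lists:reverse(Pending), {[], 0}};
    maybe_flush_acc(Acc) ->
        {keep, Acc}.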
@@ -25,25 +24,23 @@ emit_info_all/5, list_local/1, info_local/1, emit_info_local/4, emit_info_down/4]). -export([count/0]). --export([list_down/1, count/1, list_names/0, list_names/1, list_local_names/0, +-export([list_down/1, list_down/2, list_all/1, + count/1, list_names/0, list_names/1, list_local_names/0, list_local_names_down/0]). -export([list_by_type/1, sample_local_queues/0, sample_n_by_name/2, sample_n/2]). -export([force_event_refresh/1, notify_policy_changed/1]). -export([consumers/1, consumers_all/1, emit_consumers_all/4, consumer_info_keys/0]). --export([basic_get/5, basic_consume/12, basic_cancel/5, notify_decorators/1]). +-export([basic_get/5, basic_consume/12, notify_decorators/1]). -export([notify_sent/2, notify_sent_queue_down/1, resume/2]). --export([notify_down_all/2, notify_down_all/3, activate_limit_all/2, credit/5]). +-export([notify_down_all/2, notify_down_all/3, activate_limit_all/2]). -export([on_node_up/1, on_node_down/1]). --export([update/2, store_queue/1, update_decorators/1, policy_changed/2]). --export([update_mirroring/1, sync_mirrors/1, cancel_sync_mirrors/1]). +-export([update/2, store_queue/1, update_decorators/2, policy_changed/2]). -export([emit_unresponsive/6, emit_unresponsive_local/5, is_unresponsive/2]). --export([has_synchronised_mirrors_online/1, is_match/2, is_in_virtual_host/2]). +-export([is_match/2, is_in_virtual_host/2]). -export([is_replicated/1, is_exclusive/1, is_not_exclusive/1, is_dead_exclusive/1]). --export([list_local_quorum_queues/0, list_local_quorum_queue_names/0, list_local_stream_queues/0, - list_local_mirrored_classic_queues/0, list_local_mirrored_classic_names/0, +-export([list_local_quorum_queues/0, list_local_quorum_queue_names/0, + list_local_stream_queues/0, list_stream_queues_on/1, list_local_leaders/0, list_local_followers/0, get_quorum_nodes/1, - list_local_mirrored_classic_without_synchronised_mirrors/0, - list_local_mirrored_classic_without_synchronised_mirrors_for_cli/0, list_local_quorum_queues_with_name_matching/1, list_local_quorum_queues_with_name_matching/2]). -export([is_local_to_node/2, is_local_to_node_set/2]). @@ -65,7 +62,7 @@ -export([is_server_named_allowed/1]). -export([check_max_age/1]). --export([get_queue_type/1, get_resource_vhost_name/1, get_resource_name/1]). +-export([get_queue_type/1, get_queue_type/2, get_resource_vhost_name/1, get_resource_name/1]). -export([deactivate_limit_all/2]). @@ -73,10 +70,10 @@ -export([queue/1, queue_names/1]). -export([kill_queue/2, kill_queue/3, kill_queue_hard/2, kill_queue_hard/3]). +-export([delete_transient_queues_on_node/1]). %% internal -export([internal_declare/2, internal_delete/2, run_backing_queue/3, - set_ram_duration_target/2, set_maximum_since_use/2, emit_consumers_local/3, internal_delete/3]). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -90,7 +87,7 @@ -define(IS_QUORUM(QPid), is_tuple(QPid)). %%---------------------------------------------------------------------------- --export_type([name/0, qmsg/0, absent_reason/0]). +-export_type([name/0, qmsg/0, msg_id/0, absent_reason/0]). -type name() :: rabbit_types:r('queue'). @@ -99,7 +96,7 @@ -type qfun(A) :: fun ((amqqueue:amqqueue()) -> A | no_return()). -type qmsg() :: {name(), pid() | {atom(), pid()}, msg_id(), boolean(), mc:state()}. --type msg_id() :: non_neg_integer(). +-type msg_id() :: undefined | non_neg_integer() | {Priority :: non_neg_integer(), undefined | non_neg_integer()}. -type ok_or_errors() :: 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}. 
-type absent_reason() :: 'nodedown' | 'crashed' | stopped | timeout. @@ -121,21 +118,6 @@ active, activity_status, arguments]). -define(KILL_QUEUE_DELAY_INTERVAL, 100). -warn_file_limit() -> - DurableQueues = find_recoverable_queues(), - L = length(DurableQueues), - - %% if there are not enough file handles, the server might hang - %% when trying to recover queues, warn the user: - case file_handle_cache:get_limit() < L of - true -> - rabbit_log:warning( - "Recovering ~tp queues, available file handles: ~tp. Please increase max open file handles limit to at least ~tp!", - [L, file_handle_cache:get_limit(), L]); - false -> - ok - end. - -spec recover(rabbit_types:vhost()) -> {Recovered :: [amqqueue:amqqueue()], Failed :: [amqqueue:amqqueue()]}. @@ -185,43 +167,42 @@ find_local_durable_queues(VHostName) -> rabbit_queue_type:is_recoverable(Q) end). -find_recoverable_queues() -> - rabbit_db_queue:filter_all_durable(fun(Q) -> - rabbit_queue_type:is_recoverable(Q) - end). - -spec declare(name(), boolean(), boolean(), rabbit_framing:amqp_table(), - rabbit_types:maybe(pid()), + rabbit_types:'maybe'(pid()), rabbit_types:username()) -> {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} | {'new', amqqueue:amqqueue(), rabbit_fifo_client:state()} | {'absent', amqqueue:amqqueue(), absent_reason()} | + {'error', Type :: atom(), Reason :: string(), Args :: term()} | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. declare(QueueName, Durable, AutoDelete, Args, Owner, ActingUser) -> declare(QueueName, Durable, AutoDelete, Args, Owner, ActingUser, node()). -%% The Node argument suggests where the queue (leader if mirrored) -%% should be. Note that in some cases (e.g. with "nodes" policy in -%% effect) this might not be possible to satisfy. +%% The Node argument suggests where the queue leader replica +%% should be placed. Note that this function does not guarantee that +%% this suggestion will be satisfied. -spec declare(name(), boolean(), boolean(), rabbit_framing:amqp_table(), - rabbit_types:maybe(pid()), + rabbit_types:'maybe'(pid()), rabbit_types:username(), node() | {'ignore_location', node()}) -> {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} | {'absent', amqqueue:amqqueue(), absent_reason()} | + {'error', Type :: atom(), Reason :: string(), Args :: term()} | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. declare(QueueName = #resource{virtual_host = VHost}, Durable, AutoDelete, Args, Owner, ActingUser, Node) -> - ok = check_declare_arguments(QueueName, Args), - Type = get_queue_type(Args), + %% note: this is a module name, not a shortcut such as <<"quorum">> + DQT = rabbit_vhost:default_queue_type(VHost, rabbit_queue_type:fallback()), + ok = check_declare_arguments(QueueName, Args, DQT), + Type = get_queue_type(Args, DQT), case rabbit_queue_type:is_enabled(Type) of true -> Q = amqqueue:new(QueueName, @@ -248,24 +229,53 @@ declare(QueueName = #resource{virtual_host = VHost}, Durable, AutoDelete, Args, [rabbit_misc:rs(QueueName), Type, Node]} end. +-spec get_queue_type(Args :: rabbit_framing:amqp_table()) -> rabbit_queue_type:queue_type(). +%% This version is not virtual host metadata-aware but will use +%% the node-wide default type as well as 'rabbit_queue_type:fallback/0'. +get_queue_type([]) -> + rabbit_queue_type:default(); get_queue_type(Args) -> + get_queue_type(Args, rabbit_queue_type:default()). 
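Editor's note: declare/7 above now resolves the effective queue type from the virtual host default (a queue type module, not a shortcut), falling back to the node-wide fallback type, before consulting the client arguments. A sketch of that resolution using a hypothetical wrapper built from the calls shown in the patch:

    %% Sketch only: mirrors the DQT resolution in declare/7.
    effective_queue_type(VHost, Args) ->
        DQT = rabbit_vhost:default_queue_type(VHost, rabbit_queue_type:fallback()),
        rabbit_amqqueue:get_queue_type(Args, DQT).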
+ +%% This version should be used together with 'rabbit_vhost:default_queue_type/{1,2}' +get_queue_type([], DefaultQueueType) -> + rabbit_queue_type:discover(DefaultQueueType); +get_queue_type(Args, DefaultQueueType) -> case rabbit_misc:table_lookup(Args, <<"x-queue-type">>) of undefined -> - rabbit_queue_type:default(); + rabbit_queue_type:discover(DefaultQueueType); + {longstr, undefined} -> + rabbit_queue_type:discover(DefaultQueueType); + {longstr, <<"undefined">>} -> + rabbit_queue_type:discover(DefaultQueueType); {_, V} -> rabbit_queue_type:discover(V) end. --spec internal_declare(amqqueue:amqqueue(), boolean()) -> - {created | existing, amqqueue:amqqueue()} | queue_absent(). +-spec internal_declare(Queue, Recover) -> Ret when + Queue :: amqqueue:amqqueue(), + Recover :: boolean(), + Ret :: {created | existing, amqqueue:amqqueue()} | + queue_absent() | + rabbit_khepri:timeout_error(). internal_declare(Q, Recover) -> do_internal_declare(Q, Recover). do_internal_declare(Q0, true) -> Q = amqqueue:set_state(Q0, live), - ok = store_queue(Q), - {created, Q0}; + case store_queue(Q) of + ok -> + %% TODO Why do we return the old state instead of the actual one? + %% I'm leaving it like it was before the khepri refactor, because + %% rabbit_amqqueue_process:init_it2 compares the result of this + %% declare to decide if continue or stop. If we return the actual + %% one, it fails and the queue stops silently during init. + %% Maybe we should review this bit of code at some point. + {created, Q0}; + {error, timeout} = Err -> + Err + end; do_internal_declare(Q0, false) -> Q = rabbit_policy:set(amqqueue:set_state(Q0, live)), Queue = rabbit_queue_decorator:set(Q), @@ -278,21 +288,28 @@ do_internal_declare(Q0, false) -> update(Name, Fun) -> rabbit_db_queue:update(Name, Fun). -%% only really used for quorum queues to ensure the rabbit_queue record +-spec ensure_rabbit_queue_record_is_initialized(Queue) -> Ret when + Queue :: amqqueue:amqqueue(), + Ret :: ok | {error, timeout}. + +%% only really used for stream queues to ensure the rabbit_queue record %% is initialised ensure_rabbit_queue_record_is_initialized(Q) -> store_queue(Q). --spec store_queue(amqqueue:amqqueue()) -> 'ok'. +-spec store_queue(Queue) -> Ret when + Queue :: amqqueue:amqqueue(), + Ret :: ok | {error, timeout}. store_queue(Q0) -> Q = rabbit_queue_decorator:set(Q0), rabbit_db_queue:set(Q). --spec update_decorators(name()) -> 'ok'. +-spec update_decorators(name(), [Decorator]) -> 'ok' when + Decorator :: atom(). -update_decorators(Name) -> - rabbit_db_queue:update_decorators(Name). +update_decorators(Name, Decorators) -> + rabbit_db_queue:update_decorators(Name, Decorators). -spec policy_changed(amqqueue:amqqueue(), amqqueue:amqqueue()) -> 'ok'. @@ -300,7 +317,6 @@ update_decorators(Name) -> policy_changed(Q1, Q2) -> Decorators1 = amqqueue:get_decorators(Q1), Decorators2 = amqqueue:get_decorators(Q2), - rabbit_mirror_queue_misc:update_mirrors(Q1, Q2), D1 = rabbit_queue_decorator:select(Decorators1), D2 = rabbit_queue_decorator:select(Decorators2), [ok = M:policy_changed(Q1, Q2) || M <- lists:usort(D1 ++ D2)], @@ -323,12 +339,10 @@ is_server_named_allowed(Args) -> Type = get_queue_type(Args), rabbit_queue_type:is_server_named_allowed(Type). --spec lookup - (name()) -> - rabbit_types:ok(amqqueue:amqqueue()) | - rabbit_types:error('not_found'); - ([name()]) -> - [amqqueue:amqqueue()]. +-spec lookup(QueueName) -> Ret when + QueueName :: name(), + Ret :: rabbit_types:ok(amqqueue:amqqueue()) + | rabbit_types:error('not_found'). 
lookup(Name) when is_record(Name, resource) -> rabbit_db_queue:get(Name). @@ -384,7 +398,7 @@ get_rebalance_lock(Pid) when is_pid(Pid) -> false end. --spec rebalance('all' | 'quorum' | 'classic', binary(), binary()) -> +-spec rebalance('all' | 'quorum', binary(), binary()) -> {ok, [{node(), pos_integer()}]} | {error, term()}. rebalance(Type, VhostSpec, QueueSpec) -> %% We have not yet acquired the rebalance_queues global lock. @@ -395,7 +409,7 @@ maybe_rebalance({true, Id}, Type, VhostSpec, QueueSpec) -> [Type, VhostSpec, QueueSpec]), Running = rabbit_maintenance:filter_out_drained_nodes_consistent_read(rabbit_nodes:list_running()), NumRunning = length(Running), - ToRebalance = [Q || Q <- rabbit_amqqueue:list(), + ToRebalance = [Q || Q <- list(), filter_per_type(Type, Q), is_replicated(Q), is_match(amqqueue:get_vhost(Q), VhostSpec) andalso @@ -417,7 +431,7 @@ maybe_rebalance(false, _Type, _VhostSpec, _QueueSpec) -> %% Stream queues don't yet support rebalance filter_per_type(all, Q) -> - ?amqqueue_is_quorum(Q) or ?amqqueue_is_classic(Q) or ?amqqueue_is_stream(Q); + ?amqqueue_is_quorum(Q) or ?amqqueue_is_stream(Q); filter_per_type(quorum, Q) -> ?amqqueue_is_quorum(Q); filter_per_type(stream, Q) -> @@ -428,9 +442,7 @@ filter_per_type(classic, Q) -> rebalance_module(Q) when ?amqqueue_is_quorum(Q) -> rabbit_quorum_queue; rebalance_module(Q) when ?amqqueue_is_stream(Q) -> - rabbit_stream_queue; -rebalance_module(Q) when ?amqqueue_is_classic(Q) -> - rabbit_mirror_queue_misc. + rabbit_stream_queue. get_resource_name(#resource{name = Name}) -> Name. @@ -541,23 +553,15 @@ with(#resource{} = Name, F, E, RetriesLeft) -> %% Something bad happened to that queue, we are bailing out %% on processing current request. E({absent, Q, timeout}); - {ok, Q} when ?amqqueue_state_is(Q, stopped) andalso RetriesLeft =:= 0 -> - %% The queue was stopped and not migrated + {ok, Q} when ?amqqueue_state_is(Q, stopped) -> + %% The queue was stopped E({absent, Q, stopped}); %% The queue process has crashed with unknown error {ok, Q} when ?amqqueue_state_is(Q, crashed) -> E({absent, Q, crashed}); - %% The queue process has been stopped by a supervisor. - %% In that case a synchronised mirror can take over - %% so we should retry. - {ok, Q} when ?amqqueue_state_is(Q, stopped) -> - %% The queue process was stopped by the supervisor - rabbit_misc:with_exit_handler( - fun () -> retry_wait(Q, F, E, RetriesLeft) end, - fun () -> F(Q) end); %% The queue is supposed to be active. - %% The leader node can go away or queue can be killed - %% so we retry, waiting for a mirror to take over. + %% The node can go away or queue can be killed so we retry. + %% TODO review this: why to retry when mirroring is gone? {ok, Q} when ?amqqueue_state_is(Q, live) -> %% We check is_process_alive(QPid) in case we receive a %% nodedown (for example) in F() that has nothing to do @@ -581,27 +585,19 @@ with(#resource{} = Name, F, E, RetriesLeft) -> retry_wait(Q, F, E, RetriesLeft) -> Name = amqqueue:get_name(Q), QPid = amqqueue:get_pid(Q), - QState = amqqueue:get_state(Q), - case {QState, is_replicated(Q)} of - %% We don't want to repeat an operation if - %% there are no mirrors to migrate to - {stopped, false} -> - E({absent, Q, stopped}); - _ -> - case rabbit_process:is_process_alive(QPid) of - true -> - % rabbitmq-server#1682 - % The old check would have crashed here, - % instead, log it and run the exit fun. 
absent & alive is weird, - % but better than crashing with badmatch,true - rabbit_log:debug("Unexpected alive queue process ~tp", [QPid]), - E({absent, Q, alive}); - false -> - ok % Expected result - end, - timer:sleep(30), - with(Name, F, E, RetriesLeft - 1) - end. + case rabbit_process:is_process_alive(QPid) of + true -> + %% rabbitmq-server#1682 + %% The old check would have crashed here, + %% instead, log it and run the exit fun. absent & alive is weird, + %% but better than crashing with badmatch,true + rabbit_log:debug("Unexpected alive queue process ~tp", [QPid]), + E({absent, Q, alive}); + false -> + ok % Expected result + end, + timer:sleep(30), + with(Name, F, E, RetriesLeft - 1). -spec with(name(), qfun(A)) -> A | rabbit_types:error(not_found_or_absent()). @@ -670,7 +666,7 @@ priv_absent(QueueName, QPid, _IsDurable, alive) -> -spec assert_equivalence (amqqueue:amqqueue(), boolean(), boolean(), - rabbit_framing:amqp_table(), rabbit_types:maybe(pid())) -> + rabbit_framing:amqp_table(), rabbit_types:'maybe'(pid())) -> 'ok' | rabbit_types:channel_exit() | rabbit_types:connection_exit(). assert_equivalence(Q, DurableDeclare, AutoDeleteDeclare, Args1, Owner) -> @@ -745,7 +741,7 @@ augment_declare_args(VHost, Durable, Exclusive, AutoDelete, Args0) -> case IsPermitted andalso IsCompatible of true -> %% patch up declare arguments with x-queue-type if there - %% is a vhost default set the queue is druable and not exclusive + %% is a vhost default set the queue is durable and not exclusive %% and there is no queue type argument %% present rabbit_misc:set_table_value(Args0, @@ -753,7 +749,12 @@ augment_declare_args(VHost, Durable, Exclusive, AutoDelete, Args0) -> longstr, DefaultQueueType); false -> - Args0 + %% if the properties are incompatible with the declared + %% DQT, use the fall back type + rabbit_misc:set_table_value(Args0, + <<"x-queue-type">>, + longstr, + rabbit_queue_type:short_alias_of(rabbit_queue_type:fallback())) end; _ -> Args0 @@ -780,11 +781,13 @@ check_exclusive_access(Q, _ReaderPid, _MatchType) -> [rabbit_misc:rs(QueueName)]). -spec with_exclusive_access_or_die(name(), pid(), qfun(A)) -> - A | rabbit_types:channel_exit(). - + A | rabbit_types:channel_exit(). with_exclusive_access_or_die(Name, ReaderPid, F) -> with_or_die(Name, - fun (Q) -> check_exclusive_access(Q, ReaderPid), F(Q) end). + fun (Q) -> + check_exclusive_access(Q, ReaderPid), + F(Q) + end). assert_args_equivalence(Q, NewArgs) -> ExistingArgs = amqqueue:get_arguments(Q), @@ -793,7 +796,33 @@ assert_args_equivalence(Q, NewArgs) -> QueueTypeArgs = rabbit_queue_type:arguments(queue_arguments, Type), rabbit_misc:assert_args_equivalence(ExistingArgs, NewArgs, QueueName, QueueTypeArgs). -check_declare_arguments(QueueName, Args) -> +-spec maybe_inject_default_queue_type_shortcut_into_args( + rabbit_framing:amqp_table(), rabbit_queue_type:queue_type()) -> rabbit_framing:amqp_table(). 
+maybe_inject_default_queue_type_shortcut_into_args(Args0, DefaultQueueType) -> + case rabbit_misc:table_lookup(Args0, <<"x-queue-type">>) of + undefined -> + inject_default_queue_type_shortcut_into_args(Args0, DefaultQueueType); + {longstr, undefined} -> + %% Important: use a shortcut such as 'quorum' or 'stream' that for the given queue type module + inject_default_queue_type_shortcut_into_args(Args0, DefaultQueueType); + {longstr, <<"undefined">>} -> + %% Important: use a shortcut such as 'quorum' or 'stream' that for the given queue type module + inject_default_queue_type_shortcut_into_args(Args0, DefaultQueueType); + _ValueIsAlreadySet -> + Args0 + end. + +-spec inject_default_queue_type_shortcut_into_args( + rabbit_framing:amqp_table(), rabbit_queue_type:queue_type()) -> rabbit_framing:amqp_table(). +inject_default_queue_type_shortcut_into_args(Args0, QueueType) -> + Shortcut = rabbit_queue_type:short_alias_of(QueueType), + NewVal = rabbit_data_coercion:to_binary(Shortcut), + rabbit_misc:set_table_value(Args0, <<"x-queue-type">>, longstr, NewVal). + +check_declare_arguments(QueueName, Args0, DefaultQueueType) -> + %% If the x-queue-type was not provided by the client, inject the + %% (virtual host, global or fallback) default before performing validation. MK. + Args = maybe_inject_default_queue_type_shortcut_into_args(Args0, DefaultQueueType), check_arguments_type_and_value(QueueName, Args, [{<<"x-queue-type">>, fun check_queue_type/2}]), Type = get_queue_type(Args), QueueTypeArgs = rabbit_queue_type:arguments(queue_arguments, Type), @@ -861,7 +890,6 @@ declare_args() -> {<<"x-queue-leader-locator">>, fun check_queue_leader_locator_arg/2}]. consume_args() -> [{<<"x-priority">>, fun check_int_arg/2}, - {<<"x-cancel-on-ha-failover">>, fun check_bool_arg/2}, {<<"x-stream-offset">>, fun check_stream_offset_arg/2}]. check_int_arg({Type, _}, _) -> @@ -1110,6 +1138,8 @@ check_queue_type(Val, _Args) when is_binary(Val) -> true -> ok; false -> {error, rabbit_misc:format("unsupported queue type '~ts'", [Val])} end; +check_queue_type(Val, Args) when is_atom(Val) -> + check_queue_type(rabbit_data_coercion:to_binary(Val), Args); check_queue_type(_Val, _Args) -> {error, invalid_queue_type}. @@ -1129,7 +1159,7 @@ list() -> count() -> rabbit_db_queue:count(). --spec list_names() -> [rabbit_amqqueue:name()]. +-spec list_names() -> [name()]. list_names() -> rabbit_db_queue:list(). @@ -1160,7 +1190,7 @@ is_down(Q) -> -spec sample_local_queues() -> [amqqueue:amqqueue()]. sample_local_queues() -> sample_n_by_name(list_local_names(), 300). --spec sample_n_by_name([rabbit_amqqueue:name()], pos_integer()) -> [amqqueue:amqqueue()]. +-spec sample_n_by_name([name()], pos_integer()) -> [amqqueue:amqqueue()]. sample_n_by_name([], _N) -> []; sample_n_by_name(Names, N) when is_list(Names) andalso is_integer(N) andalso N > 0 -> @@ -1174,7 +1204,7 @@ sample_n_by_name(Names, N) when is_list(Names) andalso is_integer(N) andalso N > end, [], lists:seq(1, M)), lists:map(fun (Id) -> - {ok, Q} = rabbit_amqqueue:lookup(Id), + {ok, Q} = lookup(Id), Q end, lists:usort(Ids)). @@ -1197,7 +1227,7 @@ list_by_type(stream) -> list_by_type(rabbit_stream_queue); list_by_type(Type) -> rabbit_db_queue:get_all_durable_by_type(Type). --spec list_local_quorum_queue_names() -> [rabbit_amqqueue:name()]. +-spec list_local_quorum_queue_names() -> [name()]. 
list_local_quorum_queue_names() -> [ amqqueue:get_name(Q) || Q <- list_by_type(quorum), @@ -1212,9 +1242,12 @@ list_local_quorum_queues() -> -spec list_local_stream_queues() -> [amqqueue:amqqueue()]. list_local_stream_queues() -> - [ Q || Q <- list_by_type(stream), - amqqueue:get_state(Q) =/= crashed, - lists:member(node(), get_quorum_nodes(Q))]. + list_stream_queues_on(node()). + +-spec list_stream_queues_on(node()) -> [amqqueue:amqqueue()]. +list_stream_queues_on(Node) when is_atom(Node) -> + [Q || Q <- list_by_type(rabbit_stream_queue), + lists:member(Node, get_quorum_nodes(Q))]. -spec list_local_leaders() -> [amqqueue:amqqueue()]. list_local_leaders() -> @@ -1227,53 +1260,11 @@ list_local_followers() -> [Q || Q <- list(), amqqueue:is_quorum(Q), - amqqueue:get_state(Q) =/= crashed, amqqueue:get_leader(Q) =/= node(), + lists:member(node(), get_quorum_nodes(Q)), rabbit_quorum_queue:is_recoverable(Q) ]. --spec list_local_mirrored_classic_queues() -> [amqqueue:amqqueue()]. -list_local_mirrored_classic_queues() -> - [ Q || Q <- list(), - amqqueue:get_state(Q) =/= crashed, - amqqueue:is_classic(Q), - is_local_to_node(amqqueue:get_pid(Q), node()), - is_replicated(Q)]. - --spec list_local_mirrored_classic_names() -> [rabbit_amqqueue:name()]. -list_local_mirrored_classic_names() -> - [ amqqueue:get_name(Q) || Q <- list(), - amqqueue:get_state(Q) =/= crashed, - amqqueue:is_classic(Q), - is_local_to_node(amqqueue:get_pid(Q), node()), - is_replicated(Q)]. - --spec list_local_mirrored_classic_without_synchronised_mirrors() -> - [amqqueue:amqqueue()]. -list_local_mirrored_classic_without_synchronised_mirrors() -> - [ Q || Q <- list(), - amqqueue:get_state(Q) =/= crashed, - amqqueue:is_classic(Q), - %% filter out exclusive queues as they won't actually be mirrored - is_not_exclusive(Q), - is_local_to_node(amqqueue:get_pid(Q), node()), - is_replicated(Q), - not has_synchronised_mirrors_online(Q)]. - --spec list_local_mirrored_classic_without_synchronised_mirrors_for_cli() -> - [#{binary => any()}]. -list_local_mirrored_classic_without_synchronised_mirrors_for_cli() -> - ClassicQs = list_local_mirrored_classic_without_synchronised_mirrors(), - [begin - #resource{name = Name} = amqqueue:get_name(Q), - #{ - <<"readable_name">> => rabbit_data_coercion:to_binary(rabbit_misc:rs(amqqueue:get_name(Q))), - <<"name">> => Name, - <<"virtual_host">> => amqqueue:get_vhost(Q), - <<"type">> => <<"classic">> - } - end || Q <- ClassicQs]. - -spec list_local_quorum_queues_with_name_matching(binary()) -> [amqqueue:amqqueue()]. list_local_quorum_queues_with_name_matching(Pattern) -> [ Q || Q <- list_by_type(quorum), @@ -1306,22 +1297,35 @@ is_in_virtual_host(Q, VHostName) -> -spec list(vhost:name()) -> [amqqueue:amqqueue()]. list(VHostPath) -> + list(VHostPath, rabbit_nodes:list_running()). + +list(VHostPath, NodesRunning) -> All = rabbit_db_queue:get_all(VHostPath), - NodesRunning = rabbit_nodes:list_running(), lists:filter(fun (Q) -> Pid = amqqueue:get_pid(Q), St = amqqueue:get_state(Q), - St =/= stopped orelse is_local_to_node_set(Pid, NodesRunning) + St =/= stopped orelse + is_local_to_node_set(Pid, NodesRunning) end, All). --spec list_down(rabbit_types:vhost()) -> [amqqueue:amqqueue()]. +-spec list_all(vhost:name()) -> [amqqueue:amqqueue()]. +list_all(VHostPath) -> + rabbit_db_queue:get_all(VHostPath). +-spec list_down(rabbit_types:vhost()) -> + [amqqueue:amqqueue()]. list_down(VHostPath) -> + list_down(VHostPath, rabbit_nodes:list_running()). 
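Editor's note: list_stream_queues_on/1 above generalises the previous node-local listing by taking the node as an argument. A hedged usage sketch (stream_queue_count_per_node/0 is illustrative, not part of the patch):

    %% Sketch only: count stream queues with a member on each running node.
    stream_queue_count_per_node() ->
        [{Node, length(rabbit_amqqueue:list_stream_queues_on(Node))}
         || Node <- rabbit_nodes:list_running()].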
+ +-spec list_down(rabbit_types:vhost(), NodesRunning :: [node()]) -> + [amqqueue:amqqueue()]. +list_down(VHostPath, NodesRunning) -> case rabbit_vhost:exists(VHostPath) of false -> []; true -> - Alive = sets:from_list([amqqueue:get_name(Q) || Q <- list(VHostPath)]), - NodesRunning = rabbit_nodes:list_running(), + Alive = sets:from_list([amqqueue:get_name(Q) || + Q <- list(VHostPath, NodesRunning)], + [{version, 2}]), rabbit_db_queue:filter_all_durable( fun (Q) -> N = amqqueue:get_name(Q), @@ -1329,7 +1333,8 @@ list_down(VHostPath) -> St = amqqueue:get_state(Q), amqqueue:get_vhost(Q) =:= VHostPath andalso - ((St =:= stopped andalso not is_local_to_node_set(Pid, NodesRunning)) + ((St =:= stopped andalso + not is_local_to_node_set(Pid, NodesRunning)) orelse (not sets:is_element(N, Alive))) end) @@ -1381,10 +1386,20 @@ is_unresponsive(Q, Timeout) when ?amqqueue_is_stream(Q) -> catch exit:{timeout, _} -> true + end; +is_unresponsive(Q, Timeout) when ?amqqueue_is_mqtt_qos0(Q) -> + QPid = amqqueue:get_pid(Q), + try + delegate:invoke(QPid, {gen_server2, call, [{info, [name]}, Timeout]}), + false + catch + %% TODO catch any exit?? + exit:{timeout, _} -> + true end. -format(Q) when ?amqqueue_is_quorum(Q) -> rabbit_quorum_queue:format(Q); -format(Q) -> rabbit_amqqueue_process:format(Q). +format(Q) -> + rabbit_queue_type:format(Q, #{}). -spec info(amqqueue:amqqueue()) -> rabbit_types:infos(). @@ -1421,13 +1436,15 @@ emit_info_local(VHostPath, Items, Ref, AggregatorPid) -> AggregatorPid, Ref, fun(Q) -> info(Q, Items) end, list_local(VHostPath)). emit_info_all(Nodes, VHostPath, Items, Ref, AggregatorPid) -> - Pids = [ spawn_link(Node, rabbit_amqqueue, emit_info_local, [VHostPath, Items, Ref, AggregatorPid]) || Node <- Nodes ], + Pids = [spawn_link(Node, rabbit_amqqueue, emit_info_local, + [VHostPath, Items, Ref, AggregatorPid]) || Node <- Nodes], rabbit_control_misc:await_emitters_termination(Pids). collect_info_all(VHostPath, Items) -> Nodes = rabbit_nodes:list_running(), Ref = make_ref(), - Pids = [ spawn_link(Node, rabbit_amqqueue, emit_info_local, [VHostPath, Items, Ref, self()]) || Node <- Nodes ], + Pids = [spawn_link(Node, rabbit_amqqueue, emit_info_local, + [VHostPath, Items, Ref, self()]) || Node <- Nodes], rabbit_control_misc:await_emitters_termination(Pids), wait_for_queues(Ref, length(Pids), []). @@ -1468,7 +1485,8 @@ info_local(VHostPath) -> list_local(VHostPath) -> [Q || Q <- list(VHostPath), - amqqueue:get_state(Q) =/= crashed, is_local_to_node(amqqueue:get_pid(Q), node())]. + amqqueue:get_state(Q) =/= crashed, + is_local_to_node(amqqueue:get_pid(Q), node())]. -spec force_event_refresh(reference()) -> 'ok'. @@ -1642,7 +1660,7 @@ delete_with(QueueName, ConnPid, IfUnused, IfEmpty, Username, CheckExclusive) whe {error, not_empty} -> rabbit_misc:precondition_failed("~ts not empty", [rabbit_misc:rs(QueueName)]); {error, {exit, _, _}} -> - %% rabbit_amqqueue:delete()/delegate:invoke might return {error, {exit, _, _}} + %% delete()/delegate:invoke might return {error, {exit, _, _}} {ok, 0}; {ok, Count} -> {ok, Count}; @@ -1702,15 +1720,6 @@ deactivate_limit_all(QRefs, ChPid) -> delegate:invoke_no_result(QPids, {gen_server2, cast, [{deactivate_limit, ChPid}]}). --spec credit(amqqueue:amqqueue(), - rabbit_types:ctag(), - non_neg_integer(), - boolean(), - rabbit_queue_type:state()) -> - {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()}. -credit(Q, CTag, Credit, Drain, QStates) -> - rabbit_queue_type:credit(Q, CTag, Credit, Drain, QStates). 
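Editor's note: the new is_unresponsive/2 clause above probes a queue process with a cheap, bounded info call through delegate and treats only a timeout exit as "unresponsive". The same probe pattern, reduced to a hypothetical helper:

    %% Sketch only: issue a bounded {info, [name]} call and classify the result.
    probe_queue_process(QPid, Timeout) ->
        try
            _ = delegate:invoke(QPid, {gen_server2, call, [{info, [name]}, Timeout]}),
            responsive
        catch
            exit:{timeout, _} ->
                unresponsive
        end.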
- -spec basic_get(amqqueue:amqqueue(), boolean(), pid(), rabbit_types:ctag(), rabbit_queue_type:state()) -> {'ok', non_neg_integer(), qmsg(), rabbit_queue_type:state()} | @@ -1737,7 +1746,7 @@ basic_consume(Q, NoAck, ChPid, LimiterPid, channel_pid => ChPid, limiter_pid => LimiterPid, limiter_active => LimiterActive, - prefetch_count => ConsumerPrefetchCount, + mode => {simple_prefetch, ConsumerPrefetchCount}, consumer_tag => ConsumerTag, exclusive_consume => ExclusiveConsume, args => Args, @@ -1745,14 +1754,6 @@ basic_consume(Q, NoAck, ChPid, LimiterPid, acting_user => ActingUser}, rabbit_queue_type:consume(Q, Spec, QStates). --spec basic_cancel(amqqueue:amqqueue(), rabbit_types:ctag(), any(), - rabbit_types:username(), - rabbit_queue_type:state()) -> - {ok, rabbit_queue_type:state()} | {error, term()}. -basic_cancel(Q, ConsumerTag, OkMsg, ActingUser, QStates) -> - rabbit_queue_type:cancel(Q, ConsumerTag, - OkMsg, ActingUser, QStates). - -spec notify_decorators(amqqueue:amqqueue()) -> 'ok'. notify_decorators(Q) -> @@ -1791,66 +1792,28 @@ internal_delete(Queue, ActingUser, Reason) -> -spec forget_all_durable(node()) -> 'ok'. +%% TODO this is used by `rabbit_mnesia:remove_node_if_mnesia_running` +%% Does it make any sense once mnesia is not used/removed? forget_all_durable(Node) -> UpdateFun = fun(Q) -> - forget_node_for_queue(Node, Q) + forget_node_for_queue(Q) end, FilterFun = fun(Q) -> is_local_to_node(amqqueue:get_pid(Q), Node) end, rabbit_db_queue:foreach_durable(UpdateFun, FilterFun). -%% Try to promote a mirror while down - it should recover as a -%% leader. We try to take the oldest mirror here for best chance of -%% recovery. -forget_node_for_queue(_DeadNode, Q) +forget_node_for_queue(Q) when ?amqqueue_is_quorum(Q) -> ok; -forget_node_for_queue(_DeadNode, Q) +forget_node_for_queue(Q) when ?amqqueue_is_stream(Q) -> ok; -forget_node_for_queue(DeadNode, Q) -> - RS = amqqueue:get_recoverable_slaves(Q), - forget_node_for_queue(DeadNode, RS, Q). - -forget_node_for_queue(_DeadNode, [], Q) -> - %% No mirrors to recover from, queue is gone. +forget_node_for_queue(Q) -> %% Don't process_deletions since that just calls callbacks and we %% are not really up. Name = amqqueue:get_name(Q), - rabbit_db_queue:internal_delete(Name, true, normal); - -%% Should not happen, but let's be conservative. -forget_node_for_queue(DeadNode, [DeadNode | T], Q) -> - forget_node_for_queue(DeadNode, T, Q); - -forget_node_for_queue(DeadNode, [H|T], Q) when ?is_amqqueue(Q) -> - Type = amqqueue:get_type(Q), - case {node_permits_offline_promotion(H), Type} of - {false, _} -> forget_node_for_queue(DeadNode, T, Q); - {true, rabbit_classic_queue} -> - Q1 = amqqueue:set_pid(Q, rabbit_misc:node_to_fake_pid(H)), - %% rabbit_db_queue:set_many/1 just stores a durable queue record, - %% that is the only one required here. - %% rabbit_db_queue:set/1 writes both durable and transient, thus - %% can't be used for this operation. - ok = rabbit_db_queue:set_many([Q1]); - {true, rabbit_quorum_queue} -> - ok - end. - -node_permits_offline_promotion(Node) -> - case node() of - Node -> not rabbit:is_running(); %% [1] - _ -> NotRunning = rabbit_nodes:list_not_running(), - lists:member(Node, NotRunning) %% [2] - end. -%% [1] In this case if we are a real running node (i.e. rabbitmqctl -%% has RPCed into us) then we cannot allow promotion. If on the other -%% hand we *are* rabbitmqctl impersonating the node for offline -%% node-forgetting then we can. 
-%% -%% [2] This is simpler; as long as it's down that's OK + rabbit_db_queue:internal_delete(Name, true, normal). -spec run_backing_queue (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> @@ -1859,43 +1822,10 @@ node_permits_offline_promotion(Node) -> run_backing_queue(QPid, Mod, Fun) -> gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}). --spec set_ram_duration_target(pid(), number() | 'infinity') -> 'ok'. - -set_ram_duration_target(QPid, Duration) -> - gen_server2:cast(QPid, {set_ram_duration_target, Duration}). - --spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'. - -set_maximum_since_use(QPid, Age) -> - gen_server2:cast(QPid, {set_maximum_since_use, Age}). - --spec update_mirroring(pid()) -> 'ok'. - -update_mirroring(QPid) -> - ok = delegate:invoke_no_result(QPid, {gen_server2, cast, [update_mirroring]}). - --spec sync_mirrors(amqqueue:amqqueue() | pid()) -> - 'ok' | rabbit_types:error('not_mirrored'). - -sync_mirrors(Q) when ?is_amqqueue(Q) -> - QPid = amqqueue:get_pid(Q), - delegate:invoke(QPid, {gen_server2, call, [sync_mirrors, infinity]}); -sync_mirrors(QPid) -> - delegate:invoke(QPid, {gen_server2, call, [sync_mirrors, infinity]}). - --spec cancel_sync_mirrors(amqqueue:amqqueue() | pid()) -> - 'ok' | {'ok', 'not_syncing'}. - -cancel_sync_mirrors(Q) when ?is_amqqueue(Q) -> - QPid = amqqueue:get_pid(Q), - delegate:invoke(QPid, {gen_server2, call, [cancel_sync_mirrors, infinity]}); -cancel_sync_mirrors(QPid) -> - delegate:invoke(QPid, {gen_server2, call, [cancel_sync_mirrors, infinity]}). - -spec is_replicated(amqqueue:amqqueue()) -> boolean(). is_replicated(Q) when ?amqqueue_is_classic(Q) -> - rabbit_mirror_queue_misc:is_mirrored(Q); + false; is_replicated(_Q) -> %% streams and quorum queues are all replicated true. @@ -1914,61 +1844,48 @@ is_dead_exclusive(Q) when ?amqqueue_exclusive_owner_is_pid(Q) -> Pid = amqqueue:get_pid(Q), not rabbit_process:is_process_alive(Pid). --spec has_synchronised_mirrors_online(amqqueue:amqqueue()) -> boolean(). -has_synchronised_mirrors_online(Q) -> - %% a queue with all mirrors down would have no mirror pids. - %% We treat these as in sync intentionally to avoid false positives. - MirrorPids = amqqueue:get_sync_slave_pids(Q), - MirrorPids =/= [] andalso lists:any(fun rabbit_misc:is_process_alive/1, MirrorPids). - -spec on_node_up(node()) -> 'ok'. -on_node_up(Node) -> - rabbit_db_queue:foreach_transient(maybe_clear_recoverable_node(Node)). - -maybe_clear_recoverable_node(Node) -> - fun(Q) -> - SPids = amqqueue:get_sync_slave_pids(Q), - RSs = amqqueue:get_recoverable_slaves(Q), - case lists:member(Node, RSs) of - true -> - %% There is a race with - %% rabbit_mirror_queue_slave:record_synchronised/1 called - %% by the incoming mirror node and this function, called - %% by the leader node. If this function is executed after - %% record_synchronised/1, the node is erroneously removed - %% from the recoverable mirror list. - %% - %% We check if the mirror node's queue PID is alive. If it is - %% the case, then this function is executed after. In this - %% situation, we don't touch the queue record, it is already - %% correct. - DoClearNode = - case [SP || SP <- SPids, node(SP) =:= Node] of - [SPid] -> not rabbit_misc:is_process_alive(SPid); - _ -> true - end, - if - DoClearNode -> RSs1 = RSs -- [Node], - store_queue( - amqqueue:set_recoverable_slaves(Q, RSs1)); - true -> ok - end; - false -> - ok - end - end. +on_node_up(_Node) -> + ok. -spec on_node_down(node()) -> 'ok'. 
on_node_down(Node) -> + case delete_transient_queues_on_node(Node) of + ok -> + ok; + {error, timeout} -> + %% This case is possible when running Khepri. The node going down + %% could leave the cluster in a minority so the command to delete + %% the transient queue records would fail. Also see + %% `rabbit_khepri:init/0': we also try this deletion when the node + %% restarts - a time that the cluster is very likely to have a + %% majority - to ensure these records are deleted. + rabbit_log:warning("transient queues for node '~ts' could not be " + "deleted because of a timeout. These queues " + "will be removed when node '~ts' restarts or " + "is removed from the cluster.", [Node, Node]), + ok + end. + +-spec delete_transient_queues_on_node(Node) -> Ret when + Node :: node(), + Ret :: ok | rabbit_khepri:timeout_error(). + +delete_transient_queues_on_node(Node) -> {Time, Ret} = timer:tc(fun() -> rabbit_db_queue:delete_transient(filter_transient_queues_to_delete(Node)) end), case Ret of - ok -> ok; - {QueueNames, Deletions} -> + ok -> + ok; + {error, timeout} = Err -> + Err; + {QueueNames, Deletions} when is_list(QueueNames) -> case length(QueueNames) of 0 -> ok; - _ -> rabbit_log:info("~tp transient queues from an old incarnation of node ~tp deleted in ~fs", [length(QueueNames), Node, Time/1000000]) + N -> rabbit_log:info("~b transient queues from node '~ts' " + "deleted in ~fs", + [N, Node, Time / 1_000_000]) end, notify_queue_binding_deletions(Deletions), rabbit_core_metrics:queues_deleted(QueueNames), @@ -1981,8 +1898,9 @@ filter_transient_queues_to_delete(Node) -> amqqueue:qnode(Q) == Node andalso not rabbit_process:is_process_alive(amqqueue:get_pid(Q)) andalso (not amqqueue:is_classic(Q) orelse not amqqueue:is_durable(Q)) - andalso (not rabbit_amqqueue:is_replicated(Q) - orelse rabbit_amqqueue:is_dead_exclusive(Q)) + andalso (not is_replicated(Q) + orelse is_dead_exclusive(Q)) + andalso amqqueue:get_type(Q) =/= rabbit_mqtt_qos0_queue end. notify_queue_binding_deletions(QueueDeletions) when is_list(QueueDeletions) -> @@ -2000,7 +1918,7 @@ notify_transient_queues_deleted(QueueDeletions) -> fun(Queue) -> ok = rabbit_event:notify(queue_deleted, [{name, Queue}, - {kind, rabbit_classic_queue}, + {type, rabbit_classic_queue}, {user, ?INTERNAL_USER}]) end, QueueDeletions). @@ -2096,7 +2014,7 @@ queue_names(Queues) get_bcc_queue(Q, BCCName) -> #resource{virtual_host = VHost} = amqqueue:get_name(Q), BCCQueueName = rabbit_misc:r(VHost, queue, BCCName), - rabbit_amqqueue:lookup(BCCQueueName). + lookup(BCCQueueName). is_queue_args_combination_permitted(Q) -> Durable = amqqueue:is_durable(Q), diff --git a/deps/rabbit/src/rabbit_amqqueue_control.erl b/deps/rabbit/src/rabbit_amqqueue_control.erl index d38e878d85bb..9626a7408566 100644 --- a/deps/rabbit/src/rabbit_amqqueue_control.erl +++ b/deps/rabbit/src/rabbit_amqqueue_control.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_amqqueue_control). 
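Editor's note: on_node_down/1 above tolerates an {error, timeout} from delete_transient_queues_on_node/1 because, with Khepri, the metadata store can lack a majority right after a node goes down; the records are then cleaned up when the node restarts or is removed. A sketch of calling the new exported function from a hypothetical maintenance task:

    %% Sketch only: mirror the timeout handling in on_node_down/1.
    cleanup_after_node_down(Node) ->
        case rabbit_amqqueue:delete_transient_queues_on_node(Node) of
            ok ->
                ok;
            {error, timeout} ->
                %% no metadata store majority right now; the records will be
                %% removed when the node restarts or is forgotten
                {retry_later, Node}
        end.
    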
diff --git a/deps/rabbit/src/rabbit_amqqueue_process.erl b/deps/rabbit/src/rabbit_amqqueue_process.erl index 61df69e53cc2..ed4fc3ccaf78 100644 --- a/deps/rabbit/src/rabbit_amqqueue_process.erl +++ b/deps/rabbit/src/rabbit_amqqueue_process.erl @@ -2,24 +2,24 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_amqqueue_process). -include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbit_common/include/rabbit_framing.hrl"). -include("amqqueue.hrl"). -behaviour(gen_server2). --define(SYNC_INTERVAL, 200). %% milliseconds --define(RAM_DURATION_UPDATE_INTERVAL, 5000). --define(CONSUMER_BIAS_RATIO, 2.0). %% i.e. consume 100% faster +-define(SYNC_INTERVAL, 200). %% milliseconds +-define(UPDATE_RATES_INTERVAL, 5000). +-define(CONSUMER_BIAS_RATIO, 2.0). %% i.e. consume 100% faster -export([info_keys/0]). -export([init_with_backing_queue_state/7]). +-export([start_link/2]). -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2, handle_pre_hibernate/1, prioritise_call/4, prioritise_cast/3, prioritise_info/3, format_message_queue/2]). @@ -36,8 +36,7 @@ %% This is used to determine when to delete auto-delete queues. has_had_consumers, %% backing queue module. - %% for mirrored queues, this will be rabbit_mirror_queue_master. - %% for non-priority and non-mirrored queues, rabbit_variable_queue. + %% for non-priority queues, this will be rabbit_variable_queue. %% see rabbit_backing_queue. backing_queue, %% backing queue state. @@ -49,7 +48,7 @@ expires, %% timer used to periodically sync (flush) queue index sync_timer_ref, - %% timer used to update ingress/egress rates and queue RAM duration target + %% timer used to update ingress/egress rates rate_timer_ref, %% timer used to clean up this queue due to TTL (on when unused) expiry_timer_ref, @@ -82,14 +81,10 @@ %% e.g. message expiration messages from previously set up timers %% that may or may not be still valid args_policy_version, - %% used to discard outdated/superseded policy updates, - %% e.g. when policies are applied concurrently. See - %% https://github.com/rabbitmq/rabbitmq-server/issues/803 for one - %% example. - mirroring_policy_version = 0, + mirroring_policy_version = 0, %% reserved %% running | flow | idle status, - %% true | false + %% boolean() single_active_consumer_on }). @@ -112,9 +107,6 @@ consumer_utilisation, consumer_capacity, memory, - slave_pids, - synchronised_slave_pids, - recoverable_slaves, state, garbage_collection ]). @@ -141,6 +133,26 @@ statistics_keys() -> ?STATISTICS_KEYS ++ rabbit_backing_queue:info_keys(). %%---------------------------------------------------------------------------- +-spec start_link(amqqueue:amqqueue(), pid()) + -> rabbit_types:ok_pid_or_error(). + +start_link(Q, Marker) -> + gen_server2:start_link(?MODULE, {Q, Marker}, []). 
+ +init({Q, Marker}) -> + case is_process_alive(Marker) of + true -> + %% start + init(Q); + false -> + %% restart + QueueName = amqqueue:get_name(Q), + {ok, Q1} = rabbit_amqqueue:lookup(QueueName), + rabbit_log:error("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]), + gen_server2:cast(self(), init), + init(Q1) + end; + init(Q) -> process_flag(trap_exit, true), ?store_proc_name(amqqueue:get_name(Q)), @@ -178,7 +190,7 @@ init_it(Recover, From, State = #q{q = Q0}) -> #q{backing_queue = undefined, backing_queue_state = undefined, q = Q} = State, - BQ = backing_queue_module(Q), + BQ = backing_queue_module(), {_, Terms} = recovery_status(Recover), BQS = bq_init(BQ, Q, Terms), %% Rely on terminate to delete the queue. @@ -198,12 +210,7 @@ init_it2(Recover, From, State = #q{q = Q, (Res == created orelse Res == existing) -> case matches(Recover, Q, Q1) of true -> - ok = file_handle_cache:register_callback( - rabbit_amqqueue, set_maximum_since_use, [self()]), - ok = rabbit_memory_monitor:register( - self(), {rabbit_amqqueue, - set_ram_duration_target, [self()]}), - BQ = backing_queue_module(Q1), + BQ = backing_queue_module(), BQS = bq_init(BQ, Q, TermsOrNew), send_reply(From, {new, Q}), recovery_barrier(Barrier), @@ -212,13 +219,19 @@ init_it2(Recover, From, State = #q{q = Q, backing_queue_state = BQS}), notify_decorators(startup, State), rabbit_event:notify(queue_created, - infos(?CREATION_EVENT_KEYS, State1)), + queue_created_infos(State1)), rabbit_event:if_enabled(State1, #q.stats_timer, fun() -> emit_stats(State1) end), noreply(State1); false -> {stop, normal, {existing, Q1}, State} end; + {error, timeout} -> + Reason = {protocol_error, internal_error, + "Could not declare ~ts on node '~ts' because the " + "metadata store operation timed out", + [rabbit_misc:rs(amqqueue:get_name(Q)), node()]}, + {stop, normal, Reason, State}; Err -> {stop, normal, Err, State} end. @@ -236,8 +249,7 @@ matches(new, Q1, Q2) -> amqqueue:is_auto_delete(Q1) =:= amqqueue:is_auto_delete(Q2) andalso amqqueue:get_exclusive_owner(Q1) =:= amqqueue:get_exclusive_owner(Q2) andalso amqqueue:get_arguments(Q1) =:= amqqueue:get_arguments(Q2) andalso - amqqueue:get_pid(Q1) =:= amqqueue:get_pid(Q2) andalso - amqqueue:get_slave_pids(Q1) =:= amqqueue:get_slave_pids(Q2); + amqqueue:get_pid(Q1) =:= amqqueue:get_pid(Q2); %% FIXME: Should v1 vs. v2 of the same record match? matches(_, Q, Q) -> true; matches(_, _Q, _Q1) -> false. @@ -305,7 +317,7 @@ terminate(normal, State) -> %% delete case terminate(_Reason, State = #q{q = Q}) -> terminate_shutdown(fun (BQS) -> Q2 = amqqueue:set_state(Q, crashed), - rabbit_amqqueue:store_queue(Q2), + _ = rabbit_amqqueue:store_queue(Q2), BQS end, State). @@ -353,8 +365,7 @@ terminate_shutdown(Fun, #q{status = Status} = State) -> fun stop_ttl_timer/1]), case BQS of undefined -> State1; - _ -> ok = rabbit_memory_monitor:deregister(self()), - QName = qname(State), + _ -> QName = qname(State), notify_decorators(shutdown, State), [emit_consumer_deleted(Ch, CTag, QName, ActingUser) || {Ch, CTag, _, _, _, _, _, _} <- @@ -370,6 +381,13 @@ code_change(_OldVsn, State, _Extra) -> maybe_notify_decorators(false, State) -> State; maybe_notify_decorators(true, State) -> notify_decorators(State), State. +notify_decorators_if_became_empty(WasEmpty, State) -> + case (not WasEmpty) andalso is_empty(State) of + true -> notify_decorators(State); + false -> ok + end, + State. + notify_decorators(Event, State) -> _ = decorator_callback(qname(State), Event, []), ok. 
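Editor's note: init({Q, Marker}) above distinguishes a first start from a crash-restart by checking whether the marker pid is still alive; on a restart it re-reads the queue record and logs that the queue crashed. A hedged sketch of the supervisor-side half of that handshake. The helper, the child-spec shape, and the stop message are assumptions for illustration; only rabbit_amqqueue_process:start_link/2 and the alive/dead semantics come from the patch:

    %% Sketch only: the marker pid is captured in the child start args held by
    %% the supervisor. It stays alive just past the first start, so a later
    %% restart with the same args sees a dead pid and init/1 takes the
    %% "Restarting crashed ..." branch.
    start_queue_child(Sup, Q) ->
        Marker = spawn_link(fun() -> receive stop -> ok end end),
        Spec = #{id => amqqueue:get_name(Q),
                 start => {rabbit_amqqueue_process, start_link, [Q, Marker]},
                 restart => transient,
                 type => worker},
        {ok, Pid} = supervisor:start_child(Sup, Spec),
        Marker ! stop,
        {ok, Pid}.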
@@ -470,12 +488,8 @@ init_queue_mode(Mode, State = #q {backing_queue = BQ, init_queue_version(Version0, State = #q {backing_queue = BQ, backing_queue_state = BQS}) -> - %% When the version is undefined we use the default version 2. - %% We want to BQ:set_queue_version in all cases because a v2 - %% policy might have been deleted, for example, and we want - %% the queue to go back to v1. Version = case Version0 of - undefined -> rabbit_misc:get_env(rabbit, classic_queue_default_version, 2); + undefined -> 2; _ -> Version0 end, BQS1 = BQ:set_queue_version(Version, BQS), @@ -503,12 +517,9 @@ next_state(State = #q{q = Q, timed -> {ensure_sync_timer(State1), 0 } end. -backing_queue_module(Q) -> - case rabbit_mirror_queue_misc:is_mirrored(Q) of - false -> {ok, BQM} = application:get_env(backing_queue_module), - BQM; - true -> rabbit_mirror_queue_master - end. +backing_queue_module() -> + {ok, BQM} = application:get_env(backing_queue_module), + BQM. ensure_sync_timer(State) -> rabbit_misc:ensure_timer(State, #q.sync_timer_ref, @@ -518,8 +529,8 @@ stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #q.sync_timer_ref). ensure_rate_timer(State) -> rabbit_misc:ensure_timer(State, #q.rate_timer_ref, - ?RAM_DURATION_UPDATE_INTERVAL, - update_ram_duration). + ?UPDATE_RATES_INTERVAL, + update_rates). stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #q.rate_timer_ref). @@ -570,14 +581,6 @@ assert_invariant(State = #q{consumers = Consumers, single_active_consumer_on = f is_empty(#q{backing_queue = BQ, backing_queue_state = BQS}) -> BQ:is_empty(BQS). -maybe_send_drained(WasEmpty, #q{q = Q} = State) -> - case (not WasEmpty) andalso is_empty(State) of - true -> notify_decorators(State), - rabbit_queue_consumers:send_drained(amqqueue:get_name(Q)); - false -> ok - end, - State. - confirm_messages([], MTC, _QName) -> MTC; confirm_messages(MsgIds, MTC, QName) -> @@ -626,32 +629,17 @@ send_or_record_confirm(#delivery{confirm = true, {immediately, State} end. -%% This feature was used by `rabbit_amqqueue_process` and -%% `rabbit_mirror_queue_slave` up-to and including RabbitMQ 3.7.x. It is -%% unused in 3.8.x and thus deprecated. We keep it to support in-place -%% upgrades to 3.8.x (i.e. mixed-version clusters), but it is a no-op -%% starting with that version. -send_mandatory(#delivery{mandatory = false}) -> - ok; -send_mandatory(#delivery{mandatory = true, - sender = SenderPid, - msg_seq_no = MsgSeqNo}) -> - gen_server2:cast(SenderPid, {mandatory_received, MsgSeqNo}). - discard(#delivery{confirm = Confirm, sender = SenderPid, - flow = Flow, message = Msg}, BQ, BQS, MTC, QName) -> MsgId = mc:get_annotation(id, Msg), MTC1 = case Confirm of true -> confirm_messages([MsgId], MTC, QName); false -> MTC end, - BQS1 = BQ:discard(MsgId, SenderPid, Flow, BQS), + BQS1 = BQ:discard(MsgId, SenderPid, BQS), {BQS1, MTC1}. -run_message_queue(State) -> run_message_queue(false, State). - run_message_queue(ActiveConsumersChanged, State) -> case is_empty(State) of true -> maybe_notify_decorators(ActiveConsumersChanged, State); @@ -671,7 +659,6 @@ run_message_queue(ActiveConsumersChanged, State) -> end. 
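Editor's note: maybe_send_drained/2 is removed above in favour of notify_decorators_if_became_empty/2: emptiness is sampled before an operation and decorators are notified only when that operation drained the queue. A reduced sketch of the pattern with a hypothetical wrapper:

    %% Sketch only: Op is any backing-queue operation; decorators would be
    %% notified only on the non-empty -> empty transition.
    run_and_notify_if_drained(Op, BQ, BQS0) ->
        WasEmpty = BQ:is_empty(BQS0),
        BQS = Op(BQS0),
        case (not WasEmpty) andalso BQ:is_empty(BQS) of
            true  -> queue_became_empty;   %% the real code calls notify_decorators/1
            false -> ok
        end,
        BQS.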
attempt_delivery(Delivery = #delivery{sender = SenderPid, - flow = Flow, message = Message}, Props, Delivered, State = #q{q = Q, backing_queue = BQ, @@ -680,7 +667,7 @@ attempt_delivery(Delivery = #delivery{sender = SenderPid, case rabbit_queue_consumers:deliver( fun (true) -> {AckTag, BQS1} = BQ:publish_delivered( - Message, Props, SenderPid, Flow, BQS), + Message, Props, SenderPid, BQS), {{Message, Delivered, AckTag}, {BQS1, MTC}}; (false) -> {{Message, Delivered, undefined}, discard(Delivery, BQ, BQS, MTC, amqqueue:get_name(Q))} @@ -704,11 +691,10 @@ maybe_deliver_or_enqueue(Delivery = #delivery{message = Message}, backing_queue_state = BQS, dlx = DLX, dlx_routing_key = RK}) -> - send_mandatory(Delivery), %% must do this before confirms case {will_overflow(Delivery, State), Overflow} of {true, 'reject-publish'} -> %% Drop publish and nack to publisher - send_reject_publish(Delivery, Delivered, State); + send_reject_publish(Delivery, State); {true, 'reject-publish-dlx'} -> %% Publish to DLX _ = with_dlx( @@ -723,7 +709,7 @@ maybe_deliver_or_enqueue(Delivery = #delivery{message = Message}, disabled, 1) end), %% Drop publish and nack to publisher - send_reject_publish(Delivery, Delivered, State); + send_reject_publish(Delivery, State); _ -> {IsDuplicate, BQS1} = BQ:is_duplicate(Message, BQS), State1 = State#q{backing_queue_state = BQS1}, @@ -732,7 +718,7 @@ maybe_deliver_or_enqueue(Delivery = #delivery{message = Message}, {true, drop} -> State1; %% Drop publish and nack to publisher {true, reject} -> - send_reject_publish(Delivery, Delivered, State1); + send_reject_publish(Delivery, State1); %% Enqueue and maybe drop head later false -> deliver_or_enqueue(Delivery, Delivered, State1) @@ -740,8 +726,7 @@ maybe_deliver_or_enqueue(Delivery = #delivery{message = Message}, end. deliver_or_enqueue(Delivery = #delivery{message = Message, - sender = SenderPid, - flow = Flow}, + sender = SenderPid}, Delivered, State = #q{q = Q, backing_queue = BQ}) -> {Confirm, State1} = send_or_record_confirm(Delivery, State), @@ -759,7 +744,7 @@ deliver_or_enqueue(Delivery = #delivery{message = Message, State2#q{backing_queue_state = BQS1, msg_id_to_channel = MTC1}; {undelivered, State2 = #q{backing_queue_state = BQS}} -> - BQS1 = BQ:publish(Message, Props, Delivered, SenderPid, Flow, BQS), + BQS1 = BQ:publish(Message, Props, Delivered, SenderPid, BQS), {Dropped, State3 = #q{backing_queue_state = BQS2}} = maybe_drop_head(State2#q{backing_queue_state = BQS1}), QLen = BQ:len(BQS2), @@ -808,10 +793,8 @@ maybe_drop_head(AlreadyDropped, State = #q{backing_queue = BQ, send_reject_publish(#delivery{confirm = true, sender = SenderPid, - flow = Flow, msg_seq_no = MsgSeqNo, message = Msg}, - _Delivered, State = #q{ q = Q, backing_queue = BQ, backing_queue_state = BQS, @@ -821,10 +804,9 @@ send_reject_publish(#delivery{confirm = true, amqqueue:get_name(Q), MsgSeqNo), MTC1 = maps:remove(MsgId, MTC), - BQS1 = BQ:discard(MsgId, SenderPid, Flow, BQS), + BQS1 = BQ:discard(MsgId, SenderPid, BQS), State#q{ backing_queue_state = BQS1, msg_id_to_channel = MTC1 }; -send_reject_publish(#delivery{confirm = false}, - _Delivered, State) -> +send_reject_publish(#delivery{confirm = false}, State) -> State. will_overflow(_, #q{max_length = undefined, @@ -847,13 +829,6 @@ over_max_length(#q{max_length = MaxLen, backing_queue_state = BQS}) -> BQ:len(BQS) > MaxLen orelse BQ:info(message_bytes_ready, BQS) > MaxBytes. 
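Editor's note: maybe_deliver_or_enqueue/3 above chooses between three overflow outcomes: 'reject-publish' nacks the publisher, 'reject-publish-dlx' additionally dead-letters the rejected message, and every other mode enqueues and may later drop from the head of the queue. The decision, reduced to an illustrative helper:

    %% Sketch only: the three outcomes of the overflow check.
    overflow_action(WillOverflow, Overflow) ->
        case {WillOverflow, Overflow} of
            {true, 'reject-publish'}     -> reject_publish;
            {true, 'reject-publish-dlx'} -> {reject_publish, dead_letter};
            _                            -> enqueue_and_maybe_drop_head
        end.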
-requeue_and_run(AckTags, State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - WasEmpty = BQ:is_empty(BQS), - {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS), - {_Dropped, State1} = maybe_drop_head(State#q{backing_queue_state = BQS1}), - run_message_queue(maybe_send_drained(WasEmpty, drop_expired_msgs(State1))). - fetch(AckRequired, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> %% @todo We should first drop expired messages then fetch @@ -861,7 +836,7 @@ fetch(AckRequired, State = #q{backing_queue = BQ, %% we will send expired messages at times. {Result, BQS1} = BQ:fetch(AckRequired, BQS), State1 = drop_expired_msgs(State#q{backing_queue_state = BQS1}), - {Result, maybe_send_drained(Result =:= empty, State1)}. + {Result, notify_decorators_if_became_empty(Result =:= empty, State1)}. ack(AckTags, ChPid, State) -> subtract_acks(ChPid, AckTags, State, @@ -873,7 +848,19 @@ ack(AckTags, ChPid, State) -> requeue(AckTags, ChPid, State) -> subtract_acks(ChPid, AckTags, State, - fun (State1) -> requeue_and_run(AckTags, State1) end). + fun (State1) -> requeue_and_run(AckTags, false, State1) end). + +requeue_and_run(AckTags, + ActiveConsumersChanged, + #q{backing_queue = BQ, + backing_queue_state = BQS0} = State0) -> + WasEmpty = BQ:is_empty(BQS0), + {_MsgIds, BQS} = BQ:requeue(AckTags, BQS0), + State1 = State0#q{backing_queue_state = BQS}, + {_Dropped, State2} = maybe_drop_head(State1), + State3 = drop_expired_msgs(State2), + State = notify_decorators_if_became_empty(WasEmpty, State3), + run_message_queue(ActiveConsumersChanged, State). possibly_unblock(Update, ChPid, State = #q{consumers = Consumers}) -> case rabbit_queue_consumers:possibly_unblock(Update, ChPid, Consumers) of @@ -920,15 +907,17 @@ handle_ch_down(DownPid, State = #q{consumers = Consumers, maybe_notify_consumer_updated(State2, Holder, Holder1), notify_decorators(State2), case should_auto_delete(State2) of - true -> + true -> log_auto_delete( io_lib:format( "because all of its consumers (~tp) were on a channel that was closed", [length(ChCTags)]), State), {stop, State2}; - false -> {ok, requeue_and_run(ChAckTags, - ensure_expiry_timer(State2))} + false -> + State3 = ensure_expiry_timer(State2), + State4 = requeue_and_run(ChAckTags, false, State3), + {ok, State4} end end. @@ -992,11 +981,6 @@ calculate_msg_expiry(Msg, TTL) -> os:system_time(microsecond) + T * 1000 end. -%% Logically this function should invoke maybe_send_drained/2. -%% However, that is expensive. Since some frequent callers of -%% drop_expired_msgs/1, in particular deliver_or_enqueue/3, cannot -%% possibly cause the queue to become empty, we push the -%% responsibility to the callers. So be cautious when adding new ones. 
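Illustrative sketch (not part of the patch): notify_decorators_if_became_empty/2 is called above but defined outside this diff. Based on the removed maybe_send_drained/2, it plausibly keeps the decorator notification while dropping the send_drained call; treat the body below as an assumption.

    %% Assumed shape, derived from the removed maybe_send_drained/2.
    notify_decorators_if_became_empty(WasEmpty, State) ->
        case (not WasEmpty) andalso is_empty(State) of
            true  -> notify_decorators(State);
            false -> ok
        end,
        State.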
drop_expired_msgs(State) -> case is_empty(State) of true -> State; @@ -1147,40 +1131,6 @@ i(consumer_capacity, #q{consumers = Consumers}) -> i(memory, _) -> {memory, M} = process_info(self(), memory), M; -i(slave_pids, #q{q = Q0}) -> - Name = amqqueue:get_name(Q0), - case rabbit_amqqueue:lookup(Name) of - {ok, Q} -> - case rabbit_mirror_queue_misc:is_mirrored(Q) of - false -> ''; - true -> amqqueue:get_slave_pids(Q) - end; - {error, not_found} -> - '' - end; -i(synchronised_slave_pids, #q{q = Q0}) -> - Name = amqqueue:get_name(Q0), - case rabbit_amqqueue:lookup(Name) of - {ok, Q} -> - case rabbit_mirror_queue_misc:is_mirrored(Q) of - false -> ''; - true -> amqqueue:get_sync_slave_pids(Q) - end; - {error, not_found} -> - '' - end; -i(recoverable_slaves, #q{q = Q0}) -> - Name = amqqueue:get_name(Q0), - Durable = amqqueue:is_durable(Q0), - case rabbit_amqqueue:lookup(Name) of - {ok, Q} -> - case Durable andalso rabbit_mirror_queue_misc:is_mirrored(Q) of - false -> ''; - true -> amqqueue:get_recoverable_slaves(Q) - end; - {error, not_found} -> - '' - end; i(state, #q{status = running}) -> credit_flow:state(); i(state, #q{status = State}) -> State; i(garbage_collection, _State) -> @@ -1233,14 +1183,12 @@ emit_consumer_deleted(ChPid, ConsumerTag, QName, ActingUser) -> %%---------------------------------------------------------------------------- -prioritise_call(Msg, _From, _Len, State) -> +prioritise_call(Msg, _From, _Len, _State) -> case Msg of info -> 9; {info, _Items} -> 9; consumers -> 9; stat -> 7; - {basic_consume, _, _, _, _, _, _, _, _, _} -> consumer_bias(State, 0, 2); - {basic_cancel, _, _, _} -> consumer_bias(State, 0, 2); _ -> 0 end. @@ -1248,8 +1196,6 @@ prioritise_cast(Msg, _Len, State) -> case Msg of delete_immediately -> 8; {delete_exclusive, _Pid} -> 8; - {set_ram_duration_target, _Duration} -> 8; - {set_maximum_since_use, _Age} -> 8; {run_backing_queue, _Mod, _Fun} -> 6; {ack, _AckTags, _ChPid} -> 4; %% [1] {resume, _ChPid} -> 3; @@ -1268,7 +1214,7 @@ prioritise_cast(Msg, _Len, State) -> consumer_bias(#q{backing_queue = BQ, backing_queue_state = BQS}, Low, High) -> case BQ:msg_rates(BQS) of - {0.0, _} -> Low; + {Ingress, _} when Ingress =:= +0.0 orelse Ingress =:= -0.0 -> Low; {Ingress, Egress} when Egress / Ingress < ?CONSUMER_BIAS_RATIO -> High; {_, _} -> Low end. @@ -1276,27 +1222,16 @@ consumer_bias(#q{backing_queue = BQ, backing_queue_state = BQS}, Low, High) -> prioritise_info(Msg, _Len, #q{q = Q}) -> DownPid = amqqueue:get_exclusive_owner(Q), case Msg of - {'DOWN', _, process, DownPid, _} -> 8; - update_ram_duration -> 8; - {maybe_expire, _Version} -> 8; - {drop_expired, _Version} -> 8; - emit_stats -> 7; - sync_timeout -> 6; - _ -> 0 + {'DOWN', _, process, DownPid, _} -> 8; + {maybe_expire, _Version} -> 8; + {drop_expired, _Version} -> 8; + emit_stats -> 7; + sync_timeout -> 6; + _ -> 0 end. handle_call({init, Recover}, From, State) -> - try - init_it(Recover, From, State) - catch - {coordinator_not_started, Reason} -> - %% The GM can shutdown before the coordinator has started up - %% (lost membership or missing group), thus the start_link of - %% the coordinator returns {error, shutdown} as rabbit_amqqueue_process - %% is trapping exists. The master captures this return value and - %% throws the current exception. 
- {stop, Reason, State} - end; + init_it(Recover, From, State); handle_call(info, _From, State) -> reply({ok, infos(info_keys(), State)}, State); @@ -1343,9 +1278,8 @@ handle_call({basic_get, ChPid, NoAck, LimiterPid}, _From, end; handle_call({basic_consume, NoAck, ChPid, LimiterPid, LimiterActive, - PrefetchCount, ConsumerTag, ExclusiveConsume, Args, OkMsg, ActingUser}, - _From, State = #q{q = Q, - consumers = Consumers, + ModeOrPrefetch, ConsumerTag, ExclusiveConsume, Args, OkMsg, ActingUser}, + _From, State = #q{consumers = Consumers, active_consumer = Holder, single_active_consumer_on = SingleActiveConsumerOn}) -> ConsumerRegistration = case SingleActiveConsumerOn of @@ -1355,33 +1289,28 @@ handle_call({basic_consume, NoAck, ChPid, LimiterPid, LimiterActive, {error, reply({error, exclusive_consume_unavailable}, State)}; false -> Consumers1 = rabbit_queue_consumers:add( - amqqueue:get_name(Q), ChPid, ConsumerTag, NoAck, - LimiterPid, LimiterActive, - PrefetchCount, Args, is_empty(State), - ActingUser, Consumers), - - case Holder of - none -> - NewConsumer = rabbit_queue_consumers:get(ChPid, ConsumerTag, Consumers1), - {state, State#q{consumers = Consumers1, - has_had_consumers = true, - active_consumer = NewConsumer}}; - _ -> - {state, State#q{consumers = Consumers1, - has_had_consumers = true}} - end + LimiterPid, LimiterActive, ModeOrPrefetch, + Args, ActingUser, Consumers), + case Holder of + none -> + NewConsumer = rabbit_queue_consumers:get(ChPid, ConsumerTag, Consumers1), + {state, State#q{consumers = Consumers1, + has_had_consumers = true, + active_consumer = NewConsumer}}; + _ -> + {state, State#q{consumers = Consumers1, + has_had_consumers = true}} + end end; false -> case check_exclusive_access(Holder, ExclusiveConsume, State) of in_use -> {error, reply({error, exclusive_consume_unavailable}, State)}; ok -> Consumers1 = rabbit_queue_consumers:add( - amqqueue:get_name(Q), ChPid, ConsumerTag, NoAck, - LimiterPid, LimiterActive, - PrefetchCount, Args, is_empty(State), - ActingUser, Consumers), + LimiterPid, LimiterActive, ModeOrPrefetch, + Args, ActingUser, Consumers), ExclusiveConsumer = if ExclusiveConsume -> {ChPid, ConsumerTag}; true -> Holder @@ -1408,41 +1337,56 @@ handle_call({basic_consume, NoAck, ChPid, LimiterPid, LimiterActive, {false, _} -> {true, up} end, - rabbit_core_metrics:consumer_created( + PrefetchCount = rabbit_queue_consumers:parse_prefetch_count(ModeOrPrefetch), + rabbit_core_metrics:consumer_created( ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName, PrefetchCount, ConsumerIsActive, ActivityStatus, Args), emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName, PrefetchCount, Args, none, ActingUser), notify_decorators(State1), - reply(ok, run_message_queue(State1)) + reply(ok, run_message_queue(false, State1)) end; -handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg, ActingUser}, _From, - State = #q{consumers = Consumers, - active_consumer = Holder, - single_active_consumer_on = SingleActiveConsumerOn }) -> +handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg, ActingUser}, From, State) -> + handle_call({stop_consumer, #{pid => ChPid, + consumer_tag => ConsumerTag, + ok_msg => OkMsg, + user => ActingUser}}, + From, State); + +handle_call({stop_consumer, #{pid := ChPid, + consumer_tag := ConsumerTag, + user := ActingUser} = Spec}, + _From, + State = #q{consumers = Consumers, + active_consumer = Holder, + single_active_consumer_on = SingleActiveConsumerOn}) -> + Reason = maps:get(reason, Spec, cancel), + OkMsg = 
maps:get(ok_msg, Spec, undefined), ok = maybe_send_reply(ChPid, OkMsg), - case rabbit_queue_consumers:remove(ChPid, ConsumerTag, Consumers) of + case rabbit_queue_consumers:remove(ChPid, ConsumerTag, Reason, Consumers) of not_found -> reply(ok, State); - Consumers1 -> - Holder1 = new_single_active_consumer_after_basic_cancel(ChPid, ConsumerTag, - Holder, SingleActiveConsumerOn, Consumers1 - ), - State1 = State#q{consumers = Consumers1, - active_consumer = Holder1}, + {AckTags, Consumers1} -> + Holder1 = new_single_active_consumer_after_basic_cancel( + ChPid, ConsumerTag, Holder, SingleActiveConsumerOn, Consumers1), + State1 = State#q{consumers = Consumers1, + active_consumer = Holder1}, maybe_notify_consumer_updated(State1, Holder, Holder1), emit_consumer_deleted(ChPid, ConsumerTag, qname(State1), ActingUser), notify_decorators(State1), case should_auto_delete(State1) of - false -> reply(ok, ensure_expiry_timer(State1)); + false -> + State2 = requeue_and_run(AckTags, Holder =/= Holder1, State1), + State3 = ensure_expiry_timer(State2), + reply(ok, State3); true -> log_auto_delete( - io_lib:format( - "because its last consumer with tag '~ts' was cancelled", - [ConsumerTag]), - State), + io_lib:format( + "because its last consumer with tag '~ts' was cancelled", + [ConsumerTag]), + State), stop(ok, State1) end end; @@ -1467,40 +1411,11 @@ handle_call(purge, _From, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> {Count, BQS1} = BQ:purge(BQS), State1 = State#q{backing_queue_state = BQS1}, - reply({ok, Count}, maybe_send_drained(Count =:= 0, State1)); + reply({ok, Count}, notify_decorators_if_became_empty(Count =:= 0, State1)); handle_call({requeue, AckTags, ChPid}, From, State) -> gen_server2:reply(From, ok), - noreply(requeue(AckTags, ChPid, State)); - -handle_call(sync_mirrors, _From, - State = #q{backing_queue = rabbit_mirror_queue_master, - backing_queue_state = BQS}) -> - S = fun(BQSN) -> State#q{backing_queue_state = BQSN} end, - HandleInfo = fun (Status) -> - receive {'$gen_call', From, {info, Items}} -> - Infos = infos(Items, State#q{status = Status}), - gen_server2:reply(From, {ok, Infos}) - after 0 -> - ok - end - end, - EmitStats = fun (Status) -> - rabbit_event:if_enabled( - State, #q.stats_timer, - fun() -> emit_stats(State#q{status = Status}) end) - end, - case rabbit_mirror_queue_master:sync_mirrors(HandleInfo, EmitStats, BQS) of - {ok, BQS1} -> reply(ok, S(BQS1)); - {stop, Reason, BQS1} -> {stop, Reason, S(BQS1)} - end; - -handle_call(sync_mirrors, _From, State) -> - reply({error, not_mirrored}, State); - -%% By definition if we get this message here we do not have to do anything. -handle_call(cancel_sync_mirrors, _From, State) -> - reply({ok, not_syncing}, State). + noreply(requeue(AckTags, ChPid, State)). new_single_active_consumer_after_basic_cancel(ChPid, ConsumerTag, CurrentSingleActiveConsumer, _SingleActiveConsumerIsOn = true, Consumers) -> @@ -1539,17 +1454,7 @@ maybe_notify_consumer_updated(#q{single_active_consumer_on = true} = State, _Pre end. handle_cast(init, State) -> - try - init_it({no_barrier, non_clean_shutdown}, none, State) - catch - {coordinator_not_started, Reason} -> - %% The GM can shutdown before the coordinator has started up - %% (lost membership or missing group), thus the start_link of - %% the coordinator returns {error, shutdown} as rabbit_amqqueue_process - %% is trapping exists. The master captures this return value and - %% throws the current exception. 
- {stop, Reason, State} - end; + init_it({no_barrier, non_clean_shutdown}, none, State); handle_cast({run_backing_queue, Mod, Fun}, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> @@ -1558,25 +1463,18 @@ handle_cast({run_backing_queue, Mod, Fun}, handle_cast({deliver, Delivery = #delivery{sender = Sender, flow = Flow}, - SlaveWhenPublished}, + Delivered}, State = #q{senders = Senders}) -> Senders1 = case Flow of %% In both credit_flow:ack/1 we are acking messages to the channel %% process that sent us the message delivery. See handle_ch_down %% for more info. flow -> credit_flow:ack(Sender), - case SlaveWhenPublished of - true -> credit_flow:ack(Sender); %% [0] - false -> ok - end, pmon:monitor(Sender, Senders); noflow -> Senders end, State1 = State#q{senders = Senders1}, - noreply(maybe_deliver_or_enqueue(Delivery, SlaveWhenPublished, State1)); -%% [0] The second ack is since the channel thought we were a mirror at -%% the time it published this message, so it used two credits (see -%% rabbit_queue_type:deliver/2). + noreply(maybe_deliver_or_enqueue(Delivery, Delivered, State1)); handle_cast({ack, AckTags, ChPid}, State) -> noreply(ack(AckTags, ChPid, State)); @@ -1619,47 +1517,64 @@ handle_cast({deactivate_limit, ChPid}, State) -> noreply(possibly_unblock(rabbit_queue_consumers:deactivate_limit_fun(), ChPid, State)); -handle_cast({set_ram_duration_target, Duration}, - State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> - BQS1 = BQ:set_ram_duration_target(Duration, BQS), - noreply(State#q{backing_queue_state = BQS1}); - -handle_cast({set_maximum_since_use, Age}, State) -> - ok = file_handle_cache:set_maximum_since_use(Age), +handle_cast({credit, SessionPid, CTag, Credit, Drain}, + #q{q = Q, + backing_queue = BQ, + backing_queue_state = BQS0} = State) -> + %% Credit API v1. + %% Delete this function clause when feature flag rabbitmq_4.0.0 becomes required. + %% Behave like non-native AMQP 1.0: Send send_credit_reply before deliveries. 
+ rabbit_classic_queue:send_credit_reply_credit_api_v1( + SessionPid, amqqueue:get_name(Q), BQ:len(BQS0)), + handle_cast({credit, SessionPid, CTag, credit_api_v1, Credit, Drain}, State); +handle_cast({credit, SessionPid, CTag, DeliveryCountRcv, Credit, Drain}, + #q{consumers = Consumers0, + q = Q} = State0) -> + QName = amqqueue:get_name(Q), + State = #q{backing_queue_state = PostBQS, + backing_queue = BQ} = + case rabbit_queue_consumers:process_credit( + DeliveryCountRcv, Credit, SessionPid, CTag, Consumers0) of + unchanged -> + State0; + {unblocked, Consumers1} -> + State1 = State0#q{consumers = Consumers1}, + run_message_queue(true, State1) + end, + case rabbit_queue_consumers:get_link_state(SessionPid, CTag) of + {credit_api_v1, PostCred} + when Drain andalso + is_integer(PostCred) andalso PostCred > 0 -> + %% credit API v1 + rabbit_queue_consumers:drained(credit_api_v1, SessionPid, CTag), + rabbit_classic_queue:send_drained_credit_api_v1(SessionPid, QName, CTag, PostCred); + {PostDeliveryCountSnd, PostCred} + when is_integer(PostDeliveryCountSnd) andalso + Drain andalso + is_integer(PostCred) andalso PostCred > 0 -> + %% credit API v2 + AdvancedDeliveryCount = serial_number:add(PostDeliveryCountSnd, PostCred), + rabbit_queue_consumers:drained(AdvancedDeliveryCount, SessionPid, CTag), + Avail = BQ:len(PostBQS), + rabbit_classic_queue:send_credit_reply( + SessionPid, QName, CTag, AdvancedDeliveryCount, 0, Avail, Drain); + {PostDeliveryCountSnd, PostCred} + when is_integer(PostDeliveryCountSnd) -> + %% credit API v2 + Avail = BQ:len(PostBQS), + rabbit_classic_queue:send_credit_reply( + SessionPid, QName, CTag, PostDeliveryCountSnd, PostCred, Avail, Drain); + _ -> + ok + end, noreply(State); -handle_cast(update_mirroring, State = #q{q = Q, - mirroring_policy_version = Version}) -> - case needs_update_mirroring(Q, Version) of - false -> - noreply(State); - {Policy, NewVersion} -> - State1 = State#q{mirroring_policy_version = NewVersion}, - noreply(update_mirroring(Policy, State1)) - end; - -handle_cast({credit, ChPid, CTag, Credit, Drain}, - State = #q{consumers = Consumers, - backing_queue = BQ, - backing_queue_state = BQS, - q = Q}) -> - Len = BQ:len(BQS), - rabbit_classic_queue:send_credit_reply(ChPid, amqqueue:get_name(Q), Len), - noreply( - case rabbit_queue_consumers:credit(amqqueue:get_name(Q), - Len == 0, Credit, Drain, ChPid, CTag, - Consumers) of - unchanged -> State; - {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1}, - run_message_queue(true, State1) - end); - % Note: https://www.pivotaltracker.com/story/show/166962656 % This event is necessary for the stats timer to be initialized with % the correct values once the management agent has started handle_cast({force_event_refresh, Ref}, State = #q{consumers = Consumers}) -> - rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State), Ref), + rabbit_event:notify(queue_created, queue_created_infos(State), Ref), QName = qname(State), AllConsumers = rabbit_queue_consumers:all(Consumers), rabbit_log:debug("Queue ~ts forced to re-emit events, consumers: ~tp", [rabbit_misc:rs(QName), AllConsumers]), @@ -1677,24 +1592,38 @@ handle_cast(notify_decorators, State) -> handle_cast(policy_changed, State = #q{q = Q0}) -> Name = amqqueue:get_name(Q0), %% We depend on the #q.q field being up to date at least WRT - %% policy (but not mirror pids) in various places, so when it - %% changes we go and read it from Mnesia again. 
+ %% policy in various places, so when it + %% changes we go and read it from the database again. %% %% This also has the side effect of waking us up so we emit a %% stats event - so event consumers see the changed policy. {ok, Q} = rabbit_amqqueue:lookup(Name), noreply(process_args_policy(State#q{q = Q})); -handle_cast({sync_start, _, _}, State = #q{q = Q}) -> - Name = amqqueue:get_name(Q), - %% Only a mirror should receive this, it means we are a duplicated master - rabbit_mirror_queue_misc:log_warning( - Name, "Stopping after receiving sync_start from another master", []), - stop(State). +handle_cast({policy_changed, Q0}, State) -> + Name = amqqueue:get_name(Q0), + PolicyVersion0 = amqqueue:get_policy_version(Q0), + %% We depend on the #q.q field being up to date at least WRT + %% policy in various places, so when it + %% changes we go and read it from the database again. + %% + %% This also has the side effect of waking us up so we emit a + %% stats event - so event consumers see the changed policy. + {ok, Q} = rabbit_amqqueue:lookup(Name), + PolicyVersion = amqqueue:get_policy_version(Q), + case PolicyVersion >= PolicyVersion0 of + true -> + noreply(process_args_policy(State#q{q = Q})); + false -> + noreply(process_args_policy(State#q{q = Q0})) + end. -handle_info({maybe_expire, Vsn}, State = #q{args_policy_version = Vsn}) -> +handle_info({maybe_expire, Vsn}, State = #q{q = Q, expires = Expiry, args_policy_version = Vsn}) -> case is_unused(State) of - true -> stop(State); + true -> + QResource = rabbit_misc:rs(amqqueue:get_name(Q)), + rabbit_log_queue:debug("Deleting 'classic ~ts' on expiry after ~tp milliseconds", [QResource, Expiry]), + stop(State); false -> noreply(State#q{expiry_timer_ref = undefined}) end; @@ -1704,7 +1633,7 @@ handle_info({maybe_expire, _Vsn}, State) -> handle_info({drop_expired, Vsn}, State = #q{args_policy_version = Vsn}) -> WasEmpty = is_empty(State), State1 = drop_expired_msgs(State#q{ttl_timer_ref = undefined}), - noreply(maybe_send_drained(WasEmpty, State1)); + noreply(notify_decorators_if_became_empty(WasEmpty, State1)); handle_info({drop_expired, _Vsn}, State) -> noreply(State); @@ -1733,15 +1662,12 @@ handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) -> {stop, State1} -> stop(State1) end; -handle_info(update_ram_duration, State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - {RamDuration, BQS1} = BQ:ram_duration(BQS), - DesiredDuration = - rabbit_memory_monitor:report_ram_duration(self(), RamDuration), - BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), +handle_info(update_rates, State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + BQS1 = BQ:update_rates(BQS), %% Don't call noreply/1, we don't want to set timers {State1, Timeout} = next_state(State#q{rate_timer_ref = undefined, - backing_queue_state = BQS2}), + backing_queue_state = BQS1}), {noreply, State1, Timeout}; handle_info(sync_timeout, State) -> @@ -1771,11 +1697,8 @@ handle_pre_hibernate(State = #q{backing_queue_state = undefined}) -> {hibernate, State}; handle_pre_hibernate(State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> - {RamDuration, BQS1} = BQ:ram_duration(BQS), - DesiredDuration = - rabbit_memory_monitor:report_ram_duration(self(), RamDuration), - BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), - BQS3 = BQ:handle_pre_hibernate(BQS2), + BQS1 = BQ:update_rates(BQS), + BQS3 = BQ:handle_pre_hibernate(BQS1), rabbit_event:if_enabled( State, #q.stats_timer, fun () -> emit_stats(State, @@ -1788,17 +1711,9 @@ 
handle_pre_hibernate(State = #q{backing_queue = BQ, format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ). +%% TODO: this can be removed after 3.13 format(Q) when ?is_amqqueue(Q) -> - case rabbit_mirror_queue_misc:is_mirrored(Q) of - false -> - [{node, node(amqqueue:get_pid(Q))}]; - true -> - Slaves = amqqueue:get_slave_pids(Q), - SSlaves = amqqueue:get_sync_slave_pids(Q), - [{slave_nodes, [node(S) || S <- Slaves]}, - {synchronised_slave_nodes, [node(S) || S <- SSlaves]}, - {node, node(amqqueue:get_pid(Q))}] - end. + [{node, node(amqqueue:get_pid(Q))}]. -spec is_policy_applicable(amqqueue:amqqueue(), any()) -> boolean(). is_policy_applicable(_Q, _Policy) -> @@ -1820,58 +1735,6 @@ log_auto_delete(Reason, #q{ q = Q }) -> Reason, [QName, VHost]). -needs_update_mirroring(Q, Version) -> - {ok, UpQ} = rabbit_amqqueue:lookup(amqqueue:get_name(Q)), - DBVersion = amqqueue:get_policy_version(UpQ), - case DBVersion > Version of - true -> {rabbit_policy:get(<<"ha-mode">>, UpQ), DBVersion}; - false -> false - end. - - -update_mirroring(Policy, State = #q{backing_queue = BQ}) -> - case update_to(Policy, BQ) of - start_mirroring -> - start_mirroring(State); - stop_mirroring -> - stop_mirroring(State); - ignore -> - State; - update_ha_mode -> - update_ha_mode(State) - end. - -update_to(undefined, rabbit_mirror_queue_master) -> - stop_mirroring; -update_to(_, rabbit_mirror_queue_master) -> - update_ha_mode; -update_to(undefined, BQ) when BQ =/= rabbit_mirror_queue_master -> - ignore; -update_to(_, BQ) when BQ =/= rabbit_mirror_queue_master -> - start_mirroring. - -start_mirroring(State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - %% lookup again to get policy for init_with_existing_bq - {ok, Q} = rabbit_amqqueue:lookup(qname(State)), - true = BQ =/= rabbit_mirror_queue_master, %% assertion - BQ1 = rabbit_mirror_queue_master, - BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS), - State#q{backing_queue = BQ1, - backing_queue_state = BQS1}. - -stop_mirroring(State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - BQ = rabbit_mirror_queue_master, %% assertion - {BQ1, BQS1} = BQ:stop_mirroring(BQS), - State#q{backing_queue = BQ1, - backing_queue_state = BQS1}. - -update_ha_mode(State) -> - {ok, Q} = rabbit_amqqueue:lookup(qname(State)), - ok = rabbit_mirror_queue_misc:update_mirrors(Q), - State. - confirm_to_sender(Pid, QName, MsgSeqNos) -> rabbit_classic_queue:confirm_to_sender(Pid, QName, MsgSeqNos). @@ -1882,3 +1745,8 @@ update_state(State, Q) -> Q1 = amqqueue:set_state(Q0, State), amqqueue:set_decorators(Q1, Decorators) end). + +queue_created_infos(State) -> + %% On the events API, we use long names for queue types + Keys = ?CREATION_EVENT_KEYS -- [type], + infos(Keys, State) ++ [{type, rabbit_classic_queue}]. diff --git a/deps/rabbit/src/rabbit_amqqueue_sup.erl b/deps/rabbit/src/rabbit_amqqueue_sup.erl index fcab6baaee2c..921652ce65fe 100644 --- a/deps/rabbit/src/rabbit_amqqueue_sup.erl +++ b/deps/rabbit/src/rabbit_amqqueue_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_amqqueue_sup). 
@@ -17,19 +17,19 @@ %%---------------------------------------------------------------------------- --spec start_link(amqqueue:amqqueue(), rabbit_prequeue:start_mode()) -> +-spec start_link(amqqueue:amqqueue(), any()) -> {'ok', pid(), pid()}. -start_link(Q, StartMode) -> +start_link(Q, _StartMode) -> Marker = spawn_link(fun() -> receive stop -> ok end end), - StartMFA = {rabbit_prequeue, start_link, [Q, StartMode, Marker]}, + StartMFA = {rabbit_amqqueue_process, start_link, [Q, Marker]}, ChildSpec = #{id => rabbit_amqqueue, start => StartMFA, restart => transient, significant => true, shutdown => ?CLASSIC_QUEUE_WORKER_WAIT, type => worker, - modules => [rabbit_amqqueue_process, rabbit_mirror_queue_slave]}, + modules => [rabbit_amqqueue_process]}, {ok, SupPid} = supervisor:start_link(?MODULE, []), {ok, QPid} = supervisor:start_child(SupPid, ChildSpec), unlink(Marker), diff --git a/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl b/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl index 144c0c77d4eb..9d01ff974736 100644 --- a/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl +++ b/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl @@ -2,14 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_amqqueue_sup_sup). -behaviour(supervisor). --export([start_link/0, start_queue_process/3]). +-export([start_link/0, start_queue_process/2]). -export([start_for_vhost/1, stop_for_vhost/1, find_for_vhost/2, find_for_vhost/1]). @@ -27,13 +27,12 @@ start_link() -> supervisor:start_link(?MODULE, []). -spec start_queue_process - (node(), amqqueue:amqqueue(), 'declare' | 'recovery' | 'slave') -> - pid(). + (node(), amqqueue:amqqueue()) -> pid(). -start_queue_process(Node, Q, StartMode) -> +start_queue_process(Node, Q) -> #resource{virtual_host = VHost} = amqqueue:get_name(Q), {ok, Sup} = find_for_vhost(VHost, Node), - {ok, _SupPid, QPid} = supervisor:start_child(Sup, [Q, StartMode]), + {ok, _SupPid, QPid} = supervisor:start_child(Sup, [Q, declare]), QPid. init([]) -> diff --git a/deps/rabbit/src/rabbit_auth_backend_internal.erl b/deps/rabbit/src/rabbit_auth_backend_internal.erl index 196fa1b7f4ef..1c52ee939559 100644 --- a/deps/rabbit/src/rabbit_auth_backend_internal.erl +++ b/deps/rabbit/src/rabbit_auth_backend_internal.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_internal). 
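Illustrative usage (not part of the patch): with the start mode argument removed from rabbit_amqqueue_sup_sup:start_queue_process/2 above, callers pass only the node and the queue record; 'declare' is implied and the child is started directly via rabbit_amqqueue_process. The wrapper name below is hypothetical.

    %% Hypothetical wrapper; the call itself matches the new spec above.
    start_classic_queue(Q) ->
        QPid = rabbit_amqqueue_sup_sup:start_queue_process(node(), Q),
        {ok, QPid}.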
@@ -17,12 +17,15 @@ -export([add_user/3, add_user/4, add_user/5, delete_user/2, lookup_user/1, exists/1, change_password/3, clear_password/2, hash_password/2, change_password_hash/2, change_password_hash/3, - set_tags/3, set_permissions/6, clear_permissions/3, clear_permissions_for_vhost/2, set_permissions_globally/5, - set_topic_permissions/6, clear_topic_permissions/3, clear_topic_permissions/4, clear_topic_permissions_for_vhost/2, + set_tags/3, set_permissions/6, clear_permissions/3, set_permissions_globally/5, + set_topic_permissions/6, clear_topic_permissions/3, clear_topic_permissions/4, + clear_all_permissions_for_vhost/2, add_user_sans_validation/3, put_user/2, put_user/3, update_user/5, update_user_with_hash/5, - add_user_sans_validation/6]). + add_user_sans_validation/6, + add_user_with_pre_hashed_password_sans_validation/3 +]). -export([set_user_limits/3, clear_user_limits/3, is_over_connection_limit/1, is_over_channel_limit/1, get_user_limits/0, get_user_limits/1]). @@ -30,6 +33,8 @@ -export([user_info_keys/0, perms_info_keys/0, user_perms_info_keys/0, vhost_perms_info_keys/0, user_vhost_perms_info_keys/0, all_users/0, + user_topic_perms_info_keys/0, vhost_topic_perms_info_keys/0, + user_vhost_topic_perms_info_keys/0, list_users/0, list_users/2, list_permissions/0, list_user_permissions/1, list_user_permissions/3, list_topic_permissions/0, @@ -37,11 +42,15 @@ list_user_vhost_permissions/2, list_user_topic_permissions/1, list_vhost_topic_permissions/1, list_user_vhost_topic_permissions/2]). --export([state_can_expire/0]). +-export([expiry_timestamp/1]). -%% for testing -export([hashing_module_for_user/1, expand_topic_permission/2]). +-ifdef(TEST). +-export([extract_user_permission_params/2, + extract_topic_permission_params/2]). +-endif. + -import(rabbit_data_coercion, [to_atom/1, to_list/1, to_binary/1]). %%---------------------------------------------------------------------------- @@ -101,7 +110,7 @@ user_login_authentication(Username, AuthProps) -> end end. -state_can_expire() -> false. +expiry_timestamp(_) -> never. user_login_authorization(Username, _AuthProps) -> case user_login_authentication(Username, []) of @@ -216,6 +225,10 @@ add_user(Username, Password, ActingUser, Limits, Tags) -> validate_and_alternate_credentials(Username, Password, ActingUser, add_user_sans_validation(Limits, Tags)). +add_user_with_pre_hashed_password_sans_validation(Username, PasswordHash, ActingUser) -> + HashingAlgorithm = rabbit_password:hashing_mod(), + add_user_sans_validation(Username, PasswordHash, HashingAlgorithm, [], undefined, ActingUser). + add_user_sans_validation(Username, Password, ActingUser) -> add_user_sans_validation(Username, Password, ActingUser, undefined, []). @@ -240,14 +253,12 @@ add_user_sans_validation(Username, Password, ActingUser, Limits, Tags) -> end, add_user_sans_validation_in(Username, User, ConvertedTags, Limits, ActingUser). 
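Illustrative usage (not part of the patch): the new add_user_with_pre_hashed_password_sans_validation/3 exported above takes a username, an already computed password hash, and the acting user; the hash is expected to match the configured rabbit_password:hashing_mod(). The call-site function below is hypothetical.

    %% Hypothetical call site, e.g. a definition import step.
    import_user(Username, PasswordHash, ActingUser) ->
        rabbit_auth_backend_internal:add_user_with_pre_hashed_password_sans_validation(
          Username, PasswordHash, ActingUser).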
-add_user_sans_validation(Username, PasswordHash, HashingAlgorithm, Tags, Limits, ActingUser) -> +add_user_sans_validation(Username, PasswordHash, HashingMod, Tags, Limits, ActingUser) -> rabbit_log:debug("Asked to create a new user '~ts' with password hash", [Username]), ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags], - HashingMod = rabbit_password:hashing_mod(), User0 = internal_user:create_user(Username, PasswordHash, HashingMod), User1 = internal_user:set_tags( - internal_user:set_password_hash(User0, - PasswordHash, HashingAlgorithm), + internal_user:set_password_hash(User0, PasswordHash, HashingMod), ConvertedTags), User = case Limits of undefined -> User1; @@ -292,7 +303,10 @@ delete_user(Username, ActingUser) -> {user_who_performed_action, ActingUser}]), ok; false -> - ok + ok; + Error0 -> + rabbit_log:info("Failed to delete user '~ts': ~tp", [Username, Error0]), + throw(Error0) end catch Class:Error:Stacktrace -> @@ -527,8 +541,35 @@ clear_permissions(Username, VirtualHost, ActingUser) -> erlang:raise(Class, Error, Stacktrace) end. -clear_permissions_for_vhost(VirtualHost, _ActingUser) -> - rabbit_db_user:clear_matching_user_permissions('_', VirtualHost). +-spec clear_all_permissions_for_vhost(VirtualHost, ActingUser) -> Ret when + VirtualHost :: rabbit_types:vhost(), + ActingUser :: rabbit_types:username(), + Ret :: ok | {error, Reason :: any()}. + +clear_all_permissions_for_vhost(VirtualHost, ActingUser) -> + case rabbit_db_user:clear_all_permissions_for_vhost(VirtualHost) of + {ok, Deletions} -> + lists:foreach( + fun (#topic_permission{topic_permission_key = + #topic_permission_key{user_vhost = + #user_vhost{username = Username}}}) -> + rabbit_event:notify( + topic_permission_deleted, + [{user, Username}, + {vhost, VirtualHost}, + {user_who_performed_action, ActingUser}]); + (#user_permission{user_vhost = + #user_vhost{username = Username}}) -> + rabbit_event:notify( + permission_deleted, + [{user, Username}, + {vhost, VirtualHost}, + {user_who_performed_action, ActingUser}]) + end, Deletions), + ok; + {error, _} = Err -> + Err + end. set_permissions_globally(Username, ConfigurePerm, WritePerm, ReadPerm, ActingUser) -> VirtualHosts = rabbit_vhost:list_names(), @@ -618,9 +659,9 @@ clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) -> Username, VirtualHost, Exchange), rabbit_log:info("Successfully cleared topic permissions on exchange '~ts' for user '~ts' in virtual host '~ts'", [Exchange, Username, VirtualHost]), - rabbit_event:notify(permission_deleted, [{user, Username}, - {vhost, VirtualHost}, - {user_who_performed_action, ActingUser}]), + rabbit_event:notify(topic_permission_deleted, [{user, Username}, + {vhost, VirtualHost}, + {user_who_performed_action, ActingUser}]), R catch Class:Error:Stacktrace -> @@ -629,9 +670,6 @@ clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) -> erlang:raise(Class, Error, Stacktrace) end. -clear_topic_permissions_for_vhost(VirtualHost, _ActingUser) -> - rabbit_db_user:clear_matching_topic_permissions('_', VirtualHost, '_'). - put_user(User, ActingUser) -> put_user(User, undefined, ActingUser). 
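Illustrative usage (not part of the patch): unlike the removed clear_permissions_for_vhost/2 and clear_topic_permissions_for_vhost/2, the new clear_all_permissions_for_vhost/2 returns ok or {error, Reason}, so callers are expected to branch on the result. The wrapper below is hypothetical.

    clear_vhost_permissions(VHost, ActingUser) ->
        case rabbit_auth_backend_internal:clear_all_permissions_for_vhost(VHost, ActingUser) of
            ok ->
                ok;
            {error, Reason} = Err ->
                rabbit_log:warning("Could not clear permissions for vhost '~ts': ~tp",
                                   [VHost, Reason]),
                Err
        end.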
put_user(User, Version, ActingUser) -> @@ -685,8 +723,8 @@ put_user(User, Version, ActingUser) -> throw({error, both_password_and_password_hash_are_provided}); %% clear password, update tags if needed _ -> - rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser), - rabbit_auth_backend_internal:clear_password(Username, ActingUser) + set_tags(Username, Tags, ActingUser), + clear_password(Username, ActingUser) end; false -> case {HasPassword, HasPasswordHash} of @@ -719,13 +757,13 @@ update_user_password_hash(Username, PasswordHash, Tags, Limits, User, Version) - Hash = rabbit_misc:b64decode_or_throw(PasswordHash), ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags], - rabbit_auth_backend_internal:update_user_with_hash( + update_user_with_hash( Username, Hash, HashingAlgorithm, ConvertedTags, Limits). create_user_with_password(_PassedCredentialValidation = true, Username, Password, Tags, undefined, Limits, ActingUser) -> - ok = rabbit_auth_backend_internal:add_user(Username, Password, ActingUser, Limits, Tags); + ok = add_user(Username, Password, ActingUser, Limits, Tags); create_user_with_password(_PassedCredentialValidation = true, Username, Password, Tags, PreconfiguredPermissions, Limits, ActingUser) -> - ok = rabbit_auth_backend_internal:add_user(Username, Password, ActingUser, Limits, Tags), + ok = add_user(Username, Password, ActingUser, Limits, Tags), preconfigure_permissions(Username, PreconfiguredPermissions, ActingUser); create_user_with_password(_PassedCredentialValidation = false, _Username, _Password, _Tags, _, _, _) -> %% we don't log here because @@ -738,14 +776,14 @@ create_user_with_password_hash(Username, PasswordHash, Tags, User, Version, Prec HashingAlgorithm = hashing_algorithm(User, Version), Hash = rabbit_misc:b64decode_or_throw(PasswordHash), - rabbit_auth_backend_internal:add_user_sans_validation(Username, Hash, HashingAlgorithm, Tags, Limits, ActingUser), + add_user_sans_validation(Username, Hash, HashingAlgorithm, Tags, Limits, ActingUser), preconfigure_permissions(Username, PreconfiguredPermissions, ActingUser). preconfigure_permissions(_Username, undefined, _ActingUser) -> ok; preconfigure_permissions(Username, Map, ActingUser) when is_map(Map) -> _ = maps:map(fun(VHost, M) -> - rabbit_auth_backend_internal:set_permissions(Username, VHost, + set_permissions(Username, VHost, maps:get(<<"configure">>, M), maps:get(<<"write">>, M), maps:get(<<"read">>, M), diff --git a/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl b/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl index ef9cbdba1850..a17202b5b1b7 100644 --- a/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl +++ b/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl @@ -2,12 +2,10 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_mechanism_amqplain). --include_lib("rabbit_common/include/rabbit.hrl"). - -behaviour(rabbit_auth_mechanism). -export([description/0, should_offer/1, init/1, handle_response/2]). @@ -32,14 +30,17 @@ should_offer(_Sock) -> init(_Sock) -> []. --define(IS_STRING_TYPE(Type), Type =:= longstr orelse Type =:= shortstr). +-define(IS_STRING_TYPE(Type), + Type =:= longstr orelse + Type =:= shortstr orelse + Type =:= binary). 
handle_response(Response, _State) -> LoginTable = rabbit_binary_parser:parse_table(Response), case {lists:keysearch(<<"LOGIN">>, 1, LoginTable), lists:keysearch(<<"PASSWORD">>, 1, LoginTable)} of {{value, {_, UserType, User}}, - {value, {_, PassType, Pass}}} when ?IS_STRING_TYPE(UserType); + {value, {_, PassType, Pass}}} when ?IS_STRING_TYPE(UserType) andalso ?IS_STRING_TYPE(PassType) -> rabbit_access_control:check_user_pass_login(User, Pass); {{value, {_, _UserType, _User}}, diff --git a/deps/rabbit/src/rabbit_auth_mechanism_anonymous.erl b/deps/rabbit/src/rabbit_auth_mechanism_anonymous.erl new file mode 100644 index 000000000000..a5183156d45c --- /dev/null +++ b/deps/rabbit/src/rabbit_auth_mechanism_anonymous.erl @@ -0,0 +1,54 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_auth_mechanism_anonymous). +-behaviour(rabbit_auth_mechanism). + +-export([description/0, should_offer/1, init/1, handle_response/2]). +-export([credentials/0]). + +-define(STATE, []). + +-rabbit_boot_step( + {?MODULE, + [{description, "auth mechanism anonymous"}, + {mfa, {rabbit_registry, register, [auth_mechanism, <<"ANONYMOUS">>, ?MODULE]}}, + {requires, rabbit_registry}, + {enables, kernel_ready}]}). + +description() -> + [{description, <<"SASL ANONYMOUS authentication mechanism">>}]. + +should_offer(_Sock) -> + case credentials() of + {ok, _, _} -> + true; + error -> + false + end. + +init(_Sock) -> + ?STATE. + +handle_response(_TraceInfo, ?STATE) -> + {ok, User, Pass} = credentials(), + rabbit_access_control:check_user_pass_login(User, Pass). + +-spec credentials() -> + {ok, rabbit_types:username(), rabbit_types:password()} | error. +credentials() -> + case application:get_env(rabbit, anonymous_login_user) of + {ok, User} when is_binary(User) -> + case application:get_env(rabbit, anonymous_login_pass) of + {ok, Pass} when is_binary(Pass) -> + {ok, User, Pass}; + _ -> + error + end; + _ -> + error + end. diff --git a/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl b/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl index 27834ea564fd..9f877414f2a1 100644 --- a/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl +++ b/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl @@ -2,12 +2,10 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_mechanism_cr_demo). --include_lib("rabbit_common/include/rabbit.hrl"). - -behaviour(rabbit_auth_mechanism). -export([description/0, should_offer/1, init/1, handle_response/2]). diff --git a/deps/rabbit/src/rabbit_auth_mechanism_plain.erl b/deps/rabbit/src/rabbit_auth_mechanism_plain.erl index 5a520d2105ab..d0881b4acc84 100644 --- a/deps/rabbit/src/rabbit_auth_mechanism_plain.erl +++ b/deps/rabbit/src/rabbit_auth_mechanism_plain.erl @@ -2,12 +2,10 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_mechanism_plain). --include_lib("rabbit_common/include/rabbit.hrl"). - -behaviour(rabbit_auth_mechanism). -export([description/0, should_offer/1, init/1, handle_response/2]). @@ -41,11 +39,15 @@ handle_response(Response, _State) -> extract_user_pass(Response) -> case extract_elem(Response) of - {ok, User, Response1} -> case extract_elem(Response1) of - {ok, Pass, <<>>} -> {ok, User, Pass}; - _ -> error - end; - error -> error + {ok, User, Response1} -> + case extract_elem(Response1) of + {ok, Pass, <<>>} -> + {ok, User, Pass}; + _ -> + error + end; + error -> + error end. extract_elem(<<0:8, Rest/binary>>) -> diff --git a/deps/rabbit/src/rabbit_autoheal.erl b/deps/rabbit/src/rabbit_autoheal.erl index c17d9fd7c0a3..63f38dc82366 100644 --- a/deps/rabbit/src/rabbit_autoheal.erl +++ b/deps/rabbit/src/rabbit_autoheal.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_autoheal). @@ -263,6 +263,12 @@ handle_msg({winner_is, Winner}, State = {leader_waiting, Winner, _}, %% This node is the leader and a loser at the same time. Pid = restart_loser(State, Winner), {restarting, Pid}; +handle_msg({winner_is, Winner}, State = {winner_waiting, _OutstandingStops, _Notify}, + _Partitions) -> + %% This node is still in winner_waiting with a winner reported, restart loser + %% and update state + Pid = restart_loser(State, Winner), + {restarting, Pid}; handle_msg(Request, {restarting, Pid} = St, _Partitions) -> %% ignore, we can contribute no further @@ -405,7 +411,7 @@ make_decision(AllPartitions) -> partition_value(Partition) -> Connections = [Res || Node <- Partition, Res <- [rpc:call(Node, rabbit_networking, - connections_local, [])], + local_connections, [])], is_list(Res)], {length(lists:append(Connections)), length(Partition)}. diff --git a/deps/rabbit/src/rabbit_backing_queue.erl b/deps/rabbit/src/rabbit_backing_queue.erl index f4ae7d3c67ad..ffa0a791f1b5 100644 --- a/deps/rabbit/src/rabbit_backing_queue.erl +++ b/deps/rabbit/src/rabbit_backing_queue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_backing_queue). @@ -21,10 +21,7 @@ -type ack() :: any(). -type state() :: any(). --type flow() :: 'flow' | 'noflow'. -type msg_ids() :: [rabbit_types:msg_id()]. --type publish() :: {mc:state(), - rabbit_types:message_properties(), boolean()}. -type delivered_publish() :: {mc:state(), rabbit_types:message_properties()}. -type fetch_result(Ack) :: @@ -36,7 +33,6 @@ -type purged_msg_count() :: non_neg_integer(). -type async_callback() :: fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok'). --type duration() :: ('undefined' | 'infinity' | number()). -type msg_fun(A) :: fun ((mc:state(), ack(), A) -> A). 
-type msg_pred() :: fun ((rabbit_types:message_properties()) -> boolean()). @@ -96,28 +92,20 @@ %% Publish a message. -callback publish(mc:state(), - rabbit_types:message_properties(), boolean(), pid(), flow(), + rabbit_types:message_properties(), boolean(), pid(), state()) -> state(). -%% Like publish/6 but for batches of publishes. --callback batch_publish([publish()], pid(), flow(), state()) -> state(). - %% Called for messages which have already been passed straight %% out to a client. The queue will be empty for these calls %% (i.e. saves the round trip through the backing queue). -callback publish_delivered(mc:state(), - rabbit_types:message_properties(), pid(), flow(), + rabbit_types:message_properties(), pid(), state()) -> {ack(), state()}. -%% Like publish_delivered/5 but for batches of publishes. --callback batch_publish_delivered([delivered_publish()], pid(), flow(), - state()) - -> {[ack()], state()}. - %% Called to inform the BQ about messages which have reached the %% queue, but are not going to be further passed to BQ. --callback discard(rabbit_types:msg_id(), pid(), flow(), state()) -> state(). +-callback discard(rabbit_types:msg_id(), pid(), state()) -> state(). %% Return ids of messages which have been confirmed since the last %% invocation of this function (or initialisation). @@ -201,21 +189,8 @@ %% What's the queue depth, where depth = length + number of pending acks -callback depth(state()) -> non_neg_integer(). -%% For the next three functions, the assumption is that you're -%% monitoring something like the ingress and egress rates of the -%% queue. The RAM duration is thus the length of time represented by -%% the messages held in RAM given the current rates. If you want to -%% ignore all of this stuff, then do so, and return 0 in -%% ram_duration/1. - -%% The target is to have no more messages in RAM than indicated by the -%% duration and the current queue rates. --callback set_ram_duration_target(duration(), state()) -> state(). - -%% Optionally recalculate the duration internally (likely to be just -%% update your internal rates), and report how many seconds the -%% messages in RAM represent given the current rates of the queue. --callback ram_duration(state()) -> {duration(), state()}. +%% Update the internal message rates. +-callback update_rates(state()) -> state(). %% Should 'timeout' be called as soon as the queue process can manage %% (either on an empty mailbox, or when a timer fires)? diff --git a/deps/rabbit/src/rabbit_basic.erl b/deps/rabbit/src/rabbit_basic.erl index cf02eb8fadb5..bc0b5e77389a 100644 --- a/deps/rabbit/src/rabbit_basic.erl +++ b/deps/rabbit/src/rabbit_basic.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_basic). @@ -235,18 +235,23 @@ is_message_persistent(#content{properties = #'P_basic'{ %% Extract CC routes from headers --spec header_routes(undefined | rabbit_framing:amqp_table()) -> [string()]. +-spec header_routes(undefined | rabbit_framing:amqp_table()) -> [string()] | {error, Reason :: any()}. 
header_routes(undefined) -> []; header_routes(HeadersTable) -> - lists:append( - [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of - {array, Routes} -> [Route || {longstr, Route} <- Routes]; - undefined -> []; - {Type, _Val} -> throw({error, {unacceptable_type_in_header, - binary_to_list(HeaderKey), Type}}) - end || HeaderKey <- ?ROUTING_HEADERS]). + try + lists:append( + [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of + {array, Routes} -> [Route || {longstr, Route} <- Routes]; + undefined -> []; + {Type, _Val} -> throw({error, {unacceptable_type_in_header, + binary_to_list(HeaderKey), Type}}) + end || HeaderKey <- ?ROUTING_HEADERS]) + catch + {error, _Reason} = Error -> + Error + end. -spec parse_expiration (rabbit_framing:amqp_property_record()) -> @@ -281,7 +286,7 @@ msg_size(Content) -> rabbit_writer:msg_size(Content). add_header(Name, Type, Value, #basic_message{content = Content0} = Msg) -> - Content = rabbit_basic:map_headers( + Content = map_headers( fun(undefined) -> rabbit_misc:set_table_value([], Name, Type, Value); (Headers) -> @@ -318,15 +323,19 @@ binary_prefix_64(Bin, Len) -> binary:part(Bin, 0, min(byte_size(Bin), Len)). make_message(XName, RoutingKey, #content{properties = Props} = DecodedContent, Guid) -> - try - {ok, #basic_message{ - exchange_name = XName, - content = strip_header(DecodedContent, ?DELETED_HEADER), - id = Guid, - is_persistent = is_message_persistent(DecodedContent), - routing_keys = [RoutingKey | - header_routes(Props#'P_basic'.headers)]}} - catch - {error, _Reason} = Error -> Error + case header_routes(Props#'P_basic'.headers) of + {error, _} = Error -> + Error; + Routes -> + try + {ok, #basic_message{ + exchange_name = XName, + content = strip_header(DecodedContent, ?DELETED_HEADER), + id = Guid, + is_persistent = is_message_persistent(DecodedContent), + routing_keys = [RoutingKey | Routes]}} + catch + {error, _Reason} = Error -> Error + end end. diff --git a/deps/rabbit/src/rabbit_binding.erl b/deps/rabbit/src/rabbit_binding.erl index 81f2385f3746..cf7f79b51e6a 100644 --- a/deps/rabbit/src/rabbit_binding.erl +++ b/deps/rabbit/src/rabbit_binding.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_binding). @@ -41,7 +41,8 @@ -type bind_ok_or_error() :: 'ok' | bind_errors() | rabbit_types:error({'binding_invalid', string(), [any()]}) | %% inner_fun() result - rabbit_types:error(rabbit_types:amqp_error()). + rabbit_types:error(rabbit_types:amqp_error()) | + rabbit_khepri:timeout_error(). -type bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error()). 
-type inner_fun() :: fun((rabbit_types:exchange(), @@ -100,13 +101,21 @@ recover(XNames, QNames) -> recover_semi_durable_route(Gatherer, Binding, Src, Dst, ToRecover, Fun) -> case sets:is_element(Dst, ToRecover) of - true -> {ok, X} = rabbit_exchange:lookup(Src), - ok = gatherer:fork(Gatherer), - ok = worker_pool:submit_async( - fun () -> - Fun(Binding, X), - gatherer:finish(Gatherer) - end); + true -> + case rabbit_exchange:lookup(Src) of + {ok, X} -> + ok = gatherer:fork(Gatherer), + ok = worker_pool:submit_async( + fun () -> + Fun(Binding, X), + gatherer:finish(Gatherer) + end); + {error, not_found}=Error -> + rabbit_log:warning( + "expected exchange ~tp to exist during recovery, " + "error: ~tp", [Src, Error]), + ok + end; false -> ok end. @@ -382,7 +391,11 @@ combine_deletions(Deletions1, Deletions2) -> merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> {anything_but(undefined, X1, X2), anything_but(not_deleted, Deleted1, Deleted2), - [Bindings1 | Bindings2]}. + Bindings1 ++ Bindings2}; +merge_entry({X1, Deleted1, Bindings1, none}, {X2, Deleted2, Bindings2, none}) -> + {anything_but(undefined, X1, X2), + anything_but(not_deleted, Deleted1, Deleted2), + Bindings1 ++ Bindings2, none}. notify_deletions({error, not_found}, _) -> ok; diff --git a/deps/rabbit/src/rabbit_boot_steps.erl b/deps/rabbit/src/rabbit_boot_steps.erl index 593e86727147..776032b61346 100644 --- a/deps/rabbit/src/rabbit_boot_steps.erl +++ b/deps/rabbit/src/rabbit_boot_steps.erl @@ -2,11 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_boot_steps). +-include_lib("kernel/include/logger.hrl"). +-include_lib("rabbit_common/include/logging.hrl"). + -export([run_boot_steps/0, run_boot_steps/1, run_cleanup_steps/1]). -export([find_steps/0, find_steps/1]). @@ -31,7 +34,14 @@ find_steps() -> find_steps(loaded_applications()). find_steps(Apps) -> - All = sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)), + T0 = erlang:monotonic_time(), + AttrsPerApp = rabbit_misc:rabbitmq_related_module_attributes(rabbit_boot_step), + T1 = erlang:monotonic_time(), + ?LOG_DEBUG( + "Boot steps: time to find boot steps: ~tp us", + [erlang:convert_time_unit(T1 - T0, native, microsecond)], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + All = sort_boot_steps(AttrsPerApp), [Step || {App, _, _} = Step <- All, lists:member(App, Apps)]. run_step(Attributes, AttributeName) -> diff --git a/deps/rabbit/src/rabbit_channel.erl b/deps/rabbit/src/rabbit_channel.erl index fa85e1d2268a..908892781574 100644 --- a/deps/rabbit/src/rabbit_channel.erl +++ b/deps/rabbit/src/rabbit_channel.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_channel). @@ -45,7 +45,7 @@ -behaviour(gen_server2). -export([start_link/11, start_link/12, do/2, do/3, do_flow/3, flush/1, shutdown/1]). --export([send_command/2, deliver_reply/2]). +-export([send_command/2]). 
-export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1, emit_info_all/4, info_local/1]). -export([refresh_config_local/0, ready_for_close/1]). @@ -63,10 +63,11 @@ -export([get_vhost/1, get_user/1]). %% For testing -export([build_topic_variable_map/3]). --export([list_queue_states/1, get_max_message_size/0]). +-export([list_queue_states/1]). %% Mgmt HTTP API refactor --export([handle_method/6]). +-export([handle_method/6, + binding_action/4]). -import(rabbit_misc, [maps_put_truthy/3]). @@ -87,13 +88,9 @@ %% same as reader's name, see #v1.name %% in rabbit_reader conn_name, - %% channel's originating source e.g. rabbit_reader | rabbit_direct | undefined - %% or any other channel creating/spawning entity - source, %% same as #v1.user in the reader, used in %% authorisation checks user, - %% same as #v1.user in the reader virtual_host, %% when queue.bind's queue field is empty, %% this name will be used instead @@ -107,15 +104,11 @@ capabilities, trace_state :: rabbit_trace:state(), consumer_prefetch, - %% Message content size limit - max_message_size, consumer_timeout, authz_context, + max_consumers, % taken from rabbit.consumer_max_per_channel %% defines how ofter gc will be executed - writer_gc_threshold, - %% true with AMQP 1.0 to include the publishing sequence - %% in the return callback, false otherwise - extended_return_callback + writer_gc_threshold }). -record(pending_ack, { @@ -124,7 +117,7 @@ delivery_tag, %% consumer tag tag, - delivered_at, + delivered_at :: integer(), %% queue name queue, %% message ID used by queue and message store implementations @@ -165,9 +158,8 @@ %% rejected but are yet to be sent to the client rejected, %% used by "one shot RPC" (amq. - reply_consumer, - %% flow | noflow, see rabbitmq-server#114 - delivery_flow, + reply_consumer :: none | {rabbit_types:ctag(), binary(), binary()}, + delivery_flow, %% Deprecated since removal of CMQ in 4.0 interceptor_state, queue_states, tick_timer, @@ -273,7 +265,7 @@ do(Pid, Method) -> -spec do (pid(), rabbit_framing:amqp_method_record(), - rabbit_types:maybe(rabbit_types:content())) -> + rabbit_types:'maybe'(rabbit_types:content())) -> 'ok'. do(Pid, Method, Content) -> @@ -281,7 +273,7 @@ do(Pid, Method, Content) -> -spec do_flow (pid(), rabbit_framing:amqp_method_record(), - rabbit_types:maybe(rabbit_types:content())) -> + rabbit_types:'maybe'(rabbit_types:content())) -> 'ok'. do_flow(Pid, Method, Content) -> @@ -496,10 +488,6 @@ init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost, ?LG_PROCESS_TYPE(channel), ?store_proc_name({ConnName, Channel}), ok = pg_local:join(rabbit_channels, self()), - Flow = case rabbit_misc:get_env(rabbit, mirroring_flow_control, true) of - true -> flow; - false -> noflow - end, {ok, {Global0, Prefetch}} = application:get_env(rabbit, default_consumer_prefetch), Limiter0 = rabbit_limiter:new(LimiterPid), Global = Global0 andalso is_global_qos_permitted(), @@ -513,11 +501,10 @@ init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost, end, %% Process dictionary is used here because permission cache already uses it. MK. 
put(permission_cache_can_expire, rabbit_access_control:permission_cache_can_expire(User)), - MaxMessageSize = get_max_message_size(), ConsumerTimeout = get_consumer_timeout(), OptionalVariables = extract_variable_map_from_amqp_params(AmqpParams), - UseExtendedReturnCallback = use_extended_return_callback(AmqpParams), {ok, GCThreshold} = application:get_env(rabbit, writer_gc_threshold), + MaxConsumers = application:get_env(rabbit, consumer_max_per_channel, infinity), State = #ch{cfg = #conf{state = starting, protocol = Protocol, channel = Channel, @@ -532,17 +519,15 @@ init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost, capabilities = Capabilities, trace_state = rabbit_trace:init(VHost), consumer_prefetch = Prefetch, - max_message_size = MaxMessageSize, consumer_timeout = ConsumerTimeout, authz_context = OptionalVariables, - writer_gc_threshold = GCThreshold, - extended_return_callback = UseExtendedReturnCallback + max_consumers = MaxConsumers, + writer_gc_threshold = GCThreshold }, limiter = Limiter, tx = none, next_tag = 1, unacked_message_q = ?QUEUE:new(), - queue_monitors = pmon:new(), consumer_mapping = #{}, queue_consumers = #{}, confirm_enabled = false, @@ -551,7 +536,6 @@ init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost, rejected = [], confirmed = [], reply_consumer = none, - delivery_flow = Flow, interceptor_state = undefined, queue_states = rabbit_queue_type:init() }, @@ -688,8 +672,8 @@ handle_cast({deliver_reply, Key, Msg}, next_tag = DeliveryTag, reply_consumer = {ConsumerTag, _Suffix, Key}}) -> Content = mc:protocol_state(mc:convert(mc_amqpl, Msg)), - ExchName = mc:get_annotation(exchange, Msg), - [RoutingKey | _] = mc:get_annotation(routing_keys, Msg), + ExchName = mc:exchange(Msg), + [RoutingKey | _] = mc:routing_keys(Msg), ok = rabbit_writer:send_command( WriterPid, #'basic.deliver'{consumer_tag = ConsumerTag, @@ -710,16 +694,6 @@ handle_cast({force_event_refresh, Ref}, State) -> Ref), noreply(rabbit_event:init_stats_timer(State, #ch.stats_timer)); -handle_cast({mandatory_received, _MsgSeqNo}, State) -> - %% This feature was used by `rabbit_amqqueue_process` and - %% `rabbit_mirror_queue_slave` up-to and including RabbitMQ 3.7.x. - %% It is unused in 3.8.x and thus deprecated. We keep it to support - %% in-place upgrades to 3.8.x (i.e. mixed-version clusters), but it - %% is a no-op starting with that version. - %% - %% NB: don't call noreply/1 since we don't want to send confirms. - noreply_coalesce(State); - handle_cast({queue_event, QRef, Evt}, #ch{queue_states = QueueStates0} = State0) -> case rabbit_queue_type:handle_event(QRef, Evt, QueueStates0) of @@ -728,17 +702,8 @@ handle_cast({queue_event, QRef, Evt}, State = handle_queue_actions(Actions, State1), noreply_coalesce(State); {eol, Actions} -> - State1 = handle_queue_actions(Actions, State0), - State2 = handle_consuming_queue_down_or_eol(QRef, State1), - {ConfirmMXs, UC1} = - rabbit_confirms:remove_queue(QRef, State2#ch.unconfirmed), - %% Deleted queue is a special case. - %% Do not nack the "rejected" messages. - State3 = record_confirms(ConfirmMXs, - State2#ch{unconfirmed = UC1}), - _ = erase_queue_stats(QRef), - noreply_coalesce( - State3#ch{queue_states = rabbit_queue_type:remove(QRef, QueueStates0)}); + State = handle_queue_actions(Actions, State0), + handle_eol(QRef, State); {protocol_error, Type, Reason, ReasonArgs} -> rabbit_misc:protocol_error(Type, Reason, ReasonArgs) end. 
@@ -764,8 +729,7 @@ handle_info(emit_stats, State) -> {noreply, send_confirms_and_nacks(State1), hibernate}; handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason}, - #ch{queue_states = QStates0, - queue_monitors = _QMons} = State0) -> + #ch{queue_states = QStates0} = State0) -> credit_flow:peer_down(QPid), case rabbit_queue_type:handle_down(QPid, QName, Reason, QStates0) of {ok, QState1, Actions} -> @@ -773,18 +737,8 @@ handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason}, State = handle_queue_actions(Actions, State1), noreply_coalesce(State); {eol, QState1, QRef} -> - State1 = handle_consuming_queue_down_or_eol(QRef, State0#ch{ - queue_states = QState1 - }), - {ConfirmMXs, UC1} = - rabbit_confirms:remove_queue(QRef, State1#ch.unconfirmed), - %% Deleted queue is a special case. - %% Do not nack the "rejected" messages. - State2 = record_confirms(ConfirmMXs, - State1#ch{unconfirmed = UC1}), - _ = erase_queue_stats(QRef), - noreply_coalesce( - State2#ch{queue_states = rabbit_queue_type:remove(QRef, State2#ch.queue_states)}) + State = State0#ch{queue_states = QState1}, + handle_eol(QRef, State) end; handle_info({'EXIT', _Pid, Reason}, State) -> @@ -831,20 +785,21 @@ terminate(_Reason, State = #ch{cfg = #conf{user = #user{username = Username}}, consumer_mapping = CM, queue_states = QueueCtxs}) -> - _ = rabbit_queue_type:close(QueueCtxs), + rabbit_queue_type:close(QueueCtxs), {_Res, _State1} = notify_queues(State), pg_local:leave(rabbit_channels, self()), rabbit_event:if_enabled(State, #ch.stats_timer, fun() -> emit_stats(State) end), [delete_stats(Tag) || {Tag, _} <- get()], maybe_decrease_global_publishers(State), - _ = maps:map( - fun (_, _) -> - rabbit_global_counters:consumer_deleted(amqp091) - end, CM), + maps:foreach( + fun (_, _) -> + rabbit_global_counters:consumer_deleted(amqp091) + end, CM), rabbit_core_metrics:channel_closed(self()), rabbit_event:notify(channel_closed, [{pid, self()}, - {user_who_performed_action, Username}]), + {user_who_performed_action, Username}, + {consumer_count, maps:size(CM)}]), case rabbit_confirms:size(State#ch.unconfirmed) of 0 -> ok; NumConfirms -> @@ -857,16 +812,6 @@ code_change(_OldVsn, State, _Extra) -> format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ). --spec get_max_message_size() -> non_neg_integer(). - -get_max_message_size() -> - case application:get_env(rabbit, max_message_size) of - {ok, MS} when is_integer(MS) -> - erlang:min(MS, ?MAX_MSG_SIZE); - _ -> - ?MAX_MSG_SIZE - end. - get_consumer_timeout() -> case application:get_env(rabbit, consumer_timeout) of {ok, MS} when is_integer(MS) -> @@ -972,30 +917,19 @@ check_write_permitted_on_topic(Resource, User, RoutingKey, AuthzContext) -> check_read_permitted_on_topic(Resource, User, RoutingKey, AuthzContext) -> check_topic_authorisation(Resource, User, RoutingKey, AuthzContext, read). 
-check_user_id_header(#'P_basic'{user_id = undefined}, _) -> - ok; -check_user_id_header(#'P_basic'{user_id = Username}, - #ch{cfg = #conf{user = #user{username = Username}}}) -> - ok; -check_user_id_header( - #'P_basic'{}, #ch{cfg = #conf{user = #user{authz_backends = - [{rabbit_auth_backend_dummy, _}]}}}) -> - ok; -check_user_id_header(#'P_basic'{user_id = Claimed}, - #ch{cfg = #conf{user = #user{username = Actual, - tags = Tags}}}) -> - case lists:member(impersonator, Tags) of - true -> ok; - false -> rabbit_misc:precondition_failed( - "user_id property set to '~ts' but authenticated user was " - "'~ts'", [Claimed, Actual]) +check_user_id_header(Msg, User) -> + case rabbit_access_control:check_user_id(Msg, User) of + ok -> + ok; + {refused, Reason, Args} -> + rabbit_misc:precondition_failed(Reason, Args) end. check_expiration_header(Props) -> case rabbit_basic:parse_expiration(Props) of {ok, _} -> ok; {error, E} -> rabbit_misc:precondition_failed("invalid expiration '~ts': ~tp", - [Props#'P_basic'.expiration, E]) + [Props#'P_basic'.expiration, E]) end. check_internal_exchange(#exchange{name = Name, internal = true}) -> @@ -1046,39 +980,23 @@ extract_variable_map_from_amqp_params([Value]) -> extract_variable_map_from_amqp_params(_) -> #{}. -%% Use tuple representation of amqp_params to avoid a dependency on amqp_client. -%% Used for AMQP 1.0 -use_extended_return_callback({amqp_params_direct,_,_,_,_, - {amqp_adapter_info,_,_,_,_,_,{'AMQP',"1.0"},_}, - _}) -> - true; -use_extended_return_callback(_) -> - false. - -check_msg_size(Content, MaxMessageSize, GCThreshold) -> +check_msg_size(Content, GCThreshold) -> + MaxMessageSize = persistent_term:get(max_message_size), Size = rabbit_basic:maybe_gc_large_msg(Content, GCThreshold), - case Size of - S when S > MaxMessageSize -> - ErrorMessage = case MaxMessageSize of - ?MAX_MSG_SIZE -> - "message size ~B is larger than max size ~B"; - _ -> - "message size ~B is larger than configured max size ~B" - end, - rabbit_misc:precondition_failed(ErrorMessage, - [Size, MaxMessageSize]); - _ -> ok + case Size =< MaxMessageSize of + true -> + ok; + false -> + Fmt = case MaxMessageSize of + ?MAX_MSG_SIZE -> + "message size ~B is larger than max size ~B"; + _ -> + "message size ~B is larger than configured max size ~B" + end, + rabbit_misc:precondition_failed( + Fmt, [Size, MaxMessageSize]) end. -check_vhost_queue_limit(#resource{name = QueueName}, VHost) -> - case rabbit_vhost_limit:is_over_queue_limit(VHost) of - false -> ok; - {true, Limit} -> rabbit_misc:precondition_failed("cannot declare queue '~ts': " - "queue limit in vhost '~ts' (~tp) is reached", - [QueueName, VHost, Limit]) - - end. - qbin_to_resource(QueueNameBin, VHostPath) -> name_to_resource(queue, QueueNameBin, VHostPath). 
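The reworked check_msg_size/2 above no longer threads the limit through per-channel state; it reads it from persistent_term under the max_message_size key. A minimal sketch of the idea, assuming the limit is published once at boot (the module name and the hard-cap value below are illustrative, not part of this change):

    -module(max_message_size_boot_sketch).
    -export([store_limit/0]).

    %% illustrative hard cap; the real ?MAX_MSG_SIZE constant lives in rabbit_common
    -define(HARD_CAP, 536870912).

    store_limit() ->
        Limit = case application:get_env(rabbit, max_message_size) of
                    {ok, MS} when is_integer(MS) -> erlang:min(MS, ?HARD_CAP);
                    _ -> ?HARD_CAP
                end,
        %% channels can then call persistent_term:get(max_message_size) on the
        %% hot path without keeping a copy in their own state
        persistent_term:put(max_message_size, Limit).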
@@ -1244,22 +1162,21 @@ handle_method(#'basic.publish'{immediate = true}, _Content, _State) -> handle_method(#'basic.publish'{exchange = ExchangeNameBin, routing_key = RoutingKey, mandatory = Mandatory}, - Content, State = #ch{cfg = #conf{channel = ChannelNum, - conn_name = ConnName, - virtual_host = VHostPath, - user = #user{username = Username} = User, - trace_state = TraceState, - max_message_size = MaxMessageSize, - authz_context = AuthzContext, - writer_gc_threshold = GCThreshold - }, + Content, State0 = #ch{cfg = #conf{channel = ChannelNum, + conn_name = ConnName, + virtual_host = VHostPath, + user = #user{username = Username} = User, + trace_state = TraceState, + authz_context = AuthzContext, + writer_gc_threshold = GCThreshold + }, tx = Tx, confirm_enabled = ConfirmEnabled, delivery_flow = Flow }) -> - State0 = maybe_increase_global_publishers(State), + State1 = maybe_increase_global_publishers(State0), rabbit_global_counters:messages_received(amqp091, 1), - check_msg_size(Content, MaxMessageSize, GCThreshold), + check_msg_size(Content, GCThreshold), ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), check_write_permitted(ExchangeName, User, AuthzContext), Exchange = rabbit_exchange:lookup_or_die(ExchangeName), @@ -1269,41 +1186,44 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, %% certain to want to look at delivery-mode and priority. DecodedContent = #content {properties = Props} = maybe_set_fast_reply_to( - rabbit_binary_parser:ensure_content_decoded(Content), State), - check_user_id_header(Props, State), + rabbit_binary_parser:ensure_content_decoded(Content), State1), check_expiration_header(Props), DoConfirm = Tx =/= none orelse ConfirmEnabled, - {DeliveryOptions, SeqNum, State1} = + {DeliveryOptions, State} = case DoConfirm of false -> - {maps_put_truthy(flow, Flow, #{}), undefined, State0}; + {maps_put_truthy(flow, Flow, #{mandatory => Mandatory}), State1}; true -> rabbit_global_counters:messages_received_confirm(amqp091, 1), - SeqNo = State0#ch.publish_seqno, - Opts = maps_put_truthy(flow, Flow, #{correlation => SeqNo}), - {Opts, SeqNo, State0#ch{publish_seqno = SeqNo + 1}} + SeqNo = State1#ch.publish_seqno, + Opts = maps_put_truthy(flow, Flow, #{correlation => SeqNo, + mandatory => Mandatory}), + {Opts, State1#ch{publish_seqno = SeqNo + 1}} end, - % rabbit_feature_flags:is_enabled(message_containers), - Message0 = mc_amqpl:message(ExchangeName, - RoutingKey, - DecodedContent), - Message = rabbit_message_interceptor:intercept(Message0), - QNames = rabbit_exchange:route(Exchange, Message, #{return_binding_keys => true}), - [rabbit_channel:deliver_reply(RK, Message) || - {virtual_reply_queue, RK} <- QNames], - Queues = rabbit_amqqueue:lookup_many(QNames), - ok = process_routing_mandatory(Mandatory, Queues, SeqNum, Message, ExchangeName, State0), - rabbit_trace:tap_in(Message, QNames, ConnName, ChannelNum, - Username, TraceState), - %% TODO: call delivery_to_queues with plain args - Delivery = {Message, DeliveryOptions, Queues}, - {noreply, case Tx of - none -> - deliver_to_queues(ExchangeName, Delivery, State1); - {Msgs, Acks} -> - Msgs1 = ?QUEUE:in(Delivery, Msgs), - State1#ch{tx = {Msgs1, Acks}} - end}; + + case mc_amqpl:message(ExchangeName, + RoutingKey, + DecodedContent) of + {error, Reason} -> + rabbit_misc:precondition_failed("invalid message: ~tp", [Reason]); + {ok, Message0} -> + Message = rabbit_message_interceptor:intercept(Message0), + check_user_id_header(Message, User), + QNames = rabbit_exchange:route(Exchange, Message, 
#{return_binding_keys => true}), + [deliver_reply(RK, Message) || {virtual_reply_queue, RK} <- QNames], + Queues = rabbit_amqqueue:lookup_many(QNames), + rabbit_trace:tap_in(Message, QNames, ConnName, ChannelNum, + Username, TraceState), + %% TODO: call delivery_to_queues with plain args + Delivery = {Message, DeliveryOptions, Queues}, + {noreply, case Tx of + none -> + deliver_to_queues(ExchangeName, Delivery, State); + {Msgs, Acks} -> + Msgs1 = ?QUEUE:in(Delivery, Msgs), + State#ch{tx = {Msgs1, Acks}} + end} + end; handle_method(#'basic.nack'{delivery_tag = DeliveryTag, multiple = Multiple, @@ -1371,8 +1291,13 @@ handle_method(#'basic.consume'{queue = <<"amq.rabbitmq.reply-to">>, no_ack = NoAck, nowait = NoWait}, _, State = #ch{reply_consumer = ReplyConsumer, + cfg = #conf{max_consumers = MaxConsumers}, consumer_mapping = ConsumerMapping}) -> + CurrentConsumers = maps:size(ConsumerMapping), case maps:find(CTag0, ConsumerMapping) of + error when CurrentConsumers >= MaxConsumers -> % false when MaxConsumers is 'infinity' + rabbit_misc:protocol_error( + not_allowed, "reached maximum (~B) of consumers per channel", [MaxConsumers]); error -> case {ReplyConsumer, NoAck} of {none, true} -> @@ -1421,12 +1346,17 @@ handle_method(#'basic.consume'{queue = QueueNameBin, nowait = NoWait, arguments = Args}, _, State = #ch{cfg = #conf{consumer_prefetch = ConsumerPrefetch, + max_consumers = MaxConsumers, user = User, virtual_host = VHostPath, authz_context = AuthzContext}, consumer_mapping = ConsumerMapping }) -> + CurrentConsumers = maps:size(ConsumerMapping), case maps:find(ConsumerTag, ConsumerMapping) of + error when CurrentConsumers >= MaxConsumers -> % false when MaxConsumers is 'infinity' + rabbit_misc:protocol_error( + not_allowed, "reached maximum (~B) of consumers per channel", [MaxConsumers]); error -> QueueName = qbin_to_resource(QueueNameBin, VHostPath), check_read_permitted(QueueName, User, AuthzContext), @@ -1498,8 +1428,9 @@ handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait}, fun () -> {error, not_found} end, fun () -> rabbit_queue_type:cancel( - Q, ConsumerTag, ok_msg(NoWait, OkMsg), - Username, QueueStates0) + Q, #{consumer_tag => ConsumerTag, + ok_msg => ok_msg(NoWait, OkMsg), + user => Username}, QueueStates0) end) of {ok, QueueStates} -> rabbit_global_counters:consumer_deleted(amqp091), @@ -1744,19 +1675,6 @@ handle_method(#'channel.flow'{active = true}, _, State) -> handle_method(#'channel.flow'{active = false}, _, _State) -> rabbit_misc:protocol_error(not_implemented, "active=false", []); -handle_method(#'basic.credit'{consumer_tag = CTag, - credit = Credit, - drain = Drain}, - _, State = #ch{consumer_mapping = Consumers, - queue_states = QStates0}) -> - case maps:find(CTag, Consumers) of - {ok, {Q, _CParams}} -> - {ok, QStates, Actions} = rabbit_queue_type:credit(Q, CTag, Credit, Drain, QStates0), - {noreply, handle_queue_actions(Actions, State#ch{queue_states = QStates})}; - error -> rabbit_misc:precondition_failed( - "unknown consumer tag '~ts'", [CTag]) - end; - handle_method(_MethodRecord, _Content, _State) -> rabbit_misc:protocol_error( command_invalid, "unimplemented method", []). 
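The per-channel consumer cap enforced in the basic.consume clauses above comes from the consumer_max_per_channel application environment key (defaulting to infinity). A hedged advanced.config sketch, assuming the key is set directly under the rabbit application; the value 10 is only an example:

    [
     {rabbit, [
       %% channels answer basic.consume with a not_allowed protocol error once
       %% this many consumers already exist on the channel
       {consumer_max_per_channel, 10}
     ]}
    ].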
@@ -1834,20 +1752,8 @@ handle_consuming_queue_down_or_eol(QName, {ok, CTags} -> CTags end, gb_sets:fold( - fun (CTag, StateN = #ch{consumer_mapping = CMap}) -> - case queue_down_consumer_action(CTag, CMap) of - remove -> - cancel_consumer(CTag, QName, StateN); - {recover, {NoAck, ConsumerPrefetch, Exclusive, Args}} -> - case catch basic_consume( - QName, NoAck, ConsumerPrefetch, CTag, - Exclusive, Args, true, StateN) of - {ok, StateN1} -> - StateN1; - _Err -> - cancel_consumer(CTag, QName, StateN) - end - end + fun (CTag, StateN = #ch{}) -> + cancel_consumer(CTag, QName, StateN) end, State#ch{queue_consumers = maps:remove(QName, QCons)}, ConsumerTags). %% [0] There is a slight danger here that if a queue is deleted and @@ -1870,16 +1776,10 @@ cancel_consumer(CTag, QName, {queue, QName}]), State#ch{consumer_mapping = maps:remove(CTag, CMap)}. -queue_down_consumer_action(CTag, CMap) -> - {_, {_, _, _, Args} = ConsumeSpec} = maps:get(CTag, CMap), - case rabbit_misc:table_lookup(Args, <<"x-cancel-on-ha-failover">>) of - {bool, true} -> remove; - _ -> {recover, ConsumeSpec} - end. - -binding_action(Fun, SourceNameBin0, DestinationType, DestinationNameBin0, - RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, - #user{username = Username} = User) -> +binding_action_with_checks( + Action, SourceNameBin0, DestinationType, DestinationNameBin0, + RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, + #user{username = Username} = User) -> ExchangeNameBin = strip_cr_lf(SourceNameBin0), DestinationNameBin = strip_cr_lf(DestinationNameBin0), DestinationName = name_to_resource(DestinationType, DestinationNameBin, VHostPath), @@ -1893,18 +1793,27 @@ binding_action(Fun, SourceNameBin0, DestinationType, DestinationNameBin0, {ok, Exchange} -> check_read_permitted_on_topic(Exchange, User, RoutingKey, AuthzContext) end, - case Fun(#binding{source = ExchangeName, - destination = DestinationName, - key = RoutingKey, - args = Arguments}, - fun (_X, Q) when ?is_amqqueue(Q) -> - try rabbit_amqqueue:check_exclusive_access(Q, ConnPid) - catch exit:Reason -> {error, Reason} - end; - (_X, #exchange{}) -> - ok - end, - Username) of + Binding = #binding{source = ExchangeName, + destination = DestinationName, + key = RoutingKey, + args = Arguments}, + binding_action(Action, Binding, Username, ConnPid). + +-spec binding_action(add | remove, + rabbit_types:binding(), + rabbit_types:username(), + pid()) -> ok. +binding_action(Action, Binding, Username, ConnPid) -> + case rabbit_binding:Action( + Binding, + fun (_X, Q) when ?is_amqqueue(Q) -> + try rabbit_amqqueue:check_exclusive_access(Q, ConnPid) + catch exit:Reason -> {error, Reason} + end; + (_X, #exchange{}) -> + ok + end, + Username) of {error, {resources_missing, [{not_found, Name} | _]}} -> rabbit_amqqueue:not_found(Name); {error, {resources_missing, [{absent, Q, Reason} | _]}} -> @@ -1913,6 +1822,9 @@ binding_action(Fun, SourceNameBin0, DestinationType, DestinationNameBin0, rabbit_misc:protocol_error(precondition_failed, Fmt, Args); {error, #amqp_error{} = Error} -> rabbit_misc:protocol_error(Error); + {error, timeout} -> + rabbit_misc:protocol_error( + internal_error, "Could not ~s binding due to timeout", [Action]); ok -> ok end. 
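With the refactor above, binding_action/4 takes the action as an atom (add | remove) and is exported alongside handle_method/6 for the management HTTP API refactor noted in the export list. A hedged usage sketch; the vhost, exchange, queue and routing-key values are purely illustrative:

    -module(binding_action_usage_sketch).
    -include_lib("rabbit_common/include/rabbit.hrl").
    -export([bind_orders_queue/2]).

    bind_orders_queue(Username, ConnPid) ->
        Binding = #binding{source      = rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>),
                           destination = rabbit_misc:r(<<"/">>, queue, <<"orders">>),
                           key         = <<"orders.created">>,
                           args        = []},
        %% the atom selects rabbit_binding:add/3 or rabbit_binding:remove/3;
        %% missing resources and timeouts surface as protocol errors inside
        ok = rabbit_channel:binding_action(add, Binding, Username, ConnPid).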
@@ -1995,10 +1907,10 @@ record_sent(Type, QueueType, Tag, AckRequired, false -> ok end, - DeliveredAt = os:system_time(millisecond), rabbit_trace:tap_out(Msg, ConnName, ChannelNum, Username, TraceState), UAMQ1 = case AckRequired of true -> + DeliveredAt = erlang:monotonic_time(millisecond), ?QUEUE:in(#pending_ack{delivery_tag = DeliveryTag, tag = Tag, delivered_at = DeliveredAt, @@ -2045,7 +1957,7 @@ collect_acks(AcknowledgedAcc, RemainingAcc, UAMQ, DeliveryTag, Multiple) -> end. %% Settles (acknowledges) messages at the queue replica process level. -%% This happens in the youngest-first order (ascending by delivery tag). +%% This happens in the oldest-first order (ascending by delivery tag). settle_acks(Acks, State = #ch{queue_states = QueueStates0}) -> {QueueStates, Actions} = foreach_per_queue( @@ -2139,14 +2051,13 @@ notify_limiter(Limiter, Acked) -> end end. -deliver_to_queues({Message, _Options, _RoutedToQueues = []} = Delivery, +deliver_to_queues({Message, _Options, _RoutedToQueues} = Delivery, #ch{cfg = #conf{virtual_host = VHost}} = State) -> - XNameBin = mc:get_annotation(exchange, Message), - XName = rabbit_misc:r(VHost, exchange, XNameBin), + XName = rabbit_misc:r(VHost, exchange, mc:exchange(Message)), deliver_to_queues(XName, Delivery, State). deliver_to_queues(XName, - {_Message, Options, _RoutedToQueues = []}, + {_Message, #{mandatory := false} = Options, _RoutedToQueues = []}, State) when not is_map_key(correlation, Options) -> %% optimisation when there are no queues ?INCR_STATS(exchange_stats, XName, 1, publish, State), @@ -2154,16 +2065,18 @@ deliver_to_queues(XName, ?INCR_STATS(exchange_stats, XName, 1, drop_unroutable, State), State; deliver_to_queues(XName, - {Message, Options, RoutedToQueues}, + {Message, Options0, RoutedToQueues}, State0 = #ch{queue_states = QueueStates0}) -> + {Mandatory, Options} = maps:take(mandatory, Options0), Qs = rabbit_amqqueue:prepend_extra_bcc(RoutedToQueues), case rabbit_queue_type:deliver(Qs, Message, Options, QueueStates0) of {ok, QueueStates, Actions} -> rabbit_global_counters:messages_routed(amqp091, length(Qs)), QueueNames = rabbit_amqqueue:queue_names(Qs), - MsgSeqNo = maps:get(correlation, Options, undefined), %% NB: the order here is important since basic.returns must be %% sent before confirms. 
+ ok = process_routing_mandatory(Mandatory, RoutedToQueues, Message, XName, State0), + MsgSeqNo = maps:get(correlation, Options, undefined), State1 = process_routing_confirm(MsgSeqNo, QueueNames, XName, State0), %% Actions must be processed after registering confirms as actions may %% contain rejections of publishes @@ -2192,32 +2105,23 @@ deliver_to_queues(XName, process_routing_mandatory(_Mandatory = true, _RoutedToQs = [], - MsgSeqNo, Msg, XName, - State = #ch{cfg = #conf{extended_return_callback = ExtRetCallback}}) -> + State) -> rabbit_global_counters:messages_unroutable_returned(amqp091, 1), ?INCR_STATS(exchange_stats, XName, 1, return_unroutable, State), - Content0 = mc:protocol_state(Msg), - Content = case ExtRetCallback of - true -> - %% providing the publishing sequence for AMQP 1.0 - {MsgSeqNo, Content0}; - false -> - Content0 - end, - [RoutingKey | _] = mc:get_annotation(routing_keys, Msg), + Content = mc:protocol_state(Msg), + [RoutingKey | _] = mc:routing_keys(Msg), ok = basic_return(Content, RoutingKey, XName#resource.name, State, no_route); process_routing_mandatory(_Mandatory = false, _RoutedToQs = [], - _MsgSeqNo, _Msg, XName, State) -> rabbit_global_counters:messages_unroutable_dropped(amqp091, 1), ?INCR_STATS(exchange_stats, XName, 1, drop_unroutable, State), ok; -process_routing_mandatory(_, _, _, _, _, _) -> +process_routing_mandatory(_, _, _, _, _) -> ok. process_routing_confirm(undefined, _, _, State) -> @@ -2441,33 +2345,33 @@ handle_method(#'exchange.bind'{destination = DestinationNameBin, routing_key = RoutingKey, arguments = Arguments}, ConnPid, AuthzContext, _CollectorId, VHostPath, User) -> - binding_action(fun rabbit_binding:add/3, - SourceNameBin, exchange, DestinationNameBin, - RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User); + binding_action_with_checks( + add, SourceNameBin, exchange, DestinationNameBin, + RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User); handle_method(#'exchange.unbind'{destination = DestinationNameBin, source = SourceNameBin, routing_key = RoutingKey, arguments = Arguments}, ConnPid, AuthzContext, _CollectorId, VHostPath, User) -> - binding_action(fun rabbit_binding:remove/3, - SourceNameBin, exchange, DestinationNameBin, - RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User); + binding_action_with_checks( + remove, SourceNameBin, exchange, DestinationNameBin, + RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User); handle_method(#'queue.unbind'{queue = QueueNameBin, exchange = ExchangeNameBin, routing_key = RoutingKey, arguments = Arguments}, ConnPid, AuthzContext, _CollectorId, VHostPath, User) -> - binding_action(fun rabbit_binding:remove/3, - ExchangeNameBin, queue, QueueNameBin, - RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User); + binding_action_with_checks( + remove, ExchangeNameBin, queue, QueueNameBin, + RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User); handle_method(#'queue.bind'{queue = QueueNameBin, exchange = ExchangeNameBin, routing_key = RoutingKey, arguments = Arguments}, ConnPid, AuthzContext, _CollectorId, VHostPath, User) -> - binding_action(fun rabbit_binding:add/3, - ExchangeNameBin, queue, QueueNameBin, - RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User); + binding_action_with_checks( + add, ExchangeNameBin, queue, QueueNameBin, + RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User); %% Note that all declares to these are effectively passive. If it %% exists it by definition has one consumer. 
handle_method(#'queue.declare'{queue = <<"amq.rabbitmq.reply-to", @@ -2499,6 +2403,7 @@ handle_method(#'queue.declare'{queue = QueueNameBin, Args0), StrippedQueueNameBin = strip_cr_lf(QueueNameBin), Durable = DurableDeclare andalso not ExclusiveDeclare, + Kind = queue, ActualNameBin = case StrippedQueueNameBin of <<>> -> case rabbit_amqqueue:is_server_named_allowed(Args) of @@ -2510,9 +2415,9 @@ handle_method(#'queue.declare'{queue = QueueNameBin, "Cannot declare a server-named queue for type ~tp", [rabbit_amqqueue:get_queue_type(Args)]) end; - Other -> check_name('queue', Other) + Other -> check_name(Kind, Other) end, - QueueName = rabbit_misc:r(VHostPath, queue, ActualNameBin), + QueueName = rabbit_misc:r(VHostPath, Kind, ActualNameBin), check_configure_permitted(QueueName, User, AuthzContext), rabbit_core_metrics:queue_declared(QueueName), case rabbit_amqqueue:with( @@ -2525,7 +2430,6 @@ handle_method(#'queue.declare'{queue = QueueNameBin, {ok, QueueName, MessageCount, ConsumerCount}; {error, not_found} -> %% enforce the limit for newly declared queues only - check_vhost_queue_limit(QueueName, VHostPath), DlxKey = <<"x-dead-letter-exchange">>, case rabbit_misc:r_arg(VHostPath, exchange, Args, DlxKey) of undefined -> @@ -2567,6 +2471,8 @@ handle_method(#'queue.declare'{queue = QueueNameBin, %% connection has died. Pretend the queue exists though, %% just so nothing fails. {ok, QueueName, 0, 0}; + {error, queue_limit_exceeded, Reason, ReasonArgs} -> + rabbit_misc:precondition_failed(Reason, ReasonArgs); {protocol_error, ErrorType, Reason, ReasonArgs} -> rabbit_misc:protocol_error(ErrorType, Reason, ReasonArgs) end; @@ -2606,13 +2512,16 @@ handle_method(#'exchange.delete'{exchange = ExchangeNameBin, check_not_default_exchange(ExchangeName), check_exchange_deletion(ExchangeName), check_configure_permitted(ExchangeName, User, AuthzContext), - case rabbit_exchange:delete(ExchangeName, IfUnused, Username) of - {error, not_found} -> + case rabbit_exchange:ensure_deleted(ExchangeName, IfUnused, Username) of + ok -> ok; {error, in_use} -> rabbit_misc:precondition_failed("~ts in use", [rabbit_misc:rs(ExchangeName)]); - ok -> - ok + {error, timeout} -> + rabbit_misc:protocol_error( + internal_error, + "failed to delete ~ts due to a timeout", + [rabbit_misc:rs(ExchangeName)]) end; handle_method(#'queue.purge'{queue = QueueNameBin}, ConnPid, AuthzContext, _CollectorPid, VHostPath, User) -> @@ -2631,7 +2540,7 @@ handle_method(#'queue.purge'{queue = QueueNameBin}, [rabbit_misc:rs(amqqueue:get_name(Q))]) end end); -handle_method(#'exchange.declare'{exchange = ExchangeNameBin, +handle_method(#'exchange.declare'{exchange = XNameBin, type = TypeNameBin, passive = false, durable = Durable, @@ -2641,13 +2550,14 @@ handle_method(#'exchange.declare'{exchange = ExchangeNameBin, _ConnPid, AuthzContext, _CollectorPid, VHostPath, #user{username = Username} = User) -> CheckedType = rabbit_exchange:check_type(TypeNameBin), - ExchangeName = rabbit_misc:r(VHostPath, exchange, strip_cr_lf(ExchangeNameBin)), + XNameBinStripped = strip_cr_lf(XNameBin), + ExchangeName = rabbit_misc:r(VHostPath, exchange, XNameBinStripped), check_not_default_exchange(ExchangeName), check_configure_permitted(ExchangeName, User, AuthzContext), X = case rabbit_exchange:lookup(ExchangeName) of {ok, FoundX} -> FoundX; {error, not_found} -> - _ = check_name('exchange', strip_cr_lf(ExchangeNameBin)), + _ = check_name('exchange', XNameBinStripped), AeKey = <<"alternate-exchange">>, case rabbit_misc:r_arg(VHostPath, exchange, Args, AeKey) of 
undefined -> ok; @@ -2659,13 +2569,22 @@ handle_method(#'exchange.declare'{exchange = ExchangeNameBin, check_write_permitted(AName, User, AuthzContext), ok end, - rabbit_exchange:declare(ExchangeName, - CheckedType, - Durable, - AutoDelete, - Internal, - Args, - Username) + case rabbit_exchange:declare(ExchangeName, + CheckedType, + Durable, + AutoDelete, + Internal, + Args, + Username) of + {ok, DeclaredX} -> + DeclaredX; + {error, timeout} -> + rabbit_misc:protocol_error( + internal_error, + "failed to declare ~ts because the operation " + "timed out", + [rabbit_misc:rs(ExchangeName)]) + end end, ok = rabbit_exchange:assert_equivalence(X, CheckedType, Durable, AutoDelete, Internal, Args); @@ -2687,14 +2606,14 @@ handle_deliver0(ConsumerTag, AckRequired, writer_gc_threshold = GCThreshold}, next_tag = DeliveryTag, queue_states = Qs}) -> - [RoutingKey | _] = mc:get_annotation(routing_keys, MsgCont0), - ExchangeNameBin = mc:get_annotation(exchange, MsgCont0), + Exchange = mc:exchange(MsgCont0), + [RoutingKey | _] = mc:routing_keys(MsgCont0), MsgCont = mc:convert(mc_amqpl, MsgCont0), Content = mc:protocol_state(MsgCont), Deliver = #'basic.deliver'{consumer_tag = ConsumerTag, delivery_tag = DeliveryTag, redelivered = Redelivered, - exchange = ExchangeNameBin, + exchange = Exchange, routing_key = RoutingKey}, {ok, QueueType} = rabbit_queue_type:module(QName, Qs), case QueueType of @@ -2713,15 +2632,15 @@ handle_deliver0(ConsumerTag, AckRequired, handle_basic_get(WriterPid, DeliveryTag, NoAck, MessageCount, Msg0 = {_QName, _QPid, _MsgId, Redelivered, MsgCont0}, QueueType, State) -> - [RoutingKey | _] = mc:get_annotation(routing_keys, MsgCont0), - ExchangeName = mc:get_annotation(exchange, MsgCont0), + Exchange = mc:exchange(MsgCont0), + [RoutingKey | _] = mc:routing_keys(MsgCont0), MsgCont = mc:convert(mc_amqpl, MsgCont0), Content = mc:protocol_state(MsgCont), ok = rabbit_writer:send_command( WriterPid, #'basic.get_ok'{delivery_tag = DeliveryTag, redelivered = Redelivered, - exchange = ExchangeName, + exchange = Exchange, routing_key = RoutingKey, message_count = MessageCount}, Content), @@ -2786,7 +2705,7 @@ evaluate_consumer_timeout(State = #ch{unacked_message_q = UAMQ}) -> evaluate_consumer_timeout1(PA = #pending_ack{delivered_at = Time}, State) -> - Now = os:system_time(millisecond), + Now = erlang:monotonic_time(millisecond), case get_consumer_timeout(PA, State) of Timeout when is_integer(Timeout) andalso Time < Now - Timeout -> @@ -2795,13 +2714,15 @@ evaluate_consumer_timeout1(PA = #pending_ack{delivered_at = Time}, {noreply, State} end. -handle_consumer_timed_out(Timeout,#pending_ack{delivery_tag = DeliveryTag}, +handle_consumer_timed_out(Timeout,#pending_ack{delivery_tag = DeliveryTag, tag = ConsumerTag, queue = QName}, State = #ch{cfg = #conf{channel = Channel}}) -> - rabbit_log_channel:warning("Consumer ~ts on channel ~w has timed out " - "waiting for delivery acknowledgement. Timeout used: ~tp ms. " + rabbit_log_channel:warning("Consumer '~ts' on channel ~w and ~ts has timed out " + "waiting for a consumer acknowledgement of a delivery with delivery tag = ~b. Timeout used: ~tp ms. " "This timeout value can be configured, see consumers doc guide to learn more", - [rabbit_data_coercion:to_binary(DeliveryTag), - Channel, Timeout]), + [ConsumerTag, + Channel, + rabbit_misc:rs(QName), + DeliveryTag, Timeout]), Ex = rabbit_misc:amqp_error(precondition_failed, "delivery acknowledgement on channel ~w timed out. " "Timeout value used: ~tp ms. 
" @@ -2809,12 +2730,11 @@ handle_consumer_timed_out(Timeout,#pending_ack{delivery_tag = DeliveryTag}, [Channel, Timeout], none), handle_exception(Ex, State). -handle_queue_actions(Actions, #ch{cfg = #conf{writer_pid = WriterPid}} = State0) -> +handle_queue_actions(Actions, State) -> lists:foldl( - fun - ({settled, QRef, MsgSeqNos}, S0) -> + fun({settled, QRef, MsgSeqNos}, S0) -> confirm(MsgSeqNos, QRef, S0); - ({rejected, _QRef, MsgSeqNos}, S0) -> + ({rejected, _QRef, MsgSeqNos}, S0) -> {U, Rej} = lists:foldr( fun(SeqNo, {U1, Acc}) -> @@ -2827,26 +2747,29 @@ handle_queue_actions(Actions, #ch{cfg = #conf{writer_pid = WriterPid}} = State0) end, {S0#ch.unconfirmed, []}, MsgSeqNos), S = S0#ch{unconfirmed = U}, record_rejects(Rej, S); - ({deliver, CTag, AckRequired, Msgs}, S0) -> + ({deliver, CTag, AckRequired, Msgs}, S0) -> handle_deliver(CTag, AckRequired, Msgs, S0); - ({queue_down, QRef}, S0) -> + ({queue_down, QRef}, S0) -> handle_consuming_queue_down_or_eol(QRef, S0); - ({block, QName}, S0) -> + ({block, QName}, S0) -> credit_flow:block(QName), S0; - ({unblock, QName}, S0) -> + ({unblock, QName}, S0) -> credit_flow:unblock(QName), - S0; - ({send_credit_reply, Avail}, S0) -> - ok = rabbit_writer:send_command(WriterPid, - #'basic.credit_ok'{available = Avail}), - S0; - ({send_drained, {CTag, Credit}}, S0) -> - ok = rabbit_writer:send_command(WriterPid, - #'basic.credit_drained'{consumer_tag = CTag, - credit_drained = Credit}), S0 - end, State0, Actions). + end, State, Actions). + +handle_eol(QName, State0) -> + State1 = handle_consuming_queue_down_or_eol(QName, State0), + {ConfirmMXs, Unconfirmed} = rabbit_confirms:remove_queue(QName, State1#ch.unconfirmed), + State2 = State1#ch{unconfirmed = Unconfirmed}, + %% Deleted queue is a special case. + %% Do not nack the "rejected" messages. + State3 = record_confirms(ConfirmMXs, State2), + _ = erase_queue_stats(QName), + QStates = rabbit_queue_type:remove(QName, State3#ch.queue_states), + State = State3#ch{queue_states = QStates}, + noreply_coalesce(State). maybe_increase_global_publishers(#ch{publishing_mode = true} = State0) -> State0; @@ -2861,4 +2784,3 @@ maybe_decrease_global_publishers(#ch{publishing_mode = true}) -> is_global_qos_permitted() -> rabbit_deprecated_features:is_permitted(global_qos). - diff --git a/deps/rabbit/src/rabbit_channel_interceptor.erl b/deps/rabbit/src/rabbit_channel_interceptor.erl index 2410e5c6b013..f33a77c54959 100644 --- a/deps/rabbit/src/rabbit_channel_interceptor.erl +++ b/deps/rabbit/src/rabbit_channel_interceptor.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_channel_interceptor). --include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -export([init/1, intercept_in/3]). @@ -19,8 +18,8 @@ -type(method_name() :: rabbit_framing:amqp_method_name()). -type(original_method() :: rabbit_framing:amqp_method_record()). -type(processed_method() :: rabbit_framing:amqp_method_record()). --type(original_content() :: rabbit_types:maybe(rabbit_types:content())). --type(processed_content() :: rabbit_types:maybe(rabbit_types:content())). +-type(original_content() :: rabbit_types:'maybe'(rabbit_types:content())). 
+-type(processed_content() :: rabbit_types:'maybe'(rabbit_types:content())). -type(interceptor_state() :: term()). -callback description() -> [proplists:property()]. @@ -72,7 +71,9 @@ call_module(Mod, St, M, C) -> % this little dance is because Mod might be unloaded at any point case (catch {ok, Mod:intercept(M, C, St)}) of {ok, R} -> validate_response(Mod, M, C, R); - {'EXIT', {undef, [{Mod, intercept, _, _} | _]}} -> {M, C} + {'EXIT', {undef, [{Mod, intercept, _, _} | _]}} -> {M, C}; + {'EXIT', {amqp_error, _Type, _ErrMsg, _} = AMQPError} -> + rabbit_misc:protocol_error(AMQPError) end. validate_response(Mod, M1, C1, R = {M2, C2}) -> diff --git a/deps/rabbit/src/rabbit_channel_sup.erl b/deps/rabbit/src/rabbit_channel_sup.erl index 9298eb73ada5..8a0a8171b1d0 100644 --- a/deps/rabbit/src/rabbit_channel_sup.erl +++ b/deps/rabbit/src/rabbit_channel_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_channel_sup). diff --git a/deps/rabbit/src/rabbit_channel_sup_sup.erl b/deps/rabbit/src/rabbit_channel_sup_sup.erl index 8bdee2a62561..85f09b006ec3 100644 --- a/deps/rabbit/src/rabbit_channel_sup_sup.erl +++ b/deps/rabbit/src/rabbit_channel_sup_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_channel_sup_sup). diff --git a/deps/rabbit/src/rabbit_channel_tracking.erl b/deps/rabbit/src/rabbit_channel_tracking.erl index 4cfb466ccdef..0931352416df 100644 --- a/deps/rabbit/src/rabbit_channel_tracking.erl +++ b/deps/rabbit/src/rabbit_channel_tracking.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_channel_tracking). @@ -25,6 +25,7 @@ shutdown_tracked_items/2]). -export([list/0, list_of_user/1, list_on_node/1, + channel_count_on_node/1, tracked_channel_table_name_for/1, tracked_channel_per_user_table_name_for/1, ensure_tracked_tables_for_this_node/0, @@ -185,8 +186,18 @@ list_on_node(Node) -> [] end. --spec tracked_channel_table_name_for(node()) -> atom(). +channel_count_on_node(Node) when Node == node() -> + ets:info(?TRACKED_CHANNEL_TABLE, size); +channel_count_on_node(Node) -> + case rabbit_misc:rpc_call(Node, ?MODULE, ?FUNCTION_NAME, [Node]) of + Int when is_integer(Int) -> + Int; + _ -> + 0 + end. + +-spec tracked_channel_table_name_for(node()) -> atom(). tracked_channel_table_name_for(Node) -> list_to_atom(rabbit_misc:format("tracked_channel_on_node_~ts", [Node])). 
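channel_count_on_node/1 above answers from the local ETS table when asked about the current node and falls back to an RPC (returning 0 on any failure) for remote nodes. A hedged sketch of a cluster-wide total; the wrapper module is illustrative, not part of this patch:

    -module(channel_count_sketch).
    -export([total_tracked_channels/0]).

    total_tracked_channels() ->
        lists:sum([rabbit_channel_tracking:channel_count_on_node(Node)
                   || Node <- [node() | nodes()]]).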
@@ -220,8 +231,8 @@ get_tracked_channels_by_connection_pid(ConnPid) -> get_tracked_channel_by_id(ChId) -> rabbit_tracking:match_tracked_items( - ?TRACKED_CHANNEL_TABLE, - #tracked_channel{id = ChId, _ = '_'}). + ?TRACKED_CHANNEL_TABLE, + #tracked_channel{id = ChId, _ = '_'}). delete_tracked_channel_user_entry(Username) -> rabbit_tracking:delete_tracked_entry( diff --git a/deps/rabbit/src/rabbit_channel_tracking_handler.erl b/deps/rabbit/src/rabbit_channel_tracking_handler.erl index f26b860badf0..13c3963a54d5 100644 --- a/deps/rabbit/src/rabbit_channel_tracking_handler.erl +++ b/deps/rabbit/src/rabbit_channel_tracking_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_channel_tracking_handler). diff --git a/deps/rabbit/src/rabbit_classic_queue.erl b/deps/rabbit/src/rabbit_classic_queue.erl index 34580cb0c9b1..2da8d55f7a6f 100644 --- a/deps/rabbit/src/rabbit_classic_queue.erl +++ b/deps/rabbit/src/rabbit_classic_queue.erl @@ -1,5 +1,6 @@ -module(rabbit_classic_queue). -behaviour(rabbit_queue_type). +-behaviour(rabbit_policy_validator). -include("amqqueue.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -32,15 +33,17 @@ purge/1, policy_changed/1, stat/1, + format/2, init/1, close/1, update/2, consume/3, - cancel/5, + cancel/3, handle_event/3, deliver/3, settle/5, - credit/5, + credit_v1/5, + credit/6, dequeue/5, info/2, state_info/1, @@ -51,13 +54,46 @@ -export([delete_crashed/1, delete_crashed/2, - delete_crashed_internal/2]). + delete_crashed_internal/2, + delete_crashed_in_backing_queue/1]). -export([confirm_to_sender/3, send_rejection/3, deliver_to_consumer/5, - send_drained/3, - send_credit_reply/3]). + send_credit_reply_credit_api_v1/3, + send_drained_credit_api_v1/4, + send_credit_reply/7]). + +-export([validate_policy/1]). + +-rabbit_boot_step( + {?MODULE, + [{description, "Deprecated queue-master-locator support." + "Use queue-leader-locator instead."}, + {mfa, {rabbit_registry, register, + [policy_validator, <<"queue-master-locator">>, ?MODULE]}}, + {mfa, {rabbit_registry, register, + [operator_policy_validator, <<"queue-master-locator">>, ?MODULE]}}, + {requires, rabbit_registry}, + {enables, recovery}]}). + +validate_policy(Args) -> + %% queue-master-locator was deprecated in 4.0 + Locator = proplists:get_value(<<"queue-master-locator">>, Args, unknown), + case Locator of + unknown -> + ok; + _ -> + case rabbit_queue_location:master_locator_permitted() of + true -> + case lists:member(Locator, rabbit_queue_location:queue_leader_locators()) of + true -> ok; + false -> {error, "~tp is not a valid master locator", [Locator]} + end; + false -> + {error, "use of deprecated queue-master-locator argument is not permitted", []} + end + end. -spec is_enabled() -> boolean(). is_enabled() -> true. @@ -66,7 +102,28 @@ is_enabled() -> true. is_compatible(_, _, _) -> true. +validate_arguments(Args) -> + case lists:keymember(<<"x-queue-master-locator">>, 1, Args) of + false -> + ok; + true -> + case rabbit_queue_location:master_locator_permitted() of + true -> + ok; + false -> + Warning = rabbit_deprecated_features:get_warning( + queue_master_locator), + {protocol_error, internal_error, "~ts", [Warning]} + end + end. 
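Both validate_policy/1 and validate_arguments/1 above reject queue-master-locator once the deprecated feature is no longer permitted. A hedged advanced.config sketch for operators who need to keep it working temporarily, assuming the standard permit_deprecated_features application environment key and the queue_master_locator feature name used in this diff:

    [
     {rabbit, [
       %% temporary escape hatch while policies and queue arguments are migrated
       %% to queue-leader-locator; remove once the migration is complete
       {permit_deprecated_features, #{queue_master_locator => true}}
     ]}
    ].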
+ declare(Q, Node) when ?amqqueue_is_classic(Q) -> + case validate_arguments(amqqueue:get_arguments(Q)) of + ok -> do_declare(Q, Node); + Error -> Error + end. + +do_declare(Q, Node) when ?amqqueue_is_classic(Q) -> QName = amqqueue:get_name(Q), VHost = amqqueue:get_vhost(Q), Node1 = case {Node, rabbit_amqqueue:is_exclusive(Q)} of @@ -75,46 +132,45 @@ declare(Q, Node) when ?amqqueue_is_classic(Q) -> {_, true} -> Node; _ -> - case rabbit_queue_master_location_misc:get_location(Q) of - {ok, Node0} -> Node0; - _ -> Node - end + {Node0, _} = rabbit_queue_location:select_leader_and_followers(Q, 1), + Node0 end, - Node2 = rabbit_mirror_queue_misc:initial_queue_node(Q, Node1), - case rabbit_vhost_sup_sup:get_vhost_sup(VHost, Node2) of + case rabbit_vhost_sup_sup:get_vhost_sup(VHost, Node1) of {ok, _} -> gen_server2:call( - rabbit_amqqueue_sup_sup:start_queue_process(Node2, Q, declare), + rabbit_amqqueue_sup_sup:start_queue_process(Node1, Q), {init, new}, infinity); {error, Error} -> {protocol_error, internal_error, "Cannot declare a queue '~ts' on node '~ts': ~255p", - [rabbit_misc:rs(QName), Node2, Error]} + [rabbit_misc:rs(QName), Node1, Error]} end. -delete(Q, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q) -> - case wait_for_promoted_or_stopped(Q) of - {promoted, Q1} -> - QPid = amqqueue:get_pid(Q1), - delegate:invoke(QPid, {gen_server2, call, - [{delete, IfUnused, IfEmpty, ActingUser}, - infinity]}); - {stopped, Q1} -> - #resource{name = Name, virtual_host = Vhost} = amqqueue:get_name(Q1), - case IfEmpty of +delete(Q0, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q0) -> + QName = amqqueue:get_name(Q0), + case rabbit_amqqueue:lookup(QName) of + {ok, Q} -> + QPid = amqqueue:get_pid(Q), + case rabbit_process:is_process_alive(QPid) of true -> - rabbit_log:error("Queue ~ts in vhost ~ts has its master node down and " - "no mirrors available or eligible for promotion. " - "The queue may be non-empty. " - "Refusing to force-delete.", - [Name, Vhost]), - {error, not_empty}; + delegate:invoke(QPid, {gen_server2, call, + [{delete, IfUnused, IfEmpty, ActingUser}, + infinity]}); false -> - rabbit_log:warning("Queue ~ts in vhost ~ts has its master node is down and " - "no mirrors available or eligible for promotion. " - "Forcing queue deletion.", - [Name, Vhost]), - delete_crashed_internal(Q1, ActingUser), - {ok, 0} + #resource{name = Name, virtual_host = Vhost} = QName, + case IfEmpty of + true -> + rabbit_log:error("Queue ~ts in vhost ~ts is down. " + "The queue may be non-empty. " + "Refusing to force-delete.", + [Name, Vhost]), + {error, not_empty}; + false -> + rabbit_log:warning("Queue ~ts in vhost ~ts is down. " + "Forcing queue deletion.", + [Name, Vhost]), + delete_crashed_internal(Q, ActingUser), + {ok, 0} + end end; {error, not_found} -> %% Assume the queue was deleted @@ -124,10 +180,6 @@ delete(Q, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q) -> is_recoverable(Q) when ?is_amqqueue(Q) and ?amqqueue_is_classic(Q) -> Node = node(), Node =:= amqqueue:qnode(Q) andalso - %% Terminations on node down will not remove the rabbit_queue - %% record if it is a mirrored queue (such info is now obtained from - %% the policy). Thus, we must check if the local pid is alive - %% - if the record is present - in order to restart. (not rabbit_db_queue:consistent_exists(amqqueue:get_name(Q)) orelse not rabbit_process:is_process_alive(amqqueue:get_pid(Q))). 
@@ -169,12 +221,36 @@ find_missing_queues([Q1|Rem1], [Q2|Rem2] = Q2s, Acc) -> -spec policy_changed(amqqueue:amqqueue()) -> ok. policy_changed(Q) -> QPid = amqqueue:get_pid(Q), - gen_server2:cast(QPid, policy_changed). + case rabbit_khepri:is_enabled() of + false -> + gen_server2:cast(QPid, policy_changed); + true -> + %% When using Khepri, projections are guaranteed to be atomic on + %% the node that processes them, but there might be a slight delay + %% until they're applied on other nodes. Some test suites fail + %% intermittently, showing that rabbit_amqqueue_process is reading + %% the old policy value. We use the khepri ff to hide this API change, + %% and use the up-to-date record to update the policy on the gen_server + %% state. + gen_server2:cast(QPid, {policy_changed, Q}) + end. stat(Q) -> delegate:invoke(amqqueue:get_pid(Q), {gen_server2, call, [stat, infinity]}). + +format(Q, _Ctx) when ?is_amqqueue(Q) -> + State = case amqqueue:get_state(Q) of + live -> + running; + S -> + S + end, + [{type, classic}, + {state, State}, + {node, node(amqqueue:get_pid(Q))}]. + -spec init(amqqueue:amqqueue()) -> {ok, state()}. init(Q) when ?amqqueue_is_classic(Q) -> {ok, #?STATE{pid = amqqueue:get_pid(Q)}}. @@ -200,16 +276,17 @@ consume(Q, Spec, State0) when ?amqqueue_is_classic(Q) -> channel_pid := ChPid, limiter_pid := LimiterPid, limiter_active := LimiterActive, - prefetch_count := ConsumerPrefetchCount, + mode := Mode, consumer_tag := ConsumerTag, exclusive_consume := ExclusiveConsume, - args := Args, + args := Args0, ok_msg := OkMsg, acting_user := ActingUser} = Spec, + {ModeOrPrefetch, Args} = consume_backwards_compat(Mode, Args0), case delegate:invoke(QPid, {gen_server2, call, [{basic_consume, NoAck, ChPid, LimiterPid, - LimiterActive, ConsumerPrefetchCount, ConsumerTag, + LimiterActive, ModeOrPrefetch, ConsumerTag, ExclusiveConsume, Args, OkMsg, ActingUser}, infinity]}) of ok -> @@ -220,36 +297,69 @@ consume(Q, Spec, State0) when ?amqqueue_is_classic(Q) -> Err end. -cancel(Q, ConsumerTag, OkMsg, ActingUser, State) -> - QPid = amqqueue:get_pid(Q), - case delegate:invoke(QPid, {gen_server2, call, - [{basic_cancel, self(), ConsumerTag, - OkMsg, ActingUser}, infinity]}) of - ok -> - {ok, State}; +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. +consume_backwards_compat({simple_prefetch, PrefetchCount} = Mode, Args) -> + case rabbit_feature_flags:is_enabled('rabbitmq_4.0.0') of + true -> {Mode, Args}; + false -> {PrefetchCount, Args} + end; +consume_backwards_compat({credited, InitialDeliveryCount} = Mode, Args) + when is_integer(InitialDeliveryCount) -> + %% credit API v2 + {Mode, Args}; +consume_backwards_compat({credited, credit_api_v1}, Args) -> + %% credit API v1 + {_PrefetchCount = 0, + [{<<"x-credit">>, table, [{<<"credit">>, long, 0}, + {<<"drain">>, bool, false}]} | Args]}. + +cancel(Q, Spec, State) -> + %% Cancel API v2 reuses feature flag rabbitmq_4.0.0. + Request = case rabbit_feature_flags:is_enabled('rabbitmq_4.0.0') of + true -> + {stop_consumer, Spec#{pid => self()}}; + false -> + #{consumer_tag := ConsumerTag, + user := ActingUser} = Spec, + OkMsg = maps:get(ok_msg, Spec, undefined), + {basic_cancel, self(), ConsumerTag, OkMsg, ActingUser} + end, + case delegate:invoke(amqqueue:get_pid(Q), + {gen_server2, call, [Request, infinity]}) of + ok -> {ok, State}; Err -> Err end. -spec settle(rabbit_amqqueue:name(), rabbit_queue_type:settle_op(), rabbit_types:ctag(), [non_neg_integer()], state()) -> {state(), rabbit_queue_type:actions()}. 
-settle(_QName, complete, _CTag, MsgIds, State) -> - Pid = State#?STATE.pid, - delegate:invoke_no_result(Pid, - {gen_server2, cast, [{ack, MsgIds, self()}]}), - {State, []}; -settle(_QName, Op, _CTag, MsgIds, State) -> - ChPid = self(), - ok = delegate:invoke_no_result(State#?STATE.pid, - {gen_server2, cast, - [{reject, Op == requeue, MsgIds, ChPid}]}), +settle(QName, {modify, _DelFailed, Undel, _Anns}, CTag, MsgIds, State) -> + %% translate modify into other op + Op = case Undel of + true -> + discard; + false -> + requeue + end, + settle(QName, Op, CTag, MsgIds, State); +settle(_QName, Op, _CTag, MsgIds, State = #?STATE{pid = Pid}) -> + Arg = case Op of + complete -> + {ack, MsgIds, self()}; + _ -> + {reject, Op == requeue, MsgIds, self()} + end, + delegate:invoke_no_result(Pid, {gen_server2, cast, [Arg]}), {State, []}. -credit(_QName, CTag, Credit, Drain, State) -> - ChPid = self(), - delegate:invoke_no_result(State#?STATE.pid, - {gen_server2, cast, - [{credit, ChPid, CTag, Credit, Drain}]}), +credit_v1(_QName, Ctag, LinkCreditSnd, Drain, #?STATE{pid = QPid} = State) -> + Request = {credit, self(), Ctag, LinkCreditSnd, Drain}, + delegate:invoke_no_result(QPid, {gen_server2, cast, [Request]}), + {State, []}. + +credit(_QName, Ctag, DeliveryCountRcv, LinkCreditRcv, Drain, #?STATE{pid = QPid} = State) -> + Request = {credit, self(), Ctag, DeliveryCountRcv, LinkCreditRcv, Drain}, + delegate:invoke_no_result(QPid, {gen_server2, cast, [Request]}), {State, []}. handle_event(QName, {confirm, MsgSeqNos, Pid}, #?STATE{unconfirmed = U0} = State) -> @@ -273,21 +383,14 @@ handle_event(QName, {reject_publish, SeqNo, _QPid}, Actions = [{rejected, QName, Rejected}], {ok, State#?STATE{unconfirmed = U}, Actions}; handle_event(QName, {down, Pid, Info}, #?STATE{monitored = Monitored, - pid = MasterPid, unconfirmed = U0} = State0) -> State = State0#?STATE{monitored = maps:remove(Pid, Monitored)}, - Actions0 = case Pid =:= MasterPid of - true -> - [{queue_down, QName}]; - false -> - [] - end, + Actions0 = [{queue_down, QName}], case rabbit_misc:is_abnormal_exit(Info) of - false when Info =:= normal andalso Pid == MasterPid -> - %% queue was deleted and masterpid is down + false when Info =:= normal -> + %% queue was deleted {eol, []}; false -> - %% this assumes the mirror isn't part of the active set MsgSeqNos = maps:keys( maps:filter(fun (_, #msg_status{pending = Pids}) -> lists:member(Pid, Pids) @@ -300,8 +403,7 @@ handle_event(QName, {down, Pid, Info}, #?STATE{monitored = Monitored, {ok, State#?STATE{unconfirmed = Unconfirmed}, Actions}; true -> %% any abnormal exit should be considered a full reject of the - %% oustanding message ids - If the message didn't get to all - %% mirrors we have to assume it will never get there + %% oustanding message ids MsgIds = maps:fold( fun (SeqNo, Status, Acc) -> case lists:member(Pid, Status#msg_status.pending) of @@ -315,9 +417,13 @@ handle_event(QName, {down, Pid, Info}, #?STATE{monitored = Monitored, {ok, State#?STATE{unconfirmed = U}, [{rejected, QName, MsgIds} | Actions0]} end; -handle_event(_QName, {send_drained, _} = Action, State) -> +handle_event(_QName, Action, State) + when element(1, Action) =:= credit_reply -> {ok, State, [Action]}; -handle_event(_QName, {send_credit_reply, _} = Action, State) -> +handle_event(_QName, {send_drained, {Ctag, Credit}}, State) -> + %% This function clause should be deleted when feature flag + %% rabbitmq_4.0.0 becomes required. + Action = {credit_reply_v1, Ctag, Credit, _Available = 0, _Drain = true}, {ok, State, [Action]}. 
settlement_action(_Type, _QRef, [], Acc) -> @@ -338,8 +444,8 @@ deliver(Qs0, Msg0, Options) -> Flow = maps:get(flow, Options, noflow), Confirm = MsgSeqNo /= undefined, - {MPids, SPids, Qs} = qpids(Qs0, Confirm, MsgSeqNo), - Delivery = rabbit_basic:delivery(Mandatory, Confirm, Msg, MsgSeqNo, Flow), + {MPids, Qs} = qpids(Qs0, Confirm, MsgSeqNo), + Delivery = rabbit_basic:delivery(Mandatory, Confirm, Msg, MsgSeqNo), case Flow of %% Here we are tracking messages sent by the rabbit_channel @@ -347,14 +453,11 @@ deliver(Qs0, Msg0, Options) -> %% dictionary. flow -> _ = [credit_flow:send(QPid) || QPid <- MPids], - _ = [credit_flow:send(QPid) || QPid <- SPids], ok; noflow -> ok end, MMsg = {deliver, Delivery, false}, - SMsg = {deliver, Delivery, true}, delegate:invoke_no_result(MPids, {gen_server2, cast, [MMsg]}), - delegate:invoke_no_result(SPids, {gen_server2, cast, [SMsg]}), {Qs, []}. -spec dequeue(rabbit_amqqueue:name(), NoAck :: boolean(), @@ -403,80 +506,61 @@ purge(Q) when ?is_amqqueue(Q) -> qpids(Qs, Confirm, MsgNo) -> lists:foldl( - fun ({Q, S0}, {MPidAcc, SPidAcc, Qs0}) -> + fun ({Q, S0}, {MPidAcc, Qs0}) -> QPid = amqqueue:get_pid(Q), - SPids = amqqueue:get_slave_pids(Q), QRef = amqqueue:get_name(Q), S1 = ensure_monitor(QPid, QRef, S0), - S2 = lists:foldl(fun(SPid, Acc) -> - ensure_monitor(SPid, QRef, Acc) - end, S1, SPids), %% confirm record only if necessary - S = case S2 of + S = case S1 of #?STATE{unconfirmed = U0} -> - Rec = [QPid | SPids], + Rec = [QPid], U = case Confirm of false -> U0; true -> U0#{MsgNo => #msg_status{pending = Rec}} end, - S2#?STATE{pid = QPid, + S1#?STATE{pid = QPid, unconfirmed = U}; stateless -> - S2 + S1 end, - {[QPid | MPidAcc], SPidAcc ++ SPids, - [{Q, S} | Qs0]} - end, {[], [], []}, Qs). - -%% internal-ish --spec wait_for_promoted_or_stopped(amqqueue:amqqueue()) -> - {promoted, amqqueue:amqqueue()} | - {stopped, amqqueue:amqqueue()} | - {error, not_found}. -wait_for_promoted_or_stopped(Q0) -> - QName = amqqueue:get_name(Q0), - case rabbit_amqqueue:lookup(QName) of - {ok, Q} -> - QPid = amqqueue:get_pid(Q), - SPids = amqqueue:get_slave_pids(Q), - case rabbit_process:is_process_alive(QPid) of - true -> {promoted, Q}; - false -> - case lists:any(fun(Pid) -> - rabbit_process:is_process_alive(Pid) - end, SPids) of - %% There is a live slave. May be promoted - true -> - timer:sleep(100), - wait_for_promoted_or_stopped(Q); - %% All slave pids are stopped. - %% No process left for the queue - false -> {stopped, Q} - end - end; - {error, not_found} -> - {error, not_found} - end. + {[QPid | MPidAcc], [{Q, S} | Qs0]} + end, {[], []}, Qs). -spec delete_crashed(amqqueue:amqqueue()) -> ok. delete_crashed(Q) -> delete_crashed(Q, ?INTERNAL_USER). delete_crashed(Q, ActingUser) -> - ok = rpc:call(amqqueue:qnode(Q), ?MODULE, delete_crashed_internal, - [Q, ActingUser]). + %% Delete from `rabbit_db_queue' from the queue's node. The deletion's + %% change to the Khepri projection is immediately consistent on that node, + %% so the call will block until that node has fully deleted and forgotten + %% about the queue. + Ret = rpc:call(amqqueue:qnode(Q), ?MODULE, delete_crashed_in_backing_queue, + [Q]), + case Ret of + {badrpc, {'EXIT', {undef, _}}} -> + %% Compatibility: if the remote node doesn't yet expose this + %% function, call it directly on this node. + ok = delete_crashed_in_backing_queue(Q); + ok -> + ok + end, + ok = rabbit_amqqueue:internal_delete(Q, ActingUser). 
delete_crashed_internal(Q, ActingUser) -> - {ok, BQ} = application:get_env(rabbit, backing_queue_module), - BQ:delete_crashed(Q), + delete_crashed_in_backing_queue(Q), ok = rabbit_amqqueue:internal_delete(Q, ActingUser). +delete_crashed_in_backing_queue(Q) -> + {ok, BQ} = application:get_env(rabbit, backing_queue_module), + BQ:delete_crashed(Q). + recover_durable_queues(QueuesAndRecoveryTerms) -> {Results, Failures} = gen_server2:mcall( - [{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q, recovery), + [{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q), {init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]), [rabbit_log:error("Queue ~tp failed to initialise: ~tp", [Pid, Error]) || {Pid, Error} <- Failures], @@ -485,17 +569,19 @@ recover_durable_queues(QueuesAndRecoveryTerms) -> capabilities() -> #{unsupported_policies => [%% Stream policies <<"max-age">>, <<"stream-max-segment-size-bytes">>, - <<"queue-leader-locator">>, <<"initial-cluster-size">>, + <<"initial-cluster-size">>, %% Quorum policies - <<"delivery-limit">>, <<"dead-letter-strategy">>], + <<"delivery-limit">>, <<"dead-letter-strategy">>, <<"max-in-memory-length">>, <<"max-in-memory-bytes">>, <<"target-group-size">>], queue_arguments => [<<"x-expires">>, <<"x-message-ttl">>, <<"x-dead-letter-exchange">>, <<"x-dead-letter-routing-key">>, <<"x-max-length">>, <<"x-max-length-bytes">>, <<"x-max-priority">>, <<"x-overflow">>, <<"x-queue-mode">>, <<"x-queue-version">>, - <<"x-single-active-consumer">>, <<"x-queue-type">>, - <<"x-queue-master-locator">>], - consumer_arguments => [<<"x-cancel-on-ha-failover">>, - <<"x-priority">>, <<"x-credit">>], + <<"x-single-active-consumer">>, <<"x-queue-type">>, <<"x-queue-master-locator">>] + ++ case rabbit_feature_flags:is_enabled('rabbitmq_4.0.0') of + true -> [<<"x-queue-leader-locator">>]; + false -> [] + end, + consumer_arguments => [<<"x-priority">>], server_named => true}. notify_decorators(Q) when ?is_amqqueue(Q) -> @@ -557,26 +643,30 @@ ensure_monitor(Pid, QName, State = #?STATE{monitored = Monitored}) -> %% part of channel <-> queue api confirm_to_sender(Pid, QName, MsgSeqNos) -> - Msg = {confirm, MsgSeqNos, self()}, - gen_server:cast(Pid, {queue_event, QName, Msg}). + Evt = {confirm, MsgSeqNos, self()}, + send_queue_event(Pid, QName, Evt). send_rejection(Pid, QName, MsgSeqNo) -> - Msg = {reject_publish, MsgSeqNo, self()}, - gen_server:cast(Pid, {queue_event, QName, Msg}). + Evt = {reject_publish, MsgSeqNo, self()}, + send_queue_event(Pid, QName, Evt). deliver_to_consumer(Pid, QName, CTag, AckRequired, Message) -> - Deliver = {deliver, CTag, AckRequired, [Message]}, - Evt = {queue_event, QName, Deliver}, - gen_server:cast(Pid, Evt). - -send_drained(Pid, QName, CTagCredits) when is_list(CTagCredits) -> - lists:foreach(fun(CTagCredit) -> - send_drained(Pid, QName, CTagCredit) - end, CTagCredits); -send_drained(Pid, QName, CTagCredit) when is_tuple(CTagCredit) -> - gen_server:cast(Pid, {queue_event, QName, - {send_drained, CTagCredit}}). - -send_credit_reply(Pid, QName, Len) when is_integer(Len) -> - gen_server:cast(Pid, {queue_event, QName, - {send_credit_reply, Len}}). + Evt = {deliver, CTag, AckRequired, [Message]}, + send_queue_event(Pid, QName, Evt). + +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. +send_credit_reply_credit_api_v1(Pid, QName, Available) -> + Evt = {send_credit_reply, Available}, + send_queue_event(Pid, QName, Evt). + +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. 
+send_drained_credit_api_v1(Pid, QName, Ctag, Credit) -> + Evt = {send_drained, {Ctag, Credit}}, + send_queue_event(Pid, QName, Evt). + +send_credit_reply(Pid, QName, Ctag, DeliveryCount, Credit, Available, Drain) -> + Evt = {credit_reply, Ctag, DeliveryCount, Credit, Available, Drain}, + send_queue_event(Pid, QName, Evt). + +send_queue_event(Pid, QName, Event) -> + gen_server:cast(Pid, {queue_event, QName, Event}). diff --git a/deps/rabbit/src/rabbit_classic_queue_index_v2.erl b/deps/rabbit/src/rabbit_classic_queue_index_v2.erl index 67a22f711254..32111ca9651f 100644 --- a/deps/rabbit/src/rabbit_classic_queue_index_v2.erl +++ b/deps/rabbit/src/rabbit_classic_queue_index_v2.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_classic_queue_index_v2). @@ -41,18 +41,7 @@ -define(HEADER_SIZE, 64). %% bytes -define(ENTRY_SIZE, 32). %% bytes -%% The file_handle_cache module tracks reservations at -%% the level of the process. This means we cannot -%% handle them independently in the store and index. -%% Because the index may reserve more FDs than the -%% store the index becomes responsible for this and -%% will always reserve at least 2 FDs, and release -%% everything when terminating. --define(STORE_FD_RESERVATIONS, 2). - -include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("kernel/include/file.hrl"). - %% Set to true to get an awful lot of debug logs. -if(false). -define(DEBUG(X,Y), logger:debug("~0p: " ++ X, [?FUNCTION_NAME|Y])). @@ -196,9 +185,10 @@ init_for_conversion(#resource{ virtual_host = VHost } = Name, OnSyncFun, OnSyncM init1(Name, Dir, OnSyncFun, OnSyncMsgFun) -> ensure_queue_name_stub_file(Name, Dir), + DirBin = rabbit_file:filename_to_binary(Dir), #qi{ queue_name = Name, - dir = rabbit_file:filename_to_binary(Dir), + dir = << DirBin/binary, "/" >>, on_sync = OnSyncFun, on_sync_msg = OnSyncMsgFun }. @@ -539,7 +529,6 @@ terminate(VHost, Terms, State0 = #qi { dir = Dir, ok = file:sync(Fd), ok = file:close(Fd) end, OpenFds), - file_handle_cache:release_reservation(), %% Write recovery terms for faster recovery. _ = rabbit_recovery_terms:store(VHost, filename:basename(rabbit_file:binary_to_filename(Dir)), @@ -556,7 +545,6 @@ delete_and_terminate(State = #qi { dir = Dir, _ = maps:map(fun(_, Fd) -> ok = file:close(Fd) end, OpenFds), - file_handle_cache:release_reservation(), %% Erase the data on disk. ok = erase_index_dir(rabbit_file:binary_to_filename(Dir)), State#qi{ segments = #{}, @@ -627,18 +615,9 @@ new_segment_file(Segment, SegmentEntryCount, State = #qi{ segments = Segments }) %% using too many FDs when the consumer lags a lot. We %% limit at 4 because we try to keep up to 2 for reading %% and 2 for writing. -reduce_fd_usage(SegmentToOpen, State = #qi{ fds = OpenFds }) +reduce_fd_usage(_SegmentToOpen, State = #qi{ fds = OpenFds }) when map_size(OpenFds) < 4 -> - %% The only case where we need to update reservations is - %% when we are opening a segment that wasn't already open, - %% and we are not closing another segment at the same time. 
- case OpenFds of - #{SegmentToOpen := _} -> - State; - _ -> - file_handle_cache:set_reservation(?STORE_FD_RESERVATIONS + map_size(OpenFds) + 1), - State - end; + State; reduce_fd_usage(SegmentToOpen, State = #qi{ fds = OpenFds0 }) -> case OpenFds0 of #{SegmentToOpen := _} -> @@ -720,7 +699,6 @@ flush_buffer(State0 = #qi { write_buffer = WriteBuffer0, {Fd, FoldState} = get_fd_for_segment(Segment, FoldState1), LocBytes = flush_buffer_consolidate(lists:sort(LocBytes0), 1), ok = file:pwrite(Fd, LocBytes), - file_handle_cache_stats:update(queue_index_write), FoldState end, State0, Writes), %% Update the cache. If we are flushing the entire write buffer, @@ -869,7 +847,6 @@ delete_segment(Segment, State0 = #qi{ fds = OpenFds0 }) -> State = case maps:take(Segment, OpenFds0) of {Fd, OpenFds} -> ok = file:close(Fd), - file_handle_cache:set_reservation(?STORE_FD_RESERVATIONS + map_size(OpenFds)), State0#qi{ fds = OpenFds }; error -> State0 @@ -983,7 +960,6 @@ read_from_disk(SeqIdsToRead0, State0 = #qi{ write_buffer = WriteBuffer }, Acc0) ReadSize = (LastSeqId - FirstSeqId + 1) * ?ENTRY_SIZE, case get_fd(FirstSeqId, State0) of {Fd, OffsetForSeqId, State} -> - file_handle_cache_stats:update(queue_index_read), %% When reading further than the end of a partial file, %% file:pread/3 will return what it could read. case file:pread(Fd, OffsetForSeqId, ReadSize) of @@ -1077,7 +1053,7 @@ sync(State0 = #qi{ confirms = Confirms, end, State#qi{ confirms = sets:new([{version,2}]) }. --spec needs_sync(state()) -> 'false'. +-spec needs_sync(state()) -> 'false' | 'confirms'. needs_sync(State = #qi{ confirms = Confirms }) -> ?DEBUG("~0p", [State]), @@ -1126,8 +1102,11 @@ queue_index_walker({next, Gatherer}) when is_pid(Gatherer) -> empty -> ok = gatherer:stop(Gatherer), finished; + %% From v1 index walker. @todo Remove when no longer possible to convert from v1. {value, {MsgId, Count}} -> - {MsgId, Count, {next, Gatherer}} + {MsgId, Count, {next, Gatherer}}; + {value, MsgIds} -> + {MsgIds, {next, Gatherer}} end. queue_index_walker_reader(#resource{ virtual_host = VHost } = Name, Gatherer) -> @@ -1154,27 +1133,30 @@ queue_index_walker_segment(F, Gatherer) -> {ok, <>} -> - queue_index_walker_segment(Fd, Gatherer, 0, ToSeqId - FromSeqId); + queue_index_walker_segment(Fd, Gatherer, 0, ToSeqId - FromSeqId, []); _ -> %% Invalid segment file. Skip. ok end, ok = file:close(Fd). -queue_index_walker_segment(_, _, N, N) -> +queue_index_walker_segment(_, Gatherer, N, N, Acc) -> %% We reached the end of the segment file. + gatherer:sync_in(Gatherer, Acc), ok; -queue_index_walker_segment(Fd, Gatherer, N, Total) -> +queue_index_walker_segment(Fd, Gatherer, N, Total, Acc) -> case file:read(Fd, ?ENTRY_SIZE) of %% We found a non-ack persistent entry. Gather it. {ok, <<1,_:7,1:1,_,1,Id:16/binary,_/bits>>} -> - gatherer:sync_in(Gatherer, {Id, 1}), - queue_index_walker_segment(Fd, Gatherer, N + 1, Total); + queue_index_walker_segment(Fd, Gatherer, N + 1, Total, [Id|Acc]); %% We found an ack, a transient entry or a non-entry. Skip it. {ok, _} -> - queue_index_walker_segment(Fd, Gatherer, N + 1, Total); + queue_index_walker_segment(Fd, Gatherer, N + 1, Total, Acc); %% We reached the end of a partial segment file. + eof when Acc =:= [] -> + ok; eof -> + gatherer:sync_in(Gatherer, Acc), ok end. @@ -1253,7 +1235,7 @@ segment_entry_count() -> %% A value lower than the max write_buffer size results in nothing needing %% to be written to disk as long as the consumer consumes as fast as the %% producer produces. 
- persistent_term:get({rabbit, classic_queue_index_v2_segment_entry_count}, 4096). + persistent_term:get(classic_queue_index_v2_segment_entry_count, 4096). %% Note that store files will also be removed if there are any in this directory. %% Currently the v2 per-queue store expects this function to remove its own files. @@ -1277,8 +1259,8 @@ queue_name_to_dir_name(#resource { kind = queue, rabbit_misc:format("~.36B", [Num]). segment_file(Segment, #qi{ dir = Dir }) -> - filename:join(rabbit_file:binary_to_filename(Dir), - integer_to_list(Segment) ++ ?SEGMENT_EXTENSION). + N = integer_to_binary(Segment), + <>. highest_continuous_seq_id([SeqId|Tail], EndSeqId) when (1 + SeqId) =:= EndSeqId -> diff --git a/deps/rabbit/src/rabbit_classic_queue_store_v2.erl b/deps/rabbit/src/rabbit_classic_queue_store_v2.erl index 247101a113da..a98e666f853c 100644 --- a/deps/rabbit/src/rabbit_classic_queue_store_v2.erl +++ b/deps/rabbit/src/rabbit_classic_queue_store_v2.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% The classic queue store works as follow: @@ -41,11 +41,6 @@ %% need to look into the store to discard them. Messages on disk %% will be dropped at the same time as the index deletes the %% corresponding segment file. -%% -%% The file_handle_cache reservations are done by the v2 index -%% because they are handled at a pid level. Since we are using -%% up to 2 FDs in this module we make the index reserve 2 extra -%% FDs. -module(rabbit_classic_queue_store_v2). @@ -118,7 +113,8 @@ init(#resource{ virtual_host = VHost } = Name) -> ?DEBUG("~0p", [Name]), VHostDir = rabbit_vhost:msg_store_dir_path(VHost), Dir = rabbit_classic_queue_index_v2:queue_dir(VHostDir, Name), - #qs{dir = rabbit_file:filename_to_binary(Dir)}. + DirBin = rabbit_file:filename_to_binary(Dir), + #qs{dir = << DirBin/binary, "/" >>}. -spec terminate(State) -> State when State::state(). @@ -560,16 +556,16 @@ delete_segments(Segments, State0 = #qs{ write_buffer = WriteBuffer0, segment_entry_count() -> %% We use the same value as the index. - persistent_term:get({rabbit, classic_queue_index_v2_segment_entry_count}, 4096). + persistent_term:get(classic_queue_index_v2_segment_entry_count, 4096). max_cache_size() -> - persistent_term:get({rabbit, classic_queue_store_v2_max_cache_size}, 512000). + persistent_term:get(classic_queue_store_v2_max_cache_size, 512000). check_crc32() -> - persistent_term:get({rabbit, classic_queue_store_v2_check_crc32}, true). + persistent_term:get(classic_queue_store_v2_check_crc32, true). %% Same implementation as rabbit_classic_queue_index_v2:segment_file/2, %% but with a different state record. -segment_file(Segment, #qs{ dir = Dir }) -> - filename:join(rabbit_file:binary_to_filename(Dir), - integer_to_list(Segment) ++ ?SEGMENT_EXTENSION). +segment_file(Segment, #qs{dir = Dir}) -> + N = integer_to_binary(Segment), + <>. diff --git a/deps/rabbit/src/rabbit_client_sup.erl b/deps/rabbit/src/rabbit_client_sup.erl index f24a2c99213a..96ff41c4528c 100644 --- a/deps/rabbit/src/rabbit_client_sup.erl +++ b/deps/rabbit/src/rabbit_client_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_client_sup). diff --git a/deps/rabbit/src/rabbit_confirms.erl b/deps/rabbit/src/rabbit_confirms.erl index b950e6df67eb..2ea00bc9cb39 100644 --- a/deps/rabbit/src/rabbit_confirms.erl +++ b/deps/rabbit/src/rabbit_confirms.erl @@ -45,7 +45,7 @@ insert(SeqNo, QNames, #resource{kind = exchange} = XName, when is_integer(SeqNo) andalso is_list(QNames) andalso not is_map_key(SeqNo, U0) -> - U = U0#{SeqNo => {XName, maps:from_list([{Q, ok} || Q <- QNames])}}, + U = U0#{SeqNo => {XName, maps:from_keys(QNames, ok)}}, S = case S0 of undefined -> SeqNo; _ -> S0 @@ -58,20 +58,18 @@ insert(SeqNo, QNames, #resource{kind = exchange} = XName, confirm(SeqNos, QName, #?MODULE{smallest = Smallest0, unconfirmed = U0} = State) when is_list(SeqNos) -> - {Confirmed, U} = lists:foldr( - fun (SeqNo, Acc) -> - confirm_one(SeqNo, QName, Acc) - end, {[], U0}, SeqNos), - %% check if smallest is in Confirmed - %% TODO: this can be optimised by checking in the preceeding foldr - Smallest = - case lists:any(fun ({S, _}) -> S == Smallest0 end, Confirmed) of - true -> - %% work out new smallest - next_smallest(Smallest0, U); - false -> - Smallest0 - end, + {Confirmed, ConfirmedSmallest, U} = + lists:foldl( + fun (SeqNo, Acc) -> + confirm_one(SeqNo, QName, Smallest0, Acc) + end, {[], false, U0}, SeqNos), + Smallest = case ConfirmedSmallest of + true -> + %% work out new smallest + next_smallest(Smallest0, U); + false -> + Smallest0 + end, {Confirmed, State#?MODULE{smallest = Smallest, unconfirmed = U}}. @@ -124,17 +122,21 @@ is_empty(State) -> %% INTERNAL -confirm_one(SeqNo, QName, {Acc, U0}) -> +confirm_one(SeqNo, QName, Smallest, {Acc, ConfirmedSmallest0, U0}) -> case maps:take(SeqNo, U0) of {{XName, QS}, U1} when is_map_key(QName, QS) andalso map_size(QS) == 1 -> %% last queue confirm - {[{SeqNo, XName} | Acc], U1}; + ConfirmedSmallest = case SeqNo of + Smallest -> true; + _ -> ConfirmedSmallest0 + end, + {[{SeqNo, XName} | Acc], ConfirmedSmallest, U1}; {{XName, QS}, U1} -> - {Acc, U1#{SeqNo => {XName, maps:remove(QName, QS)}}}; + {Acc, ConfirmedSmallest0, U1#{SeqNo => {XName, maps:remove(QName, QS)}}}; error -> - {Acc, U0} + {Acc, ConfirmedSmallest0, U0} end. next_smallest(_S, U) when map_size(U) == 0 -> diff --git a/deps/rabbit/src/rabbit_connection_helper_sup.erl b/deps/rabbit/src/rabbit_connection_helper_sup.erl index 82141f04155d..7d665f2ab2be 100644 --- a/deps/rabbit/src/rabbit_connection_helper_sup.erl +++ b/deps/rabbit/src/rabbit_connection_helper_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_connection_helper_sup). @@ -18,7 +18,7 @@ -behaviour(supervisor). --export([start_link/0]). +-export([start_link/1]). 
-export([ start_channel_sup_sup/1, start_queue_collector/2 @@ -30,10 +30,10 @@ %%---------------------------------------------------------------------------- --spec start_link() -> rabbit_types:ok_pid_or_error(). - -start_link() -> - supervisor:start_link(?MODULE, []). +-spec start_link(supervisor:sup_flags()) -> + supervisor:startlink_ret(). +start_link(SupFlags) -> + supervisor:start_link(?MODULE, SupFlags). -spec start_channel_sup_sup(pid()) -> rabbit_types:ok_pid_or_error(). @@ -62,10 +62,6 @@ start_queue_collector(SupPid, Identity) -> %%---------------------------------------------------------------------------- -init([]) -> +init(SupFlags) -> ?LG_PROCESS_TYPE(connection_helper_sup), - SupFlags = #{strategy => one_for_one, - intensity => 10, - period => 10, - auto_shutdown => any_significant}, {ok, {SupFlags, []}}. diff --git a/deps/rabbit/src/rabbit_connection_sup.erl b/deps/rabbit/src/rabbit_connection_sup.erl index f4061af9adb0..00c003c8d136 100644 --- a/deps/rabbit/src/rabbit_connection_sup.erl +++ b/deps/rabbit/src/rabbit_connection_sup.erl @@ -2,12 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_connection_sup). -%% Supervisor for a (network) AMQP 0-9-1 client connection. +%% Supervisor for a (network) AMQP client connection. %% %% Supervises %% @@ -19,7 +19,10 @@ -behaviour(supervisor). -behaviour(ranch_protocol). --export([start_link/3, reader/1]). +-export([start_link/3, + reader/1, + remove_connection_helper_sup/2 + ]). -export([init/1]). @@ -27,7 +30,7 @@ %%---------------------------------------------------------------------------- --spec start_link(any(), module(), any()) -> +-spec start_link(ranch:ref(), module(), any()) -> {'ok', pid(), pid()}. start_link(Ref, _Transport, _Opts) -> @@ -42,25 +45,38 @@ start_link(Ref, _Transport, _Opts) -> %% the queue collector process, since these must not be siblings of the %% reader due to the potential for deadlock if they are added/restarted %% whilst the supervision tree is shutting down. 
- {ok, HelperSup} = + ChildSpec = #{restart => transient, + significant => true, + shutdown => infinity, + type => supervisor}, + {ok, HelperSup091} = supervisor:start_child( - SupPid, - #{ - id => helper_sup, - start => {rabbit_connection_helper_sup, start_link, []}, - restart => transient, - significant => true, - shutdown => infinity, - type => supervisor, - modules => [rabbit_connection_helper_sup] - } - ), + SupPid, + ChildSpec#{ + id => helper_sup_amqp_091, + start => {rabbit_connection_helper_sup, start_link, + [#{strategy => one_for_one, + intensity => 10, + period => 10, + auto_shutdown => any_significant}]}} + ), + {ok, HelperSup10} = + supervisor:start_child( + SupPid, + ChildSpec#{ + id => helper_sup_amqp_10, + start => {rabbit_connection_helper_sup, start_link, + [#{strategy => one_for_all, + intensity => 0, + period => 1, + auto_shutdown => any_significant}]}} + ), {ok, ReaderPid} = supervisor:start_child( SupPid, #{ id => reader, - start => {rabbit_reader, start_link, [HelperSup, Ref]}, + start => {rabbit_reader, start_link, [{HelperSup091, HelperSup10}, Ref]}, restart => transient, significant => true, shutdown => ?WORKER_WAIT, @@ -71,10 +87,14 @@ start_link(Ref, _Transport, _Opts) -> {ok, SupPid, ReaderPid}. -spec reader(pid()) -> pid(). - reader(Pid) -> hd(rabbit_misc:find_child(Pid, reader)). +-spec remove_connection_helper_sup(pid(), helper_sup_amqp_091 | helper_sup_amqp_10) -> ok. +remove_connection_helper_sup(ConnectionSupPid, ConnectionHelperId) -> + ok = supervisor:terminate_child(ConnectionSupPid, ConnectionHelperId), + ok = supervisor:delete_child(ConnectionSupPid, ConnectionHelperId). + %%-------------------------------------------------------------------------- init([]) -> diff --git a/deps/rabbit/src/rabbit_connection_tracking.erl b/deps/rabbit/src/rabbit_connection_tracking.erl index 740bd6f2575e..da906fa41144 100644 --- a/deps/rabbit/src/rabbit_connection_tracking.erl +++ b/deps/rabbit/src/rabbit_connection_tracking.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_connection_tracking). @@ -108,7 +108,7 @@ handle_cast({vhost_deleted, Details}) -> delete_tracked_connection_vhost_entry, [VHost]), rabbit_log_connection:info("Closing all connections in vhost '~ts' because it's being deleted", [VHost]), shutdown_tracked_items( - rabbit_connection_tracking:list(VHost), + list(VHost), rabbit_misc:format("vhost '~ts' is deleted", [VHost])); %% Note: under normal circumstances this will be called immediately %% after the vhost_deleted above. 
Therefore we should be careful about @@ -120,16 +120,16 @@ handle_cast({vhost_down, Details}) -> " because the vhost is stopping", [VHost, Node]), shutdown_tracked_items( - rabbit_connection_tracking:list_on_node(Node, VHost), + list_on_node(Node, VHost), rabbit_misc:format("vhost '~ts' is down", [VHost])); handle_cast({user_deleted, Details}) -> Username = pget(name, Details), %% Schedule user entry deletion, allowing time for connections to close _ = timer:apply_after(?TRACKING_EXECUTION_TIMEOUT, ?MODULE, delete_tracked_connection_user_entry, [Username]), - rabbit_log_connection:info("Closing all connections from user '~ts' because it's being deleted", [Username]), + rabbit_log_connection:info("Closing all connections for user '~ts' because the user is being deleted", [Username]), shutdown_tracked_items( - rabbit_connection_tracking:list_of_user(Username), + list_of_user(Username), rabbit_misc:format("user '~ts' is deleted", [Username])). -spec register_tracked(rabbit_types:tracked_connection()) -> ok. diff --git a/deps/rabbit/src/rabbit_connection_tracking_handler.erl b/deps/rabbit/src/rabbit_connection_tracking_handler.erl index 589232ff4525..211bbff46c89 100644 --- a/deps/rabbit/src/rabbit_connection_tracking_handler.erl +++ b/deps/rabbit/src/rabbit_connection_tracking_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_connection_tracking_handler). diff --git a/deps/rabbit/src/rabbit_control_pbe.erl b/deps/rabbit/src/rabbit_control_pbe.erl index 2f772acad65e..96cbf26458df 100644 --- a/deps/rabbit/src/rabbit_control_pbe.erl +++ b/deps/rabbit/src/rabbit_control_pbe.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_control_pbe). diff --git a/deps/rabbit/src/rabbit_core_ff.erl b/deps/rabbit/src/rabbit_core_ff.erl index 05c19f2c76ad..6501ddb8da65 100644 --- a/deps/rabbit/src/rabbit_core_ff.erl +++ b/deps/rabbit/src/rabbit_core_ff.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_core_ff). @@ -101,7 +101,7 @@ {restart_streams, #{desc => "Support for restarting streams with optional preferred next leader argument." "Used to implement stream leader rebalancing", - stability => stable, + stability => required, depends_on => [stream_queue] }}). @@ -109,20 +109,80 @@ {stream_sac_coordinator_unblock_group, #{desc => "Bug fix to unblock a group of consumers in a super stream partition", doc_url => "https://github.com/rabbitmq/rabbitmq-server/issues/7743", - stability => stable, + stability => required, depends_on => [stream_single_active_consumer] }}). 
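Several flags above move from stable to required. A required flag is always enabled, so runtime checks against it, such as the one added to capabilities/0 earlier in this patch for the 'rabbitmq_4.0.0' flag (declared further down in this file), eventually become dead branches that can be deleted. A sketch of that runtime check, wrapped in a hypothetical helper that is not part of the patch:

    %% Sketch: gate optional behaviour on a feature flag at run time.
    %% `extra_queue_args/0' is a made-up helper.
    extra_queue_args() ->
        case rabbit_feature_flags:is_enabled('rabbitmq_4.0.0') of
            true  -> [<<"x-queue-leader-locator">>];
            false -> []
        end.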
-rabbit_feature_flag( {stream_filtering, #{desc => "Support for stream filtering.", - stability => stable, + stability => required, depends_on => [stream_queue] }}). -rabbit_feature_flag( {message_containers, #{desc => "Message containers.", - stability => stable, + stability => required, depends_on => [feature_flags_v2] }}). + +-rabbit_feature_flag( + {khepri_db, + #{desc => "Use the new Khepri Raft-based metadata store", + doc_url => "", %% TODO + stability => experimental, + depends_on => [feature_flags_v2, + direct_exchange_routing_v2, + maintenance_mode_status, + user_limits, + virtual_host_metadata, + tracking_records_in_ets, + listener_records_in_ets, + + %% Deprecated features. + classic_queue_mirroring, + ram_node_type], + callbacks => #{enable => + {rabbit_khepri, khepri_db_migration_enable}, + post_enable => + {rabbit_khepri, khepri_db_migration_post_enable}} + }}). + +-rabbit_feature_flag( + {stream_update_config_command, + #{desc => "A new internal command that is used to update streams as " + "part of a policy.", + stability => required, + depends_on => [stream_queue] + }}). + +-rabbit_feature_flag( + {quorum_queue_non_voters, + #{desc => + "Allows new quorum queue members to be added as non voters initially.", + stability => stable, + depends_on => [quorum_queue] + }}). + +-rabbit_feature_flag( + {message_containers_deaths_v2, + #{desc => "Bug fix for dead letter cycle detection", + doc_url => "https://github.com/rabbitmq/rabbitmq-server/issues/11159", + stability => stable, + depends_on => [message_containers] + }}). + +%% We bundle the following separate concerns (which could have been separate feature flags) +%% into a single feature flag for better user experience: +%% 1. credit API v2 between classic / quorum queue client and classic / quorum queue server +%% 2. cancel API v2 betweeen classic queue client and classic queue server +%% 3. more compact quorum queue commands in quorum queue v4 +%% 4. store messages in message containers AMQP 1.0 disk format v1 +%% 5. support queue leader locator in classic queues +-rabbit_feature_flag( + {'rabbitmq_4.0.0', + #{desc => "Allows rolling upgrades from 3.13.x to 4.0.x", + stability => stable, + depends_on => [message_containers] + }}). diff --git a/deps/rabbit/src/rabbit_core_metrics_gc.erl b/deps/rabbit/src/rabbit_core_metrics_gc.erl index b797d08c1b6f..792dcb790ab2 100644 --- a/deps/rabbit/src/rabbit_core_metrics_gc.erl +++ b/deps/rabbit/src/rabbit_core_metrics_gc.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_core_metrics_gc). @@ -92,14 +92,17 @@ gc_leader_data(Id, Table, GbSet) -> gc_global_queues() -> GbSet = gb_sets:from_list(rabbit_amqqueue:list_names()), gc_process_and_entity(channel_queue_metrics, GbSet), + gc_entity(queue_delivery_metrics, GbSet), gc_process_and_entity(consumer_created, GbSet), ExchangeGbSet = gb_sets:from_list(rabbit_exchange:list_names()), - gc_process_and_entities(channel_queue_exchange_metrics, GbSet, ExchangeGbSet). + gc_process_and_entities(channel_queue_exchange_metrics, GbSet, ExchangeGbSet), + gc_entities(queue_exchange_metrics, GbSet, ExchangeGbSet). 
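The metrics GC changes above add the per-entity tables (exchange_metrics, queue_delivery_metrics, queue_exchange_metrics) to the sweep. The sweep keeps a row only while the owning queue or exchange name is still present in the gb_sets snapshot of live names; a simplified sketch of the per-row test (the real gc_entity/4 differs in its fold plumbing):

    %% Simplified sketch, not the actual helper: drop the metrics row when the
    %% entity it belongs to no longer exists.
    gc_row(Id, Key, Table, AliveNames) ->
        case gb_sets:is_element(Id, AliveNames) of
            true  -> ok;                              %% entity still exists
            false -> true = ets:delete(Table, Key), ok
        end.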
gc_exchanges() -> Exchanges = rabbit_exchange:list_names(), GbSet = gb_sets:from_list(Exchanges), - gc_process_and_entity(channel_exchange_metrics, GbSet). + gc_process_and_entity(channel_exchange_metrics, GbSet), + gc_entity(exchange_metrics, GbSet). gc_nodes() -> Nodes = rabbit_nodes:list_members(), @@ -153,6 +156,12 @@ gc_entity(Table, GbSet) -> ({Id = Key, _, _}, none) -> gc_entity(Id, Table, Key, GbSet); ({Id = Key, _, _, _, _}, none) -> + gc_entity(Id, Table, Key, GbSet); + ({Id = Key, _, _, _, _, _}, none) + when Table == exchange_metrics -> + gc_entity(Id, Table, Key, GbSet); + ({Id = Key, _, _, _, _, _, _, _, _}, none) + when Table == queue_delivery_metrics -> gc_entity(Id, Table, Key, GbSet) end, none, Table). @@ -188,6 +197,13 @@ gc_process_and_entity(Id, Pid, Table, Key, GbSet) -> none end. +gc_entities(Table, QueueGbSet, ExchangeGbSet) -> + ets:foldl(fun({{QueueId, ExchangeId} = Key, _, _}, none) + when Table == queue_exchange_metrics -> + gc_entity(QueueId, Table, Key, QueueGbSet), + gc_entity(ExchangeId, Table, Key, ExchangeGbSet) + end, none, Table). + gc_process_and_entities(Table, QueueGbSet, ExchangeGbSet) -> ets:foldl(fun({{Pid, {Q, X}} = Key, _, _}, none) -> gc_process(Pid, Table, Key), diff --git a/deps/rabbit/src/rabbit_credential_validation.erl b/deps/rabbit/src/rabbit_credential_validation.erl index 8f41b48d4847..e8869d0f316a 100644 --- a/deps/rabbit/src/rabbit_credential_validation.erl +++ b/deps/rabbit/src/rabbit_credential_validation.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_credential_validation). --include_lib("rabbit_common/include/rabbit.hrl"). - %% used for backwards compatibility -define(DEFAULT_BACKEND, rabbit_credential_validator_accept_everything). diff --git a/deps/rabbit/src/rabbit_credential_validator.erl b/deps/rabbit/src/rabbit_credential_validator.erl index 52840aba629e..00c4913f31c4 100644 --- a/deps/rabbit/src/rabbit_credential_validator.erl +++ b/deps/rabbit/src/rabbit_credential_validator.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_credential_validator). --include_lib("rabbit_common/include/rabbit.hrl"). - %% Validates a password. Used by `rabbit_auth_backend_internal`. %% %% Possible return values: diff --git a/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl b/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl index 91bd2049a3ad..c7c693dcfa5f 100644 --- a/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl +++ b/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_credential_validator_accept_everything). --include_lib("rabbit_common/include/rabbit.hrl"). - -behaviour(rabbit_credential_validator). %% diff --git a/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl b/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl index 730071b7b289..8cf752d8f671 100644 --- a/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl +++ b/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_credential_validator_min_password_length). --include_lib("rabbit_common/include/rabbit.hrl"). - -behaviour(rabbit_credential_validator). %% accommodates default (localhost-only) user credentials, diff --git a/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl b/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl index 38a83440cd84..1cc3f952cf75 100644 --- a/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl +++ b/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% @@ -10,8 +10,6 @@ %% password against a pre-configured regular expression. -module(rabbit_credential_validator_password_regexp). --include_lib("rabbit_common/include/rabbit.hrl"). - -behaviour(rabbit_credential_validator). %% diff --git a/deps/rabbit/src/rabbit_cuttlefish.erl b/deps/rabbit/src/rabbit_cuttlefish.erl index a1326fb94a20..f43b4a1f4745 100644 --- a/deps/rabbit/src/rabbit_cuttlefish.erl +++ b/deps/rabbit/src/rabbit_cuttlefish.erl @@ -2,14 +2,17 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_cuttlefish). -export([ aggregate_props/2, - aggregate_props/3 + aggregate_props/3, + + optionally_tagged_binary/2, + optionally_tagged_string/2 ]). -type keyed_props() :: [{binary(), [{binary(), any()}]}]. @@ -41,3 +44,25 @@ aggregate_props(Conf, Prefix, KeyFun) -> FlatList ) ). + +optionally_tagged_binary(Key, Conf) -> + case cuttlefish:conf_get(Key, Conf) of + undefined -> cuttlefish:unset(); + {encrypted, Bin} when is_binary(Bin) -> {encrypted, Bin}; + {_, Bin} when is_binary(Bin) -> {encrypted, Bin}; + {encrypted, Str} when is_list(Str) -> {encrypted, list_to_binary(Str)}; + {_, Str} when is_list(Str) -> {encrypted, list_to_binary(Str)}; + Bin when is_binary(Bin) -> Bin; + Str when is_list(Str) -> list_to_binary(Str) + end. 
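optionally_tagged_binary/2 above (and optionally_tagged_string/2, which follows) normalises a cuttlefish value that may arrive either bare or wrapped in an {encrypted, Value} tuple, converting between list and binary as needed. A sketch of how a schema translation might use the binary variant; the "myapp.secret" key and its mapping are hypothetical and only the helper call is taken from the patch:

    %% Sketch of a cuttlefish schema translation that funnels a possibly
    %% {encrypted, ...}-tagged value through the new helper.
    {translation, "myapp.secret",
     fun(Conf) ->
         rabbit_cuttlefish:optionally_tagged_binary("myapp.secret", Conf)
     end}.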
+ +optionally_tagged_string(Key, Conf) -> + case cuttlefish:conf_get(Key, Conf) of + undefined -> cuttlefish:unset(); + {encrypted, Str} when is_list(Str) -> {encrypted, Str}; + {_, Str} when is_list(Str) -> {encrypted, Str}; + {encrypted, Bin} when is_binary(Bin) -> {encrypted, binary_to_list(Bin)}; + {_, Bin} when is_binary(Bin) -> {encrypted, binary_to_list(Bin)}; + Str when is_list(Str) -> Str; + Bin when is_binary(Bin) -> binary_to_list(Bin) + end. \ No newline at end of file diff --git a/deps/rabbit/src/rabbit_db.erl b/deps/rabbit/src/rabbit_db.erl index 3dbb681b533c..faa4dd28e6b3 100644 --- a/deps/rabbit/src/rabbit_db.erl +++ b/deps/rabbit/src/rabbit_db.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db). @@ -12,17 +12,23 @@ -include_lib("rabbit_common/include/logging.hrl"). +-define(PT_KEY_INIT_FINISHED, {?MODULE, node(), initialisation_finished}). + -export([init/0, reset/0, force_reset/0, force_load_on_next_boot/0, is_virgin_node/0, is_virgin_node/1, dir/0, - ensure_dir_exists/0]). + ensure_dir_exists/0, + is_init_finished/0, + clear_init_finished/0]). %% Exported to be used by various rabbit_db_* modules -export([ - list_in_mnesia/2 + list_in_mnesia/2, + list_in_khepri/1, + list_in_khepri/2 ]). %% Default timeout for operations on remote nodes. @@ -43,19 +49,34 @@ init() -> #{domain => ?RMQLOG_DOMAIN_DB}), ensure_dir_exists(), - rabbit_peer_discovery:log_configured_backend(), rabbit_peer_discovery:maybe_init(), + rabbit_peer_discovery:maybe_register(), pre_init(IsVirgin), - Ret = init_using_mnesia(), + case IsVirgin of + true -> + %% At this point, the database backend could change if the node + %% joins a cluster and that cluster uses a different database. + ?LOG_INFO( + "DB: virgin node -> run peer discovery", + #{domain => ?RMQLOG_DOMAIN_DB}), + rabbit_peer_discovery:sync_desired_cluster(); + false -> + ok + end, + + Ret = case rabbit_khepri:is_enabled() of + true -> init_using_khepri(); + false -> init_using_mnesia() + end, case Ret of ok -> ?LOG_DEBUG( "DB: initialization successeful", #{domain => ?RMQLOG_DOMAIN_DB}), - post_init(IsVirgin), + init_finished(), ok; Error -> @@ -70,12 +91,6 @@ pre_init(IsVirgin) -> OtherMembers = rabbit_nodes:nodes_excl_me(Members), rabbit_db_cluster:ensure_feature_flags_are_in_sync(OtherMembers, IsVirgin). -post_init(false = _IsVirgin) -> - rabbit_peer_discovery:maybe_register(); -post_init(true = _IsVirgin) -> - %% Registration handled by rabbit_peer_discovery. - ok. - init_using_mnesia() -> ?LOG_DEBUG( "DB: initialize Mnesia", @@ -84,12 +99,39 @@ init_using_mnesia() -> ?assertEqual(rabbit:data_dir(), mnesia_dir()), rabbit_sup:start_child(mnesia_sync). +init_using_khepri() -> + ?LOG_DEBUG( + "DB: initialize Khepri", + #{domain => ?RMQLOG_DOMAIN_DB}), + rabbit_khepri:init(). + +init_finished() -> + %% Used during initialisation by rabbit_logger_exchange_h.erl + %% If an exchange logger is configured, it needs to declare the + %% exchange. For this, it requires the metadata store to be + %% initialised. The initialisation happens on a rabbit boot step, + %% after the second phase of the prelaunch where the logger is + %% configured. 
+ %% Using this persistent term the logger exchange can delay + %% declaring the exchange until the metadata store is ready. + persistent_term:put(?PT_KEY_INIT_FINISHED, true). + +is_init_finished() -> + persistent_term:get(?PT_KEY_INIT_FINISHED, false). + +clear_init_finished() -> + _ = persistent_term:erase(?PT_KEY_INIT_FINISHED), + ok. + -spec reset() -> Ret when Ret :: ok. %% @doc Resets the database and the node. reset() -> - ok = reset_using_mnesia(), + ok = case rabbit_khepri:is_enabled() of + true -> reset_using_khepri(); + false -> reset_using_mnesia() + end, post_reset(). reset_using_mnesia() -> @@ -98,12 +140,21 @@ reset_using_mnesia() -> #{domain => ?RMQLOG_DOMAIN_DB}), rabbit_mnesia:reset(). +reset_using_khepri() -> + ?LOG_DEBUG( + "DB: resetting node (using Khepri)", + #{domain => ?RMQLOG_DOMAIN_DB}), + rabbit_khepri:reset(). + -spec force_reset() -> Ret when Ret :: ok. %% @doc Resets the database and the node. force_reset() -> - ok = force_reset_using_mnesia(), + ok = case rabbit_khepri:is_enabled() of + true -> force_reset_using_khepri(); + false -> force_reset_using_mnesia() + end, post_reset(). force_reset_using_mnesia() -> @@ -112,6 +163,12 @@ force_reset_using_mnesia() -> #{domain => ?RMQLOG_DOMAIN_DB}), rabbit_mnesia:force_reset(). +force_reset_using_khepri() -> + ?LOG_DEBUG( + "DB: resetting node forcefully (using Khepri)", + #{domain => ?RMQLOG_DOMAIN_DB}), + rabbit_khepri:force_reset(). + -spec force_load_on_next_boot() -> Ret when Ret :: ok. %% @doc Requests that the database to be forcefully loaded during next boot. @@ -120,7 +177,14 @@ force_reset_using_mnesia() -> %% state, like if critical members are MIA. force_load_on_next_boot() -> - force_load_on_next_boot_using_mnesia(). + %% TODO force load using Khepri might need to be implemented for disaster + %% recovery scenarios where just a minority of nodes are accessible. + %% Potentially, it could also be replaced with a way to export all the + %% data. + case rabbit_khepri:is_enabled() of + true -> ok; + false -> force_load_on_next_boot_using_mnesia() + end. force_load_on_next_boot_using_mnesia() -> ?LOG_DEBUG( @@ -129,7 +193,18 @@ force_load_on_next_boot_using_mnesia() -> rabbit_mnesia:force_load_next_boot(). post_reset() -> - rabbit_feature_flags:reset_registry(), + rabbit_feature_flags:reset(), + + %% The cluster status files that RabbitMQ uses when Mnesia is the database + %% are initially created from rabbit_prelaunch_cluster. However, it will + %% only be done once the `rabbit` app is restarted. Meanwhile, they are + %% missing and the CLI or the testsuite may rely on them. Indeed, after + %% the reset, Mnesia is assumed to be the database and the cluster status + %% files should have been created the first time the application was + %% started already. + ThisNode = node(), + rabbit_node_monitor:write_cluster_status({[ThisNode], [ThisNode], []}), + ok. %% ------------------------------------------------------------------- @@ -145,11 +220,20 @@ post_reset() -> %% @see is_virgin_node/1. is_virgin_node() -> - is_virgin_node_using_mnesia(). + case rabbit_khepri:is_enabled() of + true -> is_virgin_node_using_khepri(); + false -> is_virgin_node_using_mnesia() + end. is_virgin_node_using_mnesia() -> rabbit_mnesia:is_virgin_node(). +is_virgin_node_using_khepri() -> + case rabbit_khepri:is_empty() of + {error, _} -> true; + IsEmpty -> IsEmpty + end. + -spec is_virgin_node(Node) -> IsVirgin | undefined when Node :: node(), IsVirgin :: boolean(). 
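init_finished/0 above stores a node-local persistent_term flag once the metadata store is usable; per the comment, rabbit_logger_exchange_h relies on it to delay declaring its exchange. A sketch of a caller polling that gate before touching the store; the retry loop, count and sleep interval are illustrative, not part of the patch:

    %% Sketch: wait for the metadata store before declaring resources.
    wait_for_metadata_store(0) ->
        {error, timeout};
    wait_for_metadata_store(RetriesLeft) ->
        case rabbit_db:is_init_finished() of
            true  -> ok;
            false -> timer:sleep(1000),
                     wait_for_metadata_store(RetriesLeft - 1)
        end.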
@@ -171,6 +255,10 @@ is_virgin_node(Node) when is_atom(Node) -> undefined end. +%% ------------------------------------------------------------------- +%% dir(). +%% ------------------------------------------------------------------- + -spec dir() -> DBDir when DBDir :: file:filename(). %% @doc Returns the directory where the database stores its data. @@ -178,11 +266,21 @@ is_virgin_node(Node) when is_atom(Node) -> %% @returns the directory path. dir() -> - mnesia_dir(). + case rabbit_khepri:is_enabled() of + true -> khepri_dir(); + false -> mnesia_dir() + end. mnesia_dir() -> rabbit_mnesia:dir(). +khepri_dir() -> + rabbit_khepri:dir(). + +%% ------------------------------------------------------------------- +%% ensure_dir_exists(). +%% ------------------------------------------------------------------- + -spec ensure_dir_exists() -> ok | no_return(). %% @doc Ensures the database directory exists. %% @@ -210,3 +308,25 @@ list_in_mnesia(Table, Match) -> %% Not dirty_match_object since that would not be transactional when used %% in a tx context mnesia:async_dirty(fun () -> mnesia:match_object(Table, Match, read) end). + +%% ------------------------------------------------------------------- +%% list_in_khepri(). +%% ------------------------------------------------------------------- + +-spec list_in_khepri(Path) -> Objects when + Path :: khepri_path:pattern(), + Objects :: [term()]. + +list_in_khepri(Path) -> + list_in_khepri(Path, #{}). + +-spec list_in_khepri(Path, Options) -> Objects when + Path :: khepri_path:pattern(), + Options :: map(), + Objects :: [term()]. + +list_in_khepri(Path, Options) -> + case rabbit_khepri:match(Path, Options) of + {ok, Map} -> maps:values(Map); + _ -> [] + end. diff --git a/deps/rabbit/src/rabbit_db_binding.erl b/deps/rabbit/src/rabbit_db_binding.erl index a320c9d44ca1..cc03de705412 100644 --- a/deps/rabbit/src/rabbit_db_binding.erl +++ b/deps/rabbit/src/rabbit_db_binding.erl @@ -2,11 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_binding). +-include_lib("khepri/include/khepri.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -export([exists/1, @@ -25,11 +26,23 @@ %% Exported to be used by various rabbit_db_* modules -export([ delete_for_destination_in_mnesia/2, + delete_for_destination_in_khepri/2, delete_all_for_exchange_in_mnesia/3, + delete_all_for_exchange_in_khepri/3, delete_transient_for_destination_in_mnesia/1, - has_for_source_in_mnesia/1 + has_for_source_in_mnesia/1, + has_for_source_in_khepri/1, + match_source_and_destination_in_khepri_tx/2 ]). +-export([ + khepri_route_path/1, + khepri_routes_path/0, + khepri_route_exchange_path/1 + ]). + +%% Recovery is only needed for transient entities. Once mnesia is removed, these +%% functions can be deleted -export([recover/0, recover/1]). %% For testing @@ -40,6 +53,8 @@ -define(MNESIA_SEMI_DURABLE_TABLE, rabbit_semi_durable_route). -define(MNESIA_REVERSE_TABLE, rabbit_reverse_route). -define(MNESIA_INDEX_TABLE, rabbit_index_route). +-define(KHEPRI_BINDINGS_PROJECTION, rabbit_khepri_bindings). +-define(KHEPRI_INDEX_ROUTE_PROJECTION, rabbit_khepri_index_route). %% ------------------------------------------------------------------- %% exists(). 
@@ -55,7 +70,10 @@ %% @private exists(Binding) -> - exists_in_mnesia(Binding). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> exists_in_mnesia(Binding) end, + khepri => fun() -> exists_in_khepri(Binding) end + }). exists_in_mnesia(Binding) -> binding_action_in_mnesia( @@ -85,6 +103,45 @@ not_found_or_absent_errs_in_mnesia(Names) -> Errs = [not_found_or_absent_in_mnesia(Name) || Name <- Names], rabbit_misc:const({error, {resources_missing, Errs}}). +exists_in_khepri(#binding{source = SrcName, + destination = DstName} = Binding) -> + Path = khepri_route_path(Binding), + case rabbit_khepri:transaction( + fun () -> + case {lookup_resource_in_khepri_tx(SrcName), + lookup_resource_in_khepri_tx(DstName)} of + {[_Src], [_Dst]} -> + case khepri_tx:get(Path) of + {ok, Set} -> + {ok, Set}; + _ -> + {ok, not_found} + end; + Errs -> + Errs + end + end, ro) of + {ok, not_found} -> false; + {ok, Set} -> sets:is_element(Binding, Set); + Errs -> not_found_errs_in_khepri(not_found(Errs, SrcName, DstName)) + end. + +lookup_resource_in_khepri_tx(#resource{kind = queue} = Name) -> + rabbit_db_queue:get_in_khepri_tx(Name); +lookup_resource_in_khepri_tx(#resource{kind = exchange} = Name) -> + rabbit_db_exchange:get_in_khepri_tx(Name). + +not_found_errs_in_khepri(Names) -> + Errs = [{not_found, Name} || Name <- Names], + {error, {resources_missing, Errs}}. + +not_found({[], [_]}, SrcName, _) -> + [SrcName]; +not_found({[_], []}, _, DstName) -> + [DstName]; +not_found({[], []}, SrcName, DstName) -> + [SrcName, DstName]. + %% ------------------------------------------------------------------- %% create(). %% ------------------------------------------------------------------- @@ -93,8 +150,9 @@ not_found_or_absent_errs_in_mnesia(Names) -> Binding :: rabbit_types:binding(), Src :: rabbit_types:binding_source(), Dst :: rabbit_types:binding_destination(), - ChecksFun :: fun((Src, Dst) -> ok | {error, Reason :: any()}), - Ret :: ok | {error, Reason :: any()}. + ChecksFun :: fun((Src, Dst) -> ok | {error, ChecksErrReason}), + ChecksErrReason :: any(), + Ret :: ok | {error, ChecksErrReason} | rabbit_khepri:timeout_error(). %% @doc Writes a binding if it doesn't exist already and passes the validation in %% `ChecksFun' i.e. exclusive access %% @@ -103,7 +161,10 @@ not_found_or_absent_errs_in_mnesia(Names) -> %% @private create(Binding, ChecksFun) -> - create_in_mnesia(Binding, ChecksFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> create_in_mnesia(Binding, ChecksFun) end, + khepri => fun() -> create_in_khepri(Binding, ChecksFun) end + }). create_in_mnesia(Binding, ChecksFun) -> binding_action_in_mnesia( @@ -130,6 +191,63 @@ create_in_mnesia(Binding, ChecksFun) -> end end, fun not_found_or_absent_errs_in_mnesia/1). 
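From here on, nearly every public function in rabbit_db_binding follows the same shape: rabbit_khepri:handle_fallback/1 receives one fun per backend, dispatching to the Mnesia fun while Mnesia is still the metadata store and to the Khepri fun once the khepri_db migration has taken over. The skeleton, with a hypothetical operation name standing in for exists/create/delete and friends:

    %% Sketch of the dual-backend dispatch used throughout this module.
    %% `do_something_in_mnesia/1' and `do_something_in_khepri/1' are placeholders.
    do_something(Arg) ->
        rabbit_khepri:handle_fallback(
          #{mnesia => fun() -> do_something_in_mnesia(Arg) end,
            khepri => fun() -> do_something_in_khepri(Arg) end}).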
+create_in_khepri(#binding{source = SrcName, + destination = DstName} = Binding, ChecksFun) -> + case {lookup_resource(SrcName), lookup_resource(DstName)} of + {[Src], [Dst]} -> + case ChecksFun(Src, Dst) of + ok -> + RoutePath = khepri_route_path(Binding), + MaybeSerial = rabbit_exchange:serialise_events(Src), + Serial = rabbit_khepri:transaction( + fun() -> + ExchangePath = khepri_route_exchange_path(SrcName), + ok = khepri_tx:put(ExchangePath, #{type => Src#exchange.type}), + case khepri_tx:get(RoutePath) of + {ok, Set} -> + case sets:is_element(Binding, Set) of + true -> + already_exists; + false -> + ok = khepri_tx:put(RoutePath, sets:add_element(Binding, Set)), + serial_in_khepri(MaybeSerial, Src) + end; + _ -> + ok = khepri_tx:put(RoutePath, sets:add_element(Binding, sets:new([{version, 2}]))), + serial_in_khepri(MaybeSerial, Src) + end + end, rw), + case Serial of + already_exists -> + ok; + {error, _} = Error -> + Error; + _ -> + rabbit_exchange:callback(Src, add_binding, Serial, [Src, Binding]) + end; + {error, _} = Err -> + Err + end; + Errs -> + not_found_errs_in_khepri(not_found(Errs, SrcName, DstName)) + end. + +lookup_resource(#resource{kind = queue} = Name) -> + case rabbit_db_queue:get(Name) of + {error, _} -> []; + {ok, Q} -> [Q] + end; +lookup_resource(#resource{kind = exchange} = Name) -> + case rabbit_db_exchange:get(Name) of + {ok, X} -> [X]; + _ -> [] + end. + +serial_in_khepri(false, _) -> + none; +serial_in_khepri(true, X) -> + rabbit_db_exchange:next_serial_in_khepri_tx(X). + %% ------------------------------------------------------------------- %% delete(). %% ------------------------------------------------------------------- @@ -138,15 +256,22 @@ create_in_mnesia(Binding, ChecksFun) -> Binding :: rabbit_types:binding(), Src :: rabbit_types:binding_source(), Dst :: rabbit_types:binding_destination(), - ChecksFun :: fun((Src, Dst) -> ok | {error, Reason :: any()}), - Ret :: ok | {ok, rabbit_binding:deletions()} | {error, Reason :: any()}. + ChecksFun :: fun((Src, Dst) -> ok | {error, ChecksErrReason}), + ChecksErrReason :: any(), + Ret :: ok | + {ok, rabbit_binding:deletions()} | + {error, ChecksErrReason} | + rabbit_khepri:timeout_error(). %% @doc Deletes a binding record from the database if it passes the validation in %% `ChecksFun'. It also triggers the deletion of auto-delete exchanges if needed. %% %% @private delete(Binding, ChecksFun) -> - delete_in_mnesia(Binding, ChecksFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(Binding, ChecksFun) end, + khepri => fun() -> delete_in_khepri(Binding, ChecksFun) end + }). delete_in_mnesia(Binding, ChecksFun) -> binding_action_in_mnesia( @@ -173,7 +298,7 @@ delete_in_mnesia(Binding, ChecksFun) -> Src :: rabbit_types:exchange() | amqqueue:amqqueue(), Dst :: rabbit_types:exchange() | amqqueue:amqqueue(), Binding :: rabbit_types:binding(), - Ret :: fun(() -> rabbit_binding:deletions()). + Ret :: fun(() -> {ok, rabbit_binding:deletions()}). delete_in_mnesia(Src, Dst, B) -> ok = sync_route(#route{binding = B}, rabbit_binding:binding_type(Src, Dst), should_index_table(Src), fun delete/3), @@ -199,6 +324,73 @@ not_found_or_absent_in_mnesia(#resource{kind = queue} = Name) -> {ok, Q} -> {absent, Q, nodedown} end. 
+delete_in_khepri(#binding{source = SrcName, + destination = DstName} = Binding, ChecksFun) -> + Path = khepri_route_path(Binding), + case rabbit_khepri:transaction( + fun () -> + case {lookup_resource_in_khepri_tx(SrcName), + lookup_resource_in_khepri_tx(DstName)} of + {[Src], [Dst]} -> + case exists_in_khepri(Path, Binding) of + false -> + ok; + true -> + case ChecksFun(Src, Dst) of + ok -> + ok = delete_in_khepri(Binding), + maybe_auto_delete_exchange_in_khepri(Binding#binding.source, [Binding], rabbit_binding:new_deletions(), false); + {error, _} = Err -> + Err + end + end; + _Errs -> + %% No absent queues, always present on disk + ok + end + end) of + ok -> + ok; + {error, _} = Err -> + Err; + Deletions -> + {ok, rabbit_binding:process_deletions(Deletions)} + end. + +exists_in_khepri(Path, Binding) -> + case khepri_tx:get(Path) of + {ok, Set} -> + sets:is_element(Binding, Set); + _ -> + false + end. + +delete_in_khepri(Binding) -> + Path = khepri_route_path(Binding), + case khepri_tx:get(Path) of + {ok, Set0} -> + Set = sets:del_element(Binding, Set0), + case sets:is_empty(Set) of + true -> + ok = khepri_tx:delete(Path); + false -> + ok = khepri_tx:put(Path, Set) + end; + _ -> + ok + end. + +maybe_auto_delete_exchange_in_khepri(XName, Bindings, Deletions, OnlyDurable) -> + {Entry, Deletions1} = + case rabbit_db_exchange:maybe_auto_delete_in_khepri(XName, OnlyDurable) of + {not_deleted, X} -> + {{X, not_deleted, Bindings}, Deletions}; + {deleted, X, Deletions2} -> + {{X, deleted, Bindings}, + rabbit_binding:combine_deletions(Deletions, Deletions2)} + end, + rabbit_binding:add_deletion(XName, Entry, Deletions1). + %% ------------------------------------------------------------------- %% get_all(). %% ------------------------------------------------------------------- @@ -213,7 +405,10 @@ not_found_or_absent_in_mnesia(#resource{kind = queue} = Name) -> %% @private get_all() -> - get_all_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia() end, + khepri => fun() -> get_all_in_khepri() end + }). get_all_in_mnesia() -> mnesia:async_dirty( @@ -222,6 +417,14 @@ get_all_in_mnesia() -> [B || #route{binding = B} <- AllRoutes] end). +get_all_in_khepri() -> + try + [B || #route{binding = B} <- ets:tab2list(?KHEPRI_BINDINGS_PROJECTION)] + catch + error:badarg -> + [] + end. + -spec get_all(VHostName) -> [Binding] when VHostName :: vhost:name(), Binding :: rabbit_types:binding(). @@ -232,7 +435,10 @@ get_all_in_mnesia() -> %% @private get_all(VHost) -> - get_all_in_mnesia(VHost). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia(VHost) end, + khepri => fun() -> get_all_in_khepri(VHost) end + }). get_all_in_mnesia(VHost) -> VHostResource = rabbit_misc:r(VHost, '_'), @@ -242,6 +448,19 @@ get_all_in_mnesia(VHost) -> _ = '_'}, [B || #route{binding = B} <- rabbit_db:list_in_mnesia(?MNESIA_TABLE, Match)]. +get_all_in_khepri(VHost) -> + try + VHostResource = rabbit_misc:r(VHost, '_'), + Match = #route{binding = #binding{source = VHostResource, + destination = VHostResource, + _ = '_'}}, + [B || #route{binding = B} <- ets:match_object( + ?KHEPRI_BINDINGS_PROJECTION, Match)] + catch + error:badarg -> + [] + end. + -spec get_all(Src, Dst, Reverse) -> [Binding] when Src :: rabbit_types:binding_source(), Dst :: rabbit_types:binding_destination(), @@ -255,7 +474,10 @@ get_all_in_mnesia(VHost) -> %% @private get_all(SrcName, DstName, Reverse) -> - get_all_in_mnesia(SrcName, DstName, Reverse). 
+ rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia(SrcName, DstName, Reverse) end, + khepri => fun() -> get_all_in_khepri(SrcName, DstName) end + }). get_all_in_mnesia(SrcName, DstName, Reverse) -> Route = #route{binding = #binding{source = SrcName, @@ -264,6 +486,18 @@ get_all_in_mnesia(SrcName, DstName, Reverse) -> Fun = list_for_route(Route, Reverse), mnesia:async_dirty(Fun). +get_all_in_khepri(SrcName, DstName) -> + try + MatchHead = #route{binding = #binding{source = SrcName, + destination = DstName, + _ = '_'}}, + [B || #route{binding = B} <- ets:match_object( + ?KHEPRI_BINDINGS_PROJECTION, MatchHead)] + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% get_all_for_source(). %% ------------------------------------------------------------------- @@ -278,7 +512,10 @@ get_all_in_mnesia(SrcName, DstName, Reverse) -> %% @private get_all_for_source(Resource) -> - get_all_for_source_in_mnesia(Resource). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_for_source_in_mnesia(Resource) end, + khepri => fun() -> get_all_for_source_in_khepri(Resource) end + }). get_all_for_source_in_mnesia(Resource) -> Route = #route{binding = #binding{source = Resource, _ = '_'}}, @@ -297,6 +534,16 @@ list_for_route(Route, true) -> rabbit_binding:reverse_route(Route), read)] end. +get_all_for_source_in_khepri(Resource) -> + try + Route = #route{binding = #binding{source = Resource, _ = '_'}}, + [B || #route{binding = B} <- ets:match_object( + ?KHEPRI_BINDINGS_PROJECTION, Route)] + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% get_all_for_destination(). %% ------------------------------------------------------------------- @@ -312,7 +559,10 @@ list_for_route(Route, true) -> %% @private get_all_for_destination(Dst) -> - get_all_for_destination_in_mnesia(Dst). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_for_destination_in_mnesia(Dst) end, + khepri => fun() -> get_all_for_destination_in_khepri(Dst) end + }). get_all_for_destination_in_mnesia(Dst) -> Route = #route{binding = #binding{destination = Dst, @@ -320,6 +570,17 @@ get_all_for_destination_in_mnesia(Dst) -> Fun = list_for_route(Route, true), mnesia:async_dirty(Fun). +get_all_for_destination_in_khepri(Destination) -> + try + Match = #route{binding = #binding{destination = Destination, + _ = '_'}}, + [B || #route{binding = B} <- ets:match_object( + ?KHEPRI_BINDINGS_PROJECTION, Match)] + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% fold(). %% ------------------------------------------------------------------- @@ -338,13 +599,29 @@ get_all_for_destination_in_mnesia(Dst) -> %% @private fold(Fun, Acc) -> - fold_in_mnesia(Fun, Acc). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> fold_in_mnesia(Fun, Acc) end, + khepri => fun() -> fold_in_khepri(Fun, Acc) end + }). fold_in_mnesia(Fun, Acc) -> ets:foldl(fun(#route{binding = Binding}, Acc0) -> Fun(Binding, Acc0) end, Acc, ?MNESIA_TABLE). +fold_in_khepri(Fun, Acc) -> + Path = khepri_routes_path() ++ [_VHost = ?KHEPRI_WILDCARD_STAR, + _SrcName = ?KHEPRI_WILDCARD_STAR, + rabbit_khepri:if_has_data_wildcard()], + {ok, Res} = rabbit_khepri:fold( + Path, + fun(_, #{data := SetOfBindings}, Acc0) -> + lists:foldl(fun(Binding, Acc1) -> + Fun(Binding, Acc1) + end, Acc0, sets:to_list(SetOfBindings)) + end, Acc), + Res. 
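fold_in_khepri/2 above walks every binding stored under the wildcard path built from khepri_routes_path/0, while the other Khepri read paths in this module go through the ETS projection tables. For orientation, this is the tree path a single binding lives under, as produced by khepri_route_path/1 defined at the end of this module. A shell-style sketch with example values; it assumes the #binding{} and #resource{} record definitions from rabbit_common are loaded:

    %% Example values only; path components come from the binding's source,
    %% destination and routing key.
    Binding = #binding{source      = rabbit_misc:r(<<"/">>, exchange, <<"x">>),
                       destination = rabbit_misc:r(<<"/">>, queue, <<"q">>),
                       key         = <<"rk">>,
                       args        = []},
    rabbit_db_binding:khepri_route_path(Binding).
    %% => [rabbit_db_binding, routes, <<"/">>, <<"x">>, queue, <<"q">>, <<"rk">>]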
+ %% Routing - HOT CODE PATH %% ------------------------------------------------------------------- %% match(). @@ -363,7 +640,10 @@ fold_in_mnesia(Fun, Acc) -> %% @private match(SrcName, Match) -> - match_in_mnesia(SrcName, Match). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> match_in_mnesia(SrcName, Match) end, + khepri => fun() -> match_in_khepri(SrcName, Match) end + }). match_in_mnesia(SrcName, Match) -> MatchHead = #route{binding = #binding{source = SrcName, @@ -372,6 +652,18 @@ match_in_mnesia(SrcName, Match) -> [Dest || [#route{binding = Binding = #binding{destination = Dest}}] <- Routes, Match(Binding)]. +match_in_khepri(SrcName, Match) -> + try + MatchHead = #route{binding = #binding{source = SrcName, + _ = '_'}}, + Routes = ets:select( + ?KHEPRI_BINDINGS_PROJECTION, [{MatchHead, [], [['$_']]}]), + [Dest || [#route{binding = Binding = #binding{destination = Dest}}] <- + Routes, Match(Binding)] + catch + error:badarg -> + [] + end. %% Routing - HOT CODE PATH %% ------------------------------------------------------------------- @@ -391,7 +683,10 @@ match_in_mnesia(SrcName, Match) -> %% @private match_routing_key(SrcName, RoutingKeys, UseIndex) -> - match_routing_key_in_mnesia(SrcName, RoutingKeys, UseIndex). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> match_routing_key_in_mnesia(SrcName, RoutingKeys, UseIndex) end, + khepri => fun() -> match_routing_key_in_khepri(SrcName, RoutingKeys) end + }). match_routing_key_in_mnesia(SrcName, RoutingKeys, UseIndex) -> case UseIndex of @@ -401,6 +696,30 @@ match_routing_key_in_mnesia(SrcName, RoutingKeys, UseIndex) -> route_in_mnesia_v1(SrcName, RoutingKeys) end. +match_routing_key_in_khepri(Src, ['_']) -> + try + MatchHead = #index_route{source_key = {Src, '_'}, + destination = '$1', + _ = '_'}, + ets:select(?KHEPRI_INDEX_ROUTE_PROJECTION, [{MatchHead, [], ['$1']}]) + catch + error:badarg -> + [] + end; +match_routing_key_in_khepri(Src, RoutingKeys) -> + lists:foldl( + fun(RK, Acc) -> + try + Dst = ets:lookup_element( + ?KHEPRI_INDEX_ROUTE_PROJECTION, + {Src, RK}, + #index_route.destination), + Dst ++ Acc + catch + _:_:_ -> Acc + end + end, [], RoutingKeys). + %% ------------------------------------------------------------------- %% recover(). %% ------------------------------------------------------------------- @@ -411,7 +730,11 @@ match_routing_key_in_mnesia(SrcName, RoutingKeys, UseIndex) -> %% @private recover() -> - recover_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> recover_in_mnesia() end, + %% Nothing to do in khepri, single table storage + khepri => ok + }). recover_in_mnesia() -> rabbit_mnesia:execute_mnesia_transaction( @@ -438,7 +761,10 @@ recover_in_mnesia() -> %% @private recover(RecoverFun) -> - recover_in_mnesia(RecoverFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> recover_in_mnesia(RecoverFun) end, + khepri => ok + }). recover_in_mnesia(RecoverFun) -> _ = [RecoverFun(Route, Src, Dst, fun recover_semi_durable_route/2) || @@ -482,6 +808,33 @@ delete_for_source_in_mnesia(SrcName, ShouldIndexTable) -> mnesia:dirty_match_object(?MNESIA_SEMI_DURABLE_TABLE, Match)), ShouldIndexTable). +%% ------------------------------------------------------------------- +%% delete_all_for_exchange_in_khepri(). 
+%% ------------------------------------------------------------------- + +-spec delete_all_for_exchange_in_khepri(Exchange, OnlyDurable, RemoveBindingsForSource) + -> Ret when + Exchange :: rabbit_types:exchange(), + OnlyDurable :: boolean(), + RemoveBindingsForSource :: boolean(), + Binding :: rabbit_types:binding(), + Ret :: {deleted, Exchange, [Binding], rabbit_binding:deletions()}. + +delete_all_for_exchange_in_khepri(X = #exchange{name = XName}, OnlyDurable, RemoveBindingsForSource) -> + Bindings = case RemoveBindingsForSource of + true -> delete_for_source_in_khepri(XName); + false -> [] + end, + {deleted, X, Bindings, delete_for_destination_in_khepri(XName, OnlyDurable)}. + +delete_for_source_in_khepri(#resource{virtual_host = VHost, name = Name}) -> + Path = khepri_routes_path() ++ [VHost, Name], + {ok, Bindings} = khepri_tx:get_many(Path ++ [rabbit_khepri:if_has_data_wildcard()]), + ok = khepri_tx:delete(Path), + maps:fold(fun(_P, Set, Acc) -> + sets:to_list(Set) ++ Acc + end, [], Bindings). + %% ------------------------------------------------------------------- %% delete_for_destination_in_mnesia(). %% ------------------------------------------------------------------- @@ -513,6 +866,29 @@ delete_for_destination_in_mnesia(DstName, OnlyDurable, Fun) -> rabbit_binding:group_bindings_fold(fun maybe_auto_delete_exchange_in_mnesia/4, lists:keysort(#binding.source, Bindings), OnlyDurable). +%% ------------------------------------------------------------------- +%% delete_for_destination_in_khepri(). +%% ------------------------------------------------------------------- + +-spec delete_for_destination_in_khepri(Dst, OnlyDurable) -> Deletions when + Dst :: rabbit_types:binding_destination(), + OnlyDurable :: boolean(), + Deletions :: rabbit_binding:deletions(). + +delete_for_destination_in_khepri(DstName, OnlyDurable) -> + BindingsMap = match_destination_in_khepri(DstName), + maps:foreach(fun(K, _V) -> khepri_tx:delete(K) end, BindingsMap), + Bindings = maps:fold(fun(_, Set, Acc) -> + sets:to_list(Set) ++ Acc + end, [], BindingsMap), + rabbit_binding:group_bindings_fold(fun maybe_auto_delete_exchange_in_khepri/4, + lists:keysort(#binding.source, Bindings), OnlyDurable). + +match_destination_in_khepri(#resource{virtual_host = VHost, kind = Kind, name = Name}) -> + Path = khepri_routes_path() ++ [VHost, ?KHEPRI_WILDCARD_STAR, Kind, Name, ?KHEPRI_WILDCARD_STAR_STAR], + {ok, Map} = khepri_tx:get_many(Path), + Map. + %% ------------------------------------------------------------------- %% delete_transient_for_destination_in_mnesia(). %% ------------------------------------------------------------------- @@ -543,6 +919,38 @@ has_for_source_in_mnesia(SrcName) -> contains(?MNESIA_TABLE, Match) orelse contains(?MNESIA_SEMI_DURABLE_TABLE, Match). +%% ------------------------------------------------------------------- +%% has_for_source_in_khepri(). +%% ------------------------------------------------------------------- + +-spec has_for_source_in_khepri(rabbit_types:binding_source()) -> boolean(). + +has_for_source_in_khepri(#resource{virtual_host = VHost, name = Name}) -> + Path = khepri_routes_path() ++ [VHost, Name, rabbit_khepri:if_has_data_wildcard()], + case khepri_tx:get_many(Path) of + {ok, Map} -> + maps:size(Map) > 0; + _ -> + false + end. + +%% ------------------------------------------------------------------- +%% match_source_and_destination_in_khepri_tx(). 
+%% ------------------------------------------------------------------- + +-spec match_source_and_destination_in_khepri_tx(Src, Dst) -> Bindings when + Src :: rabbit_types:binding_source(), + Dst :: rabbit_types:binding_destination(), + Bindings :: [Binding :: rabbit_types:binding()]. + +match_source_and_destination_in_khepri_tx(#resource{virtual_host = VHost, name = Name}, + #resource{kind = Kind, name = DstName}) -> + Path = khepri_routes_path() ++ [VHost, Name, Kind, DstName, rabbit_khepri:if_has_data_wildcard()], + case khepri_tx:get_many(Path) of + {ok, Map} -> maps:values(Map); + _ -> [] + end. + %% ------------------------------------------------------------------- %% clear(). %% ------------------------------------------------------------------- @@ -553,7 +961,9 @@ has_for_source_in_mnesia(SrcName) -> %% @private clear() -> - clear_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> clear_in_mnesia() end, + khepri => fun() -> clear_in_khepri() end}). clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?MNESIA_TABLE), @@ -563,6 +973,27 @@ clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?MNESIA_INDEX_TABLE), ok. +clear_in_khepri() -> + Path = khepri_routes_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. + +%% -------------------------------------------------------------- +%% Paths +%% -------------------------------------------------------------- +khepri_route_path(#binding{source = #resource{virtual_host = VHost, name = SrcName}, + destination = #resource{kind = Kind, name = DstName}, + key = RoutingKey}) -> + [?MODULE, routes, VHost, SrcName, Kind, DstName, RoutingKey]. + +khepri_routes_path() -> + [?MODULE, routes]. + +khepri_route_exchange_path(#resource{virtual_host = VHost, name = SrcName}) -> + [?MODULE, routes, VHost, SrcName]. + %% -------------------------------------------------------------- %% Internal %% -------------------------------------------------------------- @@ -732,8 +1163,8 @@ route_in_mnesia_v1(SrcName, [_|_] = RoutingKeys) -> %% ets:select/2 is expensive because it needs to compile the match spec every %% time and lookup does not happen by a hash key. %% -%% In contrast, route_v2/2 increases end-to-end message sending throughput -%% (i.e. from RabbitMQ client to the queue process) by up to 35% by using ets:lookup_element/3. +%% In contrast, route_v2/3 increases end-to-end message sending throughput +%% (i.e. from RabbitMQ client to the queue process) by up to 35% by using ets:lookup_element/4. %% Only the direct exchange type uses the rabbit_index_route table to store its %% bindings by table key tuple {SourceExchange, RoutingKey}. -spec route_v2(ets:table(), rabbit_types:binding_source(), [rabbit_router:routing_key(), ...]) -> @@ -747,16 +1178,7 @@ route_v2(Table, SrcName, [_|_] = RoutingKeys) -> end, RoutingKeys). destinations(Table, SrcName, RoutingKey) -> - %% Prefer try-catch block over checking Key existence with ets:member/2. - %% The latter reduces throughput by a few thousand messages per second because - %% of function db_member_hash in file erl_db_hash.c. - %% We optimise for the happy path, that is the binding / table key is present. - try - ets:lookup_element(Table, - {SrcName, RoutingKey}, - #index_route.destination) - catch - error:badarg -> - [] - end. - + ets:lookup_element(Table, + {SrcName, RoutingKey}, + #index_route.destination, + []). 
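The rabbit_db_binding changes above lean on two idioms that recur throughout this patch: every public function now dispatches through rabbit_khepri:handle_fallback/1 with one fun per backend, and destinations/3 swaps the try/catch around ets:lookup_element/3 for ets:lookup_element/4 with a default value (available from OTP 26), keeping the single-BIF lookup on the hot path without catching badarg for missing keys. Below is a minimal, hypothetical sketch of both idioms, not part of the patch; the module name and the khepri_in_use/0 placeholder are assumptions, and the real rabbit_khepri:handle_fallback/1 additionally copes with a metadata-store migration in progress, which this sketch omits.

%% Hypothetical sketch, not part of the patch above.
-module(binding_idioms_example).
-export([with_backend/1, lookup_destinations/2]).

%% Mirrors the shape of rabbit_khepri:handle_fallback/1 as used in this file:
%% one fun per backend; the khepri value may also be a plain term, as in
%% recover/0 above, which passes `khepri => ok'.
with_backend(#{mnesia := MnesiaFun, khepri := Khepri}) ->
    case khepri_in_use() of
        true when is_function(Khepri, 0) -> Khepri();
        true                             -> Khepri;
        false                            -> MnesiaFun()
    end.

khepri_in_use() ->
    %% Placeholder for rabbit_khepri:is_enabled/0 (assumption).
    false.

%% Mirrors the new destinations/3: ets:lookup_element/4 returns the default
%% when the key is absent, so no try/catch around badarg is needed. Assumes
%% the table stores {Key, Destinations} tuples, with Destinations at position 2.
lookup_destinations(Table, Key) ->
    ets:lookup_element(Table, Key, 2, []).

As the original comment in destinations/3 noted, the lookup-based path was chosen over ets:member/2 plus a read for throughput reasons; the default argument keeps that single lookup while dropping the exception handling.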
diff --git a/deps/rabbit/src/rabbit_db_binding_m2k_converter.erl b/deps/rabbit/src/rabbit_db_binding_m2k_converter.erl new file mode 100644 index 000000000000..0bef352db141 --- /dev/null +++ b/deps/rabbit/src/rabbit_db_binding_m2k_converter.erl @@ -0,0 +1,118 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_binding_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3]). + +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, Tables) -> + %% Clean up any previous attempt to copy the Mnesia table to Khepri. + lists:foreach(fun clear_data_in_khepri/1, Tables), + + State = #?MODULE{}, + {ok, State}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +copy_to_khepri(rabbit_route = Table, + #route{binding = #binding{source = XName} = Binding}, + State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Binding], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_binding:khepri_route_path(Binding), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + XPath = rabbit_db_binding:khepri_route_exchange_path(XName), + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:transaction( + fun() -> + %% Store the exchange's type in the exchange name + %% branch of the tree. + [#exchange{type = XType}] = + rabbit_db_exchange:get_in_khepri_tx(XName), + ok = khepri_tx:put(XPath, #{type => XType}), + %% Add the binding to the set at the binding's + %% path. + Set = case khepri_tx:get(Path) of + {ok, Set0} -> + Set0; + _ -> + sets:new([{version, 2}]) + end, + khepri_tx:put(Path, sets:add_element(Binding, Set)) + end, rw, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). 
+%% @private + +delete_from_khepri(rabbit_route = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_binding:khepri_route_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +-spec clear_data_in_khepri(Table) -> ok when + Table :: atom(). + +clear_data_in_khepri(rabbit_route) -> + Path = rabbit_db_binding:khepri_routes_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. diff --git a/deps/rabbit/src/rabbit_db_cluster.erl b/deps/rabbit/src/rabbit_db_cluster.erl index 7e377a2badc4..b1f8cb5348ef 100644 --- a/deps/rabbit/src/rabbit_db_cluster.erl +++ b/deps/rabbit/src/rabbit_db_cluster.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_cluster). @@ -17,6 +17,7 @@ -export([change_node_type/1]). -export([is_clustered/0, members/0, + consistent_members/0, disc_members/0, node_type/0, check_compatibility/1, @@ -58,39 +59,152 @@ can_join(RemoteNode) -> #{domain => ?RMQLOG_DOMAIN_DB}), case rabbit_feature_flags:check_node_compatibility(RemoteNode) of ok -> - can_join_using_mnesia(RemoteNode); + case rabbit_khepri:is_enabled(RemoteNode) of + true -> can_join_using_khepri(RemoteNode); + false -> can_join_using_mnesia(RemoteNode) + end; Error -> Error end. can_join_using_mnesia(RemoteNode) -> + case rabbit_khepri:is_enabled() of + true -> rabbit_node_monitor:prepare_cluster_status_files(); + false -> ok + end, rabbit_mnesia:can_join_cluster(RemoteNode). +can_join_using_khepri(RemoteNode) -> + rabbit_khepri:can_join_cluster(RemoteNode). + -spec join(RemoteNode, NodeType) -> Ret when RemoteNode :: node(), - NodeType :: rabbit_db_cluster:node_type(), + NodeType :: node_type(), Ret :: Ok | Error, Ok :: ok | {ok, already_member}, Error :: {error, {inconsistent_cluster, string()}}. %% @doc Adds this node to a cluster using `RemoteNode' to reach it. +join(ThisNode, _NodeType) when ThisNode =:= node() -> + {error, cannot_cluster_node_with_itself}; join(RemoteNode, NodeType) when is_atom(RemoteNode) andalso ?IS_NODE_TYPE(NodeType) -> case can_join(RemoteNode) of {ok, ClusterNodes} when is_list(ClusterNodes) -> - ok = rabbit_db:reset(), + %% RabbitMQ and Mnesia must be stopped to modify the cluster. In + %% particular, we stop Mnesia regardless of the remotely selected + %% database because we might change it during the join. + RestartMnesia = rabbit_mnesia:is_running(), + RestartFFCtl = rabbit_ff_controller:is_running(), + RestartRaSystems = rabbit_ra_systems:are_running(), + RestartRabbit = rabbit:is_running(), + case RestartRabbit of + true -> + rabbit:stop(); + false -> + %% The Ra systems were started before we initialize the + %% database (because Khepri depends on one of them). + %% Therefore, there are files in the data directory. They + %% will go away with the reset and we will need to restart + %% Ra systems afterwards. 
+ case RestartRaSystems of + true -> ok = rabbit_ra_systems:ensure_stopped(); + false -> ok + end, + + case RestartFFCtl of + true -> + ok = rabbit_ff_controller:wait_for_task_and_stop(); + false -> + ok + end, + case RestartMnesia of + true -> rabbit_mnesia:stop_mnesia(); + false -> ok + end + end, + + %% We acquire the feature flags registry reload lock because + %% between the time we reset the registry (as part of + %% `rabbit_db:reset/0' and the states copy from the remote node, + %% there could be a concurrent reload of the registry (for instance + %% because of peer discovery on another node) with the + %% default/empty states. + %% + %% To make this work, the lock is also acquired from + %% `rabbit_ff_registry_wrapper'. + rabbit_ff_registry_factory:acquire_state_change_lock(), + try + ok = rabbit_db:reset(), + rabbit_feature_flags:copy_feature_states_after_reset( + RemoteNode) + after + rabbit_ff_registry_factory:release_state_change_lock() + end, + + %% After the regular reset, we also reset Mnesia specifically if + %% it is meant to be used. That's because we may switch back from + %% Khepri to Mnesia. To be safe, remove possibly stale files from + %% a previous instance where Mnesia was used. + case rabbit_khepri:is_enabled(RemoteNode) of + true -> ok; + false -> ok = rabbit_mnesia:reset_gracefully() + end, + + ok = rabbit_node_monitor:notify_left_cluster(node()), + + %% Now that the files are all gone after the reset above, restart + %% the Ra systems. They will recreate their folder in the process. + case RestartRabbit of + true -> + ok; + false -> + case RestartRaSystems of + true -> + ok = rabbit_ra_systems:ensure_started(), + ok = rabbit_khepri:setup(); + false -> + ok + end + end, ?LOG_INFO( "DB: joining cluster using remote nodes:~n~tp", [ClusterNodes], #{domain => ?RMQLOG_DOMAIN_DB}), - Ret = join_using_mnesia(ClusterNodes, NodeType), + Ret = case rabbit_khepri:is_enabled(RemoteNode) of + true -> join_using_khepri(ClusterNodes, NodeType); + false -> join_using_mnesia(ClusterNodes, NodeType) + end, + + %% Restart RabbitMQ afterwards, if it was running before the join. + %% Likewise for the Feature flags controller and Mnesia (if we + %% still need it). + case RestartRabbit of + true -> + rabbit:start(); + false -> + case RestartFFCtl of + true -> + ok = rabbit_sup:start_child(rabbit_ff_controller); + false -> + ok + end, + NeedMnesia = not rabbit_khepri:is_enabled(), + case RestartMnesia andalso NeedMnesia of + true -> rabbit_mnesia:start_mnesia(false); + false -> ok + end + end, + case Ret of ok -> - rabbit_feature_flags:copy_feature_states_after_reset( - RemoteNode), rabbit_node_monitor:notify_joined_cluster(), ok; {error, _} = Error -> + %% We reset feature flags states again and make sure the + %% recorded states on disk are deleted. + rabbit_feature_flags:reset(), + Error end; {ok, already_member} -> @@ -100,18 +214,36 @@ join(RemoteNode, NodeType) end. join_using_mnesia(ClusterNodes, NodeType) when is_list(ClusterNodes) -> - ok = rabbit_mnesia:reset_gracefully(), rabbit_mnesia:join_cluster(ClusterNodes, NodeType). +join_using_khepri(ClusterNodes, disc) -> + rabbit_khepri:add_member(node(), ClusterNodes); +join_using_khepri(_ClusterNodes, ram = NodeType) -> + {error, {node_type_unsupported, khepri, NodeType}}. + -spec forget_member(Node, RemoveWhenOffline) -> ok when Node :: node(), RemoveWhenOffline :: boolean(). %% @doc Removes `Node' from the cluster. 
forget_member(Node, RemoveWhenOffline) -> + case forget_member0(Node, RemoveWhenOffline) of + ok -> + rabbit_node_monitor:notify_left_cluster(Node); + Error -> + Error + end. + +forget_member0(Node, RemoveWhenOffline) -> case rabbit:is_running(Node) of false -> - forget_member_using_mnesia(Node, RemoveWhenOffline); + ?LOG_DEBUG( + "DB: removing cluster member `~ts`", [Node], + #{domain => ?RMQLOG_DOMAIN_DB}), + case rabbit_khepri:is_enabled() of + true -> forget_member_using_khepri(Node, RemoveWhenOffline); + false -> forget_member_using_mnesia(Node, RemoveWhenOffline) + end; true -> {error, {failed_to_remove_node, Node, rabbit_still_running}} end. @@ -119,18 +251,31 @@ forget_member(Node, RemoveWhenOffline) -> forget_member_using_mnesia(Node, RemoveWhenOffline) -> rabbit_mnesia:forget_cluster_node(Node, RemoveWhenOffline). +forget_member_using_khepri(_Node, true) -> + ?LOG_WARNING( + "Remove node with --offline flag is not supported by Khepri. " + "Skipping...", + #{domain => ?RMQLOG_DOMAIN_DB}), + {error, not_supported}; +forget_member_using_khepri(Node, false = _RemoveWhenOffline) -> + rabbit_khepri:leave_cluster(Node). + %% ------------------------------------------------------------------- %% Cluster update. %% ------------------------------------------------------------------- -spec change_node_type(NodeType) -> ok when - NodeType :: rabbit_db_cluster:node_type(). + NodeType :: node_type(). %% @doc Changes the node type to `NodeType'. %% %% Node types may not all be valid with all databases. change_node_type(NodeType) -> - change_node_type_using_mnesia(NodeType). + rabbit_mnesia:ensure_node_type_is_permitted(NodeType), + case rabbit_khepri:is_enabled() of + true -> ok; + false -> change_node_type_using_mnesia(NodeType) + end. change_node_type_using_mnesia(NodeType) -> rabbit_mnesia:change_cluster_node_type(NodeType). @@ -144,43 +289,78 @@ change_node_type_using_mnesia(NodeType) -> %% @doc Indicates if this node is clustered with other nodes or not. is_clustered() -> - is_clustered_using_mnesia(). - -is_clustered_using_mnesia() -> - rabbit_mnesia:is_clustered(). + Members = members(), + Members =/= [] andalso Members =/= [node()]. -spec members() -> Members when Members :: [node()]. %% @doc Returns the list of cluster members. members() -> - members_using_mnesia(). + case rabbit_khepri:get_feature_state() of + enabled -> members_using_khepri(); + _ -> members_using_mnesia() + end. members_using_mnesia() -> rabbit_mnesia:members(). +members_using_khepri() -> + %% This function returns the empty list when it encounters an error + %% trying to query khepri for it's members. As this function does not + %% return ok | error this is the only way for callers to detect this. + %% rabbit_mnesia:members/0 however _will_ still return at least the + %% current node making it impossible to detect the situation where + %% the current cluster members may not be correct. It is unlikely we + %% ever reach that as the mnesia cluster file probably always exists. + %% For khepri however it is a lot more likely to encounter an error + %% so we need to allow callers to be more defensive in this case. + rabbit_khepri:locally_known_nodes(). + +-spec consistent_members() -> Members when + Members :: [node()]. +%% @doc Returns the list of cluster members. + +consistent_members() -> + case rabbit_khepri:get_feature_state() of + enabled -> consistent_members_using_khepri(); + _ -> members_using_mnesia() + end. + +consistent_members_using_khepri() -> + rabbit_khepri:nodes(). 
+ -spec disc_members() -> Members when Members :: [node()]. %% @private disc_members() -> - disc_members_using_mnesia(). + case rabbit_khepri:get_feature_state() of + enabled -> members_using_khepri(); + _ -> disc_members_using_mnesia() + end. disc_members_using_mnesia() -> rabbit_mnesia:cluster_nodes(disc). -spec node_type() -> NodeType when - NodeType :: rabbit_db_cluster:node_type(). + NodeType :: node_type(). %% @doc Returns the type of this node, `disc' or `ram'. %% %% Node types may not all be relevant with all databases. node_type() -> - node_type_using_mnesia(). + case rabbit_khepri:get_feature_state() of + enabled -> node_type_using_khepri(); + _ -> node_type_using_mnesia() + end. node_type_using_mnesia() -> rabbit_mnesia:node_type(). +node_type_using_khepri() -> + disc. + -spec check_compatibility(RemoteNode) -> ok | {error, Reason} when RemoteNode :: node(), Reason :: any(). @@ -190,7 +370,10 @@ node_type_using_mnesia() -> check_compatibility(RemoteNode) -> case rabbit_feature_flags:check_node_compatibility(RemoteNode) of ok -> - check_compatibility_using_mnesia(RemoteNode); + case rabbit_khepri:get_feature_state() of + enabled -> ok; + _ -> check_compatibility_using_mnesia(RemoteNode) + end; Error -> Error end. @@ -202,20 +385,32 @@ check_compatibility_using_mnesia(RemoteNode) -> %% @doc Ensures the cluster is consistent. check_consistency() -> - check_consistency_using_mnesia(). + case rabbit_khepri:get_feature_state() of + enabled -> check_consistency_using_khepri(); + _ -> check_consistency_using_mnesia() + end. check_consistency_using_mnesia() -> rabbit_mnesia:check_cluster_consistency(). +check_consistency_using_khepri() -> + rabbit_khepri:check_cluster_consistency(). + -spec cli_cluster_status() -> ClusterStatus when - ClusterStatus :: [{nodes, [{rabbit_db_cluster:node_type(), [node()]}]} | + ClusterStatus :: [{nodes, [{node_type(), [node()]}]} | {running_nodes, [node()]} | {partitions, [{node(), [node()]}]}]. %% @doc Returns information from the cluster for the `cluster_status' CLI %% command. cli_cluster_status() -> - cli_cluster_status_using_mnesia(). + case rabbit_khepri:is_enabled() of + true -> cli_cluster_status_using_khepri(); + false -> cli_cluster_status_using_mnesia() + end. cli_cluster_status_using_mnesia() -> rabbit_mnesia:status(). + +cli_cluster_status_using_khepri() -> + rabbit_khepri:cli_cluster_status(). diff --git a/deps/rabbit/src/rabbit_db_exchange.erl b/deps/rabbit/src/rabbit_db_exchange.erl index 001fa7be297f..e45edd6dda66 100644 --- a/deps/rabbit/src/rabbit_db_exchange.erl +++ b/deps/rabbit/src/rabbit_db_exchange.erl @@ -2,11 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_exchange). +-include_lib("khepri/include/khepri.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -export([ @@ -31,18 +32,32 @@ %% Used by other rabbit_db_* modules -export([ + maybe_auto_delete_in_khepri/2, maybe_auto_delete_in_mnesia/2, next_serial_in_mnesia_tx/1, + next_serial_in_khepri_tx/1, + delete_in_khepri/3, delete_in_mnesia/3, - update_in_mnesia_tx/2 + get_in_khepri_tx/1, + update_in_mnesia_tx/2, + update_in_khepri_tx/2, + path/1 ]). %% For testing -export([clear/0]). 
+-export([ + khepri_exchange_path/1, + khepri_exchange_serial_path/1, + khepri_exchanges_path/0, + khepri_exchange_serials_path/0 + ]). + -define(MNESIA_TABLE, rabbit_exchange). -define(MNESIA_DURABLE_TABLE, rabbit_durable_exchange). -define(MNESIA_SERIAL_TABLE, rabbit_exchange_serial). +-define(KHEPRI_PROJECTION, rabbit_khepri_exchange). %% ------------------------------------------------------------------- %% get_all(). @@ -57,11 +72,17 @@ %% @private get_all() -> - get_all_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia() end, + khepri => fun() -> get_all_in_khepri() end + }). get_all_in_mnesia() -> rabbit_db:list_in_mnesia(?MNESIA_TABLE, #exchange{_ = '_'}). +get_all_in_khepri() -> + rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [rabbit_khepri:if_has_data_wildcard()]). + -spec get_all(VHostName) -> [Exchange] when VHostName :: vhost:name(), Exchange :: rabbit_types:exchange(). @@ -72,12 +93,18 @@ get_all_in_mnesia() -> %% @private get_all(VHost) -> - get_all_in_mnesia(VHost). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia(VHost) end, + khepri => fun() -> get_all_in_khepri(VHost) end + }). get_all_in_mnesia(VHost) -> Match = #exchange{name = rabbit_misc:r(VHost, exchange), _ = '_'}, rabbit_db:list_in_mnesia(?MNESIA_TABLE, Match). +get_all_in_khepri(VHost) -> + rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [VHost, rabbit_khepri:if_has_data_wildcard()]). + %% ------------------------------------------------------------------- %% get_all_durable(). %% ------------------------------------------------------------------- @@ -91,11 +118,17 @@ get_all_in_mnesia(VHost) -> %% @private get_all_durable() -> - get_all_durable_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_durable_in_mnesia() end, + khepri => fun() -> get_all_durable_in_khepri() end + }). get_all_durable_in_mnesia() -> rabbit_db:list_in_mnesia(rabbit_durable_exchange, #exchange{_ = '_'}). +get_all_durable_in_khepri() -> + rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [rabbit_khepri:if_has_data_wildcard()]). + %% ------------------------------------------------------------------- %% list(). %% ------------------------------------------------------------------- @@ -109,11 +142,23 @@ get_all_durable_in_mnesia() -> %% @private list() -> - list_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> list_in_mnesia() end, + khepri => fun() -> list_in_khepri() end + }). list_in_mnesia() -> mnesia:dirty_all_keys(?MNESIA_TABLE). +list_in_khepri() -> + case rabbit_khepri:match(khepri_exchanges_path() ++ + [rabbit_khepri:if_has_data_wildcard()]) of + {ok, Map} -> + maps:fold(fun(_K, X, Acc) -> [X#exchange.name | Acc] end, [], Map); + _ -> + [] + end. + %% ------------------------------------------------------------------- %% get(). %% ------------------------------------------------------------------- @@ -129,11 +174,37 @@ list_in_mnesia() -> %% @private get(Name) -> - get_in_mnesia(Name). - + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(Name) end, + khepri => fun() -> get_in_khepri(Name) end + }). + get_in_mnesia(Name) -> rabbit_mnesia:dirty_read({?MNESIA_TABLE, Name}). +get_in_khepri(Name) -> + try ets:lookup(?KHEPRI_PROJECTION, Name) of + [X] -> {ok, X}; + [] -> {error, not_found} + catch + error:badarg -> + {error, not_found} + end. + +%% ------------------------------------------------------------------- +%% get_in_khepri_tx(). 
+%% ------------------------------------------------------------------- + +-spec get_in_khepri_tx(ExchangeName) -> Ret when + ExchangeName :: rabbit_exchange:name(), + Ret :: [Exchange :: rabbit_types:exchange()]. + +get_in_khepri_tx(Name) -> + case khepri_tx:get(khepri_exchange_path(Name)) of + {ok, X} -> [X]; + _ -> [] + end. + %% ------------------------------------------------------------------- %% get_many(). %% ------------------------------------------------------------------- @@ -148,7 +219,10 @@ get_in_mnesia(Name) -> %% @private get_many(Names) when is_list(Names) -> - get_many_in_mnesia(?MNESIA_TABLE, Names). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_many_in_mnesia(?MNESIA_TABLE, Names) end, + khepri => fun() -> get_many_in_khepri(Names) end + }). get_many_in_mnesia(Table, [Name]) -> ets:lookup(Table, Name); get_many_in_mnesia(Table, Names) when is_list(Names) -> @@ -156,6 +230,14 @@ get_many_in_mnesia(Table, Names) when is_list(Names) -> %% expensive for reasons explained in rabbit_mnesia:dirty_read/1. lists:append([ets:lookup(Table, Name) || Name <- Names]). +get_many_in_khepri(Names) when is_list(Names) -> + try + lists:append([ets:lookup(?KHEPRI_PROJECTION, Name) || Name <- Names]) + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% count(). %% ------------------------------------------------------------------- @@ -168,18 +250,25 @@ get_many_in_mnesia(Table, Names) when is_list(Names) -> %% @private count() -> - count_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> count_in_mnesia() end, + khepri => fun() -> count_in_khepri() end + }). count_in_mnesia() -> mnesia:table_info(?MNESIA_TABLE, size). +count_in_khepri() -> + rabbit_khepri:count_children(khepri_exchanges_path() ++ [?KHEPRI_WILDCARD_STAR]). + %% ------------------------------------------------------------------- %% update(). %% ------------------------------------------------------------------- --spec update(ExchangeName, UpdateFun) -> ok when +-spec update(ExchangeName, UpdateFun) -> Ret when ExchangeName :: rabbit_exchange:name(), - UpdateFun :: fun((Exchange) -> Exchange). + UpdateFun :: fun((Exchange) -> Exchange), + Ret :: ok | rabbit_khepri:timeout_error(). %% @doc Updates an existing exchange record using the result of %% `UpdateFun'. %% @@ -189,7 +278,10 @@ count_in_mnesia() -> %% @private update(XName, Fun) -> - update_in_mnesia(XName, Fun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> update_in_mnesia(XName, Fun) end, + khepri => fun() -> update_in_khepri(XName, Fun) end + }). update_in_mnesia(XName, Fun) -> rabbit_mnesia:execute_mnesia_transaction( @@ -224,13 +316,61 @@ set_ram_in_mnesia_tx(X) -> ok = mnesia:write(?MNESIA_TABLE, X1, write), X1. +update_in_khepri(XName, Fun) -> + Path = khepri_exchange_path(XName), + Ret1 = rabbit_khepri:adv_get(Path), + case Ret1 of + {ok, #{data := X, payload_version := Vsn}} -> + X1 = Fun(X), + UpdatePath = + khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = Vsn}]), + Ret2 = rabbit_khepri:put(UpdatePath, X1), + case Ret2 of + ok -> + ok; + {error, {khepri, mismatching_node, _}} -> + update_in_khepri(XName, Fun); + {error, {khepri, node_not_found, _}} -> + ok; + {error, _} = Error -> + Error + end; + {error, {khepri, node_not_found, _}} -> + ok; + {error, _} = Error -> + Error + end. + +%% ------------------------------------------------------------------- +%% update_in_khepri_tx(). 
+%% ------------------------------------------------------------------- + +-spec update_in_khepri_tx(ExchangeName, UpdateFun) -> Ret when + ExchangeName :: rabbit_exchange:name(), + Exchange :: rabbit_types:exchange(), + UpdateFun :: fun((Exchange) -> Exchange), + Ret :: not_found | Exchange. + +update_in_khepri_tx(Name, Fun) -> + Path = khepri_exchange_path(Name), + case khepri_tx:get(Path) of + {ok, X} -> + X1 = Fun(X), + ok = khepri_tx:put(Path, X1), + X1; + _ -> not_found + end. + %% ------------------------------------------------------------------- %% create_or_get(). %% ------------------------------------------------------------------- -spec create_or_get(Exchange) -> Ret when Exchange :: rabbit_types:exchange(), - Ret :: {new, Exchange} | {existing, Exchange} | {error, any()}. + Ret :: {new, Exchange} | + {existing, Exchange} | + rabbit_khepri:timeout_error(). %% @doc Writes an exchange record if it doesn't exist already or returns %% the existing one. %% @@ -240,7 +380,10 @@ set_ram_in_mnesia_tx(X) -> %% @private create_or_get(X) -> - create_or_get_in_mnesia(X). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> create_or_get_in_mnesia(X) end, + khepri => fun() -> create_or_get_in_khepri(X) end + }). create_or_get_in_mnesia(#exchange{name = XName} = X) -> rabbit_mnesia:execute_mnesia_transaction( @@ -253,6 +396,17 @@ create_or_get_in_mnesia(#exchange{name = XName} = X) -> end end). +create_or_get_in_khepri(#exchange{name = XName} = X) -> + Path = khepri_exchange_path(XName), + case rabbit_khepri:create(Path, X) of + ok -> + {new, X}; + {error, {khepri, mismatching_node, #{node_props := #{data := ExistingX}}}} -> + {existing, ExistingX}; + {error, timeout} = Err -> + Err + end. + %% ------------------------------------------------------------------- %% set(). %% ------------------------------------------------------------------- @@ -266,7 +420,10 @@ create_or_get_in_mnesia(#exchange{name = XName} = X) -> %% @private set(Xs) -> - set_in_mnesia(Xs). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> set_in_mnesia(Xs) end, + khepri => fun() -> set_in_khepri(Xs) end + }). set_in_mnesia(Xs) when is_list(Xs) -> rabbit_mnesia:execute_mnesia_transaction( @@ -275,6 +432,18 @@ set_in_mnesia(Xs) when is_list(Xs) -> end), ok. +set_in_khepri(Xs) when is_list(Xs) -> + rabbit_khepri:transaction( + fun() -> + [set_in_khepri_tx(X) || X <- Xs] + end, rw), + ok. + +set_in_khepri_tx(X) -> + Path = khepri_exchange_path(X#exchange.name), + ok = khepri_tx:put(Path, X), + X. + %% ------------------------------------------------------------------- %% peek_serial(). %% ------------------------------------------------------------------- @@ -289,7 +458,10 @@ set_in_mnesia(Xs) when is_list(Xs) -> %% @private peek_serial(XName) -> - peek_serial_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> peek_serial_in_mnesia(XName) end, + khepri => fun() -> peek_serial_in_khepri(XName) end + }). peek_serial_in_mnesia(XName) -> rabbit_mnesia:execute_mnesia_transaction( @@ -303,6 +475,15 @@ peek_serial_in_mnesia_tx(XName, LockType) -> _ -> 1 end. +peek_serial_in_khepri(XName) -> + Path = khepri_exchange_serial_path(XName), + case rabbit_khepri:get(Path) of + {ok, Serial} -> + Serial; + _ -> + 1 + end. + %% ------------------------------------------------------------------- %% next_serial(). 
%% ------------------------------------------------------------------- @@ -317,7 +498,10 @@ peek_serial_in_mnesia_tx(XName, LockType) -> %% @private next_serial(XName) -> - next_serial_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> next_serial_in_mnesia(XName) end, + khepri => fun() -> next_serial_in_khepri(XName) end + }). next_serial_in_mnesia(XName) -> rabbit_mnesia:execute_mnesia_transaction(fun() -> @@ -334,6 +518,41 @@ next_serial_in_mnesia_tx(XName) -> #exchange_serial{name = XName, next = Serial + 1}, write), Serial. +next_serial_in_khepri(XName) -> + %% Just storing the serial number is enough, no need to keep #exchange_serial{} + Path = khepri_exchange_serial_path(XName), + Ret1 = rabbit_khepri:adv_get(Path), + case Ret1 of + {ok, #{data := Serial, + payload_version := Vsn}} -> + UpdatePath = + khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = Vsn}]), + case rabbit_khepri:put(UpdatePath, Serial + 1, #{timeout => infinity}) of + ok -> + Serial; + {error, {khepri, mismatching_node, _}} -> + next_serial_in_khepri(XName) + end; + _ -> + Serial = 1, + ok = rabbit_khepri:put(Path, Serial + 1, #{timeout => infinity}), + Serial + end. + +-spec next_serial_in_khepri_tx(Exchange) -> Serial when + Exchange :: rabbit_types:exchange(), + Serial :: integer(). + +next_serial_in_khepri_tx(#exchange{name = XName}) -> + Path = khepri_exchange_serial_path(XName), + Serial = case khepri_tx:get(Path) of + {ok, Serial0} -> Serial0; + _ -> 1 + end, + ok = khepri_tx:put(Path, Serial + 1), + Serial. + %% ------------------------------------------------------------------- %% delete(). %% ------------------------------------------------------------------- @@ -344,7 +563,10 @@ next_serial_in_mnesia_tx(XName) -> Exchange :: rabbit_types:exchange(), Binding :: rabbit_types:binding(), Deletions :: dict:dict(), - Ret :: {error, not_found} | {error, in_use} | {deleted, Exchange, [Binding], Deletions}. + Ret :: {deleted, Exchange, [Binding], Deletions} | + {error, not_found} | + {error, in_use} | + rabbit_khepri:timeout_error(). %% @doc Deletes an exchange record from the database. If `IfUnused' is set %% to `true', it is only deleted when there are no bindings present on the %% exchange. @@ -358,7 +580,10 @@ next_serial_in_mnesia_tx(XName) -> %% @private delete(XName, IfUnused) -> - delete_in_mnesia(XName, IfUnused). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(XName, IfUnused) end, + khepri => fun() -> delete_in_khepri(XName, IfUnused) end + }). delete_in_mnesia(XName, IfUnused) -> DeletionFun = case IfUnused of @@ -396,6 +621,32 @@ delete_in_mnesia(X = #exchange{name = XName}, OnlyDurable, RemoveBindingsForSour rabbit_db_binding:delete_all_for_exchange_in_mnesia( X, OnlyDurable, RemoveBindingsForSource). +delete_in_khepri(XName, IfUnused) -> + DeletionFun = case IfUnused of + true -> fun conditional_delete_in_khepri/2; + false -> fun unconditional_delete_in_khepri/2 + end, + rabbit_khepri:transaction( + fun() -> + case khepri_tx:get(khepri_exchange_path(XName)) of + {ok, X} -> DeletionFun(X, false); + _ -> {error, not_found} + end + end, rw). + +conditional_delete_in_khepri(X = #exchange{name = XName}, OnlyDurable) -> + case rabbit_db_binding:has_for_source_in_khepri(XName) of + false -> delete_in_khepri(X, OnlyDurable, false); + true -> {error, in_use} + end. + +unconditional_delete_in_khepri(X, OnlyDurable) -> + delete_in_khepri(X, OnlyDurable, true). 
+ +delete_in_khepri(X = #exchange{name = XName}, OnlyDurable, RemoveBindingsForSource) -> + ok = khepri_tx:delete(khepri_exchange_path(XName)), + rabbit_db_binding:delete_all_for_exchange_in_khepri(X, OnlyDurable, RemoveBindingsForSource). + %% ------------------------------------------------------------------- %% delete_serial(). %% ------------------------------------------------------------------- @@ -409,7 +660,10 @@ delete_in_mnesia(X = #exchange{name = XName}, OnlyDurable, RemoveBindingsForSour %% @private delete_serial(XName) -> - delete_serial_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_serial_in_mnesia(XName) end, + khepri => fun() -> delete_serial_in_khepri(XName) end + }). delete_serial_in_mnesia(XName) -> rabbit_mnesia:execute_mnesia_transaction( @@ -417,6 +671,10 @@ delete_serial_in_mnesia(XName) -> mnesia:delete({?MNESIA_SERIAL_TABLE, XName}) end). +delete_serial_in_khepri(XName) -> + Path = khepri_exchange_serial_path(XName), + ok = rabbit_khepri:delete(Path). + %% ------------------------------------------------------------------- %% recover(). %% ------------------------------------------------------------------- @@ -431,7 +689,10 @@ delete_serial_in_mnesia(XName) -> %% @private recover(VHost) -> - recover_in_mnesia(VHost). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> recover_in_mnesia(VHost) end, + khepri => fun() -> recover_in_khepri(VHost) end + }). recover_in_mnesia(VHost) -> rabbit_mnesia:table_filter( @@ -449,6 +710,28 @@ recover_in_mnesia(VHost) -> end, ?MNESIA_DURABLE_TABLE). +recover_in_khepri(VHost) -> + %% Transient exchanges are deprecated in Khepri, all exchanges are recovered + %% Node boot and recovery should hang until the data is ready. + %% Recovery needs to wait until progress can be done, as it + %% cannot be skipped and stopping the node is not an option - + %% the next boot most likely would behave the same way. + %% Any other request stays with the default timeout, currently 30s. + Exchanges0 = rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [VHost, rabbit_khepri:if_has_data_wildcard()], + #{timeout => infinity}), + Exchanges = [rabbit_exchange_decorator:set(X) || X <- Exchanges0], + + rabbit_khepri:transaction( + fun() -> + [_ = set_in_khepri_tx(X) || X <- Exchanges] + end, rw, #{timeout => infinity}), + %% TODO once mnesia is gone, this callback should go back to `rabbit_exchange` + [begin + Serial = rabbit_exchange:serial(X), + rabbit_exchange:callback(X, create, Serial, [X]) + end || X <- Exchanges], + Exchanges. + %% ------------------------------------------------------------------- %% match(). %% ------------------------------------------------------------------- @@ -464,9 +747,12 @@ recover_in_mnesia(VHost) -> %% @private match(Pattern) -> - match_in_mnesia(Pattern). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> match_in_mnesia(Pattern) end, + khepri => fun() -> match_in_khepri(Pattern) end + }). -match_in_mnesia(Pattern) -> +match_in_mnesia(Pattern) -> case mnesia:transaction( fun() -> mnesia:match_object(?MNESIA_TABLE, Pattern, read) @@ -475,6 +761,10 @@ match_in_mnesia(Pattern) -> {aborted, Err} -> {error, Err} end. +match_in_khepri(Pattern0) -> + Pattern = #if_data_matches{pattern = Pattern0}, + rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [?KHEPRI_WILDCARD_STAR, Pattern]). + %% ------------------------------------------------------------------- %% exists(). 
%% ------------------------------------------------------------------- @@ -489,11 +779,17 @@ match_in_mnesia(Pattern) -> %% @private exists(Name) -> - exists_in_mnesia(Name). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> exists_in_mnesia(Name) end, + khepri => fun() -> exists_in_khepri(Name) end + }). exists_in_mnesia(Name) -> ets:member(?MNESIA_TABLE, Name). +exists_in_khepri(Name) -> + rabbit_khepri:exists(khepri_exchange_path(Name)). + %% ------------------------------------------------------------------- %% clear(). %% ------------------------------------------------------------------- @@ -504,7 +800,10 @@ exists_in_mnesia(Name) -> %% @private clear() -> - clear_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> clear_in_mnesia() end, + khepri => fun() -> clear_in_khepri() end + }). clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?MNESIA_TABLE), @@ -512,6 +811,16 @@ clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?MNESIA_SERIAL_TABLE), ok. +clear_in_khepri() -> + khepri_delete(khepri_exchanges_path()), + khepri_delete(khepri_exchange_serials_path()). + +khepri_delete(Path) -> + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. + %% ------------------------------------------------------------------- %% maybe_auto_delete_in_mnesia(). %% ------------------------------------------------------------------- @@ -535,3 +844,54 @@ maybe_auto_delete_in_mnesia(XName, OnlyDurable) -> {deleted, X, [], Deletions} -> {deleted, X, Deletions} end end. + +%% ------------------------------------------------------------------- +%% maybe_auto_delete_in_khepri(). +%% ------------------------------------------------------------------- + +-spec maybe_auto_delete_in_khepri(ExchangeName, boolean()) -> Ret when + ExchangeName :: rabbit_exchange:name(), + Exchange :: rabbit_types:exchange(), + Deletions :: rabbit_binding:deletions(), + Ret :: {'not_deleted', 'undefined' | Exchange} | + {'deleted', Exchange, Deletions}. + +maybe_auto_delete_in_khepri(XName, OnlyDurable) -> + case khepri_tx:get(khepri_exchange_path(XName)) of + {ok, #exchange{auto_delete = false} = X} -> + {not_deleted, X}; + {ok, #exchange{auto_delete = true} = X} -> + case conditional_delete_in_khepri(X, OnlyDurable) of + {error, in_use} -> {not_deleted, X}; + {deleted, X, [], Deletions} -> {deleted, X, Deletions} + end; + {error, _} -> + {not_deleted, undefined} + end. + +%% ------------------------------------------------------------------- +%% Khepri paths +%% ------------------------------------------------------------------- + +khepri_exchanges_path() -> + [?MODULE, exchanges]. + +khepri_exchange_path(#resource{virtual_host = VHost, name = Name}) -> + [?MODULE, exchanges, VHost, Name]. + +khepri_exchange_serials_path() -> + [?MODULE, exchange_serials]. + +khepri_exchange_serial_path(#resource{virtual_host = VHost, name = Name}) -> + [?MODULE, exchange_serials, VHost, Name]. + +%% ------------------------------------------------------------------- +%% path(). +%% ------------------------------------------------------------------- + +-spec path(ExchangeName) -> Path when + ExchangeName :: rabbit_exchange:name(), + Path :: khepri_path:path(). + +path(Name) -> + khepri_exchange_path(Name). 
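update_in_khepri/2 and next_serial_in_khepri/1 above both follow an optimistic compare-and-swap loop: read the tree node with rabbit_khepri:adv_get/1 to obtain its payload_version, write through a path decorated with #if_payload_version{}, and retry from the top when the store returns {error, {khepri, mismatching_node, _}} because another writer bumped the version first. Below is a minimal sketch of that loop for a generic counter, reusing only calls that appear in the hunk above; the module name and the counter path are hypothetical, and error handling is reduced to the cases shown.

%% Hypothetical sketch, not part of the patch above.
-module(khepri_cas_example).
-include_lib("khepri/include/khepri.hrl").
-export([increment/1]).

%% Path is a Khepri path such as [myapp, counters, orders] (assumption).
increment(Path) ->
    case rabbit_khepri:adv_get(Path) of
        {ok, #{data := Value, payload_version := Vsn}} ->
            %% Condition the write on the version we just read.
            UpdatePath = khepri_path:combine_with_conditions(
                           Path, [#if_payload_version{version = Vsn}]),
            case rabbit_khepri:put(UpdatePath, Value + 1) of
                ok ->
                    Value + 1;
                {error, {khepri, mismatching_node, _}} ->
                    %% A concurrent writer won the race; re-read and retry.
                    increment(Path);
                {error, _} = Error ->
                    Error
            end;
        {error, {khepri, node_not_found, _}} ->
            %% First write: nothing to compare against yet.
            case rabbit_khepri:put(Path, 1) of
                ok -> 1;
                {error, _} = Error -> Error
            end;
        {error, _} = Error ->
            Error
    end.

next_serial_in_khepri/1 applies the same loop but passes #{timeout => infinity} to the put, presumably so that callers on the recovery path wait for the store rather than fail on a timeout.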
diff --git a/deps/rabbit/src/rabbit_db_exchange_m2k_converter.erl b/deps/rabbit/src/rabbit_db_exchange_m2k_converter.erl new file mode 100644 index 000000000000..320d6fc7a034 --- /dev/null +++ b/deps/rabbit/src/rabbit_db_exchange_m2k_converter.erl @@ -0,0 +1,140 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_exchange_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri/include/khepri.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3]). + +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, Tables) -> + %% Clean up any previous attempt to copy the Mnesia table to Khepri. + lists:foreach(fun clear_data_in_khepri/1, Tables), + + SubState = #?MODULE{}, + {ok, SubState}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +copy_to_khepri( + rabbit_exchange = Table, #exchange{} = Record, State) -> + Name = Record#exchange.name, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Name], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_exchange:khepri_exchange_path(Name), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri(rabbit_exchange_serial = Table, + #exchange_serial{name = Resource, next = Serial}, + State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Resource], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = khepri_path:combine_with_conditions( + rabbit_db_exchange:khepri_exchange_serial_path(Resource), + [#if_node_exists{exists = false}]), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Serial, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). 
+%% @private + +delete_from_khepri(rabbit_exchange = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_exchange:khepri_exchange_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State); +delete_from_khepri(rabbit_exchange_serial = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_exchange:khepri_exchange_serial_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +-spec clear_data_in_khepri(Table) -> ok when + Table :: atom(). + +clear_data_in_khepri(rabbit_exchange) -> + khepri_delete(rabbit_db_exchange:khepri_exchanges_path()); +clear_data_in_khepri(rabbit_exchange_serial) -> + khepri_delete(rabbit_db_exchange:khepri_exchange_serials_path()). + +khepri_delete(Path) -> + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. diff --git a/deps/rabbit/src/rabbit_db_m2k_converter.erl b/deps/rabbit/src/rabbit_db_m2k_converter.erl new file mode 100644 index 000000000000..6bec45ea7b12 --- /dev/null +++ b/deps/rabbit/src/rabbit_db_m2k_converter.erl @@ -0,0 +1,243 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("khepri/include/khepri.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +%% Functions for `rabbit_db_*_m2k_converter' modules to call. +-export([with_correlation_id/2]). + +%% `mnesia_to_khepri_converter' callbacks. +-export([init_copy_to_khepri/4, + copy_to_khepri/3, + delete_from_khepri/3, + finish_copy_to_khepri/1]). + +-define(MAX_ASYNC_REQUESTS, 64). + +-type migration() :: {mnesia_to_khepri:mnesia_table(), + mnesia_to_khepri:converter_mod()}. + +-type migrations() :: [migration()]. + +-type correlation_id() :: non_neg_integer(). + +-type async_request_fun() :: fun((correlation_id()) -> ok | {error, any()}). + +-record(?MODULE, {migrations :: migrations(), + sub_states :: #{module() => any()}, + seq_no = 0 :: correlation_id(), + last_acked_seq_no = 0 :: correlation_id(), + async_requests = #{} :: #{correlation_id() => + async_request_fun()}}). + +-opaque state() :: #?MODULE{}. + +-export_type([state/0]). + +-spec with_correlation_id(Fun, State) -> Ret when + Fun :: async_request_fun(), + State :: state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: state(), + Reason :: any(). 
+ +with_correlation_id( + Fun, + #?MODULE{seq_no = SeqNo0, + last_acked_seq_no = LastAckedSeqNo} = State0) -> + case SeqNo0 - LastAckedSeqNo >= ?MAX_ASYNC_REQUESTS of + true -> + case wait_for_async_requests(State0) of + {ok, State} -> + with_correlation_id(Fun, State); + {error, _} = Error -> + Error + end; + false -> + run_async_fun(Fun, State0) + end. + +%% `mnesia_to_khepri_converter' callbacks + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables, Migrations) -> + Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Migrations :: migrations(), + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(StoreId, MigrationId, _Tables, Migrations) -> + TablesPerMod = lists:foldl( + fun + ({Table, Mod}, Acc) -> + Tables0 = maps:get(Mod, Acc, []), + Tables1 = Tables0 ++ [Table], + Acc#{Mod => Tables1}; + (_Table, Acc) -> + Acc + end, #{}, Migrations), + + SubStates = maps:fold( + fun(Mod, Tables, Acc) -> + {ok, SubState} = + case Mod of + {ActualMod, Args} -> + ActualMod:init_copy_to_khepri( + StoreId, MigrationId, + Tables, Args); + _ -> + Mod:init_copy_to_khepri( + StoreId, MigrationId, + Tables) + end, + Acc#{Mod => SubState} + end, #{}, TablesPerMod), + + State = #?MODULE{migrations = Migrations, + sub_states = SubStates}, + {ok, State}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: state(), + Reason :: any(). +%% @private + +copy_to_khepri( + Table, Record, #?MODULE{migrations = Migrations} = State) -> + case proplists:get_value(Table, Migrations) of + true -> + {ok, State}; + Mod when Mod =/= undefined -> + ActualMod = actual_mod(Mod), + case ActualMod:copy_to_khepri(Table, Record, State) of + {ok, State1} -> + {ok, State1}; + {error, _} = Error -> + Error + end + end. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: state(), + Reason :: any(). +%% @private + +delete_from_khepri( + Table, Key, #?MODULE{migrations = Migrations} = State) -> + case proplists:get_value(Table, Migrations) of + true -> + {ok, State}; + Mod when Mod =/= undefined -> + ActualMod = actual_mod(Mod), + case ActualMod:delete_from_khepri(Table, Key, State) of + {ok, State1} -> + {ok, State1}; + {error, _} = Error -> + Error + end + end. + +-spec finish_copy_to_khepri(State) -> Ret when + State :: state(), + Ret :: ok. +%% @private + +finish_copy_to_khepri(State) -> + {ok, _} = wait_for_all_async_requests(State), + ok. + +wait_for_all_async_requests( + #?MODULE{seq_no = SeqNo, + last_acked_seq_no = LastAckedSeqNo} = State) -> + case SeqNo - LastAckedSeqNo > 0 of + true -> + case wait_for_async_requests(State) of + {ok, State1} -> + wait_for_all_async_requests(State1); + {error, _} = Error -> + Error + end; + false -> + {ok, State} + end. + +-spec wait_for_async_requests(State) -> Ret when + State :: state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: state(), + Reason :: any(). 
+ +wait_for_async_requests(State0) -> + receive + {ra_event, _, _} = RaEvent -> + Correlations = rabbit_khepri:handle_async_ret(RaEvent), + lists:foldl( + fun({CorrelationId, Result}, {ok, State}) -> + #?MODULE{async_requests = AsyncRequests, + seq_no = SeqNo, + last_acked_seq_no = LastAcked} = State, + {Fun, AsyncRequests1} = maps:take( + CorrelationId, AsyncRequests), + LastAcked1 = erlang:max(SeqNo, LastAcked), + State1 = State#?MODULE{last_acked_seq_no = LastAcked1, + async_requests = AsyncRequests1}, + case Result of + ok -> + {ok, State1}; + {ok, _} -> + {ok, State1}; + {error, not_leader} -> + %% If the command failed because it was sent to + %% a non-leader member, retry the fun. + %% `rabbit_khepri:handle_async_ret/1' has updated + %% the leader information, so the next attempt + %% might be sent to the correct member. + run_async_fun(Fun, State1); + {error, _} = Error -> + Error + end; + (_Correlation, {error, _} = Error) -> + Error + end, {ok, State0}, Correlations) + after 5_000 -> + {error, timeout} + end. + +run_async_fun( + Fun, + #?MODULE{seq_no = SeqNo0, + async_requests = AsyncRequests0} = State0) -> + SeqNo = SeqNo0 + 1, + case Fun(SeqNo) of + ok -> + AsyncRequests = AsyncRequests0#{SeqNo => Fun}, + State = State0#?MODULE{seq_no = SeqNo, + async_requests = AsyncRequests}, + {ok, State}; + {error, _} = Error -> + Error + end. + +actual_mod({Mod, _}) -> Mod; +actual_mod(Mod) -> Mod. diff --git a/deps/rabbit/src/rabbit_db_maintenance.erl b/deps/rabbit/src/rabbit_db_maintenance.erl index a70cd66b7bde..0a39e8db4506 100644 --- a/deps/rabbit/src/rabbit_db_maintenance.erl +++ b/deps/rabbit/src/rabbit_db_maintenance.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_maintenance). @@ -10,85 +10,32 @@ -include_lib("rabbit_common/include/rabbit.hrl"). -export([ - setup_schema/0, + table_definitions/0, set/1, get/1, get_consistent/1 ]). --type mnesia_table() :: atom(). +-export([ + khepri_maintenance_path/1, + khepri_maintenance_path/0 + ]). -define(TABLE, rabbit_node_maintenance_states). %% ------------------------------------------------------------------- -%% setup_schema(). +%% table_definitions(). %% ------------------------------------------------------------------- --spec setup_schema() -> ok | {error, any()}. -%% @doc Creates the internal schema used by the selected metadata store -%% -%% @private - -setup_schema() -> - setup_schema_in_mnesia(). - -setup_schema_in_mnesia() -> - TableName = status_table_name(), - rabbit_log:info( - "Creating table ~ts for maintenance mode status", - [TableName]), - try - rabbit_table:create( - TableName, - status_table_definition()), - %% The `rabbit_node_maintenance_states' table used to be global but not - %% replicated. This leads to various errors during RabbitMQ boot or - %% operations on the Mnesia database. The reason is the table existed - %% on a single node and, if that node was stopped or MIA, other nodes - %% may wait forever on that node for the table to be available. - %% - %% The call below makes sure this node has a copy of the table. 
- case rabbit_table:ensure_table_copy(TableName, node(), ram_copies) of - ok -> - %% Next, we try to fix other nodes in the cluster if they are - %% running a version of RabbitMQ which does not replicate the - %% table. All nodes must have a replica for Mnesia operations - %% to work properly. Therefore the code below is to make older - %% compatible with newer nodes. - Replicas = mnesia:table_info(TableName, all_nodes), - Members = rabbit_nodes:list_running(), - MissingOn = Members -- Replicas, - lists:foreach( - fun(Node) -> - %% Errors from adding a replica on those older nodes - %% are ignored however. They should not be fatal. The - %% problem will solve by itself once all nodes are - %% upgraded. - _ = rpc:call( - Node, - rabbit_table, ensure_table_copy, - [TableName, Node, ram_copies]) - end, MissingOn), - ok; - Error -> - Error - end - catch throw:Reason -> - rabbit_log:error( - "Failed to create maintenance status table: ~tp", - [Reason]) - end. - --spec status_table_name() -> mnesia_table(). -status_table_name() -> - ?TABLE. +-spec table_definitions() -> [Def] when + Def :: {Name :: atom(), term()}. --spec status_table_definition() -> list(). -status_table_definition() -> - maps:to_list(#{ - record_name => node_maintenance_state, - attributes => record_info(fields, node_maintenance_state) - }). +table_definitions() -> + [{?TABLE, maps:to_list(#{ + record_name => node_maintenance_state, + attributes => record_info(fields, node_maintenance_state), + match => #node_maintenance_state{_ = '_'} + })}]. %% ------------------------------------------------------------------- %% set(). @@ -102,7 +49,10 @@ status_table_definition() -> %% @private set(Status) -> - set_in_mnesia(Status). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> set_in_mnesia(Status) end, + khepri => fun() -> set_in_khepri(Status) end + }). set_in_mnesia(Status) -> Res = mnesia:transaction( @@ -127,6 +77,18 @@ set_in_mnesia(Status) -> _ -> false end. +set_in_khepri(Status) -> + Node = node(), + Path = khepri_maintenance_path(Node), + Record = #node_maintenance_state{ + node = Node, + status = Status + }, + case rabbit_khepri:put(Path, Record) of + ok -> true; + _ -> false + end. + %% ------------------------------------------------------------------- %% get(). %% ------------------------------------------------------------------- @@ -141,7 +103,10 @@ set_in_mnesia(Status) -> %% @private get(Node) -> - get_in_mnesia(Node). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(Node) end, + khepri => fun() -> get_in_khepri(Node) end + }). get_in_mnesia(Node) -> case catch mnesia:dirty_read(?TABLE, Node) of @@ -151,6 +116,15 @@ get_in_mnesia(Node) -> _ -> undefined end. +get_in_khepri(Node) -> + Path = khepri_maintenance_path(Node), + case rabbit_khepri:get(Path) of + {ok, #node_maintenance_state{status = Status}} -> + Status; + _ -> + undefined + end. + %% ------------------------------------------------------------------- %% get_consistent(). %% ------------------------------------------------------------------- @@ -165,7 +139,10 @@ get_in_mnesia(Node) -> %% @private get_consistent(Node) -> - get_consistent_in_mnesia(Node). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_consistent_in_mnesia(Node) end, + khepri => fun() -> get_consistent_in_khepri(Node) end + }). get_consistent_in_mnesia(Node) -> case mnesia:transaction(fun() -> mnesia:read(?TABLE, Node) end) of @@ -175,3 +152,27 @@ get_consistent_in_mnesia(Node) -> {atomic, _} -> undefined; {aborted, _Reason} -> undefined end. 
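+%% The pattern above repeats throughout the rabbit_db_* modules: every
+%% public function dispatches through `rabbit_khepri:handle_fallback/1'
+%% with one closure per metadata store. A minimal sketch of that shape,
+%% assuming hypothetical `lookup_in_mnesia/1' and `lookup_in_khepri/1'
+%% variants:
+%%
+%%     lookup(Key) ->
+%%         rabbit_khepri:handle_fallback(
+%%           #{mnesia => fun() -> lookup_in_mnesia(Key) end,
+%%             khepri => fun() -> lookup_in_khepri(Key) end}).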
+ +get_consistent_in_khepri(Node) -> + Path = khepri_maintenance_path(Node), + %% FIXME: Ra consistent queries are fragile in the sense that the query + %% function may run on a remote node and the function reference or MFA may + %% not be valid on that node. That's why we force a local query for now. + %Options = #{favor => consistent}, + Options = #{favor => local}, + case rabbit_khepri:get(Path, Options) of + {ok, #node_maintenance_state{status = Status}} -> + Status; + _ -> + undefined + end. + +%% ------------------------------------------------------------------- +%% Khepri paths +%% ------------------------------------------------------------------- + +khepri_maintenance_path() -> + [?MODULE, maintenance]. + +khepri_maintenance_path(Node) -> + [?MODULE, maintenance, Node]. diff --git a/deps/rabbit/src/rabbit_db_maintenance_m2k_converter.erl b/deps/rabbit/src/rabbit_db_maintenance_m2k_converter.erl new file mode 100644 index 000000000000..815b8a41e543 --- /dev/null +++ b/deps/rabbit/src/rabbit_db_maintenance_m2k_converter.erl @@ -0,0 +1,102 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_maintenance_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3]). + +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, Tables) -> + %% Clean up any previous attempt to copy the Mnesia table to Khepri. + lists:foreach(fun clear_data_in_khepri/1, Tables), + + SubState = #?MODULE{}, + {ok, SubState}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +copy_to_khepri( + rabbit_node_maintenance_states = Table, #node_maintenance_state{} = Record, + State) -> + Name = Record#node_maintenance_state.node, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Name], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_maintenance:khepri_maintenance_path(Name), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. 
+ +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +delete_from_khepri(rabbit_node_maintenance_states = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_maintenance:khepri_maintenance_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +-spec clear_data_in_khepri(Table) -> ok when + Table :: atom(). + +clear_data_in_khepri(rabbit_node_maintenance_states) -> + Path = rabbit_db_maintenance:khepri_maintenance_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. diff --git a/deps/rabbit/src/rabbit_db_msup.erl b/deps/rabbit/src/rabbit_db_msup.erl index 5acb7cbfadf3..3939efa6ae60 100644 --- a/deps/rabbit/src/rabbit_db_msup.erl +++ b/deps/rabbit/src/rabbit_db_msup.erl @@ -2,11 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_msup). +-include_lib("khepri/include/khepri.hrl"). +-include("mirrored_supervisor.hrl"). + -export([ create_tables/0, table_definitions/0, @@ -19,6 +22,11 @@ -export([clear/0]). +-export([ + khepri_mirrored_supervisor_path/2, + khepri_mirrored_supervisor_path/0 + ]). + -define(TABLE, mirrored_sup_childspec). -define(TABLE_DEF, {?TABLE, @@ -27,8 +35,6 @@ {attributes, record_info(fields, mirrored_sup_childspec)}]}). -define(TABLE_MATCH, {match, #mirrored_sup_childspec{ _ = '_' }}). --record(mirrored_sup_childspec, {key, mirroring_pid, childspec}). - %% ------------------------------------------------------------------- %% create_tables(). %% ------------------------------------------------------------------- @@ -37,7 +43,10 @@ Ret :: 'ok' | {error, Reason :: term()}. create_tables() -> - create_tables_in_mnesia([?TABLE_DEF]). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> create_tables_in_mnesia([?TABLE_DEF]) end, + khepri => fun() -> ok end + }). create_tables_in_mnesia([]) -> ok; @@ -64,15 +73,24 @@ table_definitions() -> %% ------------------------------------------------------------------- -spec create_or_update(Group, Overall, Delegate, ChildSpec, Id) -> Ret when - Group :: any(), + Group :: mirrored_supervisor:group_name(), Overall :: pid(), Delegate :: pid() | undefined, ChildSpec :: supervisor2:child_spec(), - Id :: {any(), any()}, + Id :: mirrored_supervisor:child_id(), Ret :: start | undefined | pid(). create_or_update(Group, Overall, Delegate, ChildSpec, Id) -> - create_or_update_in_mnesia(Group, Overall, Delegate, ChildSpec, Id). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> + create_or_update_in_mnesia(Group, Overall, Delegate, ChildSpec, Id) + end, + khepri => + fun() -> + create_or_update_in_khepri(Group, Overall, Delegate, ChildSpec, Id) + end + }). 
create_or_update_in_mnesia(Group, Overall, Delegate, ChildSpec, Id) -> rabbit_mnesia:execute_mnesia_transaction( @@ -111,16 +129,54 @@ write_in_mnesia(Group, Overall, ChildSpec, Id) -> ok = mnesia:write(?TABLE, S, write), ChildSpec. +create_or_update_in_khepri(Group, Overall, Delegate, ChildSpec, Id) -> + Path = khepri_mirrored_supervisor_path(Group, Id), + S = #mirrored_sup_childspec{key = {Group, Id}, + mirroring_pid = Overall, + childspec = ChildSpec}, + case rabbit_khepri:adv_get(Path) of + {ok, #{data := #mirrored_sup_childspec{mirroring_pid = Pid}, + payload_version := Vsn}} -> + case Overall of + Pid -> + Delegate; + _ -> + %% The supervisor(Pid) call can't happen inside of a transaction. + %% We have to read and update the record in two different khepri calls + case mirrored_supervisor:supervisor(Pid) of + dead -> + UpdatePath = + khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = Vsn}]), + Ret = rabbit_khepri:put(UpdatePath, S), + case Ret of + ok -> start; + {error, {khepri, mismatching_node, _}} -> + create_or_update_in_khepri(Group, Overall, Delegate, ChildSpec, Id); + {error, _} = Error -> Error + end; + Delegate0 -> + Delegate0 + end + end; + _ -> + ok = rabbit_khepri:put(Path, S), + start + end. + %% ------------------------------------------------------------------- %% delete(). %% ------------------------------------------------------------------- -spec delete(Group, Id) -> ok when - Group :: any(), - Id :: any(). + Group :: mirrored_supervisor:group_name(), + Id :: mirrored_supervisor:child_id(). delete(Group, Id) -> - delete_in_mnesia(Group, Id). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(Group, Id) end, + khepri => fun() -> delete_in_khepri(Group, Id) end + }). delete_in_mnesia(Group, Id) -> rabbit_mnesia:execute_mnesia_transaction( @@ -128,20 +184,26 @@ delete_in_mnesia(Group, Id) -> ok = mnesia:delete({?TABLE, {Group, Id}}) end). +delete_in_khepri(Group, Id) -> + ok = rabbit_khepri:delete(khepri_mirrored_supervisor_path(Group, Id)). + %% ------------------------------------------------------------------- %% find_mirror(). %% ------------------------------------------------------------------- -spec find_mirror(Group, Id) -> Ret when - Group :: any(), - Id :: any(), + Group :: mirrored_supervisor:group_name(), + Id :: mirrored_supervisor:child_id(), Ret :: {ok, pid()} | {error, not_found}. find_mirror(Group, Id) -> %% If we did this inside a tx we could still have failover %% immediately after the tx - we can't be 100% here. So we may as %% well dirty_select. - find_mirror_in_mnesia(Group, Id). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> find_mirror_in_mnesia(Group, Id) end, + khepri => fun() -> find_mirror_in_khepri(Group, Id) end + }). find_mirror_in_mnesia(Group, Id) -> MatchHead = #mirrored_sup_childspec{mirroring_pid = '$1', @@ -152,6 +214,14 @@ find_mirror_in_mnesia(Group, Id) -> _ -> {error, not_found} end. +find_mirror_in_khepri(Group, Id) -> + case rabbit_khepri:get(khepri_mirrored_supervisor_path(Group, Id)) of + {ok, #mirrored_sup_childspec{mirroring_pid = Pid}} -> + {ok, Pid}; + _ -> + {error, not_found} + end. + %% ------------------------------------------------------------------- %% update_all(). %% ------------------------------------------------------------------- @@ -161,7 +231,10 @@ find_mirror_in_mnesia(Group, Id) -> ChildSpec :: supervisor2:child_spec(). update_all(Overall, OldOverall) -> - update_all_in_mnesia(Overall, OldOverall). 
+ rabbit_khepri:handle_fallback( + #{mnesia => fun() -> update_all_in_mnesia(Overall, OldOverall) end, + khepri => fun() -> update_all_in_khepri(Overall, OldOverall) end + }). update_all_in_mnesia(Overall, OldOverall) -> rabbit_mnesia:execute_mnesia_transaction( @@ -174,15 +247,35 @@ update_all_in_mnesia(Overall, OldOverall) -> [{Group, Id}, C] <- mnesia:select(?TABLE, [{MatchHead, [], ['$$']}])] end). +update_all_in_khepri(Overall, OldOverall) -> + Pattern = #mirrored_sup_childspec{mirroring_pid = OldOverall, + _ = '_'}, + Conditions = [?KHEPRI_WILDCARD_STAR_STAR, #if_data_matches{pattern = Pattern}], + PathPattern = khepri_mirrored_supervisor_path() ++ [#if_all{conditions = Conditions}], + rabbit_khepri:transaction( + fun() -> + case khepri_tx:get_many(PathPattern) of + {ok, Map} -> + [begin + S = S0#mirrored_sup_childspec{mirroring_pid = Overall}, + ok = khepri_tx:put(Path, S), + S0#mirrored_sup_childspec.childspec + end || {Path, S0} <- maps:to_list(Map)] + end + end). + %% ------------------------------------------------------------------- %% delete_all(). %% ------------------------------------------------------------------- -spec delete_all(Group) -> ok when - Group :: any(). + Group :: mirrored_supervisor:group_name(). delete_all(Group) -> - delete_all_in_mnesia(Group). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_all_in_mnesia(Group) end, + khepri => fun() -> delete_all_in_khepri(Group) end + }). delete_all_in_mnesia(Group) -> rabbit_mnesia:execute_mnesia_transaction( @@ -194,6 +287,13 @@ delete_all_in_mnesia(Group) -> end), ok. +delete_all_in_khepri(Group) -> + Pattern = #mirrored_sup_childspec{key = {Group, '_'}, + _ = '_'}, + Conditions = [?KHEPRI_WILDCARD_STAR_STAR, #if_data_matches{pattern = Pattern}], + rabbit_khepri:delete(khepri_mirrored_supervisor_path() ++ + [#if_all{conditions = Conditions}]). + %% ------------------------------------------------------------------- %% clear(). %% ------------------------------------------------------------------- @@ -201,8 +301,32 @@ delete_all_in_mnesia(Group) -> -spec clear() -> ok. clear() -> - clear_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> clear_in_mnesia() end, + khepri => fun() -> clear_in_khepri() end + }). clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?TABLE), ok. + +clear_in_khepri() -> + Path = khepri_mirrored_supervisor_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. + +%% ------------------------------------------------------------------- +%% Khepri paths +%% ------------------------------------------------------------------- + +khepri_mirrored_supervisor_path() -> + [?MODULE, mirrored_supervisor_childspec]. + +khepri_mirrored_supervisor_path(Group, Id) + when is_atom(Id) orelse is_binary(Id) -> + [?MODULE, mirrored_supervisor_childspec, Group, Id]; +khepri_mirrored_supervisor_path(Group, Id) -> + IdPath = Group:id_to_khepri_path(Id), + [?MODULE, mirrored_supervisor_childspec, Group] ++ IdPath. diff --git a/deps/rabbit/src/rabbit_db_msup_m2k_converter.erl b/deps/rabbit/src/rabbit_db_msup_m2k_converter.erl new file mode 100644 index 000000000000..a1610716835c --- /dev/null +++ b/deps/rabbit/src/rabbit_db_msup_m2k_converter.erl @@ -0,0 +1,103 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2022-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_msup_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). +-include("mirrored_supervisor.hrl"). + +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3]). + +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, Tables) -> + %% Clean up any previous attempt to copy the Mnesia table to Khepri. + lists:foreach(fun clear_data_in_khepri/1, Tables), + + SubState = #?MODULE{}, + {ok, SubState}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +copy_to_khepri(mirrored_sup_childspec = Table, + #mirrored_sup_childspec{key = {Group, Id} = Key} = Record, + State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_msup:khepri_mirrored_supervisor_path(Group, Id), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +delete_from_khepri( + mirrored_sup_childspec = Table, {Group, Id} = Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_msup:khepri_mirrored_supervisor_path(Group, Id), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +-spec clear_data_in_khepri(Table) -> ok when + Table :: atom(). + +clear_data_in_khepri(mirrored_sup_childspec) -> + Path = rabbit_db_msup:khepri_mirrored_supervisor_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. diff --git a/deps/rabbit/src/rabbit_db_policy.erl b/deps/rabbit/src/rabbit_db_policy.erl index e0ecf83ee09d..91cce630e6f4 100644 --- a/deps/rabbit/src/rabbit_db_policy.erl +++ b/deps/rabbit/src/rabbit_db_policy.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_policy). @@ -27,7 +27,10 @@ Ret :: {[{Exchange, Exchange}], [{Queue, Queue}]}. update(VHost, GetUpdatedExchangeFun, GetUpdatedQueueFun) -> - update_in_mnesia(VHost, GetUpdatedExchangeFun, GetUpdatedQueueFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> update_in_mnesia(VHost, GetUpdatedExchangeFun, GetUpdatedQueueFun) end, + khepri => fun() -> update_in_khepri(VHost, GetUpdatedExchangeFun, GetUpdatedQueueFun) end + }). %% [1] We need to prevent this from becoming O(n^2) in a similar %% manner to rabbit_binding:remove_for_{source,destination}. So see @@ -48,6 +51,19 @@ update_in_mnesia(VHost, GetUpdatedExchangeFun, GetUpdatedQueueFun) -> || Map <- Queues, is_map(Map)]} end). +update_in_khepri(VHost, GetUpdatedExchangeFun, GetUpdatedQueueFun) -> + Exchanges0 = rabbit_db_exchange:get_all(VHost), + Queues0 = rabbit_db_queue:get_all(VHost), + Exchanges = [GetUpdatedExchangeFun(X) || X <- Exchanges0], + Queues = [GetUpdatedQueueFun(Q) || Q <- Queues0], + rabbit_khepri:transaction( + fun() -> + {[update_exchange_policies(Map, fun rabbit_db_exchange:update_in_khepri_tx/2) + || Map <- Exchanges, is_map(Map)], + [update_queue_policies(Map, fun rabbit_db_queue:update_in_khepri_tx/2) + || Map <- Queues, is_map(Map)]} + end, rw). + update_exchange_policies(#{exchange := X = #exchange{name = XName}, update_function := UpdateFun}, StoreFun) -> NewExchange = StoreFun(XName, UpdateFun), diff --git a/deps/rabbit/src/rabbit_db_queue.erl b/deps/rabbit/src/rabbit_db_queue.erl index 7993e2b4e082..95f30342853a 100644 --- a/deps/rabbit/src/rabbit_db_queue.erl +++ b/deps/rabbit/src/rabbit_db_queue.erl @@ -2,13 +2,15 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_queue). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("stdlib/include/qlc.hrl"). + +-include_lib("khepri/include/khepri.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). -include("amqqueue.hrl"). -export([ @@ -23,13 +25,14 @@ count/1, create_or_get/1, set/1, - set_many/1, delete/2, update/2, - update_decorators/1, + update_decorators/2, exists/1 ]). +%% Once mnesia is removed, all transient entities will be deleted. These can be replaced +%% with the plain get_all* functions -export([ get_all_durable/0, get_all_durable_by_type/1, @@ -40,28 +43,46 @@ consistent_exists/1 ]). -%% Used by on_node_up and on_node_down +%% Used by on_node_up and on_node_down. +%% Can be deleted once transient entities/mnesia are removed. -export([foreach_transient/1, delete_transient/1]). -%% Used only by forget all durable +%% Only used by rabbit_amqqueue:forget_node_for_queue, which is only called +%% by `rabbit_mnesia:remove_node_if_mnesia_running`. Thus, once mnesia and/or +%% HA queues are removed it can be deleted. -export([foreach_durable/2, internal_delete/3]). +%% Storing it on Khepri is not needed, this function is just used in +%% rabbit_quorum_queue to ensure the queue is present in the rabbit_queue +%% table and not just in rabbit_durable_queue. 
Can be deleted with mnesia removal -export([set_dirty/1]). %% Used by other rabbit_db_* modules -export([ update_in_mnesia_tx/2, - get_durable_in_mnesia_tx/1 + update_in_khepri_tx/2, + get_durable_in_mnesia_tx/1, + get_in_khepri_tx/1 ]). %% For testing -export([clear/0]). +-export([ + khepri_queue_path/1, + khepri_queues_path/0 + ]). + +-dialyzer({nowarn_function, [foreach_transient/1, + foreach_transient_in_khepri/1]}). + -define(MNESIA_TABLE, rabbit_queue). -define(MNESIA_DURABLE_TABLE, rabbit_durable_queue). +-define(KHEPRI_PROJECTION, rabbit_khepri_queue). + %% ------------------------------------------------------------------- %% get_all(). %% ------------------------------------------------------------------- @@ -76,7 +97,10 @@ %% @private get_all() -> - get_all_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia() end, + khepri => fun() -> get_all_in_khepri() end + }). get_all_in_mnesia() -> list_with_possible_retry_in_mnesia( @@ -84,6 +108,17 @@ get_all_in_mnesia() -> rabbit_db:list_in_mnesia(?MNESIA_TABLE, amqqueue:pattern_match_all()) end). +get_all_in_khepri() -> + list_with_possible_retry_in_khepri( + fun() -> + try + ets:tab2list(?KHEPRI_PROJECTION) + catch + error:badarg -> + [] + end + end). + -spec get_all(VHostName) -> [Queue] when VHostName :: vhost:name(), Queue :: amqqueue:amqqueue(). @@ -95,7 +130,10 @@ get_all_in_mnesia() -> %% @private get_all(VHostName) -> - get_all_in_mnesia(VHostName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia(VHostName) end, + khepri => fun() -> get_all_in_khepri(VHostName) end + }). get_all_in_mnesia(VHostName) -> list_with_possible_retry_in_mnesia( @@ -104,6 +142,18 @@ get_all_in_mnesia(VHostName) -> rabbit_db:list_in_mnesia(?MNESIA_TABLE, Pattern) end). +get_all_in_khepri(VHostName) -> + list_with_possible_retry_in_khepri( + fun() -> + try + Pattern = amqqueue:pattern_match_on_name(rabbit_misc:r(VHostName, queue)), + ets:match_object(?KHEPRI_PROJECTION, Pattern) + catch + error:badarg -> + [] + end + end). + %% ------------------------------------------------------------------- %% get_all_durable(). %% ------------------------------------------------------------------- @@ -118,7 +168,10 @@ get_all_in_mnesia(VHostName) -> %% @private get_all_durable() -> - get_all_durable_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_durable_in_mnesia() end, + khepri => fun() -> get_all_durable_in_khepri() end + }). get_all_durable_in_mnesia() -> list_with_possible_retry_in_mnesia( @@ -126,6 +179,18 @@ get_all_durable_in_mnesia() -> rabbit_db:list_in_mnesia(?MNESIA_DURABLE_TABLE, amqqueue:pattern_match_all()) end). +get_all_durable_in_khepri() -> + list_with_possible_retry_in_khepri( + fun() -> + try + Pattern = amqqueue:pattern_match_on_durable(true), + ets:match_object(?KHEPRI_PROJECTION, Pattern) + catch + error:badarg -> + [] + end + end). + -spec get_all_durable_by_type(Type) -> [Queue] when Type :: atom(), Queue :: amqqueue:amqqueue(). @@ -137,12 +202,24 @@ get_all_durable_in_mnesia() -> %% @private get_all_durable_by_type(Type) -> - get_all_durable_by_type_in_mnesia(Type). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_durable_by_type_in_mnesia(Type) end, + khepri => fun() -> get_all_durable_by_type_in_khepri(Type) end + }). get_all_durable_by_type_in_mnesia(Type) -> Pattern = amqqueue:pattern_match_on_type(Type), rabbit_db:list_in_mnesia(?MNESIA_DURABLE_TABLE, Pattern). 
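+%% With Mnesia, durable and transient queues live in two tables, so
+%% "durable" reads go to `rabbit_durable_queue'. Khepri keeps a single
+%% copy of each queue record, so the `*_in_khepri' variants below read
+%% the projection and filter on the record itself, roughly:
+%%
+%%     Pattern = amqqueue:pattern_match_on_durable(true),
+%%     ets:match_object(?KHEPRI_PROJECTION, Pattern)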
+get_all_durable_by_type_in_khepri(Type) -> + try + Pattern = amqqueue:pattern_match_on_type_and_durable(Type, true), + ets:match_object(?KHEPRI_PROJECTION, Pattern) + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% filter_all_durable(). %% ------------------------------------------------------------------- @@ -158,7 +235,10 @@ get_all_durable_by_type_in_mnesia(Type) -> %% @private filter_all_durable(FilterFun) -> - filter_all_durable_in_mnesia(FilterFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> filter_all_durable_in_mnesia(FilterFun) end, + khepri => fun() -> filter_all_durable_in_khepri(FilterFun) end + }). filter_all_durable_in_mnesia(FilterFun) -> rabbit_mnesia:execute_mnesia_transaction( @@ -168,6 +248,21 @@ filter_all_durable_in_mnesia(FilterFun) -> ])) end). +filter_all_durable_in_khepri(FilterFun) -> + try + ets:foldl( + fun(Q, Acc0) -> + case amqqueue:is_durable(Q) andalso FilterFun(Q) of + true -> [Q | Acc0]; + false -> Acc0 + end + end, + [], ?KHEPRI_PROJECTION) + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% list(). %% ------------------------------------------------------------------- @@ -182,11 +277,23 @@ filter_all_durable_in_mnesia(FilterFun) -> %% @private list() -> - list_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> list_in_mnesia() end, + khepri => fun() -> list_in_khepri() end + }). list_in_mnesia() -> mnesia:dirty_all_keys(?MNESIA_TABLE). +list_in_khepri() -> + try + Pattern = amqqueue:pattern_match_on_name('$1'), + ets:select(?KHEPRI_PROJECTION, [{Pattern, [], ['$1']}]) + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% count(). %% ------------------------------------------------------------------- @@ -201,11 +308,23 @@ list_in_mnesia() -> %% @private count() -> - count_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> count_in_mnesia() end, + khepri => fun() -> count_in_khepri() end + }). count_in_mnesia() -> mnesia:table_info(?MNESIA_TABLE, size). +count_in_khepri() -> + case ets:info(?KHEPRI_PROJECTION, size) of + undefined -> + %% `ets:info/2` on a table that does not exist returns `undefined`. + 0; + Size -> + Size + end. + -spec count(VHostName) -> Count when VHostName :: vhost:name(), Count :: integer(). @@ -226,14 +345,14 @@ count(VHostName) -> end. list_for_count(VHostName) -> - list_for_count_in_mnesia(VHostName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> list_for_count_in_mnesia(VHostName) end, + khepri => fun() -> list_for_count_in_khepri(VHostName) end + }). list_for_count_in_mnesia(VHostName) -> %% this is certainly suboptimal but there is no way to count - %% things using a secondary index in Mnesia. Our counter-table-per-node - %% won't work here because with master migration of mirrored queues - %% the "ownership" of queues by nodes becomes a non-trivial problem - %% that requires a proper consensus algorithm. + %% things using a secondary index in Mnesia. list_with_possible_retry_in_mnesia( fun() -> length(mnesia:dirty_index_read(?MNESIA_TABLE, @@ -241,6 +360,15 @@ list_for_count_in_mnesia(VHostName) -> amqqueue:field_vhost())) end). +list_for_count_in_khepri(VHostName) -> + try + Pattern = amqqueue:pattern_match_on_name(rabbit_misc:r(VHostName, queue)), + ets:select_count(?KHEPRI_PROJECTION, [{Pattern, [], [true]}]) + catch + error:badarg -> + 0 + end. 
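+%% The Khepri read paths above do not query the Ra cluster directly;
+%% they read the ?KHEPRI_PROJECTION ETS table (`rabbit_khepri_queue').
+%% If that table does not exist, e.g. because the projection has not
+%% been registered yet, ETS calls raise `badarg', which is why every
+%% reader wraps its lookup, roughly:
+%%
+%%     try
+%%         ets:tab2list(?KHEPRI_PROJECTION)
+%%     catch
+%%         error:badarg ->
+%%             []
+%%     end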
+ %% ------------------------------------------------------------------- %% delete(). %% ------------------------------------------------------------------- @@ -251,7 +379,10 @@ list_for_count_in_mnesia(VHostName) -> Ret :: ok | Deletions :: rabbit_binding:deletions(). delete(QueueName, Reason) -> - delete_in_mnesia(QueueName, Reason). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(QueueName, Reason) end, + khepri => fun() -> delete_in_khepri(QueueName) end + }). delete_in_mnesia(QueueName, Reason) -> rabbit_mnesia:execute_mnesia_transaction( @@ -269,6 +400,23 @@ delete_in_mnesia(QueueName, Reason) -> end end). +delete_in_khepri(QueueName) -> + delete_in_khepri(QueueName, false). + +delete_in_khepri(QueueName, OnlyDurable) -> + rabbit_khepri:transaction( + fun () -> + Path = khepri_queue_path(QueueName), + case khepri_tx_adv:delete(Path) of + {ok, #{data := _}} -> + %% we want to execute some things, as decided by rabbit_exchange, + %% after the transaction. + rabbit_db_binding:delete_for_destination_in_khepri(QueueName, OnlyDurable); + {ok, _} -> + ok + end + end, rw). + %% ------------------------------------------------------------------- %% internal_delete(). %% ------------------------------------------------------------------- @@ -283,7 +431,10 @@ internal_delete(QueueName, OnlyDurable, Reason) -> %% Only used by rabbit_amqqueue:forget_node_for_queue, which is only called %% by `rabbit_mnesia:remove_node_if_mnesia_running'. Thus, once mnesia and/or %% HA queues are removed it can be removed. - internal_delete_in_mnesia(QueueName, OnlyDurable, Reason). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> internal_delete_in_mnesia(QueueName, OnlyDurable, Reason) end, + khepri => fun() -> delete_in_khepri(QueueName, OnlyDurable) end + }). internal_delete_in_mnesia(QueueName, OnlyDurable, Reason) -> ok = mnesia:delete({?MNESIA_TABLE, QueueName}), @@ -309,20 +460,28 @@ internal_delete_in_mnesia(QueueName, OnlyDurable, Reason) -> -spec get_many(rabbit_exchange:route_return()) -> [amqqueue:amqqueue() | {amqqueue:amqqueue(), rabbit_exchange:route_infos()}]. get_many(Names) when is_list(Names) -> - get_many_in_mnesia(?MNESIA_TABLE, Names). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_many_in_ets(?MNESIA_TABLE, Names) end, + khepri => fun() -> get_many_in_khepri(Names) end + }). -get_many_in_mnesia(Table, [{Name, RouteInfos}]) +get_many_in_khepri(Names) -> + try + get_many_in_ets(?KHEPRI_PROJECTION, Names) + catch + error:badarg -> + [] + end. + +get_many_in_ets(Table, [{Name, RouteInfos}]) when is_map(RouteInfos) -> case ets:lookup(Table, Name) of [] -> []; [Q] -> [{Q, RouteInfos}] end; -get_many_in_mnesia(Table, [Name]) -> +get_many_in_ets(Table, [Name]) -> ets:lookup(Table, Name); -get_many_in_mnesia(Table, Names) - when is_list(Names) -> - %% Normally we'd call mnesia:dirty_read/1 here, but that is quite - %% expensive for reasons explained in rabbit_mnesia:dirty_read/1. +get_many_in_ets(Table, Names) when is_list(Names) -> lists:filtermap(fun({Name, RouteInfos}) when is_map(RouteInfos) -> case ets:lookup(Table, Name) of @@ -344,11 +503,23 @@ get_many_in_mnesia(Table, Names) QName :: rabbit_amqqueue:name(), Ret :: {ok, Queue :: amqqueue:amqqueue()} | {error, not_found}. get(Name) -> - get_in_mnesia(Name). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(Name) end, + khepri => fun() -> get_in_khepri(Name) end + }). get_in_mnesia(Name) -> rabbit_mnesia:dirty_read({?MNESIA_TABLE, Name}). 
+get_in_khepri(Name) -> + try ets:lookup(?KHEPRI_PROJECTION, Name) of + [Q] -> {ok, Q}; + [] -> {error, not_found} + catch + error:badarg -> + {error, not_found} + end. + %% ------------------------------------------------------------------- %% get_durable(). %% ------------------------------------------------------------------- @@ -358,11 +529,25 @@ get_in_mnesia(Name) -> Ret :: {ok, Queue :: amqqueue:amqqueue()} | {error, not_found}. get_durable(Name) -> - get_durable_in_mnesia(Name). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_durable_in_mnesia(Name) end, + khepri => fun() -> get_durable_in_khepri(Name) end + }). get_durable_in_mnesia(Name) -> rabbit_mnesia:dirty_read({?MNESIA_DURABLE_TABLE, Name}). +get_durable_in_khepri(Name) -> + case get_in_khepri(Name) of + {ok, Queue} = Ret -> + case amqqueue:is_durable(Queue) of + true -> Ret; + false -> {error, not_found} + end; + Error -> + Error + end. + %% ------------------------------------------------------------------- %% get_many_durable(). %% ------------------------------------------------------------------- @@ -372,7 +557,22 @@ get_durable_in_mnesia(Name) -> Ret :: [Queue :: amqqueue:amqqueue()]. get_many_durable(Names) when is_list(Names) -> - get_many_in_mnesia(?MNESIA_DURABLE_TABLE, Names). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_many_durable_in_mnesia(Names) end, + khepri => fun() -> get_many_durable_in_khepri(Names) end + }). + +get_many_durable_in_mnesia(Names) -> + get_many_in_ets(?MNESIA_DURABLE_TABLE, Names). + +get_many_durable_in_khepri(Names) -> + try + Queues = get_many_in_ets(?KHEPRI_PROJECTION, Names), + [Q || Q <- Queues, amqqueue:is_durable(Q)] + catch + error:badarg -> + [] + end. %% ------------------------------------------------------------------- %% update(). @@ -382,13 +582,16 @@ get_many_durable(Names) when is_list(Names) -> QName :: rabbit_amqqueue:name(), Queue :: amqqueue:amqqueue(), UpdateFun :: fun((Queue) -> Queue), - Ret :: Queue | not_found. + Ret :: Queue | not_found. %% @doc Updates an existing queue record using `UpdateFun'. %% %% @private update(QName, Fun) -> - update_in_mnesia(QName, Fun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> update_in_mnesia(QName, Fun) end, + khepri => fun() -> update_in_khepri(QName, Fun) end + }). update_in_mnesia(QName, Fun) -> rabbit_mnesia:execute_mnesia_transaction( @@ -396,29 +599,76 @@ update_in_mnesia(QName, Fun) -> update_in_mnesia_tx(QName, Fun) end). +update_in_khepri(QName, Fun) -> + Path = khepri_queue_path(QName), + Ret1 = rabbit_khepri:adv_get(Path), + case Ret1 of + {ok, #{data := Q, payload_version := Vsn}} -> + UpdatePath = khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = Vsn}]), + Q1 = Fun(Q), + Ret2 = rabbit_khepri:put(UpdatePath, Q1), + case Ret2 of + ok -> Q1; + {error, {khepri, mismatching_node, _}} -> + update_in_khepri(QName, Fun); + Err -> Err + end; + _ -> + not_found + end. + %% ------------------------------------------------------------------- %% update_decorators(). %% ------------------------------------------------------------------- --spec update_decorators(QName) -> ok when - QName :: rabbit_amqqueue:name(). +-spec update_decorators(QName, [Decorator]) -> ok when + QName :: rabbit_amqqueue:name(), + Decorator :: atom(). %% @doc Updates an existing queue record adding the active queue decorators. %% %% @private -update_decorators(QName) -> - update_decorators_in_mnesia(QName). 
+update_decorators(QName, Decorators) -> + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> update_decorators_in_mnesia(QName, Decorators) end, + khepri => fun() -> update_decorators_in_khepri(QName, Decorators) end + }). -update_decorators_in_mnesia(Name) -> +update_decorators_in_mnesia(Name, Decorators) -> rabbit_mnesia:execute_mnesia_transaction( fun() -> case mnesia:wread({?MNESIA_TABLE, Name}) of - [Q] -> ok = mnesia:write(?MNESIA_TABLE, rabbit_queue_decorator:set(Q), + [Q] -> ok = mnesia:write(?MNESIA_TABLE, amqqueue:set_decorators(Q, Decorators), write); [] -> ok end end). +update_decorators_in_khepri(QName, Decorators) -> + %% Decorators are stored on an ETS table, so we need to query them before the transaction. + %% Also, to verify which ones are active could lead to any kind of side-effects. + %% Thus it needs to be done outside of the transaction. + %% Decorators have just been calculated on `rabbit_queue_decorator:maybe_recover/1`, thus + %% we can update them here directly. + Path = khepri_queue_path(QName), + Ret1 = rabbit_khepri:adv_get(Path), + case Ret1 of + {ok, #{data := Q1, payload_version := Vsn}} -> + Q2 = amqqueue:set_decorators(Q1, Decorators), + UpdatePath = khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = Vsn}]), + Ret2 = rabbit_khepri:put(UpdatePath, Q2), + case Ret2 of + ok -> ok; + {error, {khepri, mismatching_node, _}} -> + update_decorators_in_khepri(QName, Decorators); + {error, _} = Error -> Error + end; + _ -> + ok + end. + %% ------------------------------------------------------------------- %% update_durable(). %% ------------------------------------------------------------------- @@ -432,7 +682,12 @@ update_decorators_in_mnesia(Name) -> %% @private update_durable(UpdateFun, FilterFun) -> - update_durable_in_mnesia(UpdateFun, FilterFun). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> update_durable_in_mnesia(UpdateFun, FilterFun) end, + khepri => + fun() -> update_durable_in_khepri(UpdateFun, FilterFun) end + }). update_durable_in_mnesia(UpdateFun, FilterFun) -> Pattern = amqqueue:pattern_match_all(), @@ -446,6 +701,53 @@ update_durable_in_mnesia(UpdateFun, FilterFun) -> end), ok. +update_durable_in_khepri(UpdateFun, FilterFun) -> + PathPattern = khepri_queues_path() ++ + [?KHEPRI_WILDCARD_STAR, + #if_data_matches{ + pattern = amqqueue:pattern_match_on_durable(true)}], + %% The `FilterFun' or `UpdateFun' might attempt to do something + %% incompatible with Khepri transactions (such as dynamic apply, sending + %% a message, etc.), so this function cannot be written as a regular + %% transaction. Instead we can get all queues and track their versions, + %% update them, then apply the updates in a transaction, failing if any + %% queue has changed since reading the queue record. + case rabbit_khepri:adv_get_many(PathPattern) of + {ok, Props} -> + Updates = maps:fold( + fun(Path0, #{data := Q0, payload_version := Vsn}, Acc) + when ?is_amqqueue(Q0) -> + case FilterFun(Q0) of + true -> + Path = khepri_path:combine_with_conditions( + Path0, + [#if_payload_version{version = Vsn}]), + Q = UpdateFun(Q0), + [{Path, Q} | Acc]; + false -> + Acc + end + end, [], Props), + Res = rabbit_khepri:transaction( + fun() -> + rabbit_misc:for_each_while_ok( + fun({Path, Q}) -> khepri_tx:put(Path, Q) end, + Updates) + end), + case Res of + ok -> + ok; + {error, {khepri, mismatching_node, _}} -> + %% One of the queues changed while attempting to update + %% all queues. Retry the operation. 
+ update_durable_in_khepri(UpdateFun, FilterFun); + {error, _} = Error -> + Error + end; + {error, _} = Error -> + Error + end. + %% ------------------------------------------------------------------- %% exists(). %% ------------------------------------------------------------------- @@ -460,11 +762,22 @@ update_durable_in_mnesia(UpdateFun, FilterFun) -> %% @private exists(QName) -> - exists_in_mnesia(QName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> exists_in_mnesia(QName) end, + khepri => fun() -> exists_in_khepri(QName) end + }). exists_in_mnesia(QName) -> ets:member(?MNESIA_TABLE, QName). +exists_in_khepri(QName) -> + try + ets:member(?KHEPRI_PROJECTION, QName) + catch + error:badarg -> + false + end. + %% ------------------------------------------------------------------- %% exists(). %% ------------------------------------------------------------------- @@ -481,7 +794,10 @@ exists_in_mnesia(QName) -> %% @private consistent_exists(QName) -> - consistent_exists_in_mnesia(QName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> consistent_exists_in_mnesia(QName) end, + khepri => fun() -> exists_in_khepri(QName) end + }). consistent_exists_in_mnesia(QName) -> case mnesia:read({?MNESIA_TABLE, QName}) of @@ -505,11 +821,17 @@ consistent_exists_in_mnesia(QName) -> get_all_by_type(Type) -> Pattern = amqqueue:pattern_match_on_type(Type), - get_all_by_pattern_in_mnesia(Pattern). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_by_pattern_in_mnesia(Pattern) end, + khepri => fun() -> get_all_by_pattern_in_khepri(Pattern) end + }). get_all_by_pattern_in_mnesia(Pattern) -> rabbit_db:list_in_mnesia(?MNESIA_TABLE, Pattern). +get_all_by_pattern_in_khepri(Pattern) -> + rabbit_db:list_in_khepri(khepri_queues_path() ++ [rabbit_khepri:if_has_data([?KHEPRI_WILDCARD_STAR_STAR, #if_data_matches{pattern = Pattern}])]). + %% ------------------------------------------------------------------- %% get_all_by_type_and_node(). %% ------------------------------------------------------------------- @@ -527,7 +849,10 @@ get_all_by_pattern_in_mnesia(Pattern) -> %% @private get_all_by_type_and_node(VHostName, Type, Node) -> - get_all_by_type_and_node_in_mnesia(VHostName, Type, Node). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_by_type_and_node_in_mnesia(VHostName, Type, Node) end, + khepri => fun() -> get_all_by_type_and_node_in_khepri(VHostName, Type, Node) end + }). get_all_by_type_and_node_in_mnesia(VHostName, Type, Node) -> mnesia:async_dirty( @@ -538,25 +863,36 @@ get_all_by_type_and_node_in_mnesia(VHostName, Type, Node) -> amqqueue:qnode(Q) == Node])) end). +get_all_by_type_and_node_in_khepri(VHostName, Type, Node) -> + Pattern = amqqueue:pattern_match_on_type(Type), + Qs = rabbit_db:list_in_khepri(khepri_queues_path() ++ [VHostName, rabbit_khepri:if_has_data([?KHEPRI_WILDCARD_STAR_STAR, #if_data_matches{pattern = Pattern}])]), + [Q || Q <- Qs, amqqueue:qnode(Q) == Node]. + %% ------------------------------------------------------------------- %% create_or_get(). %% ------------------------------------------------------------------- -spec create_or_get(Queue) -> Ret when Queue :: amqqueue:amqqueue(), - Ret :: {created, Queue} | {existing, Queue} | {absent, Queue, nodedown}. + Ret :: {created, Queue} | + {existing, Queue} | + {absent, Queue, nodedown} | + rabbit_khepri:timeout_error(). 
%% @doc Writes a queue record if it doesn't exist already or returns the existing one %% %% @returns the existing record if there is one in the database already, or the newly -%% created record. +%% created record. %% %% @private create_or_get(Q) -> - create_or_get_in_mnesia(Q). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> create_or_get_in_mnesia(Q) end, + khepri => fun() -> create_or_get_in_khepri(Q) end + }). create_or_get_in_mnesia(Q) -> - DurableQ = amqqueue:reset_mirroring_and_decorators(Q), + DurableQ = amqqueue:reset_decorators(Q), QueueName = amqqueue:get_name(Q), rabbit_mnesia:execute_mnesia_transaction( fun () -> @@ -574,30 +910,46 @@ create_or_get_in_mnesia(Q) -> end end). +create_or_get_in_khepri(Q) -> + QueueName = amqqueue:get_name(Q), + Path = khepri_queue_path(QueueName), + case rabbit_khepri:adv_create(Path, Q) of + {error, {khepri, mismatching_node, #{node_props := #{data := ExistingQ}}}} -> + {existing, ExistingQ}; + {ok, _} -> + {created, Q}; + Error -> + Error + end. + %% ------------------------------------------------------------------- %% set(). %% ------------------------------------------------------------------- --spec set(Queue) -> ok when - Queue :: amqqueue:amqqueue(). +-spec set(Queue) -> Ret when + Queue :: amqqueue:amqqueue(), + Ret :: ok | rabbit_khepri:timeout_error(). %% @doc Writes a queue record. If the queue is durable, it writes both instances: -%% durable and transient. For the durable one, it resets mirrors and decorators. +%% durable and transient. For the durable one, it resets decorators. %% The transient one is left as it is. %% %% @private set(Q) -> - set_in_mnesia(Q). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> set_in_mnesia(Q) end, + khepri => fun() -> set_in_khepri(Q) end + }). set_in_mnesia(Q) -> - DurableQ = amqqueue:reset_mirroring_and_decorators(Q), + DurableQ = amqqueue:reset_decorators(Q), rabbit_mnesia:execute_mnesia_transaction( fun () -> set_in_mnesia_tx(DurableQ, Q) end). set_in_mnesia_tx(DurableQ, Q) -> - case ?amqqueue_is_durable(Q) of + case amqqueue:is_durable(Q) of true -> ok = mnesia:write(?MNESIA_DURABLE_TABLE, DurableQ, write); false -> @@ -605,30 +957,9 @@ set_in_mnesia_tx(DurableQ, Q) -> end, ok = mnesia:write(?MNESIA_TABLE, Q, write). -%% ------------------------------------------------------------------- -%% set_many(). -%% ------------------------------------------------------------------- - --spec set_many([Queue]) -> ok when - Queue :: amqqueue:amqqueue(). -%% @doc Writes a list of durable queue records. -%% It is responsibility of the calling function to ensure all records are durable. -%% Once transient entities are deprecated, this is a non-issue. -%% -%% @private - -set_many(Qs) -> - set_many_in_mnesia(Qs). - -set_many_in_mnesia(Qs) -> - {atomic, ok} = - %% Just to be nested in forget_node_for_queue - mnesia:transaction( - fun() -> - [ok = mnesia:write(?MNESIA_DURABLE_TABLE, Q, write) || Q <- Qs], - ok - end), - ok. +set_in_khepri(Q) -> + Path = khepri_queue_path(amqqueue:get_name(Q)), + rabbit_khepri:put(Path, Q). %% ------------------------------------------------------------------- %% delete_transient(). @@ -638,13 +969,17 @@ set_many_in_mnesia(Qs) -> Queue :: amqqueue:amqqueue(), FilterFun :: fun((Queue) -> boolean()), QName :: rabbit_amqqueue:name(), - Ret :: {[QName], [Deletions :: rabbit_binding:deletions()]}. + Ret :: {[QName], [Deletions :: rabbit_binding:deletions()]} + | rabbit_khepri:timeout_error(). %% @doc Deletes all transient queues that match `FilterFun'. 
%% %% @private delete_transient(FilterFun) -> - delete_transient_in_mnesia(FilterFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_transient_in_mnesia(FilterFun) end, + khepri => fun() -> delete_transient_in_khepri(FilterFun) end + }). delete_transient_in_mnesia(FilterFun) -> Qs = rabbit_mnesia:execute_mnesia_transaction( @@ -685,6 +1020,74 @@ partition_queues([Q0,Q1,Q2,Q3,Q4,Q5,Q6,Q7,Q8,Q9 | T]) -> partition_queues(T) -> [T]. +delete_transient_in_khepri(FilterFun) -> + PathPattern = khepri_queues_path() ++ + [?KHEPRI_WILDCARD_STAR, + #if_data_matches{ + pattern = amqqueue:pattern_match_on_durable(false)}], + %% The `FilterFun' might try to determine if the queue's process is alive. + %% This can cause a `calling_self' exception if we use the `FilterFun' + %% within the function passed to `khepri:fold/5' since the Khepri server + %% process might call itself. Instead we can fetch all of the transient + %% queues with `get_many' and then filter and fold the results outside of + %% Khepri's Ra server process. + case rabbit_khepri:adv_get_many(PathPattern) of + {ok, Props} -> + Qs = maps:fold( + fun(Path0, #{data := Q, payload_version := Vsn}, Acc) + when ?is_amqqueue(Q) -> + case FilterFun(Q) of + true -> + Path = khepri_path:combine_with_conditions( + Path0, + [#if_payload_version{version = Vsn}]), + QName = amqqueue:get_name(Q), + [{Path, QName} | Acc]; + false -> + Acc + end + end, [], Props), + do_delete_transient_queues_in_khepri(Qs, FilterFun); + {error, _} = Error -> + Error + end. + +do_delete_transient_queues_in_khepri([], _FilterFun) -> + %% If there are no changes to make, avoid performing a transaction. When + %% Khepri is in a minority this avoids a long timeout waiting for the + %% transaction command to be processed. Otherwise it avoids appending a + %% somewhat large transaction command to Khepri's log. + {[], []}; +do_delete_transient_queues_in_khepri(Qs, FilterFun) -> + Res = rabbit_khepri:transaction( + fun() -> + rabbit_misc:fold_while_ok( + fun({Path, QName}, Acc) -> + %% Also see `delete_in_khepri/2'. + case khepri_tx_adv:delete(Path) of + {ok, #{data := _}} -> + Deletions = rabbit_db_binding:delete_for_destination_in_khepri( + QName, false), + {ok, [{QName, Deletions} | Acc]}; + {ok, _} -> + {ok, Acc}; + {error, _} = Error -> + Error + end + end, [], Qs) + end), + case Res of + {ok, Items} -> + {QNames, Deletions} = lists:unzip(Items), + {QNames, lists:flatten(Deletions)}; + {error, {khepri, mismatching_node, _}} -> + %% One of the queues changed while attempting to update all + %% queues. Retry the operation. + delete_transient_in_khepri(FilterFun); + {error, _} = Error -> + Error + end. + %% ------------------------------------------------------------------- %% foreach_transient(). %% ------------------------------------------------------------------- @@ -697,7 +1100,10 @@ partition_queues(T) -> %% @private foreach_transient(UpdateFun) -> - foreach_transient_in_mnesia(UpdateFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> foreach_transient_in_mnesia(UpdateFun) end, + khepri => fun() -> foreach_transient_in_khepri(UpdateFun) end + }). foreach_transient_in_mnesia(UpdateFun) -> Pattern = amqqueue:pattern_match_all(), @@ -708,6 +1114,27 @@ foreach_transient_in_mnesia(UpdateFun) -> ok end). 
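+%% Both `delete_transient_in_khepri/1' above and
+%% `foreach_transient_in_khepri/1' below follow the same two-phase
+%% shape: first list the matching queues with a wildcard path pattern,
+%% run the caller-supplied fun outside Khepri's Ra server process, and
+%% only then (for deletions) apply the batched changes in a transaction.
+%% The transient-queue pattern is roughly:
+%%
+%%     khepri_queues_path() ++
+%%         [?KHEPRI_WILDCARD_STAR,
+%%          #if_data_matches{
+%%             pattern = amqqueue:pattern_match_on_durable(false)}]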
+foreach_transient_in_khepri(UpdateFun) -> + PathPattern = khepri_queues_path() ++ + [?KHEPRI_WILDCARD_STAR, + #if_data_matches{ + pattern = amqqueue:pattern_match_on_durable(false)}], + %% The `UpdateFun' might try to determine if the queue's process is alive. + %% This can cause a `calling_self' exception if we use the `UpdateFun' + %% within the function passed to `khepri:fold/5' since the Khepri server + %% process might call itself. Instead we can fetch all of the transient + %% queues with `get_many' and then filter and fold the results outside of + %% Khepri's Ra server process. + case rabbit_khepri:get_many(PathPattern) of + {ok, Qs} -> + maps:foreach( + fun(_Path, Queue) when ?is_amqqueue(Queue) -> + UpdateFun(Queue) + end, Qs); + {error, _} = Error -> + Error + end. + %% ------------------------------------------------------------------- %% foreach_durable(). %% ------------------------------------------------------------------- @@ -720,7 +1147,10 @@ foreach_transient_in_mnesia(UpdateFun) -> %% @private foreach_durable(UpdateFun, FilterFun) -> - foreach_durable_in_mnesia(UpdateFun, FilterFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> foreach_durable_in_mnesia(UpdateFun, FilterFun) end, + khepri => fun() -> foreach_durable_in_khepri(UpdateFun, FilterFun) end + }). foreach_durable_in_mnesia(UpdateFun, FilterFun) -> %% Note rabbit is not running so we avoid e.g. the worker pool. Also why @@ -733,7 +1163,22 @@ foreach_durable_in_mnesia(UpdateFun, FilterFun) -> _ = [UpdateFun(Q) || Q <- Qs, FilterFun(Q)], ok end), - ok. + ok. + +foreach_durable_in_khepri(UpdateFun, FilterFun) -> + Path = khepri_queues_path() ++ + [?KHEPRI_WILDCARD_STAR, + #if_data_matches{ + pattern = amqqueue:pattern_match_on_durable(true)}], + case rabbit_khepri:filter(Path, fun(_, #{data := Q}) -> + FilterFun(Q) + end) of + {ok, Qs} -> + _ = [UpdateFun(Q) || Q <- maps:values(Qs)], + ok; + Error -> + Error + end. %% ------------------------------------------------------------------- %% set_dirty(). @@ -746,7 +1191,10 @@ foreach_durable_in_mnesia(UpdateFun, FilterFun) -> %% @private set_dirty(Q) -> - set_dirty_in_mnesia(Q). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> set_dirty_in_mnesia(Q) end, + khepri => ok + }). set_dirty_in_mnesia(Q) -> ok = mnesia:dirty_write(?MNESIA_TABLE, rabbit_queue_decorator:set(Q)). @@ -776,6 +1224,27 @@ update_in_mnesia_tx(Name, Fun) -> not_found end. +%% ------------------------------------------------------------------- +%% update_in_khepri_tx(). +%% ------------------------------------------------------------------- + +-spec update_in_khepri_tx(QName, UpdateFun) -> Ret when + QName :: rabbit_amqqueue:name(), + Queue :: amqqueue:amqqueue(), + UpdateFun :: fun((Queue) -> Queue), + Ret :: Queue | not_found. + +update_in_khepri_tx(Name, Fun) -> + Path = khepri_queue_path(Name), + case khepri_tx:get(Path) of + {ok, Q} -> + Q1 = Fun(Q), + ok = khepri_tx:put(Path, Q1), + Q1; + _ -> + not_found + end. + %% ------------------------------------------------------------------- %% get_durable_in_mnesia_tx(). %% ------------------------------------------------------------------- @@ -790,6 +1259,12 @@ get_durable_in_mnesia_tx(Name) -> [Q] -> {ok, Q} end. +get_in_khepri_tx(Name) -> + case khepri_tx:get(khepri_queue_path(Name)) of + {ok, X} -> [X]; + _ -> [] + end. + %% ------------------------------------------------------------------- %% clear(). 
%% ------------------------------------------------------------------- @@ -800,13 +1275,23 @@ get_durable_in_mnesia_tx(Name) -> %% @private clear() -> - clear_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> clear_in_mnesia() end, + khepri => fun() -> clear_in_khepri() end}). clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?MNESIA_TABLE), {atomic, ok} = mnesia:clear_table(?MNESIA_DURABLE_TABLE), ok. +clear_in_khepri() -> + Path = khepri_queues_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. + +%% -------------------------------------------------------------- %% Internal %% -------------------------------------------------------------- @@ -841,3 +1326,34 @@ list_with_possible_retry_in_mnesia(Fun) -> Ret -> Ret end. + +list_with_possible_retry_in_khepri(Fun) -> + %% See equivalent `list_with_possible_retry_in_mnesia` first. + %% Not sure how much of this is possible in Khepri, as there is no dirty read, + %% but the amqqueue record migration is still happening. + %% Let's retry just in case + AmqqueueRecordVersion = amqqueue:record_version_to_use(), + case Fun() of + [] -> + case khepri_tx:is_transaction() of + true -> + []; + false -> + case amqqueue:record_version_to_use() of + AmqqueueRecordVersion -> []; + _ -> Fun() + end + end; + Ret -> + Ret + end. + +%% -------------------------------------------------------------- +%% Khepri paths +%% -------------------------------------------------------------- + +khepri_queues_path() -> + [?MODULE, queues]. + +khepri_queue_path(#resource{virtual_host = VHost, name = Name}) -> + [?MODULE, queues, VHost, Name]. diff --git a/deps/rabbit/src/rabbit_db_queue_m2k_converter.erl b/deps/rabbit/src/rabbit_db_queue_m2k_converter.erl new file mode 100644 index 000000000000..fd9f88b0ee8f --- /dev/null +++ b/deps/rabbit/src/rabbit_db_queue_m2k_converter.erl @@ -0,0 +1,106 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_queue_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). +-include("amqqueue.hrl"). + +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3]). + +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, Tables) -> + %% Clean up any previous attempt to copy the Mnesia table to Khepri. + lists:foreach(fun clear_data_in_khepri/1, Tables), + + SubState = #?MODULE{}, + {ok, SubState}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). 
+%% @private + +copy_to_khepri( + rabbit_queue = Table, Record, State) when ?is_amqqueue(Record) -> + Name = amqqueue:get_name(Record), + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Name], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_queue:khepri_queue_path(Name), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +delete_from_khepri(rabbit_queue = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_queue:khepri_queue_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +-spec clear_data_in_khepri(Table) -> ok when + Table :: atom(). + +clear_data_in_khepri(rabbit_queue) -> + khepri_delete(rabbit_db_queue:khepri_queues_path()); +clear_data_in_khepri(rabbit_durable_queue) -> + khepri_delete(rabbit_db_queue:khepri_queues_path()). + +khepri_delete(Path) -> + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. diff --git a/deps/rabbit/src/rabbit_db_rtparams.erl b/deps/rabbit/src/rabbit_db_rtparams.erl index 850dc949a6b6..0f07bf82b483 100644 --- a/deps/rabbit/src/rabbit_db_rtparams.erl +++ b/deps/rabbit/src/rabbit_db_rtparams.erl @@ -2,20 +2,31 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_rtparams). +-include_lib("khepri/include/khepri.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -export([set/2, set/4, get/1, - get_or_set/2, get_all/0, get_all/2, - delete/1, delete/3]). + delete/1, delete/3, + delete_vhost/1]). + +-export([khepri_vhost_rp_path/3, + khepri_global_rp_path/1, + khepri_rp_path/0 + ]). -define(MNESIA_TABLE, rabbit_runtime_parameters). +-define(KHEPRI_PROJECTION, rabbit_khepri_runtime_parameters). +-define(any(Value), case Value of + '_' -> ?KHEPRI_WILDCARD_STAR; + _ -> Value + end). %% ------------------------------------------------------------------- %% set(). @@ -33,12 +44,25 @@ %% @private set(Key, Term) when is_atom(Key) -> - set_in_mnesia(Key, Term). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> set_in_mnesia(Key, Term) end, + khepri => fun() -> set_in_khepri(Key, Term) end}). set_in_mnesia(Key, Term) -> rabbit_mnesia:execute_mnesia_transaction( fun() -> set_in_mnesia_tx(Key, Term) end). 
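Throughout this patch, each metadata operation is routed through rabbit_khepri:handle_fallback/1 with one closure per backend (Mnesia or Khepri). The implementation of handle_fallback/1 is not part of this diff; the following is only an illustrative, stand-alone sketch of that dispatch shape, where khepri_enabled/0 and the module name are hypothetical stand-ins.

-module(db_backend_dispatch_sketch).
-export([handle_fallback/1]).

%% Sketch only: mimics the #{mnesia => Fun, khepri => FunOrTerm} dispatch used
%% throughout this patch. Note that some callers (e.g. set_dirty/1) pass a
%% plain term such as `ok' for the khepri branch instead of a fun.
handle_fallback(#{mnesia := MnesiaFun, khepri := Khepri}) ->
    case khepri_enabled() of
        true when is_function(Khepri, 0) -> Khepri();
        true -> Khepri;
        false -> MnesiaFun()
    end.

%% Hypothetical toggle for the sketch; the real decision is made by
%% rabbit_khepri based on the metadata-store feature flag / migration state.
khepri_enabled() ->
    application:get_env(rabbit_sketch, use_khepri, false).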
+set_in_khepri(Key, Term) -> + Path = khepri_rp_path(Key), + Record = #runtime_parameters{key = Key, + value = Term}, + case rabbit_khepri:adv_put(Path, Record) of + {ok, #{data := Params}} -> + {old, Params#runtime_parameters.value}; + {ok, _} -> + new + end. + -spec set(VHostName, Comp, Name, Term) -> Ret when VHostName :: vhost:name(), Comp :: binary(), @@ -58,7 +82,9 @@ set(VHostName, Comp, Name, Term) is_binary(Comp) andalso (is_binary(Name) orelse is_atom(Name)) -> Key = {VHostName, Comp, Name}, - set_in_mnesia(VHostName, Key, Term). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> set_in_mnesia(VHostName, Key, Term) end, + khepri => fun() -> set_in_khepri(VHostName, Key, Term) end}). set_in_mnesia(VHostName, Key, Term) -> rabbit_mnesia:execute_mnesia_transaction( @@ -76,6 +102,22 @@ set_in_mnesia_tx(Key, Term) -> mnesia:write(?MNESIA_TABLE, Record, write), Res. +set_in_khepri(VHostName, Key, Term) -> + rabbit_khepri:transaction( + rabbit_db_vhost:with_fun_in_khepri_tx( + VHostName, fun() -> set_in_khepri_tx(Key, Term) end), rw). + +set_in_khepri_tx(Key, Term) -> + Path = khepri_rp_path(Key), + Record = #runtime_parameters{key = Key, + value = Term}, + case khepri_tx_adv:put(Path, Record) of + {ok, #{data := Params}} -> + {old, Params#runtime_parameters.value}; + {ok, _} -> + new + end. + %% ------------------------------------------------------------------- %% get(). %% ------------------------------------------------------------------- @@ -94,9 +136,13 @@ get({VHostName, Comp, Name} = Key) when is_binary(VHostName) andalso is_binary(Comp) andalso (is_binary(Name) orelse is_atom(Name)) -> - get_in_mnesia(Key); + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(Key) end, + khepri => fun() -> get_in_khepri(Key) end}); get(Key) when is_atom(Key) -> - get_in_mnesia(Key). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(Key) end, + khepri => fun() -> get_in_khepri(Key) end}). get_in_mnesia(Key) -> case mnesia:dirty_read(?MNESIA_TABLE, Key) of @@ -104,39 +150,13 @@ get_in_mnesia(Key) -> [Record] -> Record end. -%% ------------------------------------------------------------------- -%% get_or_set(). -%% ------------------------------------------------------------------- - --spec get_or_set(Key, Default) -> Ret when - Key :: atom() | {vhost:name(), binary(), binary()}, - Default :: any(), - Ret :: #runtime_parameters{}. -%% @doc Returns a runtime parameter or sets its value if it does not exist. -%% -%% @private - -get_or_set({VHostName, Comp, Name} = Key, Default) - when is_binary(VHostName) andalso - is_binary(Comp) andalso - (is_binary(Name) orelse is_atom(Name)) -> - get_or_set_in_mnesia(Key, Default); -get_or_set(Key, Default) -> - get_or_set_in_mnesia(Key, Default). - -get_or_set_in_mnesia(Key, Default) -> - rabbit_mnesia:execute_mnesia_transaction( - fun() -> get_or_set_in_mnesia_tx(Key, Default) end). - -get_or_set_in_mnesia_tx(Key, Default) -> - case mnesia:read(?MNESIA_TABLE, Key, read) of - [Record] -> - Record; - [] -> - Record = #runtime_parameters{key = Key, - value = Default}, - mnesia:write(?MNESIA_TABLE, Record, write), - Record +get_in_khepri(Key) -> + try ets:lookup(?KHEPRI_PROJECTION, Key) of + [] -> undefined; + [Record] -> Record + catch + error:badarg -> + undefined end. %% ------------------------------------------------------------------- @@ -152,11 +172,21 @@ get_or_set_in_mnesia_tx(Key, Default) -> %% @private get_all() -> - get_all_in_mnesia(). 
+ rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia() end, + khepri => fun() -> get_all_in_khepri() end}). get_all_in_mnesia() -> rabbit_mnesia:dirty_read_all(?MNESIA_TABLE). +get_all_in_khepri() -> + try + ets:tab2list(?KHEPRI_PROJECTION) + catch + error:badarg -> + [] + end. + -spec get_all(VHostName, Comp) -> Ret when VHostName :: vhost:name() | '_', Comp :: binary() | '_', @@ -171,7 +201,9 @@ get_all_in_mnesia() -> get_all(VHostName, Comp) when (is_binary(VHostName) orelse VHostName =:= '_') andalso (is_binary(Comp) orelse Comp =:= '_') -> - get_all_in_mnesia(VHostName, Comp). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia(VHostName, Comp) end, + khepri => fun() -> get_all_in_khepri(VHostName, Comp) end}). get_all_in_mnesia(VHostName, Comp) -> mnesia:async_dirty( @@ -185,6 +217,20 @@ get_all_in_mnesia(VHostName, Comp) -> mnesia:match_object(?MNESIA_TABLE, Match, read) end). +get_all_in_khepri(VHostName, Comp) -> + case VHostName of + '_' -> ok; + _ -> rabbit_vhost:assert(VHostName) + end, + try + Match = #runtime_parameters{key = {VHostName, Comp, '_'}, + _ = '_'}, + ets:match_object(?KHEPRI_PROJECTION, Match) + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% delete(). %% ------------------------------------------------------------------- @@ -196,7 +242,9 @@ get_all_in_mnesia(VHostName, Comp) -> %% @private delete(Key) when is_atom(Key) -> - delete_in_mnesia(Key). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(Key) end, + khepri => fun() -> delete_in_khepri(Key) end}). -spec delete(VHostName, Comp, Name) -> ok when VHostName :: vhost:name() | '_', @@ -212,10 +260,16 @@ delete(VHostName, Comp, Name) is_binary(Comp) andalso (is_binary(Name) orelse (is_atom(Name) andalso Name =/= '_')) -> Key = {VHostName, Comp, Name}, - delete_in_mnesia(Key); + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(Key) end, + khepri => fun() -> delete_in_khepri(Key) end}); delete(VHostName, Comp, Name) when VHostName =:= '_' orelse Comp =:= '_' orelse Name =:= '_' -> - delete_matching_in_mnesia(VHostName, Comp, Name). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> delete_matching_in_mnesia(VHostName, Comp, Name) end, + khepri => + fun() -> delete_matching_in_khepri(VHostName, Comp, Name) end}). delete_in_mnesia(Key) -> rabbit_mnesia:execute_mnesia_transaction( @@ -235,3 +289,75 @@ delete_matching_in_mnesia_tx(VHostName, Comp, Name) -> || #runtime_parameters{key = Key} <- mnesia:match_object(?MNESIA_TABLE, Match, write)], ok. + +delete_in_khepri(Key) -> + Path = khepri_rp_path(Key), + ok = rabbit_khepri:delete(Path). + +delete_matching_in_khepri(VHostName, Comp, Name) -> + Key = {?any(VHostName), ?any(Comp), ?any(Name)}, + delete_in_khepri(Key). + +%% ------------------------------------------------------------------- +%% delete_vhost(). +%% ------------------------------------------------------------------- + +-spec delete_vhost(VHostName) -> Ret when + VHostName :: vhost:name(), + Ret :: {ok, Deletions} | {error, Reason :: any()}, + Deletions :: [#runtime_parameters{}]. +%% @doc Deletes all runtime parameters belonging to the given virtual host. +%% +%% @returns an OK tuple containing the deleted runtime parameters if +%% successful, or an error tuple otherwise. 
+%% +%% @private + +delete_vhost(VHostName) when is_binary(VHostName) -> + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_vhost_in_mnesia(VHostName) end, + khepri => fun() -> delete_vhost_in_khepri(VHostName) end}). + +delete_vhost_in_mnesia(VHostName) -> + rabbit_mnesia:execute_mnesia_transaction( + fun() -> + Deletions = delete_vhost_in_mnesia_tx(VHostName), + {ok, Deletions} + end). + +delete_vhost_in_mnesia_tx(VHostName) -> + Match = #runtime_parameters{key = {VHostName, '_', '_'}, + _ = '_'}, + [begin + mnesia:delete(?MNESIA_TABLE, Key, write), + Record + end + || #runtime_parameters{key = Key} = Record + <- mnesia:match_object(?MNESIA_TABLE, Match, read)]. + +delete_vhost_in_khepri(VHostName) -> + Path = khepri_vhost_rp_path( + VHostName, ?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR), + case rabbit_khepri:adv_delete_many(Path) of + {ok, Props} -> + {ok, rabbit_khepri:collect_payloads(Props)}; + {error, _} = Err -> + Err + end. + +%% ------------------------------------------------------------------- + +khepri_rp_path() -> + [?MODULE]. + +khepri_rp_path({VHost, Component, Name}) -> + khepri_vhost_rp_path(VHost, Component, Name); +khepri_rp_path(Key) -> + khepri_global_rp_path(Key). + +khepri_global_rp_path(Key) -> + [?MODULE, global, Key]. + +khepri_vhost_rp_path(VHost, Component, Name) -> + [?MODULE, per_vhost, VHost, Component, Name]. + diff --git a/deps/rabbit/src/rabbit_db_rtparams_m2k_converter.erl b/deps/rabbit/src/rabbit_db_rtparams_m2k_converter.erl new file mode 100644 index 000000000000..fdc8fd9a20b9 --- /dev/null +++ b/deps/rabbit/src/rabbit_db_rtparams_m2k_converter.erl @@ -0,0 +1,106 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_rtparams_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3]). + +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, Tables) -> + %% Clean up any previous attempt to copy the Mnesia table to Khepri. + lists:foreach(fun clear_data_in_khepri/1, Tables), + + SubState = #?MODULE{}, + {ok, SubState}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). 
+%% @private + +copy_to_khepri( + rabbit_runtime_parameters = Table, #runtime_parameters{key = Key} = Record, + State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rtparams_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +delete_from_khepri(rabbit_runtime_parameters = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rtparams_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +rtparams_path({VHost, Comp, Name})-> + rabbit_db_rtparams:khepri_vhost_rp_path(VHost, Comp, Name); +rtparams_path(Key) -> + rabbit_db_rtparams:khepri_global_rp_path(Key). + +-spec clear_data_in_khepri(Table) -> ok when + Table :: atom(). + +clear_data_in_khepri(rabbit_runtime_parameters) -> + Path = rabbit_db_rtparams:khepri_rp_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. diff --git a/deps/rabbit/src/rabbit_db_topic_exchange.erl b/deps/rabbit/src/rabbit_db_topic_exchange.erl index 6d77d72afbdb..6d9affd55598 100644 --- a/deps/rabbit/src/rabbit_db_topic_exchange.erl +++ b/deps/rabbit/src/rabbit_db_topic_exchange.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_topic_exchange). @@ -11,16 +11,28 @@ -export([set/1, delete_all_for_exchange/1, delete/1, match/3]). +%% These functions are used to process mnesia deletion events generated during the +%% migration from mnesia to khepri +-export([ + split_topic_key/1, + split_topic_key_binary/1, + trie_binding_to_key/1, + trie_records_to_key/1 + ]). + %% For testing -export([clear/0]). -define(MNESIA_NODE_TABLE, rabbit_topic_trie_node). -define(MNESIA_EDGE_TABLE, rabbit_topic_trie_edge). -define(MNESIA_BINDING_TABLE, rabbit_topic_trie_binding). +-define(KHEPRI_PROJECTION, rabbit_khepri_topic_trie). -type match_result() :: [rabbit_types:binding_destination() | {rabbit_amqqueue:name(), rabbit_types:binding_key()}]. +-define(COMPILED_TOPIC_SPLIT_PATTERN, dot_binary_pattern). + %% ------------------------------------------------------------------- %% set(). 
%% ------------------------------------------------------------------- @@ -32,7 +44,22 @@ %% @private set(#binding{source = XName, key = BindingKey, destination = Destination, args = Args}) -> - set_in_mnesia(XName, BindingKey, Destination, Args). + rabbit_khepri:handle_fallback( + #{ + mnesia => fun() -> set_in_mnesia(XName, BindingKey, Destination, Args) end, + khepri => fun() -> set_in_khepri(XName, BindingKey, Destination, Args) end + }). + +set_in_mnesia(XName, BindingKey, Destination, Args) -> + rabbit_mnesia:execute_mnesia_transaction( + fun() -> + FinalNode = follow_down_create(XName, split_topic_key(BindingKey)), + trie_add_binding(XName, FinalNode, Destination, Args), + ok + end). + +set_in_khepri(_XName, _RoutingKey, _Destination, _Args) -> + ok. %% ------------------------------------------------------------------- %% delete_all_for_exchange(). @@ -45,7 +72,23 @@ set(#binding{source = XName, key = BindingKey, destination = Destination, args = %% @private delete_all_for_exchange(XName) -> - delete_all_for_exchange_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{ + mnesia => fun() -> delete_all_for_exchange_in_mnesia(XName) end, + khepri => fun() -> delete_all_for_exchange_in_khepri(XName) end + }). + +delete_all_for_exchange_in_mnesia(XName) -> + rabbit_mnesia:execute_mnesia_transaction( + fun() -> + trie_remove_all_nodes(XName), + trie_remove_all_edges(XName), + trie_remove_all_bindings(XName), + ok + end). + +delete_all_for_exchange_in_khepri(_XName) -> + ok. %% ------------------------------------------------------------------- %% delete(). @@ -58,7 +101,18 @@ delete_all_for_exchange(XName) -> %% @private delete(Bs) when is_list(Bs) -> - delete_in_mnesia(Bs). + rabbit_khepri:handle_fallback( + #{ + mnesia => fun() -> delete_in_mnesia(Bs) end, + khepri => fun() -> delete_in_khepri(Bs) end + }). + +delete_in_mnesia(Bs) -> + rabbit_mnesia:execute_mnesia_transaction( + fun() -> delete_in_mnesia_tx(Bs) end). + +delete_in_khepri(_Bs) -> + ok. %% ------------------------------------------------------------------- %% match(). @@ -76,7 +130,25 @@ delete(Bs) when is_list(Bs) -> match(XName, RoutingKey, Opts) -> BKeys = maps:get(return_binding_keys, Opts, false), - match_in_mnesia(XName, RoutingKey, BKeys). + rabbit_khepri:handle_fallback( + #{ + mnesia => + fun() -> + match_in_mnesia(XName, RoutingKey, BKeys) + end, + khepri => + fun() -> + match_in_khepri(XName, RoutingKey, BKeys) + end + }). + +match_in_mnesia(XName, RoutingKey, BKeys) -> + Words = split_topic_key(RoutingKey), + mnesia:async_dirty(fun trie_match/3, [XName, Words, BKeys]). + +match_in_khepri(XName, RoutingKey, BKeys) -> + Words = split_topic_key_binary(RoutingKey), + trie_match_in_khepri(XName, Words, BKeys). %% ------------------------------------------------------------------- %% clear(). @@ -88,7 +160,10 @@ match(XName, RoutingKey, Opts) -> %% @private clear() -> - clear_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> clear_in_mnesia() end, + khepri => fun() -> clear_in_khepri() end + }). clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?MNESIA_NODE_TABLE), @@ -96,32 +171,94 @@ clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?MNESIA_BINDING_TABLE), ok. -%% Internal +clear_in_khepri() -> + ok. + %% -------------------------------------------------------------- +%% split_topic_key(). +%% -------------------------------------------------------------- + +-spec split_topic_key(RoutingKey) -> Words when + RoutingKey :: binary(), + Words :: [[byte()]]. 
split_topic_key(Key) -> split_topic_key(Key, [], []). -set_in_mnesia(XName, BindingKey, Destination, Args) -> - rabbit_mnesia:execute_mnesia_transaction( - fun() -> - FinalNode = follow_down_create(XName, split_topic_key(BindingKey)), - trie_add_binding(XName, FinalNode, Destination, Args), - ok - end). +split_topic_key(<<>>, [], []) -> + []; +split_topic_key(<<>>, RevWordAcc, RevResAcc) -> + lists:reverse([lists:reverse(RevWordAcc) | RevResAcc]); +split_topic_key(<<$., Rest/binary>>, RevWordAcc, RevResAcc) -> + split_topic_key(Rest, [], [lists:reverse(RevWordAcc) | RevResAcc]); +split_topic_key(<<C:8, Rest/binary>>, RevWordAcc, RevResAcc) -> + split_topic_key(Rest, [C | RevWordAcc], RevResAcc). -delete_all_for_exchange_in_mnesia(XName) -> +%% -------------------------------------------------------------- +%% split_topic_key_binary(). +%% -------------------------------------------------------------- + +-spec split_topic_key_binary(RoutingKey) -> Words when + RoutingKey :: binary(), + Words :: [binary()]. + +split_topic_key_binary(<<>>) -> + []; +split_topic_key_binary(RoutingKey) -> + Pattern = + case persistent_term:get(?COMPILED_TOPIC_SPLIT_PATTERN, undefined) of + undefined -> + P = binary:compile_pattern(<<".">>), + persistent_term:put(?COMPILED_TOPIC_SPLIT_PATTERN, P), + P; + P -> + P + end, + binary:split(RoutingKey, Pattern, [global]). + +%% -------------------------------------------------------------- +%% trie_binding_to_key(). +%% -------------------------------------------------------------- + +-spec trie_binding_to_key(#topic_trie_binding{}) -> RoutingKey :: binary(). + +trie_binding_to_key(#topic_trie_binding{trie_binding = #trie_binding{node_id = NodeId}}) -> rabbit_mnesia:execute_mnesia_transaction( fun() -> - trie_remove_all_nodes(XName), - trie_remove_all_edges(XName), - trie_remove_all_bindings(XName), - ok + follow_up_get_path(mnesia, rabbit_topic_trie_edge, NodeId) end). -match_in_mnesia(XName, RoutingKey, BKeys) -> - Words = split_topic_key(RoutingKey), - mnesia:async_dirty(fun trie_match/3, [XName, Words, BKeys]). +%% -------------------------------------------------------------- +%% trie_records_to_key(). +%% -------------------------------------------------------------- + +-spec trie_records_to_key([#topic_trie_binding{}]) -> + [{#trie_binding{}, RoutingKey :: binary()}]. + +trie_records_to_key(Records) -> + Tab = ensure_topic_deletion_ets(), + TrieBindings = lists:foldl(fun(#topic_trie_binding{} = R, Acc) -> + [R | Acc]; + (#topic_trie_edge{} = R, Acc) -> + ets:insert(Tab, R), + Acc; + (_, Acc) -> + Acc + end, [], Records), + List = lists:foldl( + fun(#topic_trie_binding{trie_binding = #trie_binding{node_id = Node} = TB} = B, + Acc) -> + case follow_up_get_path(ets, Tab, Node) of + {error, not_found} -> [{TB, trie_binding_to_key(B)} | Acc]; + RK -> [{TB, RK} | Acc] + end + end, [], TrieBindings), + ets:delete(Tab), + List. + +%% -------------------------------------------------------------- +%% Internal +%% -------------------------------------------------------------- trie_remove_all_nodes(X) -> remove_all(?MNESIA_NODE_TABLE, @@ -166,18 +303,21 @@ delete_in_mnesia_tx(Bs) -> end || #binding{source = X, key = K, destination = D, args = Args} <- Bs], ok. -delete_in_mnesia(Bs) -> - rabbit_mnesia:execute_mnesia_transaction( - fun() -> delete_in_mnesia_tx(Bs) end).
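split_topic_key_binary/1 above compiles the "." pattern once per node and caches it in persistent_term, instead of recompiling it on every routing operation. A minimal stand-alone version of the same trick, using only OTP standard library calls (module and cache key names here are illustrative):

-module(cached_split_sketch).
-export([split/1]).

-define(PATTERN_KEY, {?MODULE, dot_pattern}).

split(<<>>) ->
    [];
split(RoutingKey) when is_binary(RoutingKey) ->
    Pattern =
        case persistent_term:get(?PATTERN_KEY, undefined) of
            undefined ->
                P = binary:compile_pattern(<<".">>),
                %% Cached for the lifetime of the node: persistent_term reads
                %% are cheap, updates are expensive, which suits a value that
                %% is written once and read on every publish.
                persistent_term:put(?PATTERN_KEY, P),
                P;
            P ->
                P
        end,
    binary:split(RoutingKey, Pattern, [global]).

For example, cached_split_sketch:split(<<"a.b.c">>) returns [<<"a">>, <<"b">>, <<"c">>].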
- -split_topic_key(<<>>, [], []) -> - []; -split_topic_key(<<>>, RevWordAcc, RevResAcc) -> - lists:reverse([lists:reverse(RevWordAcc) | RevResAcc]); -split_topic_key(<<$., Rest/binary>>, RevWordAcc, RevResAcc) -> - split_topic_key(Rest, [], [lists:reverse(RevWordAcc) | RevResAcc]); -split_topic_key(<<C:8, Rest/binary>>, RevWordAcc, RevResAcc) -> - split_topic_key(Rest, [C | RevWordAcc], RevResAcc). +follow_up_get_path(Mod, Tab, Node) -> + follow_up_get_path(Mod, Tab, Node, []). + +follow_up_get_path(_Mod, _Tab, root, Acc) -> + Acc; +follow_up_get_path(Mod, Tab, Node, Acc) -> + MatchHead = #topic_trie_edge{node_id = Node, + trie_edge = '$1'}, + case Mod:select(Tab, [{MatchHead, [], ['$1']}]) of + [#trie_edge{node_id = PreviousNode, + word = Word}] -> + follow_up_get_path(Mod, Tab, PreviousNode, [Word | Acc]); + [] -> + {error, not_found} + end. trie_match(X, Words, BKeys) -> trie_match(X, root, Words, BKeys, []). @@ -339,3 +479,79 @@ add_matched(DestinationsArgs, true, Acc) -> ({DestX, _BindingArgs}, L) -> [DestX | L] end, Acc, DestinationsArgs). + +ensure_topic_deletion_ets() -> + Tab = rabbit_db_topic_exchange_delete_table, + case ets:whereis(Tab) of + undefined -> + ets:new(Tab, [public, named_table, {keypos, #topic_trie_edge.trie_edge}]); + Tid -> + Tid + end. + +%% Khepri topic graph + +trie_match_in_khepri(X, Words, BKeys) -> + try + trie_match_in_khepri(X, root, Words, BKeys, []) + catch + error:badarg -> + [] + end. + +trie_match_in_khepri(X, Node, [], BKeys, ResAcc0) -> + Destinations = trie_bindings_in_khepri(X, Node, BKeys), + ResAcc = add_matched(Destinations, BKeys, ResAcc0), + trie_match_part_in_khepri( + X, Node, <<"#">>, + fun trie_match_skip_any_in_khepri/5, [], BKeys, ResAcc); +trie_match_in_khepri(X, Node, [W | RestW] = Words, BKeys, ResAcc) -> + lists:foldl(fun ({WArg, MatchFun, RestWArg}, Acc) -> + trie_match_part_in_khepri( + X, Node, WArg, MatchFun, RestWArg, BKeys, Acc) + end, ResAcc, [{W, fun trie_match_in_khepri/5, RestW}, + {<<"*">>, fun trie_match_in_khepri/5, RestW}, + {<<"#">>, + fun trie_match_skip_any_in_khepri/5, Words}]). + +trie_match_part_in_khepri(X, Node, Search, MatchFun, RestW, BKeys, ResAcc) -> + case trie_child_in_khepri(X, Node, Search) of + {ok, NextNode} -> MatchFun(X, NextNode, RestW, BKeys, ResAcc); + error -> ResAcc + end. + +trie_match_skip_any_in_khepri(X, Node, [], BKeys, ResAcc) -> + trie_match_in_khepri(X, Node, [], BKeys, ResAcc); +trie_match_skip_any_in_khepri(X, Node, [_ | RestW] = Words, BKeys, ResAcc) -> + trie_match_skip_any_in_khepri( + X, Node, RestW, BKeys, + trie_match_in_khepri(X, Node, Words, BKeys, ResAcc)). + +trie_child_in_khepri(X, Node, Word) -> + case ets:lookup( + ?KHEPRI_PROJECTION, + #trie_edge{exchange_name = X, + node_id = Node, + word = Word}) of + [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode}; + [] -> error + end. + +trie_bindings_in_khepri(X, Node, BKeys) -> + case ets:lookup( + ?KHEPRI_PROJECTION, + #trie_edge{exchange_name = X, + node_id = Node, + word = bindings}) of + [#topic_trie_edge{node_id = {bindings, Bindings}}] -> + [case BKeys of + true -> + {Dest, Args}; + false -> + Dest + end || #binding{destination = Dest, + args = Args} <- sets:to_list(Bindings)]; + [] -> + [] + end. + diff --git a/deps/rabbit/src/rabbit_db_user.erl b/deps/rabbit/src/rabbit_db_user.erl index 84e85d64c541..fb00b01a5daa 100644 --- a/deps/rabbit/src/rabbit_db_user.erl +++ b/deps/rabbit/src/rabbit_db_user.erl @@ -2,13 +2,14 @@ %% License, v. 2.0.
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_user). -include_lib("stdlib/include/assert.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -export([create/1, @@ -16,6 +17,7 @@ get/1, get_all/0, with_fun_in_mnesia_tx/2, + with_fun_in_khepri_tx/2, get_user_permissions/2, check_and_match_user_permissions/2, set_user_permissions/1, @@ -26,13 +28,52 @@ set_topic_permissions/1, clear_topic_permissions/3, clear_matching_topic_permissions/3, - delete/1]). + delete/1, + clear_all_permissions_for_vhost/1]). +-export([khepri_users_path/0, + khepri_user_path/1, + khepri_user_permission_path/2, + khepri_topic_permission_path/3]). + +%% for testing -export([clear/0]). +-ifdef(TEST). +-export([get_in_mnesia/1, + get_in_khepri/1, + create_in_mnesia/2, + create_in_khepri/2, + get_all_in_mnesia/0, + get_all_in_khepri/0, + update_in_mnesia/2, + update_in_khepri/2, + delete_in_mnesia/1, + delete_in_khepri/1, + get_user_permissions_in_mnesia/2, + get_user_permissions_in_khepri/2, + set_user_permissions_in_mnesia/3, + set_user_permissions_in_khepri/3, + set_topic_permissions_in_mnesia/3, + set_topic_permissions_in_khepri/3, + match_user_permissions_in_mnesia/2, + match_user_permissions_in_khepri/2, + clear_user_permissions_in_mnesia/2, + clear_user_permissions_in_khepri/2, + get_topic_permissions_in_mnesia/3, + get_topic_permissions_in_khepri/3, + match_topic_permissions_in_mnesia/3, + match_topic_permissions_in_khepri/3, + clear_topic_permissions_in_mnesia/3, + clear_topic_permissions_in_khepri/3 + ]). +-endif. + -define(MNESIA_TABLE, rabbit_user). -define(PERM_MNESIA_TABLE, rabbit_user_permission). -define(TOPIC_PERM_MNESIA_TABLE, rabbit_topic_permission). +-define(KHEPRI_USERS_PROJECTION, rabbit_khepri_users). +-define(KHEPRI_PERMISSIONS_PROJECTION, rabbit_khepri_user_permissions). %% ------------------------------------------------------------------- %% create(). @@ -49,7 +90,9 @@ create(User) -> Username = internal_user:get_username(User), - create_in_mnesia(Username, User). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> create_in_mnesia(Username, User) end, + khepri => fun() -> create_in_khepri(Username, User) end}). create_in_mnesia(Username, User) -> rabbit_mnesia:execute_mnesia_transaction( @@ -61,6 +104,17 @@ create_in_mnesia_tx(Username, User) -> _ -> mnesia:abort({user_already_exists, Username}) end. +create_in_khepri(Username, User) -> + Path = khepri_user_path(Username), + case rabbit_khepri:create(Path, User) of + ok -> + ok; + {error, {khepri, mismatching_node, _}} -> + throw({error, {user_already_exists, Username}}); + {error, _} = Error -> + throw(Error) + end. + %% ------------------------------------------------------------------- %% update(). %% ------------------------------------------------------------------- @@ -75,7 +129,9 @@ create_in_mnesia_tx(Username, User) -> update(Username, UpdateFun) when is_binary(Username) andalso is_function(UpdateFun, 1) -> - update_in_mnesia(Username, UpdateFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> update_in_mnesia(Username, UpdateFun) end, + khepri => fun() -> update_in_khepri(Username, UpdateFun) end}). 
update_in_mnesia(Username, UpdateFun) -> rabbit_mnesia:execute_mnesia_transaction( @@ -90,6 +146,21 @@ update_in_mnesia_tx(Username, UpdateFun) -> mnesia:abort({no_such_user, Username}) end. +update_in_khepri(Username, UpdateFun) -> + rabbit_khepri:transaction( + fun () -> + Path = khepri_user_path(Username), + case khepri_tx:get(Path) of + {ok, User} -> + case khepri_tx:put(Path, UpdateFun(User)) of + ok -> ok; + Error -> khepri_tx:abort(Error) + end; + _ -> + khepri_tx:abort({no_such_user, Username}) + end + end). + %% ------------------------------------------------------------------- %% get(). %% ------------------------------------------------------------------- @@ -97,7 +168,6 @@ update_in_mnesia_tx(Username, UpdateFun) -> -spec get(Username) -> User | undefined when Username :: internal_user:username(), User :: internal_user:internal_user(). - %% @doc Returns the record of the internal user named `Username'. %% %% @returns the internal user record or `undefined' if no internal user is named @@ -106,7 +176,9 @@ update_in_mnesia_tx(Username, UpdateFun) -> %% @private get(Username) when is_binary(Username) -> - get_in_mnesia(Username). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(Username) end, + khepri => fun() -> get_in_khepri(Username) end}). get_in_mnesia(Username) -> case ets:lookup(?MNESIA_TABLE, Username) of @@ -114,6 +186,15 @@ get_in_mnesia(Username) -> [] -> undefined end. +get_in_khepri(Username) -> + try ets:lookup(?KHEPRI_USERS_PROJECTION, Username) of + [User] -> User; + _ -> undefined + catch + error:badarg -> + undefined + end. + %% ------------------------------------------------------------------- %% get_all(). %% ------------------------------------------------------------------- @@ -127,13 +208,22 @@ get_in_mnesia(Username) -> %% @private get_all() -> - get_all_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia() end, + khepri => fun() -> get_all_in_khepri() end}). get_all_in_mnesia() -> mnesia:dirty_match_object( ?MNESIA_TABLE, internal_user:pattern_match_all()). +get_all_in_khepri() -> + Path = khepri_users_path(), + case rabbit_khepri:list(Path) of + {ok, Users} -> maps:values(Users); + _ -> [] + end. + %% ------------------------------------------------------------------- %% with_fun_in_*(). %% ------------------------------------------------------------------- @@ -162,6 +252,16 @@ with_fun_in_mnesia_tx(Username, TxFun) end end. +with_fun_in_khepri_tx(Username, TxFun) + when is_binary(Username) andalso is_function(TxFun, 0) -> + fun() -> + Path = khepri_user_path(Username), + case khepri_tx:exists(Path) of + true -> TxFun(); + false -> khepri_tx:abort({no_such_user, Username}) + end + end. + %% ------------------------------------------------------------------- %% get_user_permissions(). %% ------------------------------------------------------------------- @@ -180,7 +280,11 @@ with_fun_in_mnesia_tx(Username, TxFun) get_user_permissions(Username, VHostName) when is_binary(Username) andalso is_binary(VHostName) -> - get_user_permissions_in_mnesia(Username, VHostName). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> get_user_permissions_in_mnesia(Username, VHostName) end, + khepri => + fun() -> get_user_permissions_in_khepri(Username, VHostName) end}). get_user_permissions_in_mnesia(Username, VHostName) -> Key = #user_vhost{username = Username, @@ -190,6 +294,19 @@ get_user_permissions_in_mnesia(Username, VHostName) -> [] -> undefined end. 
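Several Khepri read paths in this patch (get_in_khepri/1 above, the runtime-parameter and permission lookups elsewhere) go through ETS tables that back Khepri projections, and wrap the lookup in a try ... catch error:badarg so that a read issued before the projection table exists degrades to "not found" instead of crashing. A stand-alone sketch of that defensive read, with an illustrative module name:

-module(projection_read_sketch).
-export([lookup/2]).

%% Look up Key in Table, treating a missing ETS table the same as a missing
%% entry: ets:lookup/2 raises error:badarg when the table does not exist yet,
%% e.g. before the projection has been registered on this node.
lookup(Table, Key) ->
    try ets:lookup(Table, Key) of
        [Record] -> Record;
        _ -> undefined
    catch
        error:badarg -> undefined
    end.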
+get_user_permissions_in_khepri(Username, VHostName) -> + UserVHost = #user_vhost{username = Username, + virtual_host = VHostName}, + try ets:lookup(?KHEPRI_PERMISSIONS_PROJECTION, UserVHost) of + [UserPermission] -> + UserPermission; + _ -> + undefined + catch + error:badarg -> + undefined + end. + %% ------------------------------------------------------------------- %% check_and_match_user_permissions(). %% ------------------------------------------------------------------- @@ -209,7 +326,11 @@ get_user_permissions_in_mnesia(Username, VHostName) -> check_and_match_user_permissions(Username, VHostName) when (is_binary(Username) orelse Username =:= '_') andalso (is_binary(VHostName) orelse VHostName =:= '_') -> - match_user_permissions_in_mnesia(Username, VHostName). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> match_user_permissions_in_mnesia(Username, VHostName) end, + khepri => + fun() -> match_user_permissions_in_khepri(Username, VHostName) end}). match_user_permissions_in_mnesia('_' = Username, '_' = VHostName) -> rabbit_mnesia:execute_mnesia_transaction( @@ -249,6 +370,48 @@ match_user_permissions_in_mnesia_tx(Username, VHostName) -> permission = '_'}, read). +match_user_permissions_in_khepri('_' = _Username, '_' = _VHostName) -> + Path = khepri_user_permission_path(?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR), + case rabbit_khepri:match(Path) of + {ok, Map} -> + maps:values(Map); + _ -> + [] + end; +match_user_permissions_in_khepri('_' = _Username, VHostName) -> + rabbit_khepri:transaction( + rabbit_db_vhost:with_fun_in_khepri_tx( + VHostName, + fun() -> + match_user_permissions_in_khepri_tx(?KHEPRI_WILDCARD_STAR, VHostName) + end), + ro); +match_user_permissions_in_khepri(Username, '_' = _VHostName) -> + rabbit_khepri:transaction( + with_fun_in_khepri_tx( + Username, + fun() -> + match_user_permissions_in_khepri_tx(Username, ?KHEPRI_WILDCARD_STAR) + end), + ro); +match_user_permissions_in_khepri(Username, VHostName) -> + rabbit_khepri:transaction( + with_fun_in_khepri_tx( + Username, + rabbit_db_vhost:with_fun_in_khepri_tx( + VHostName, + fun() -> + match_user_permissions_in_khepri_tx(Username, VHostName) + end)), + ro). + +match_user_permissions_in_khepri_tx(Username, VHostName) -> + Path = khepri_user_permission_path(Username, VHostName), + case khepri_tx:get_many(Path) of + {ok, Map} -> maps:values(Map); + _ -> [] + end. + %% ------------------------------------------------------------------- %% set_user_permissions(). %% ------------------------------------------------------------------- @@ -264,7 +427,17 @@ set_user_permissions( #user_permission{user_vhost = #user_vhost{username = Username, virtual_host = VHostName}} = UserPermission) -> - set_user_permissions_in_mnesia(Username, VHostName, UserPermission). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> + set_user_permissions_in_mnesia( + Username, VHostName, UserPermission) + end, + khepri => + fun() -> + set_user_permissions_in_khepri( + Username, VHostName, UserPermission) + end}). set_user_permissions_in_mnesia(Username, VHostName, UserPermission) -> rabbit_mnesia:execute_mnesia_transaction( @@ -277,6 +450,32 @@ set_user_permissions_in_mnesia(Username, VHostName, UserPermission) -> set_user_permissions_in_mnesia_tx(UserPermission) -> mnesia:write(?PERM_MNESIA_TABLE, UserPermission, write). 
+set_user_permissions_in_khepri(Username, VHostName, UserPermission) -> + rabbit_khepri:transaction( + with_fun_in_khepri_tx( + Username, + rabbit_db_vhost:with_fun_in_khepri_tx( + VHostName, + fun() -> + set_user_permissions_in_khepri_tx(Username, VHostName, UserPermission) + end)), rw). + +set_user_permissions_in_khepri_tx(Username, VHostName, UserPermission) -> + Path = khepri_user_permission_path( + #if_all{conditions = + [Username, + #if_node_exists{exists = true}]}, + VHostName), + Extra = #{keep_while => + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => + #if_node_exists{exists = true}}}, + Ret = khepri_tx:put( + Path, UserPermission, Extra), + case Ret of + ok -> ok; + Error -> khepri_tx:abort(Error) + end. + %% ------------------------------------------------------------------- %% clear_user_permissions(). %% ------------------------------------------------------------------- @@ -291,7 +490,12 @@ set_user_permissions_in_mnesia_tx(UserPermission) -> clear_user_permissions(Username, VHostName) when is_binary(Username) andalso is_binary(VHostName) -> - clear_user_permissions_in_mnesia(Username, VHostName). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> clear_user_permissions_in_mnesia(Username, VHostName) end, + khepri => + fun() -> clear_user_permissions_in_khepri(Username, VHostName) end + }). clear_user_permissions_in_mnesia(Username, VHostName) -> rabbit_mnesia:execute_mnesia_transaction( @@ -302,6 +506,13 @@ clear_user_permissions_in_mnesia_tx(Username, VHostName) -> #user_vhost{username = Username, virtual_host = VHostName}}). +clear_user_permissions_in_khepri(Username, VHostName) -> + Path = khepri_user_permission_path(Username, VHostName), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> khepri_tx:abort(Error) + end. + %% ------------------------------------------------------------------- %% clear_matching_user_permissions(). %% ------------------------------------------------------------------- @@ -309,22 +520,31 @@ clear_user_permissions_in_mnesia_tx(Username, VHostName) -> -spec clear_matching_user_permissions(Username, VHostName) -> Ret when Username :: internal_user:username() | '_', VHostName :: vhost:name() | '_', - Ret :: [#user_permission{}]. + Ret :: ok. %% @doc Clears all user permissions matching arguments. %% -%% @returns a list of matching user permissions. -%% %% @private clear_matching_user_permissions(Username, VHostName) when (is_binary(Username) orelse Username =:= '_') andalso (is_binary(VHostName) orelse VHostName =:= '_') -> - clear_matching_user_permissions_in_mnesia(Username, VHostName). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> + clear_matching_user_permissions_in_mnesia(Username, VHostName) + end, + khepri => + fun() -> + clear_matching_user_permissions_in_khepri(Username, VHostName) + end + }). clear_matching_user_permissions_in_mnesia(Username, VHostName) -> rabbit_mnesia:execute_mnesia_transaction( fun() -> - clear_matching_user_permissions_in_mnesia_tx( Username, VHostName) + _ = clear_matching_user_permissions_in_mnesia_tx( + Username, VHostName), + ok end). clear_matching_user_permissions_in_mnesia_tx(Username, VHostName) -> @@ -335,6 +555,64 @@ clear_matching_user_permissions_in_mnesia_tx(Username, VHostName) -> || #user_permission{user_vhost = Key} = Record <- match_user_permissions_in_mnesia_tx(Username, VHostName)]. +clear_matching_user_permissions_in_khepri(Username, VHostName) -> + Path = khepri_user_permission_path(any(Username), any(VHostName)), + ok = rabbit_khepri:delete(Path). 
+ +any('_') -> ?KHEPRI_WILDCARD_STAR; +any(Value) -> Value. + +%% ------------------------------------------------------------------- +%% clear_all_permissions_for_vhost(). +%% ------------------------------------------------------------------- + +-spec clear_all_permissions_for_vhost(VHostName) -> Ret when + VHostName :: vhost:name(), + Ret :: {ok, DeletedPermissions} | {error, Reason :: any()}, + DeletedPermissions :: [#topic_permission{} | #user_permission{}]. +%% @doc Transactionally deletes all user and topic permissions for a virtual +%% host, returning any permissions that were deleted. +%% +%% @returns an OK-tuple with the deleted permissions or an error tuple if the +%% operation could not be completed. +%% +%% @private + +clear_all_permissions_for_vhost(VHostName) when is_binary(VHostName) -> + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> clear_all_permissions_for_vhost_in_mnesia(VHostName) end, + khepri => + fun() -> clear_all_permissions_for_vhost_in_khepri(VHostName) end}). + +clear_all_permissions_for_vhost_in_mnesia(VHostName) -> + rabbit_mnesia:execute_mnesia_transaction( + fun() -> + Deletions = + clear_matching_topic_permissions_in_mnesia_tx( + '_', VHostName, '_') ++ + clear_matching_user_permissions_in_mnesia_tx( + '_', VHostName), + {ok, Deletions} + end). + +clear_all_permissions_for_vhost_in_khepri(VHostName) -> + rabbit_khepri:transaction( + fun() -> + UserPermissionsPath = khepri_user_permission_path( + ?KHEPRI_WILDCARD_STAR, VHostName), + TopicPermissionsPath = khepri_topic_permission_path( + ?KHEPRI_WILDCARD_STAR, VHostName, + ?KHEPRI_WILDCARD_STAR), + {ok, UserProps} = khepri_tx_adv:delete_many(UserPermissionsPath), + {ok, TopicProps} = khepri_tx_adv:delete_many( + TopicPermissionsPath), + Deletions = rabbit_khepri:collect_payloads( + TopicProps, + rabbit_khepri:collect_payloads(UserProps)), + {ok, Deletions} + end, rw). + %% ------------------------------------------------------------------- %% get_topic_permissions(). %% ------------------------------------------------------------------- @@ -356,7 +634,17 @@ get_topic_permissions(Username, VHostName, ExchangeName) when is_binary(Username) andalso is_binary(VHostName) andalso is_binary(ExchangeName) -> - get_topic_permissions_in_mnesia(Username, VHostName, ExchangeName). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> + get_topic_permissions_in_mnesia( + Username, VHostName, ExchangeName) + end, + khepri => + fun() -> + get_topic_permissions_in_khepri( + Username, VHostName, ExchangeName) + end}). get_topic_permissions_in_mnesia(Username, VHostName, ExchangeName) -> Key = #topic_permission_key{ @@ -368,6 +656,13 @@ get_topic_permissions_in_mnesia(Username, VHostName, ExchangeName) -> [] -> undefined end. +get_topic_permissions_in_khepri(Username, VHostName, ExchangeName) -> + Path = khepri_topic_permission_path(Username, VHostName, ExchangeName), + case rabbit_khepri:get(Path) of + {ok, TopicPermission} -> TopicPermission; + _ -> undefined + end. + %% ------------------------------------------------------------------- %% check_and_match_topic_permissions(). %% ------------------------------------------------------------------- @@ -390,7 +685,18 @@ check_and_match_topic_permissions(Username, VHostName, ExchangeName) when (is_binary(Username) orelse Username =:= '_') andalso (is_binary(VHostName) orelse VHostName =:= '_') andalso (is_binary(ExchangeName) orelse ExchangeName =:= '_') -> - match_topic_permissions_in_mnesia(Username, VHostName, ExchangeName). 
+ rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> + match_topic_permissions_in_mnesia( + Username, VHostName, ExchangeName) + end, + khepri => + fun() -> + match_topic_permissions_in_khepri( + Username, VHostName, ExchangeName) + end + }). match_topic_permissions_in_mnesia( '_' = Username, '_' = VHostName, ExchangeName) -> @@ -441,6 +747,51 @@ match_topic_permissions_in_mnesia_tx(Username, VHostName, ExchangeName) -> permission = '_'}, read). +match_topic_permissions_in_khepri('_' = _Username, '_' = _VHostName, ExchangeName) -> + rabbit_khepri:transaction( + fun() -> + match_topic_permissions_in_khepri_tx( + ?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR, any(ExchangeName)) + end, ro); +match_topic_permissions_in_khepri('_' = _Username, VHostName, ExchangeName) -> + rabbit_khepri:transaction( + rabbit_db_vhost:with_fun_in_khepri_tx( + VHostName, + fun() -> + match_topic_permissions_in_khepri_tx( + ?KHEPRI_WILDCARD_STAR, VHostName, any(ExchangeName)) + end), + ro); +match_topic_permissions_in_khepri( + Username, '_' = _VHostName, ExchangeName) -> + rabbit_khepri:transaction( + with_fun_in_khepri_tx( + Username, + fun() -> + match_topic_permissions_in_khepri_tx( + Username, ?KHEPRI_WILDCARD_STAR, any(ExchangeName)) + end), + ro); +match_topic_permissions_in_khepri( + Username, VHostName, ExchangeName) -> + rabbit_khepri:transaction( + with_fun_in_khepri_tx( + Username, + rabbit_db_vhost:with_fun_in_khepri_tx( + VHostName, + fun() -> + match_topic_permissions_in_khepri_tx( + Username, VHostName, any(ExchangeName)) + end)), + ro). + +match_topic_permissions_in_khepri_tx(Username, VHostName, ExchangeName) -> + Path = khepri_topic_permission_path(Username, VHostName, ExchangeName), + case khepri_tx:get_many(Path) of + {ok, Map} -> maps:values(Map); + _ -> [] + end. + %% ------------------------------------------------------------------- %% set_topic_permissions(). %% ------------------------------------------------------------------- @@ -459,7 +810,18 @@ set_topic_permissions( user_vhost = #user_vhost{username = Username, virtual_host = VHostName}}} = TopicPermission) -> - set_topic_permissions_in_mnesia(Username, VHostName, TopicPermission). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> + set_topic_permissions_in_mnesia( + Username, VHostName, TopicPermission) + end, + khepri => + fun() -> + set_topic_permissions_in_khepri( + Username, VHostName, TopicPermission) + end + }). set_topic_permissions_in_mnesia(Username, VHostName, TopicPermission) -> rabbit_mnesia:execute_mnesia_transaction( @@ -474,6 +836,34 @@ set_topic_permissions_in_mnesia(Username, VHostName, TopicPermission) -> set_topic_permissions_in_mnesia_tx(TopicPermission) -> mnesia:write(?TOPIC_PERM_MNESIA_TABLE, TopicPermission, write). +set_topic_permissions_in_khepri(Username, VHostName, TopicPermission) -> + rabbit_khepri:transaction( + with_fun_in_khepri_tx( + Username, + rabbit_db_vhost:with_fun_in_khepri_tx( + VHostName, + fun() -> + set_topic_permissions_in_khepri_tx(Username, VHostName, TopicPermission) + end)), rw). 
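The permission writes above compose with_fun_in_khepri_tx/2 (the user must exist) with rabbit_db_vhost:with_fun_in_khepri_tx/2 (the vhost must exist), so a single transaction validates both preconditions and then performs the write. Reduced to plain closures, the composition has roughly the shape below; the names are illustrative, not the real API, and the real code aborts via khepri_tx:abort/1 rather than returning an error tuple.

-module(tx_wrapper_sketch).
-export([with_check/3, run/1]).

%% Each wrapper returns a new 0-arity fun that checks a precondition before
%% calling the inner fun, so wrappers nest like
%% with_check(UserExists, no_such_user,
%%            with_check(VHostExists, no_such_vhost, WriteFun))
%% and the whole chain runs inside one transaction.
with_check(CheckFun, ErrorReason, InnerFun)
  when is_function(CheckFun, 0), is_function(InnerFun, 0) ->
    fun() ->
            case CheckFun() of
                true  -> InnerFun();
                false -> {error, ErrorReason}
            end
    end.

%% Stand-in for rabbit_khepri:transaction(Fun, rw) in this sketch.
run(Fun) when is_function(Fun, 0) ->
    Fun().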
+ +set_topic_permissions_in_khepri_tx(Username, VHostName, TopicPermission) -> + #topic_permission{topic_permission_key = + #topic_permission_key{exchange = ExchangeName}} = TopicPermission, + Path = khepri_topic_permission_path( + #if_all{conditions = + [Username, + #if_node_exists{exists = true}]}, + VHostName, + ExchangeName), + Extra = #{keep_while => + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => + #if_node_exists{exists = true}}}, + Ret = khepri_tx:put(Path, TopicPermission, Extra), + case Ret of + ok -> ok; + Error -> khepri_tx:abort(Error) + end. + %% ------------------------------------------------------------------- %% clear_topic_permissions(). %% ------------------------------------------------------------------- @@ -490,7 +880,18 @@ set_topic_permissions_in_mnesia_tx(TopicPermission) -> clear_topic_permissions(Username, VHostName, ExchangeName) when is_binary(Username) andalso is_binary(VHostName) andalso (is_binary(ExchangeName) orelse ExchangeName =:= '_') -> - clear_topic_permissions_in_mnesia(Username, VHostName, ExchangeName). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> + clear_topic_permissions_in_mnesia( + Username, VHostName, ExchangeName) + end, + khepri => + fun() -> + clear_topic_permissions_in_khepri( + Username, VHostName, ExchangeName) + end + }). clear_topic_permissions_in_mnesia(Username, VHostName, ExchangeName) -> rabbit_mnesia:execute_mnesia_transaction( @@ -502,6 +903,10 @@ clear_topic_permissions_in_mnesia(Username, VHostName, ExchangeName) -> clear_topic_permissions_in_mnesia_tx(Username, VHostName, ExchangeName) -> delete_topic_permission_in_mnesia_tx(Username, VHostName, ExchangeName). +clear_topic_permissions_in_khepri(Username, VHostName, ExchangeName) -> + Path = khepri_topic_permission_path(any(Username), any(VHostName), any(ExchangeName)), + rabbit_khepri:delete(Path). + %% ------------------------------------------------------------------- %% clear_matching_topic_permissions(). %% ------------------------------------------------------------------- @@ -511,26 +916,35 @@ clear_topic_permissions_in_mnesia_tx(Username, VHostName, ExchangeName) -> Username :: rabbit_types:username() | '_', VHostName :: vhost:name() | '_', ExchangeName :: binary() | '_', - Ret :: [#topic_permission{}]. + Ret :: ok. %% @doc Clears all topic permissions matching arguments. %% -%% @returns a list of matching topic permissions. -%% %% @private clear_matching_topic_permissions(Username, VHostName, ExchangeName) when (is_binary(Username) orelse Username =:= '_') andalso (is_binary(VHostName) orelse VHostName =:= '_') andalso (is_binary(ExchangeName) orelse ExchangeName =:= '_') -> - clear_matching_topic_permissions_in_mnesia( - Username, VHostName, ExchangeName). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> + clear_matching_topic_permissions_in_mnesia( + Username, VHostName, ExchangeName) + end, + khepri => + fun() -> + clear_matching_topic_permissions_in_khepri( + Username, VHostName, ExchangeName) + end + }). clear_matching_topic_permissions_in_mnesia( Username, VHostName, ExchangeName) -> rabbit_mnesia:execute_mnesia_transaction( fun() -> - clear_matching_topic_permissions_in_mnesia_tx( - Username, VHostName, ExchangeName) + _ = clear_matching_topic_permissions_in_mnesia_tx( + Username, VHostName, ExchangeName), + ok end). clear_matching_topic_permissions_in_mnesia_tx( @@ -543,13 +957,19 @@ clear_matching_topic_permissions_in_mnesia_tx( <- match_topic_permissions_in_mnesia_tx( Username, VHostName, ExchangeName)]. 
+clear_matching_topic_permissions_in_khepri( + Username, VHostName, ExchangeName) -> + Path = khepri_topic_permission_path( + any(Username), any(VHostName), any(ExchangeName)), + ok = rabbit_khepri:delete(Path). + %% ------------------------------------------------------------------- %% delete(). %% ------------------------------------------------------------------- -spec delete(Username) -> Existed when Username :: internal_user:username(), - Existed :: boolean(). + Existed :: boolean() | {error, any()}. %% @doc Deletes a user and its permissions from the database. %% %% @returns a boolean indicating if the user existed. It throws an exception @@ -558,7 +978,10 @@ clear_matching_topic_permissions_in_mnesia_tx( %% @private delete(Username) when is_binary(Username) -> - delete_in_mnesia(Username). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(Username) end, + khepri => fun() -> delete_in_khepri(Username) end + }). delete_in_mnesia(Username) -> rabbit_mnesia:execute_mnesia_transaction( @@ -586,6 +1009,14 @@ delete_topic_permission_in_mnesia_tx(Username, VHostName, ExchangeName) -> R <- mnesia:match_object(?TOPIC_PERM_MNESIA_TABLE, Pattern, write)], ok. +delete_in_khepri(Username) -> + Path = khepri_user_path(Username), + case rabbit_khepri:delete_or_fail(Path) of + ok -> true; + {error, {node_not_found, _}} -> false; + Error -> Error + end. + user_permission_pattern(Username, VHostName) -> #user_permission{user_vhost = #user_vhost{ username = Username, @@ -612,10 +1043,32 @@ topic_permission_pattern(Username, VHostName, ExchangeName) -> %% @private clear() -> - clear_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> clear_in_mnesia() end, + khepri => fun() -> clear_in_khepri() end}). clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?MNESIA_TABLE), {atomic, ok} = mnesia:clear_table(?PERM_MNESIA_TABLE), {atomic, ok} = mnesia:clear_table(?TOPIC_PERM_MNESIA_TABLE), ok. + +clear_in_khepri() -> + Path = khepri_users_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. + +%% -------------------------------------------------------------- +%% Paths +%% -------------------------------------------------------------- + +khepri_users_path() -> [?MODULE, users]. +khepri_user_path(Username) -> [?MODULE, users, Username]. + +khepri_user_permission_path(Username, VHostName) -> + [?MODULE, users, Username, user_permissions, VHostName]. + +khepri_topic_permission_path(Username, VHostName, Exchange) -> + [?MODULE, users, Username, topic_permissions, VHostName, Exchange]. diff --git a/deps/rabbit/src/rabbit_db_user_m2k_converter.erl b/deps/rabbit/src/rabbit_db_user_m2k_converter.erl new file mode 100644 index 000000000000..194514e2afc9 --- /dev/null +++ b/deps/rabbit/src/rabbit_db_user_m2k_converter.erl @@ -0,0 +1,201 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_user_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri/include/khepri.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). +-include("internal_user.hrl"). 
+ +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3]). + +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, Tables) -> + %% Clean up any previous attempt to copy the Mnesia table to Khepri. + lists:foreach(fun clear_data_in_khepri/1, Tables), + + SubState = #?MODULE{}, + {ok, SubState}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +copy_to_khepri( + rabbit_user = Table, Record, State) when ?is_internal_user(Record) -> + Username = internal_user:get_username(Record), + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Username], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_user:khepri_user_path(Username), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri( + rabbit_user_permission = Table, #user_permission{} = Record, State) -> + #user_permission{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHost}} = Record, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] user: ~0p vhost: ~0p", + [Table, Username, VHost], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_user:khepri_user_permission_path( + #if_all{conditions = + [Username, + #if_node_exists{exists = true}]}, + VHost), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{keep_while => + #{rabbit_db_vhost:khepri_vhost_path(VHost) => + #if_node_exists{exists = true}}, + async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri( + rabbit_topic_permission = Table, #topic_permission{} = Record, State) -> + #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHost}, + exchange = Exchange}} = Record, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] user: ~0p vhost: ~0p", + [Table, Username, VHost], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_user:khepri_topic_permission_path( + #if_all{conditions = + [Username, + #if_node_exists{exists = true}]}, + VHost, + Exchange), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{keep_while => + #{rabbit_db_vhost:khepri_vhost_path(VHost) => + #if_node_exists{exists = true}}, + async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. 
+ +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +delete_from_khepri(rabbit_user = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_user:khepri_user_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State); +delete_from_khepri(rabbit_user_permission = Table, Key, State) -> + #user_vhost{ + username = Username, + virtual_host = VHost} = Key, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_user:khepri_user_permission_path(Username, VHost), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State); +delete_from_khepri(rabbit_topic_permission = Table, Key, State) -> + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHost}, + exchange = Exchange} = Key, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_user:khepri_topic_permission_path(Username, VHost, Exchange), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +-spec clear_data_in_khepri(Table) -> ok when + Table :: atom(). + +clear_data_in_khepri(rabbit_user) -> + Path = rabbit_db_user:khepri_users_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end; +clear_data_in_khepri(_) -> + ok. diff --git a/deps/rabbit/src/rabbit_db_vhost.erl b/deps/rabbit/src/rabbit_db_vhost.erl index be40c74fb433..247acb4632af 100644 --- a/deps/rabbit/src/rabbit_db_vhost.erl +++ b/deps/rabbit/src/rabbit_db_vhost.erl @@ -2,14 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_vhost). --include_lib("kernel/include/logger.hrl"). -include_lib("stdlib/include/assert.hrl"). -include_lib("rabbit_common/include/logging.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include("vhost.hrl"). @@ -22,11 +22,37 @@ list/0, update/2, with_fun_in_mnesia_tx/2, + with_fun_in_khepri_tx/2, delete/1]). +-export([khepri_vhost_path/1, + khepri_vhosts_path/0]). + +%% For testing -export([clear/0]). +-ifdef(TEST). 
+-export([create_or_get_in_mnesia/2, + create_or_get_in_khepri/2, + get_in_mnesia/1, + get_in_khepri/1, + exists_in_mnesia/1, + exists_in_khepri/1, + list_in_mnesia/0, + list_in_khepri/0, + get_all_in_mnesia/0, + get_all_in_khepri/0, + update_in_mnesia/2, + update_in_khepri/2, + merge_metadata_in_mnesia/2, + merge_metadata_in_khepri/2, + delete_in_mnesia/1, + delete_in_khepri/1 + ]). +-endif. + -define(MNESIA_TABLE, rabbit_vhost). +-define(KHEPRI_PROJECTION, rabbit_khepri_vhost). %% ------------------------------------------------------------------- %% create_or_get(). @@ -36,7 +62,7 @@ VHostName :: vhost:name(), Limits :: vhost:limits(), Metadata :: vhost:metadata(), - Ret :: {existing | new, VHost}, + Ret :: {existing | new, VHost} | no_return(), VHost :: vhost:vhost(). %% @doc Writes a virtual host record if it doesn't exist already or returns %% the existing one. @@ -51,7 +77,9 @@ create_or_get(VHostName, Limits, Metadata) is_list(Limits) andalso is_map(Metadata) -> VHost = vhost:new(VHostName, Limits, Metadata), - create_or_get_in_mnesia(VHostName, VHost). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> create_or_get_in_mnesia(VHostName, VHost) end, + khepri => fun() -> create_or_get_in_khepri(VHostName, VHost) end}). create_or_get_in_mnesia(VHostName, VHost) -> rabbit_mnesia:execute_mnesia_transaction( @@ -68,6 +96,20 @@ create_or_get_in_mnesia_tx(VHostName, VHost) -> {existing, ExistingVHost} end. +create_or_get_in_khepri(VHostName, VHost) -> + Path = khepri_vhost_path(VHostName), + rabbit_log:debug("Inserting a virtual host record ~tp", [VHost]), + case rabbit_khepri:create(Path, VHost) of + ok -> + {new, VHost}; + {error, {khepri, mismatching_node, + #{node_path := Path, + node_props := #{data := ExistingVHost}}}} -> + {existing, ExistingVHost}; + Error -> + throw(Error) + end. + %% ------------------------------------------------------------------- %% merge_metadata(). %% ------------------------------------------------------------------- @@ -75,7 +117,9 @@ create_or_get_in_mnesia_tx(VHostName, VHost) -> -spec merge_metadata(VHostName, Metadata) -> Ret when VHostName :: vhost:name(), Metadata :: vhost:metadata(), - Ret :: {ok, VHost} | {error, {no_such_vhost, VHostName}}, + Ret :: {ok, VHost} | + {error, {no_such_vhost, VHostName}} | + rabbit_khepri:timeout_error(), VHost :: vhost:vhost(). %% @doc Updates the metadata of an existing virtual host record. %% @@ -96,7 +140,9 @@ merge_metadata(VHostName, Metadata) end. do_merge_metadata(VHostName, Metadata) -> - merge_metadata_in_mnesia(VHostName, Metadata). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> merge_metadata_in_mnesia(VHostName, Metadata) end, + khepri => fun() -> merge_metadata_in_khepri(VHostName, Metadata) end}). merge_metadata_in_mnesia(VHostName, Metadata) -> rabbit_mnesia:execute_mnesia_transaction( @@ -113,6 +159,30 @@ merge_metadata_in_mnesia_tx(VHostName, Metadata) -> {ok, VHost1} end. 
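The Khepri variant that follows replaces the Mnesia transaction with an optimistic read-modify-write: the record is read together with its payload version, the update is applied, and the put is made conditional on that version, retrying when a concurrent writer has bumped it in the meantime. A minimal sketch of this pattern (the helper name is made up for the example; it has the same shape as merge_metadata_in_khepri/2 and update_in_khepri/2 below and uses the rabbit_khepri calls already referenced in this module):

update_with_version_check(Path, Fun) ->
    case rabbit_khepri:adv_get(Path) of
        {ok, #{data := Record0, payload_version := Vsn}} ->
            Record = Fun(Record0),
            Path1 = khepri_path:combine_with_conditions(
                      Path, [#if_payload_version{version = Vsn}]),
            case rabbit_khepri:put(Path1, Record) of
                ok ->
                    {ok, Record};
                {error, {khepri, mismatching_node, _}} ->
                    %% A concurrent update won the race; read again and retry.
                    update_with_version_check(Path, Fun);
                {error, _} = Error ->
                    Error
            end;
        {error, _} = Error ->
            Error
    end.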
+merge_metadata_in_khepri(VHostName, Metadata) -> + Path = khepri_vhost_path(VHostName), + Ret1 = rabbit_khepri:adv_get(Path), + case Ret1 of + {ok, #{data := VHost0, payload_version := DVersion}} -> + VHost = vhost:merge_metadata(VHost0, Metadata), + rabbit_log:debug("Updating a virtual host record ~p", [VHost]), + Path1 = khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = DVersion}]), + Ret2 = rabbit_khepri:put(Path1, VHost), + case Ret2 of + ok -> + {ok, VHost}; + {error, {khepri, mismatching_node, _}} -> + merge_metadata_in_khepri(VHostName, Metadata); + {error, _} = Error -> + Error + end; + {error, {khepri, node_not_found, _}} -> + {error, {no_such_vhost, VHostName}}; + {error, _} = Error -> + Error + end. + %% ------------------------------------------------------------------- %% set_tags(). %% ------------------------------------------------------------------- @@ -120,7 +190,7 @@ merge_metadata_in_mnesia_tx(VHostName, Metadata) -> -spec set_tags(VHostName, Tags) -> VHost when VHostName :: vhost:name(), Tags :: [vhost:tag() | binary() | string()], - VHost :: vhost:vhost(). + VHost :: vhost:vhost() | no_return(). %% @doc Sets the tags of an existing virtual host record. %% %% @returns the updated virtual host record if the record existed and the @@ -131,7 +201,9 @@ merge_metadata_in_mnesia_tx(VHostName, Metadata) -> set_tags(VHostName, Tags) when is_binary(VHostName) andalso is_list(Tags) -> ConvertedTags = lists:usort([rabbit_data_coercion:to_atom(Tag) || Tag <- Tags]), - set_tags_in_mnesia(VHostName, ConvertedTags). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> set_tags_in_mnesia(VHostName, ConvertedTags) end, + khepri => fun() -> set_tags_in_khepri(VHostName, ConvertedTags) end}). set_tags_in_mnesia(VHostName, Tags) -> rabbit_mnesia:execute_mnesia_transaction( @@ -146,6 +218,10 @@ do_set_tags(VHost, Tags) when ?is_vhost(VHost) andalso is_list(Tags) -> Metadata1 = Metadata0#{tags => Tags}, vhost:set_metadata(VHost, Metadata1). +set_tags_in_khepri(VHostName, Tags) -> + UpdateFun = fun(VHost) -> do_set_tags(VHost, Tags) end, + update_in_khepri(VHostName, UpdateFun). + %% ------------------------------------------------------------------- %% exists(). %% ------------------------------------------------------------------- @@ -160,11 +236,21 @@ do_set_tags(VHost, Tags) when ?is_vhost(VHost) andalso is_list(Tags) -> %% @private exists(VHostName) when is_binary(VHostName) -> - exists_in_mnesia(VHostName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> exists_in_mnesia(VHostName) end, + khepri => fun() -> exists_in_khepri(VHostName) end}). exists_in_mnesia(VHostName) -> mnesia:dirty_read({?MNESIA_TABLE, VHostName}) /= []. +exists_in_khepri(VHostName) -> + try + ets:member(?KHEPRI_PROJECTION, VHostName) + catch + error:badarg -> + false + end. + %% ------------------------------------------------------------------- %% get(). %% ------------------------------------------------------------------- @@ -180,7 +266,9 @@ exists_in_mnesia(VHostName) -> %% @private get(VHostName) when is_binary(VHostName) -> - get_in_mnesia(VHostName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(VHostName) end, + khepri => fun() -> get_in_khepri(VHostName) end}). get_in_mnesia(VHostName) -> case mnesia:dirty_read({?MNESIA_TABLE, VHostName}) of @@ -188,6 +276,15 @@ get_in_mnesia(VHostName) -> [] -> undefined end. 
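%% Note on the *_in_khepri read functions in this module (exists, get,
%% get_all, list): in Khepri mode, reads go through the rabbit_khepri_vhost
%% ETS projection rather than the store itself, which keeps these lookups
%% cheap. The error:badarg clauses cover the case where the projection
%% table does not exist, presumably because the store or its projections
%% are not ready yet, and fall back to "no data" answers.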
+get_in_khepri(VHostName) -> + try ets:lookup(?KHEPRI_PROJECTION, VHostName) of + [Record] -> Record; + _ -> undefined + catch + error:badarg -> + undefined + end. + %% ------------------------------------------------------------------- %% get_all(). %% ------------------------------------------------------------------- @@ -201,11 +298,21 @@ get_in_mnesia(VHostName) -> %% @private get_all() -> - get_all_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_all_in_mnesia() end, + khepri => fun() -> get_all_in_khepri() end}). get_all_in_mnesia() -> mnesia:dirty_match_object(?MNESIA_TABLE, vhost:pattern_match_all()). +get_all_in_khepri() -> + try + ets:tab2list(?KHEPRI_PROJECTION) + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% list(). %% ------------------------------------------------------------------- @@ -219,11 +326,22 @@ get_all_in_mnesia() -> %% @private list() -> - list_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> list_in_mnesia() end, + khepri => fun() -> list_in_khepri() end}). list_in_mnesia() -> mnesia:dirty_all_keys(?MNESIA_TABLE). +list_in_khepri() -> + try + ets:select( + ?KHEPRI_PROJECTION, [{vhost:pattern_match_names(), [], ['$1']}]) + catch + error:badarg -> + [] + end. + %% ------------------------------------------------------------------- %% update_in_*tx(). %% ------------------------------------------------------------------- @@ -231,7 +349,7 @@ list_in_mnesia() -> -spec update(VHostName, UpdateFun) -> VHost when VHostName :: vhost:name(), UpdateFun :: fun((VHost) -> VHost), - VHost :: vhost:vhost(). + VHost :: vhost:vhost() | no_return(). %% @doc Updates an existing virtual host record using the result of %% `UpdateFun'. %% @@ -242,7 +360,9 @@ list_in_mnesia() -> update(VHostName, UpdateFun) when is_binary(VHostName) andalso is_function(UpdateFun, 1) -> - update_in_mnesia(VHostName, UpdateFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> update_in_mnesia(VHostName, UpdateFun) end, + khepri => fun() -> update_in_khepri(VHostName, UpdateFun) end}). update_in_mnesia(VHostName, UpdateFun) -> rabbit_mnesia:execute_mnesia_transaction( @@ -259,6 +379,27 @@ update_in_mnesia_tx(VHostName, UpdateFun) mnesia:abort({no_such_vhost, VHostName}) end. +update_in_khepri(VHostName, UpdateFun) -> + Path = khepri_vhost_path(VHostName), + case rabbit_khepri:adv_get(Path) of + {ok, #{data := V, payload_version := DVersion}} -> + V1 = UpdateFun(V), + Path1 = khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = DVersion}]), + case rabbit_khepri:put(Path1, V1) of + ok -> + V1; + {error, {khepri, mismatching_node, _}} -> + update_in_khepri(VHostName, UpdateFun); + Error -> + throw(Error) + end; + {error, {khepri, node_not_found, _}} -> + throw({error, {no_such_vhost, VHostName}}); + Error -> + throw(Error) + end. + %% ------------------------------------------------------------------- %% with_fun_in_*_tx(). %% ------------------------------------------------------------------- @@ -287,13 +428,23 @@ with_fun_in_mnesia_tx(VHostName, TxFun) end end. +with_fun_in_khepri_tx(VHostName, Thunk) -> + fun() -> + Path = khepri_vhost_path(VHostName), + case khepri_tx:exists(Path) of + true -> Thunk(); + false -> khepri_tx:abort({no_such_vhost, VHostName}) + end + end. + %% ------------------------------------------------------------------- %% delete(). 
%% ------------------------------------------------------------------- --spec delete(VHostName) -> Existed when +-spec delete(VHostName) -> Ret when VHostName :: vhost:name(), - Existed :: boolean(). + Existed :: boolean(), + Ret :: Existed | rabbit_khepri:timeout_error(). %% @doc Deletes a virtual host record from the database. %% %% @returns a boolean indicating if the vhost existed or not. It throws an @@ -302,7 +453,9 @@ with_fun_in_mnesia_tx(VHostName, TxFun) %% @private delete(VHostName) when is_binary(VHostName) -> - delete_in_mnesia(VHostName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(VHostName) end, + khepri => fun() -> delete_in_khepri(VHostName) end}). delete_in_mnesia(VHostName) -> rabbit_mnesia:execute_mnesia_transaction( @@ -313,6 +466,14 @@ delete_in_mnesia_tx(VHostName) -> mnesia:delete({?MNESIA_TABLE, VHostName}), Existed. +delete_in_khepri(VHostName) -> + Path = khepri_vhost_path(VHostName), + case rabbit_khepri:delete_or_fail(Path) of + ok -> true; + {error, {node_not_found, _}} -> false; + {error, _} = Err -> Err + end. + %% ------------------------------------------------------------------- %% clear(). %% ------------------------------------------------------------------- @@ -323,8 +484,24 @@ delete_in_mnesia_tx(VHostName) -> %% @private clear() -> - clear_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> clear_in_mnesia() end, + khepri => fun() -> clear_in_khepri() end}). clear_in_mnesia() -> {atomic, ok} = mnesia:clear_table(?MNESIA_TABLE), ok. + +clear_in_khepri() -> + Path = khepri_vhosts_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. + +%% -------------------------------------------------------------- +%% Paths +%% -------------------------------------------------------------- + +khepri_vhosts_path() -> [?MODULE]. +khepri_vhost_path(VHost) -> [?MODULE, VHost]. diff --git a/deps/rabbit/src/rabbit_db_vhost_defaults.erl b/deps/rabbit/src/rabbit_db_vhost_defaults.erl index e756866a9abf..b1fa63f7327e 100644 --- a/deps/rabbit/src/rabbit_db_vhost_defaults.erl +++ b/deps/rabbit/src/rabbit_db_vhost_defaults.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_vhost_defaults). diff --git a/deps/rabbit/src/rabbit_db_vhost_m2k_converter.erl b/deps/rabbit/src/rabbit_db_vhost_m2k_converter.erl new file mode 100644 index 000000000000..4e4e14cf5457 --- /dev/null +++ b/deps/rabbit/src/rabbit_db_vhost_m2k_converter.erl @@ -0,0 +1,102 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_vhost_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). +-include("vhost.hrl"). + +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3]). 
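%% This converter plays the same role as rabbit_db_user_m2k_converter
%% above, but for the rabbit_vhost table: each #vhost{} record is written
%% to, or deleted from, rabbit_db_vhost:khepri_vhost_path(Name), i.e.
%% [rabbit_db_vhost, Name] in the Khepri store.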
+ +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, Tables) -> + %% Clean up any previous attempt to copy the Mnesia table to Khepri. + lists:foreach(fun clear_data_in_khepri/1, Tables), + + SubState = #?MODULE{}, + {ok, SubState}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +copy_to_khepri( + rabbit_vhost = Table, Record, State) when ?is_vhost(Record) -> + Name = vhost:get_name(Record), + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Name], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_vhost:khepri_vhost_path(Name), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +delete_from_khepri(rabbit_vhost = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_vhost:khepri_vhost_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +-spec clear_data_in_khepri(Table) -> ok when + Table :: atom(). + +clear_data_in_khepri(rabbit_vhost) -> + Path = rabbit_db_vhost:khepri_vhosts_path(), + case rabbit_khepri:delete(Path) of + ok -> ok; + Error -> throw(Error) + end. diff --git a/deps/rabbit/src/rabbit_dead_letter.erl b/deps/rabbit/src/rabbit_dead_letter.erl index 4246879cee1c..a8c6b4515eda 100644 --- a/deps/rabbit/src/rabbit_dead_letter.erl +++ b/deps/rabbit/src/rabbit_dead_letter.erl @@ -2,10 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_dead_letter). +-include("mc.hrl"). -export([publish/5, detect_cycles/3]). 
@@ -26,50 +27,57 @@ publish(Msg0, Reason, #exchange{name = XName} = DLX, RK, #resource{name = SourceQName}) -> DLRKeys = case RK of undefined -> - mc:get_annotation(routing_keys, Msg0); + mc:routing_keys(Msg0); _ -> [RK] end, - Msg1 = mc:record_death(Reason, SourceQName, Msg0), + Env = case rabbit_feature_flags:is_enabled(?FF_MC_DEATHS_V2) of + true -> #{}; + false -> #{?FF_MC_DEATHS_V2 => false} + end, + Msg1 = mc:record_death(Reason, SourceQName, Msg0, Env), {Ttl, Msg2} = mc:take_annotation(dead_letter_ttl, Msg1), Msg3 = mc:set_ttl(Ttl, Msg2), - Msg4 = mc:set_annotation(routing_keys, DLRKeys, Msg3), - DLMsg = mc:set_annotation(exchange, XName#resource.name, Msg4), - Routed = rabbit_exchange:route(DLX, DLMsg, #{return_binding_keys => true}), - {QNames, Cycles} = detect_cycles(Reason, DLMsg, Routed), + Msg4 = mc:set_annotation(?ANN_ROUTING_KEYS, DLRKeys, Msg3), + DLMsg = mc:set_annotation(?ANN_EXCHANGE, XName#resource.name, Msg4), + Routed0 = rabbit_exchange:route(DLX, DLMsg, #{return_binding_keys => true}), + {Cycles, Routed} = detect_cycles(Reason, DLMsg, Routed0), lists:foreach(fun log_cycle_once/1, Cycles), - Qs0 = rabbit_amqqueue:lookup_many(QNames), + Qs0 = rabbit_amqqueue:lookup_many(Routed), Qs = rabbit_amqqueue:prepend_extra_bcc(Qs0), _ = rabbit_queue_type:deliver(Qs, DLMsg, #{}, stateless), ok. detect_cycles(rejected, _Msg, Queues) -> - {Queues, []}; + %% shortcut + {[], Queues}; detect_cycles(_Reason, Msg, Queues) -> - {Cycling, NotCycling} = - lists:partition(fun (#resource{name = Queue}) -> - mc:is_death_cycle(Queue, Msg); - ({#resource{name = Queue}, _RouteInfos}) -> - mc:is_death_cycle(Queue, Msg) - end, Queues), - DeathQueues = mc:death_queue_names(Msg), - CycleKeys = lists:foldl(fun(#resource{name = Q}, Acc) -> - [Q | Acc]; - ({#resource{name = Q}, _RouteInfos}, Acc) -> - [Q | Acc] - end, DeathQueues, Cycling), - {NotCycling, CycleKeys}. + {Cycling, + NotCycling} = lists:partition(fun(#resource{name = Queue}) -> + mc:is_death_cycle(Queue, Msg); + ({#resource{name = Queue}, _RouteInfos}) -> + mc:is_death_cycle(Queue, Msg) + end, Queues), + Names = mc:death_queue_names(Msg), + Cycles = lists:map(fun(#resource{name = Q}) -> + [Q | Names]; + ({#resource{name = Q}, _RouteInfos}) -> + [Q | Names] + end, Cycling), + {Cycles, NotCycling}. -log_cycle_once(Queues) -> - %% using a hash won't eliminate this as a potential memory leak but it will - %% reduce the potential amount of memory used whilst probably being - %% "good enough" - Key = {queue_cycle, erlang:phash2(Queues)}, +log_cycle_once(Cycle) -> + %% Using a hash won't eliminate this as a potential memory leak but it will + %% reduce the potential amount of memory used whilst probably being "good enough". + Key = {dead_letter_cycle, erlang:phash2(Cycle)}, case get(Key) of - true -> ok; + true -> + ok; undefined -> - rabbit_log:warning("Message dropped. Dead-letter queues cycle detected" - ": ~tp~nThis cycle will NOT be reported again.", - [Queues]), + rabbit_log:warning( + "Message dropped because the following list of queues (ordered by " + "death recency) contains a dead letter cycle without reason 'rejected'. " + "This list will not be logged again: ~tp", + [Cycle]), put(Key, true) end. diff --git a/deps/rabbit/src/rabbit_definitions.erl b/deps/rabbit/src/rabbit_definitions.erl index ff838c398f19..9ebc5e074f63 100644 --- a/deps/rabbit/src/rabbit_definitions.erl +++ b/deps/rabbit/src/rabbit_definitions.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% @@ -80,7 +80,8 @@ 'bindings' | 'exchanges'. --type definition_object() :: #{binary() => any()}. +-type definition_key() :: binary() | atom(). +-type definition_object() :: #{definition_key() => any()}. -type definition_list() :: [definition_object()]. -type definitions() :: #{ @@ -104,18 +105,73 @@ maybe_load_definitions() -> {error, E} -> {error, E} end. -validate_definitions(Defs) when is_list(Defs) -> +-spec validate_parsing_of_doc(any()) -> boolean(). +validate_parsing_of_doc(Body) when is_binary(Body) -> + case decode(Body) of + {ok, _Map} -> true; + {error, _Err} -> false + end. + +-spec validate_parsing_of_doc_collection(list(any())) -> boolean(). +validate_parsing_of_doc_collection(Defs) when is_list(Defs) -> lists:foldl(fun(_Body, false) -> - false; - (Body, true) -> - case decode(Body) of - {ok, _Map} -> true; - {error, _Err} -> false - end - end, true, Defs); + false; + (Body, true) -> + case decode(Body) of + {ok, _Map} -> true; + {error, _Err} -> false + end + end, true, Defs). + +-spec filter_orphaned_objects(definition_list()) -> definition_list(). +filter_orphaned_objects(Maps) -> + lists:filter(fun(M) -> maps:get(<<"vhost">>, M, undefined) =:= undefined end, Maps). + +-spec any_orphaned_objects(definition_list()) -> boolean(). +any_orphaned_objects(Maps) -> + length(filter_orphaned_objects(Maps)) > 0. + +-spec any_orphaned_in_doc(definitions()) -> boolean(). +any_orphaned_in_doc(DefsMap) -> + any_orphaned_in_category(DefsMap, <<"queues">>) + orelse any_orphaned_in_category(DefsMap, <<"exchanges">>) + orelse any_orphaned_in_category(DefsMap, <<"bindings">>). + +-spec any_orphaned_in_category(definitions(), definition_category() | binary()) -> boolean(). +any_orphaned_in_category(DefsMap, Category) -> + %% try both binary and atom keys + any_orphaned_objects(maps:get(Category, DefsMap, + maps:get(rabbit_data_coercion:to_atom(Category), DefsMap, []))). + +-spec validate_orphaned_objects_in_doc_collection(list() | binary()) -> boolean(). +validate_orphaned_objects_in_doc_collection(Defs) when is_list(Defs) -> + lists:foldl(fun(_Body, false) -> + false; + (Body, true) -> + validate_parsing_of_doc(Body) + end, true, Defs). + +-spec validate_orphaned_objects_in_doc(binary()) -> boolean(). +validate_orphaned_objects_in_doc(Body) when is_binary(Body) -> + case decode(Body) of + {ok, DefsMap} -> + AnyOrphaned = any_orphaned_in_doc(DefsMap), + case AnyOrphaned of + true -> + log_an_error_about_orphaned_objects(); + false -> ok + end, + AnyOrphaned; + {error, _Err} -> false + end. + +-spec validate_definitions(list(any()) | binary()) -> boolean(). +validate_definitions(Defs) when is_list(Defs) -> + validate_parsing_of_doc_collection(Defs) andalso + validate_orphaned_objects_in_doc_collection(Defs); validate_definitions(Body) when is_binary(Body) -> case decode(Body) of - {ok, _Map} -> true; + {ok, Defs} -> validate_orphaned_objects_in_doc(Defs); {error, _Err} -> false end. 
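To make the orphan check concrete, here is an illustration with made-up queue names: definitions exported for a single virtual host omit the "vhost" field on each object, while a cluster-wide export includes it, and filter_orphaned_objects/1 above keeps only the former.

Orphaned = #{<<"name">> => <<"orders">>, <<"durable">> => true},
Complete = #{<<"name">> => <<"orders">>, <<"vhost">> => <<"/">>,
             <<"durable">> => true},
%% Only the entry without a vhost field is reported as orphaned:
[Orphaned] = filter_orphaned_objects([Orphaned, Complete]).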
@@ -284,6 +340,7 @@ maybe_load_definitions_from_local_filesystem(App, Key) -> undefined -> ok; {ok, none} -> ok; {ok, Path} -> + rabbit_log:debug("~ts.~ts is set to '~ts', will discover definition file(s) to import", [App, Key, Path]), IsDir = filelib:is_dir(Path), Mod = rabbit_definitions_import_local_filesystem, rabbit_log:debug("Will use module ~ts to import definitions", [Mod]), @@ -409,6 +466,10 @@ should_skip_if_unchanged() -> ReachedTargetClusterSize = rabbit_nodes:reached_target_cluster_size(), OptedIn andalso ReachedTargetClusterSize. +log_an_error_about_orphaned_objects() -> + rabbit_log:error("Definitions import: some queues, exchanges or bindings in the definition file " + "are missing the virtual host field. Such files are produced when definitions of " + "a single virtual host are exported. They cannot be used to import definitions at boot time"). -spec apply_defs(Map :: #{atom() => any()}, ActingUser :: rabbit_types:username()) -> 'ok' | {error, term()}. @@ -424,6 +485,20 @@ apply_defs(Map, ActingUser, VHost) when is_binary(VHost) -> apply_defs(Map, ActingUser, fun () -> ok end, VHost); apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) -> Version = maps:get(rabbitmq_version, Map, maps:get(rabbit_version, Map, undefined)), + + %% If any of the queues or exchanges do not have virtual hosts set, + %% this definition file was a virtual-host specific import. They cannot be applied + %% as "complete" definition imports, most notably, imported on boot. + AnyOrphaned = any_orphaned_in_doc(Map), + + case AnyOrphaned of + true -> + log_an_error_about_orphaned_objects(), + throw({error, invalid_definitions_file}); + false -> + ok + end, + try concurrent_for_all(users, ActingUser, Map, fun(User, _Username) -> @@ -457,8 +532,8 @@ apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) -> SuccessFun(), ok - catch {error, E} -> {error, E}; - exit:E -> {error, E} + catch {error, E} -> {error, format(E)}; + exit:E -> {error, format(E)} after rabbit_runtime:gc_all_processes() end. @@ -587,8 +662,11 @@ do_concurrent_for_all(List, WorkPoolFun) -> fun() -> _ = try WorkPoolFun(M) - catch {error, E} -> gatherer:in(Gatherer, {error, E}); - _:E -> gatherer:in(Gatherer, {error, E}) + catch {error, E} -> gatherer:in(Gatherer, {error, E}); + _:E:Stacktrace -> + rabbit_log:debug("Definition import: a work pool operation has thrown an exception ~st, stacktrace: ~p", + [E, Stacktrace]), + gatherer:in(Gatherer, {error, E}) end, gatherer:finish(Gatherer) end) @@ -627,6 +705,10 @@ format({no_such_vhost, VHost}) -> [VHost])); format({vhost_limit_exceeded, ErrMsg}) -> rabbit_data_coercion:to_binary(ErrMsg); +format({shutdown, _} = Error) -> + rabbit_log:debug("Metadata store is unavailable: ~p", [Error]), + rabbit_data_coercion:to_binary( + rabbit_misc:format("Metadata store is unavailable. Please try again.", [])); format(E) -> rabbit_data_coercion:to_binary(rabbit_misc:format("~tp", [E])). @@ -691,7 +773,7 @@ add_policy(VHost, Param, Username) -> exit(rabbit_data_coercion:to_binary(rabbit_misc:escape_html_tags(E ++ S))) end. --spec add_vhost(map(), rabbit_types:username()) -> ok. +-spec add_vhost(map(), rabbit_types:username()) -> ok | no_return(). 
add_vhost(VHost, ActingUser) -> Name = maps:get(name, VHost, undefined), @@ -701,7 +783,12 @@ add_vhost(VHost, ActingUser) -> Tags = maps:get(tags, VHost, maps:get(tags, Metadata, [])), DefaultQueueType = maps:get(default_queue_type, Metadata, undefined), - rabbit_vhost:put_vhost(Name, Description, Tags, DefaultQueueType, IsTracingEnabled, ActingUser). + case rabbit_vhost:put_vhost(Name, Description, Tags, DefaultQueueType, IsTracingEnabled, ActingUser) of + ok -> + ok; + {error, _} = Err -> + throw(Err) + end. add_permission(Permission, ActingUser) -> rabbit_auth_backend_internal:set_permissions(maps:get(user, Permission, undefined), @@ -731,6 +818,10 @@ add_queue_int(_Queue, R = #resource{kind = queue, Name = R#resource.name, rabbit_log:warning("Skipping import of a queue whose name begins with 'amq.', " "name: ~ts, acting user: ~ts", [Name, ActingUser]); +add_queue_int(_Queue, R = #resource{kind = queue, virtual_host = undefined}, ActingUser) -> + Name = R#resource.name, + rabbit_log:warning("Skipping import of a queue with an unset virtual host field, " + "name: ~ts, acting user: ~ts", [Name, ActingUser]); add_queue_int(Queue, Name = #resource{virtual_host = VHostName}, ActingUser) -> case rabbit_amqqueue:exists(Name) of true -> @@ -777,13 +868,18 @@ add_exchange_int(Exchange, Name, ActingUser) -> undefined -> false; %% =< 2.2.0 I -> I end, - rabbit_exchange:declare(Name, - rabbit_exchange:check_type(maps:get(type, Exchange, undefined)), - maps:get(durable, Exchange, undefined), - maps:get(auto_delete, Exchange, undefined), - Internal, - args(maps:get(arguments, Exchange, undefined)), - ActingUser) + case rabbit_exchange:declare(Name, + rabbit_exchange:check_type(maps:get(type, Exchange, undefined)), + maps:get(durable, Exchange, undefined), + maps:get(auto_delete, Exchange, undefined), + Internal, + args(maps:get(arguments, Exchange, undefined)), + ActingUser) of + {ok, _Exchange} -> + ok; + {error, timeout} = Err -> + throw(Err) + end end. add_binding(Binding, ActingUser) -> @@ -797,12 +893,17 @@ add_binding(VHost, Binding, ActingUser) -> rv(VHost, DestType, destination, Binding), ActingUser). add_binding_int(Binding, Source, Destination, ActingUser) -> - rabbit_binding:add( - #binding{source = Source, - destination = Destination, - key = maps:get(routing_key, Binding, undefined), - args = args(maps:get(arguments, Binding, undefined))}, - ActingUser). + case rabbit_binding:add( + #binding{source = Source, + destination = Destination, + key = maps:get(routing_key, Binding, undefined), + args = args(maps:get(arguments, Binding, undefined))}, + ActingUser) of + ok -> + ok; + {error, _} = Err -> + throw(Err) + end. dest_type(Binding) -> rabbit_data_coercion:to_atom(maps:get(destination_type, Binding, undefined)). @@ -824,6 +925,7 @@ validate_limits(All) -> undefined -> ok; Queues0 -> {ok, VHostMap} = filter_out_existing_queues(Queues0), + _ = rabbit_log:debug("Definition import. Virtual host map for validation: ~p", [VHostMap]), maps:fold(fun validate_vhost_limit/3, ok, VHostMap) end. @@ -848,19 +950,30 @@ filter_out_existing_queues(VHost, Queues) -> build_queue_data(Queue) -> VHost = maps:get(<<"vhost">>, Queue, undefined), - Rec = rv(VHost, queue, <<"name">>, Queue), - {Rec, VHost}. + case VHost of + undefined -> undefined; + Value -> + Rec = rv(Value, queue, <<"name">>, Queue), + {Rec, VHost} + end. 
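As a worked example for the queue-limit validation (queue and vhost names are made up, and it assumes none of these queues exist yet): build_filtered_map/2, defined right below, counts how many new queues each virtual host would gain, skipping entries without a vhost field, and validate_vhost_limit/3 then checks those counts against each virtual host's limit.

Queues = [#{<<"name">> => <<"q1">>, <<"vhost">> => <<"vh1">>},
          #{<<"name">> => <<"q2">>, <<"vhost">> => <<"vh1">>},
          #{<<"name">> => <<"q3">>, <<"vhost">> => <<"vh2">>},
          #{<<"name">> => <<"q4">>}],   %% no vhost field: skipped
{ok, #{<<"vh1">> := 2, <<"vh2">> := 1}} = build_filtered_map(Queues, #{}).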
build_filtered_map([], AccMap) -> {ok, AccMap}; build_filtered_map([Queue|Rest], AccMap0) -> - {Rec, VHost} = build_queue_data(Queue), - case rabbit_amqqueue:exists(Rec) of - false -> - AccMap1 = maps:update_with(VHost, fun(V) -> V + 1 end, 1, AccMap0), - build_filtered_map(Rest, AccMap1); - true -> - build_filtered_map(Rest, AccMap0) + %% If virtual host is not specified in a queue, + %% this definition file is likely virtual host-specific. + %% + %% Skip such queues. + case build_queue_data(Queue) of + undefined -> build_filtered_map(Rest, AccMap0); + {Rec, VHost} when VHost =/= undefined -> + case rabbit_amqqueue:exists(Rec) of + false -> + AccMap1 = maps:update_with(VHost, fun(V) -> V + 1 end, 1, AccMap0), + build_filtered_map(Rest, AccMap1); + true -> + build_filtered_map(Rest, AccMap0) + end end. validate_vhost_limit(VHost, AddCount, ok) -> @@ -983,9 +1096,20 @@ runtime_parameter_definition(Param) -> <<"vhost">> => pget(vhost, Param), <<"component">> => pget(component, Param), <<"name">> => pget(name, Param), - <<"value">> => maps:from_list(pget(value, Param)) + <<"value">> => maybe_map(pget(value, Param)) }. +maybe_map(Value) -> + %% Not all definitions are maps. `federation-upstream-set` is + %% a list of maps, and it should be exported as it has been + %% imported + try + rabbit_data_coercion:to_map(Value) + catch + error:badarg -> + Value + end. + list_global_runtime_parameters() -> [global_runtime_parameter_definition(P) || P <- rabbit_runtime_parameters:list_global(), not is_internal_parameter(P)]. diff --git a/deps/rabbit/src/rabbit_definitions_hashing.erl b/deps/rabbit/src/rabbit_definitions_hashing.erl index d77833791c28..0e8c8d40a1ef 100644 --- a/deps/rabbit/src/rabbit_definitions_hashing.erl +++ b/deps/rabbit/src/rabbit_definitions_hashing.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module is responsible for definition content hashing. Content hashing diff --git a/deps/rabbit/src/rabbit_definitions_import_https.erl b/deps/rabbit/src/rabbit_definitions_import_https.erl index 1c32358b4010..de6a020ff061 100644 --- a/deps/rabbit/src/rabbit_definitions_import_https.erl +++ b/deps/rabbit/src/rabbit_definitions_import_https.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module is responsible for loading definition from an HTTPS endpoint. @@ -14,8 +14,6 @@ %% * rabbit_definitions_import_local_filesystem %% * rabbit_definitions_hashing -module(rabbit_definitions_import_https). --include_lib("rabbit_common/include/rabbit.hrl"). 
- -export([ is_enabled/0, load/1, diff --git a/deps/rabbit/src/rabbit_definitions_import_local_filesystem.erl b/deps/rabbit/src/rabbit_definitions_import_local_filesystem.erl index 0c79ea0c4a00..19a70ac9c272 100644 --- a/deps/rabbit/src/rabbit_definitions_import_local_filesystem.erl +++ b/deps/rabbit/src/rabbit_definitions_import_local_filesystem.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module is responsible for loading definition from a local filesystem @@ -15,8 +15,6 @@ %% * rabbit_definitions_import_http %% * rabbit_definitions_hashing -module(rabbit_definitions_import_local_filesystem). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([ is_enabled/0, %% definition source options @@ -94,7 +92,9 @@ load_with_hashing(IsDir, Path, PreviousHash, Algo) when is_boolean(IsDir) -> Other end; false -> - rabbit_log:error("Failed to parse a definition file, path: ~p", [Path]), + rabbit_log:error("Definitions file at path ~p failed validation. The file must be a valid JSON document " + "and all virtual host-scoped resources must have a virtual host field to be set. " + "Definition files exported for a single virtual host CANNOT be imported at boot time", [Path]), {error, not_json} end end. diff --git a/deps/rabbit/src/rabbit_depr_ff_extra.erl b/deps/rabbit/src/rabbit_depr_ff_extra.erl new file mode 100644 index 000000000000..5267c3efbfb6 --- /dev/null +++ b/deps/rabbit/src/rabbit_depr_ff_extra.erl @@ -0,0 +1,69 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2023 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +%% @doc +%% This module provides extra functions unused by the feature flags +%% subsystem core functionality. + +-module(rabbit_depr_ff_extra). + +-export([cli_info/1]). + +-type cli_info() :: [cli_info_entry()]. +%% A list of deprecated feature properties, formatted for the RabbitMQ CLI. + +-type cli_info_entry() :: + #{name => rabbit_feature_flags:feature_name(), + deprecation_phase => rabbit_deprecated_features:deprecation_phase(), + provided_by => atom(), + desc => string(), + doc_url => string()}. +%% A list of properties for a single deprecated feature, formatted for the +%% RabbitMQ CLI. + +-spec cli_info(Which) -> CliInfo when + Which :: all | used, + CliInfo :: cli_info(). +%% @doc +%% Returns a list of all or used deprecated features properties, +%% depending on the argument. +%% +%% @param Which The group of deprecated features to return: `all' or `used'. +%% @returns the list of all deprecated feature properties. + +cli_info(all) -> + cli_info0(rabbit_deprecated_features:list(all)); +cli_info(used) -> + cli_info0(rabbit_deprecated_features:list(used)). + +-spec cli_info0(FeatureFlags) -> CliInfo when + FeatureFlags :: rabbit_feature_flags:feature_flags(), + CliInfo :: cli_info(). +%% @doc +%% Formats a map of deprecated features and their properties into a list of +%% deprecated feature properties as expected by the RabbitMQ CLI. 
+%% +%% @param DeprecatedFeatures A map of deprecated features. +%% @returns the list of deprecated features properties, created from the map +%% specified in arguments. + +cli_info0(DeprecatedFeature) -> + lists:foldr( + fun(FeatureName, Acc) -> + FeatureProps = maps:get(FeatureName, DeprecatedFeature), + + App = maps:get(provided_by, FeatureProps), + DeprecationPhase = maps:get(deprecation_phase, FeatureProps, ""), + Desc = maps:get(desc, FeatureProps, ""), + DocUrl = maps:get(doc_url, FeatureProps, ""), + Info = #{name => FeatureName, + desc => unicode:characters_to_binary(Desc), + deprecation_phase => DeprecationPhase, + doc_url => unicode:characters_to_binary(DocUrl), + provided_by => App}, + [Info | Acc] + end, [], lists:sort(maps:keys(DeprecatedFeature))). diff --git a/deps/rabbit/src/rabbit_deprecated_features.erl b/deps/rabbit/src/rabbit_deprecated_features.erl index a83dc8cf56d2..93289be033eb 100644 --- a/deps/rabbit/src/rabbit_deprecated_features.erl +++ b/deps/rabbit/src/rabbit_deprecated_features.erl @@ -2,11 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2023 VMware, Inc. or its affiliates. +%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc %% This module provides an API to manage deprecated features in RabbitMQ. It @@ -117,7 +117,8 @@ get_warning/1]). -export([extend_properties/2, should_be_permitted/2, - enable_underlying_feature_flag_cb/1]). + enable_underlying_feature_flag_cb/1, + list/1]). -type deprecated_feature_modattr() :: {rabbit_feature_flags:feature_name(), feature_props()}. @@ -202,6 +203,10 @@ %% needed. Other added properties are the same as {@link %% rabbit_feature_flags:feature_props_extended()}. +-type deprecated_features() :: + #{rabbit_feature_flags:feature_name() => + feature_props_extended()}. + -type callbacks() :: is_feature_used_callback(). %% All possible callbacks. @@ -346,6 +351,28 @@ get_warning(FeatureProps, Permitted) when is_map(FeatureProps) -> maps:get(when_removed, Msgs) end. +-spec list(Which :: all | used) -> deprecated_features(). +%% @doc +%% Lists all or used deprecated features, depending on the argument. +%% +%% @param Which The group of deprecated features to return: `all' or `used'. +%% @returns A map of selected deprecated features. + +list(all) -> + maps:filter( + fun(_, FeatureProps) -> ?IS_DEPRECATION(FeatureProps) end, + rabbit_ff_registry_wrapper:list(all)); +list(used) -> + maps:filter( + fun(FeatureName, FeatureProps) -> + ?IS_DEPRECATION(FeatureProps) + and + (is_deprecated_feature_in_use( + #{feature_name => FeatureName, + feature_props => FeatureProps}) =:= true) + end, + rabbit_ff_registry_wrapper:list(all)). + %% ------------------------------------------------------------------- %% Internal functions. 
%% ------------------------------------------------------------------- @@ -386,8 +413,8 @@ generate_warnings1(FeatureName, FeatureProps, Msgs) -> "Feature `~ts` is deprecated.~n" "By default, this feature can still be used for now.~n" "Its use will not be permitted by default in a future minor " - "RabbitMQ version and the feature will be removed from a" - "future major RabbitMQ version; actual versions to be" + "RabbitMQ version and the feature will be removed from a " + "future major RabbitMQ version; actual versions to be " "determined.~n" "To continue using this feature when it is not permitted " "by default, set the following parameter in your " @@ -562,12 +589,12 @@ maybe_log_warning(FeatureName, Permitted) -> should_log_warning(FeatureName) -> Key = ?PT_DEPRECATION_WARNING_TS(FeatureName), - Now = erlang:timestamp(), + Now = erlang:monotonic_time(), try Last = persistent_term:get(Key), - Diff = timer:now_diff(Now, Last), + Diff = erlang:convert_time_unit(Now - Last, native, second), if - Diff >= 24 * 60 * 60 * 1000 * 1000 -> + Diff >= 24 * 60 * 60 -> persistent_term:put(Key, Now), true; true -> @@ -581,24 +608,29 @@ should_log_warning(FeatureName) -> enable_underlying_feature_flag_cb( #{command := enable, - feature_name := FeatureName, - feature_props := #{callbacks := Callbacks}} = Args) -> - case Callbacks of - #{is_feature_used := {CallbackMod, CallbackFun}} -> - Args1 = Args#{command => is_feature_used}, - IsUsed = erlang:apply(CallbackMod, CallbackFun, [Args1]), - case IsUsed of - false -> - ok; - true -> - ?LOG_ERROR( - "Deprecated features: `~ts`: can't deny deprecated " - "feature because it is actively used", - [FeatureName], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - {error, - {failed_to_deny_deprecated_features, [FeatureName]}} - end; + feature_name := FeatureName} = Args) -> + IsUsed = is_deprecated_feature_in_use(Args), + case IsUsed of + true -> + ?LOG_ERROR( + "Deprecated features: `~ts`: can't deny deprecated " + "feature because it is actively used", + [FeatureName], + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + {error, + {failed_to_deny_deprecated_features, [FeatureName]}}; _ -> ok end. + +is_deprecated_feature_in_use( + #{feature_props := #{callbacks := Callbacks}} = Args1) -> + case Callbacks of + #{is_feature_used := {CallbackMod, CallbackFun}} -> + Args = Args1#{command => is_feature_used}, + erlang:apply(CallbackMod, CallbackFun, [Args]); + _ -> + undefined + end; +is_deprecated_feature_in_use(_) -> + undefined. diff --git a/deps/rabbit/src/rabbit_diagnostics.erl b/deps/rabbit/src/rabbit_diagnostics.erl index e67d90de94f3..33c623c24886 100644 --- a/deps/rabbit/src/rabbit_diagnostics.erl +++ b/deps/rabbit/src/rabbit_diagnostics.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_diagnostics). diff --git a/deps/rabbit/src/rabbit_direct.erl b/deps/rabbit/src/rabbit_direct.erl index 987e4ebe4209..f19da93e91af 100644 --- a/deps/rabbit/src/rabbit_direct.erl +++ b/deps/rabbit/src/rabbit_direct.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. 
All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_direct). @@ -158,7 +158,7 @@ is_vhost_alive(VHost, {Username, _Password}, Pid) -> true -> true; false -> rabbit_log_connection:error( - "Error on Direct connection ~tp~n" + "Error on direct client connection ~tp~n" "access to vhost '~ts' refused for user '~ts': " "vhost '~ts' is down", [Pid, VHost, PrintedUsername, VHost]), @@ -174,7 +174,7 @@ is_over_vhost_connection_limit(VHost, {Username, _Password}, Pid) -> false -> false; {true, Limit} -> rabbit_log_connection:error( - "Error on Direct connection ~tp~n" + "Error on direct client connection ~tp~n" "access to vhost '~ts' refused for user '~ts': " "vhost connection limit (~tp) is reached", [Pid, VHost, PrintedUsername, Limit]), @@ -182,7 +182,7 @@ is_over_vhost_connection_limit(VHost, {Username, _Password}, Pid) -> catch throw:{error, {no_such_vhost, VHost}} -> rabbit_log_connection:error( - "Error on Direct connection ~tp~n" + "Error on direct client connection ~tp~n" "vhost ~ts not found", [Pid, VHost]), true end. diff --git a/deps/rabbit/src/rabbit_direct_reply_to.erl b/deps/rabbit/src/rabbit_direct_reply_to.erl index 4e8a6eb19e93..c45d27101d16 100644 --- a/deps/rabbit/src/rabbit_direct_reply_to.erl +++ b/deps/rabbit/src/rabbit_direct_reply_to.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_direct_reply_to). diff --git a/deps/rabbit/src/rabbit_disk_monitor.erl b/deps/rabbit/src/rabbit_disk_monitor.erl index 688850932a5e..f0dc60206ec5 100644 --- a/deps/rabbit/src/rabbit_disk_monitor.erl +++ b/deps/rabbit/src/rabbit_disk_monitor.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_disk_monitor). @@ -421,6 +421,9 @@ start_timer(State) -> interval(#state{alarmed = true, max_interval = MaxInterval}) -> MaxInterval; +interval(#state{actual = 'NaN', + max_interval = MaxInterval}) -> + MaxInterval; interval(#state{limit = Limit, actual = Actual, min_interval = MinInterval, diff --git a/deps/rabbit/src/rabbit_epmd_monitor.erl b/deps/rabbit/src/rabbit_epmd_monitor.erl index 9090d41ed2d9..493f487b229c 100644 --- a/deps/rabbit/src/rabbit_epmd_monitor.erl +++ b/deps/rabbit/src/rabbit_epmd_monitor.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_epmd_monitor). 
diff --git a/deps/rabbit/src/rabbit_event_consumer.erl b/deps/rabbit/src/rabbit_event_consumer.erl index 523e3f86886d..e12d6d3ccad5 100644 --- a/deps/rabbit/src/rabbit_event_consumer.erl +++ b/deps/rabbit/src/rabbit_event_consumer.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_event_consumer). diff --git a/deps/rabbit/src/rabbit_exchange.erl b/deps/rabbit/src/rabbit_exchange.erl index 60347d80afe3..5a00d4de80da 100644 --- a/deps/rabbit/src/rabbit_exchange.erl +++ b/deps/rabbit/src/rabbit_exchange.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange). @@ -13,7 +13,8 @@ lookup/1, lookup_many/1, lookup_or_die/1, list/0, list/1, lookup_scratch/2, update_scratch/3, update_decorators/2, immutable/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4, - route/2, route/3, delete/3, validate_binding/2, count/0]). + route/2, route/3, delete/3, validate_binding/2, count/0, + ensure_deleted/3]). -export([list_names/0]). -export([serialise_events/1]). -export([serial/1, peek_serial/1]). @@ -91,10 +92,16 @@ serial(X) -> true -> rabbit_db_exchange:next_serial(X#exchange.name) end. --spec declare - (name(), type(), boolean(), boolean(), boolean(), - rabbit_framing:amqp_table(), rabbit_types:username()) - -> rabbit_types:exchange(). +-spec declare(Name, Type, Durable, AutoDelete, Internal, Args, Username) -> + Ret when + Name :: name(), + Type :: type(), + Durable :: boolean(), + AutoDelete :: boolean(), + Internal :: boolean(), + Args :: rabbit_framing:amqp_table(), + Username :: rabbit_types:username(), + Ret :: {ok, rabbit_types:exchange()} | {error, timeout}. declare(XName, Type, Durable, AutoDelete, Internal, Args, Username) -> X = rabbit_exchange_decorator:set( @@ -121,16 +128,16 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args, Username) -> Serial = serial(Exchange), ok = callback(X, create, Serial, [Exchange]), rabbit_event:notify(exchange_created, info(Exchange)), - Exchange; + {ok, Exchange}; {existing, Exchange} -> - Exchange; - Err -> + {ok, Exchange}; + {error, timeout} = Err -> Err end; _ -> rabbit_log:warning("ignoring exchange.declare for exchange ~tp, exchange.delete in progress~n.", [XName]), - X + {ok, X} end. %% Used with binaries sent over the wire; the type may not exist. @@ -142,11 +149,11 @@ check_type(TypeBin) -> case rabbit_registry:binary_to_type(rabbit_data_coercion:to_binary(TypeBin)) of {error, not_found} -> rabbit_misc:protocol_error( - command_invalid, "unknown exchange type '~ts'", [TypeBin]); + precondition_failed, "unknown exchange type '~ts'", [TypeBin]); T -> case rabbit_registry:lookup_module(exchange, T) of {error, not_found} -> rabbit_misc:protocol_error( - command_invalid, + precondition_failed, "invalid exchange type '~ts'", [T]); {ok, _Module} -> T end @@ -220,14 +227,11 @@ list() -> count() -> rabbit_db_exchange:count(). 
--spec list_names() -> [rabbit_exchange:name()]. +-spec list_names() -> [name()]. list_names() -> rabbit_db_exchange:list(). -%% Not dirty_match_object since that would not be transactional when used in a -%% tx context - -spec list(rabbit_types:vhost()) -> [rabbit_types:exchange()]. list(VHostPath) -> @@ -354,7 +358,7 @@ route(Exchange, Message) -> route(#exchange{name = #resource{name = ?DEFAULT_EXCHANGE_NAME, virtual_host = VHost}}, Message, _Opts) -> - RKs0 = mc:get_annotation(routing_keys, Message), + RKs0 = mc:routing_keys(Message), RKs = lists:usort(RKs0), [begin case virtual_reply_queue(RK) of @@ -449,9 +453,13 @@ cons_if_present(XName, L) -> -spec delete (name(), 'true', rabbit_types:username()) -> - 'ok'| rabbit_types:error('not_found' | 'in_use'); + 'ok' | + rabbit_types:error('not_found' | 'in_use') | + rabbit_khepri:timeout_error(); (name(), 'false', rabbit_types:username()) -> - 'ok' | rabbit_types:error('not_found'). + 'ok' | + rabbit_types:error('not_found') | + rabbit_khepri:timeout_error(). delete(XName, IfUnused, Username) -> try @@ -483,6 +491,26 @@ process_deletions({deleted, #exchange{name = XName} = X, Bs, Deletions}) -> rabbit_binding:add_deletion( XName, {X, deleted, Bs}, Deletions)). +-spec ensure_deleted(ExchangeName, IfUnused, Username) -> Ret when + ExchangeName :: name(), + IfUnused :: boolean(), + Username :: rabbit_types:username(), + Ret :: ok | + rabbit_types:error('in_use') | + rabbit_khepri:timeout_error(). +%% @doc A wrapper around `delete/3' which returns `ok' in the case that the +%% exchange did not exist at time of deletion. + +ensure_deleted(XName, IfUnused, Username) -> + case delete(XName, IfUnused, Username) of + ok -> + ok; + {error, not_found} -> + ok; + {error, _} = Err -> + Err + end. + -spec validate_binding (rabbit_types:exchange(), rabbit_types:binding()) -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}). diff --git a/deps/rabbit/src/rabbit_exchange_decorator.erl b/deps/rabbit/src/rabbit_exchange_decorator.erl index 99251fa0ca50..11f5bc38d16d 100644 --- a/deps/rabbit/src/rabbit_exchange_decorator.erl +++ b/deps/rabbit/src/rabbit_exchange_decorator.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_decorator). @@ -26,6 +26,11 @@ -type(serial() :: pos_integer() | 'none'). +%% Callbacks on Khepri are always executed outside of a transaction, thus +%% this implementation has been updated to reflect this. The 'transaction' +%% parameter disappears, even for mnesia, callbacks run only once +%% and their implementation must ensure any transaction required. + -callback description() -> [proplists:property()]. %% Should Rabbit ensure that all binding events that are @@ -109,6 +114,7 @@ maybe_recover(X = #exchange{name = Name, case New of Old -> ok; _ -> %% TODO create a tx here for non-federation decorators - _ = [M:create(none, X) || M <- New -- Old], + Serial = rabbit_exchange:serial(X), + _ = [M:create(Serial, X) || M <- New -- Old], rabbit_exchange:update_decorators(Name, Decs1) end. 
diff --git a/deps/rabbit/src/rabbit_exchange_parameters.erl b/deps/rabbit/src/rabbit_exchange_parameters.erl index 64d22070bb0f..04670b4865ee 100644 --- a/deps/rabbit/src/rabbit_exchange_parameters.erl +++ b/deps/rabbit/src/rabbit_exchange_parameters.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_parameters). diff --git a/deps/rabbit/src/rabbit_exchange_type.erl b/deps/rabbit/src/rabbit_exchange_type.erl index 8dd27370214a..bb975d1d3df8 100644 --- a/deps/rabbit/src/rabbit_exchange_type.erl +++ b/deps/rabbit/src/rabbit_exchange_type.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_type). diff --git a/deps/rabbit/src/rabbit_exchange_type_direct.erl b/deps/rabbit/src/rabbit_exchange_type_direct.erl index 41223721e9cd..96b849ea81e5 100644 --- a/deps/rabbit/src/rabbit_exchange_type_direct.erl +++ b/deps/rabbit/src/rabbit_exchange_type_direct.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_type_direct). @@ -35,52 +35,15 @@ route(#exchange{name = Name, type = Type}, Msg) -> route(#exchange{name = Name, type = Type}, Msg, #{}). route(#exchange{name = Name, type = Type}, Msg, _Opts) -> - Routes = mc:get_annotation(routing_keys, Msg), - case Type of - direct -> - route_v2(Name, Routes); - _ -> - rabbit_router:match_routing_key(Name, Routes) - end. + Routes = mc:routing_keys(Msg), + rabbit_db_binding:match_routing_key(Name, Routes, Type =:= direct). validate(_X) -> ok. validate_binding(_X, _B) -> ok. -create(_Tx, _X) -> ok. -delete(_Tx, _X) -> ok. +create(_Serial, _X) -> ok. +delete(_Serial, _X) -> ok. policy_changed(_X1, _X2) -> ok. -add_binding(_Tx, _X, _B) -> ok. -remove_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Serial, _X, _B) -> ok. +remove_bindings(_Serial, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). - -%% rabbit_router:match_routing_key/2 uses ets:select/2 to get destinations. -%% ets:select/2 is expensive because it needs to compile the match spec every -%% time and lookup does not happen by a hash key. -%% -%% In contrast, route_v2/2 increases end-to-end message sending throughput -%% (i.e. from RabbitMQ client to the queue process) by up to 35% by using ets:lookup_element/3. -%% Only the direct exchange type uses the rabbit_index_route table to store its -%% bindings by table key tuple {SourceExchange, RoutingKey}. --spec route_v2(rabbit_types:binding_source(), [rabbit_router:routing_key(), ...]) -> - rabbit_router:match_result(). 
-route_v2(SrcName, [RoutingKey]) -> - %% optimization - destinations(SrcName, RoutingKey); -route_v2(SrcName, [_|_] = RoutingKeys) -> - lists:flatmap(fun(Key) -> - destinations(SrcName, Key) - end, RoutingKeys). - -destinations(SrcName, RoutingKey) -> - %% Prefer try-catch block over checking Key existence with ets:member/2. - %% The latter reduces throughput by a few thousand messages per second because - %% of function db_member_hash in file erl_db_hash.c. - %% We optimise for the happy path, that is the binding / table key is present. - try - ets:lookup_element(rabbit_index_route, - {SrcName, RoutingKey}, - #index_route.destination) - catch - error:badarg -> - [] - end. diff --git a/deps/rabbit/src/rabbit_exchange_type_fanout.erl b/deps/rabbit/src/rabbit_exchange_type_fanout.erl index 992f945d2f8c..1ed593073646 100644 --- a/deps/rabbit/src/rabbit_exchange_type_fanout.erl +++ b/deps/rabbit/src/rabbit_exchange_type_fanout.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_type_fanout). @@ -39,10 +39,10 @@ route(#exchange{name = Name}, _Message, _Opts) -> validate(_X) -> ok. validate_binding(_X, _B) -> ok. -create(_Tx, _X) -> ok. -delete(_Tx, _X) -> ok. +create(_Serial, _X) -> ok. +delete(_Serial, _X) -> ok. policy_changed(_X1, _X2) -> ok. -add_binding(_Tx, _X, _B) -> ok. -remove_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Serial, _X, _B) -> ok. +remove_bindings(_Serial, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/deps/rabbit/src/rabbit_exchange_type_headers.erl b/deps/rabbit/src/rabbit_exchange_type_headers.erl index c9cf4b3d2ff8..2fd347523430 100644 --- a/deps/rabbit/src/rabbit_exchange_type_headers.erl +++ b/deps/rabbit/src/rabbit_exchange_type_headers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_exchange_type_headers). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -104,10 +104,10 @@ validate_binding(_X, #binding{args = Args}) -> end. validate(_X) -> ok. -create(_Tx, _X) -> ok. -delete(_Tx, _X) -> ok. +create(_Serial, _X) -> ok. +delete(_Serial, _X) -> ok. policy_changed(_X1, _X2) -> ok. -add_binding(_Tx, _X, _B) -> ok. -remove_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Serial, _X, _B) -> ok. +remove_bindings(_Serial, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/deps/rabbit/src/rabbit_exchange_type_invalid.erl b/deps/rabbit/src/rabbit_exchange_type_invalid.erl index e7816c967b76..e06a0684bcbb 100644 --- a/deps/rabbit/src/rabbit_exchange_type_invalid.erl +++ b/deps/rabbit/src/rabbit_exchange_type_invalid.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_type_invalid). @@ -39,10 +39,10 @@ route(#exchange{name = Name, type = Type}, _, _Opts) -> validate(_X) -> ok. validate_binding(_X, _B) -> ok. -create(_Tx, _X) -> ok. -delete(_Tx, _X) -> ok. +create(_Serial, _X) -> ok. +delete(_Serial, _X) -> ok. policy_changed(_X1, _X2) -> ok. -add_binding(_Tx, _X, _B) -> ok. -remove_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Serial, _X, _B) -> ok. +remove_bindings(_Serial, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/deps/rabbit/src/rabbit_exchange_type_local_random.erl b/deps/rabbit/src/rabbit_exchange_type_local_random.erl new file mode 100644 index 000000000000..db9b37475fdc --- /dev/null +++ b/deps/rabbit/src/rabbit_exchange_type_local_random.erl @@ -0,0 +1,116 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% + +-module(rabbit_exchange_type_local_random). +-behaviour(rabbit_exchange_type). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-rabbit_feature_flag({?MODULE, + #{desc => "Local random exchange", + stability => stable + }}). + +-rabbit_boot_step({?MODULE, + [{description, "exchange type local random"}, + {mfa, {rabbit_registry, register, + [exchange, <<"x-local-random">>, ?MODULE]}}, + {requires, rabbit_registry}, + {enables, kernel_ready} + ]}). + +-export([add_binding/3, + assert_args_equivalence/2, + create/2, + delete/2, + policy_changed/2, + description/0, + recover/2, + remove_bindings/3, + validate_binding/2, + route/3, + serialise_events/0, + validate/1, + info/1, + info/2 + ]). + +description() -> + [{name, <<"x-local-random">>}, + {description, <<"Picks one random local binding (queue) to route via (to).">>}]. + +route(#exchange{name = Name}, _Msg, _Opts) -> + Matches = rabbit_router:match_routing_key(Name, [<<>>]), + case lists:filter(fun filter_local_queue/1, Matches) of + [] -> + []; + [_] = One -> + One; + LocalMatches -> + Rand = rand:uniform(length(LocalMatches)), + [lists:nth(Rand, LocalMatches)] + end. + +info(_X) -> []. +info(_X, _) -> []. +serialise_events() -> false. +validate(_X) -> + case rabbit_feature_flags:is_enabled(?MODULE) of + true -> + ok; + false -> + rabbit_misc:amqp_error( + precondition_failed, + "x-local-random exchange feature not available", [], + 'exchange.declare') + end. + +create(_Serial, _X) -> ok. +recover(_X, _Bs) -> ok. +delete(_Serial, _X) -> ok. +policy_changed(_X1, _X2) -> ok. +add_binding(_Serial, _X, _B) -> ok. +remove_bindings(_Serial, _X, _Bs) -> ok. + +validate_binding(_X, #binding{destination = Dest, + key = <<>>}) -> + case rabbit_amqqueue:lookup(Dest) of + {ok, Q} -> + case amqqueue:get_type(Q) of + rabbit_classic_queue -> + ok; + Type -> + {error, {binding_invalid, + "Queue type ~ts not valid for this exchange type", + [Type]}} + end; + _ -> + {error, {binding_invalid, + "Destination not found", + []}} + end; +validate_binding(_X, #binding{key = BKey}) -> + {error, {binding_invalid, + "Non empty binding '~s' key not permitted", + [BKey]}}. + +assert_args_equivalence(X, Args) -> + rabbit_exchange:assert_args_equivalence(X, Args). 
+ +filter_local_queue(QName) -> + %% TODO: introduce lookup function that _only_ gets the pid + case rabbit_amqqueue:lookup(QName) of + {ok, Q} -> + case amqqueue:get_pid(Q) of + Pid when is_pid(Pid) andalso + node(Pid) =:= node() -> + is_process_alive(Pid); + _ -> + false + end; + _ -> + false + end. diff --git a/deps/rabbit/src/rabbit_exchange_type_topic.erl b/deps/rabbit/src/rabbit_exchange_type_topic.erl index 31ee2df797c8..3a82a68ca89d 100644 --- a/deps/rabbit/src/rabbit_exchange_type_topic.erl +++ b/deps/rabbit/src/rabbit_exchange_type_topic.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_type_topic). @@ -40,12 +40,12 @@ route(Exchange, Msg) -> route(Exchange, Msg, #{}). route(#exchange{name = XName}, Msg, Opts) -> - RKeys = mc:get_annotation(routing_keys, Msg), + RKeys = mc:routing_keys(Msg), lists:append([rabbit_db_topic_exchange:match(XName, RKey, Opts) || RKey <- RKeys]). validate(_X) -> ok. validate_binding(_X, _B) -> ok. -create(_Tx, _X) -> ok. +create(_Serial, _X) -> ok. delete(_Serial, #exchange{name = X}) -> rabbit_db_topic_exchange:delete_all_for_exchange(X). diff --git a/deps/rabbit/src/rabbit_feature_flags.erl b/deps/rabbit/src/rabbit_feature_flags.erl index 411b14655ea5..f635e50d2b5f 100644 --- a/deps/rabbit/src/rabbit_feature_flags.erl +++ b/deps/rabbit/src/rabbit_feature_flags.erl @@ -2,11 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2018-2023 VMware, Inc. or its affiliates. +%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc %% This module offers a framework to declare capabilities a RabbitMQ node @@ -122,6 +122,7 @@ read_enabled_feature_flags_list/0, copy_feature_states_after_reset/1, uses_callbacks/1, + reset/0, reset_registry/0]). -ifdef(TEST). @@ -227,7 +228,7 @@ %% It is called when a feature flag is being enabled. The function is %% responsible for this feature-flag-specific verification and data %% conversion. It returns `ok' if RabbitMQ can mark the feature flag as -%% enabled an continue with the next one, if any. `{error, Reason}' and +%% enabled and continue with the next one, if any. `{error, Reason}' and %% exceptions are an error and the feature flag will remain disabled. %% %% The migration function is called on all nodes which fulfill the following @@ -295,7 +296,8 @@ -type inventory() :: #{applications := [atom()], feature_flags := feature_flags(), - states := feature_states()}. + states := feature_states(), + written_to_disk := boolean()}. 
-type cluster_inventory() :: #{feature_flags := feature_flags(), applications_per_node := @@ -698,10 +700,9 @@ info(Options) when is_map(Options) -> get_state(FeatureName) when is_atom(FeatureName) -> IsEnabled = is_enabled(FeatureName), - IsSupported = is_supported(FeatureName), case IsEnabled of true -> enabled; - false -> case IsSupported of + false -> case is_supported(FeatureName) of true -> disabled; false -> unavailable end @@ -771,11 +772,17 @@ init() -> ok. -define(PT_TESTSUITE_ATTRS, {?MODULE, testsuite_feature_flags_attrs}). +%% We must lock while making updates to the above persistent_term in order to +%% make the updates atomic. Otherwise if two processes attempt to inject +%% different flags at the same time, they might race and a flag could be +%% mistakenly discarded. +-define(LOCK_TESTSUITE_ATTRS, {?PT_TESTSUITE_ATTRS, self()}). inject_test_feature_flags(FeatureFlags) -> inject_test_feature_flags(FeatureFlags, true). inject_test_feature_flags(FeatureFlags, InitReg) -> + true = global:set_lock(?LOCK_TESTSUITE_ATTRS, [node()]), ExistingAppAttrs = module_attributes_from_testsuite(), FeatureFlagsPerApp0 = lists:foldl( fun({Origin, Origin, FFlags}, Acc) -> @@ -809,13 +816,17 @@ inject_test_feature_flags(FeatureFlags, InitReg) -> [FeatureFlags, AttributesFromTestsuite], #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), ok = persistent_term:put(?PT_TESTSUITE_ATTRS, AttributesFromTestsuite), + true = global:del_lock(?LOCK_TESTSUITE_ATTRS, [node()]), case InitReg of true -> rabbit_ff_registry_factory:initialize_registry(); false -> ok end. clear_injected_test_feature_flags() -> - _ = persistent_term:erase(?PT_TESTSUITE_ATTRS), + _ = global:trans( + ?LOCK_TESTSUITE_ATTRS, + fun() -> persistent_term:erase(?PT_TESTSUITE_ATTRS) end, + [node()]), ok. module_attributes_from_testsuite() -> @@ -830,7 +841,7 @@ query_supported_feature_flags() -> ?LOG_DEBUG( "Feature flags: query feature flags in loaded applications", #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - T0 = erlang:timestamp(), + T0 = erlang:monotonic_time(), %% We need to know the list of applications we scanned for feature flags. %% We can't derive that list of the returned feature flags because an %% application might be loaded/present and not have a specific feature @@ -842,11 +853,11 @@ query_supported_feature_flags() -> rabbit_deprecated_feature, ScannedApps), AttrsFromTestsuite = module_attributes_from_testsuite(), TestsuiteProviders = [App || {App, _, _} <- AttrsFromTestsuite], - T1 = erlang:timestamp(), + T1 = erlang:monotonic_time(), ?LOG_DEBUG( "Feature flags: time to find supported feature flags and deprecated " "features: ~tp us", - [timer:now_diff(T1, T0)], + [erlang:convert_time_unit(T1 - T0, native, microsecond)], #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), AllAttributes = AttrsPerAppA ++ AttrsPerAppB ++ AttrsFromTestsuite, AllApps = lists:usort(ScannedApps ++ TestsuiteProviders), @@ -1129,7 +1140,7 @@ do_write_enabled_feature_flags_list(EnabledFeatureNames) -> EnabledFeatureNames1 = lists:sort(EnabledFeatureNames), File = enabled_feature_flags_list_file(), - Content = io_lib:format("~tp.~n", [EnabledFeatureNames1]), + Content = io_lib:format("~1tp.~n", [EnabledFeatureNames1]), %% TODO: If we fail to write the the file, we should spawn a process %% to retry the operation. case file:write_file(File, Content) of @@ -1144,6 +1155,18 @@ do_write_enabled_feature_flags_list(EnabledFeatureNames) -> Error end. +-spec delete_enabled_feature_flags_list_file() -> Ret when + Ret :: ok | {error, file:posix() | badarg}. 
+%% @private + +delete_enabled_feature_flags_list_file() -> + File = enabled_feature_flags_list_file(), + case file:delete(File) of + ok -> ok; + {error, enoent} -> ok; + Error -> Error + end. + -spec enabled_feature_flags_list_file() -> file:filename(). %% @doc %% Returns the path to the file where the state of feature flags is stored. @@ -1322,6 +1345,14 @@ sync_feature_flags_with_cluster(Nodes, _NodeIsVirgin) -> refresh_feature_flags_after_app_load() -> rabbit_ff_controller:refresh_after_app_load(). +-spec reset() -> ok. +%% @doc Resets the feature flags registry and recorded states on disk. + +reset() -> + ok = reset_registry(), + ok = delete_enabled_feature_flags_list_file(), + ok. + -spec reset_registry() -> ok. %% @doc Resets the feature flags registry. %% diff --git a/deps/rabbit/src/rabbit_ff_controller.erl b/deps/rabbit/src/rabbit_ff_controller.erl index 53087faff97a..f82ed6000e16 100644 --- a/deps/rabbit/src/rabbit_ff_controller.erl +++ b/deps/rabbit/src/rabbit_ff_controller.erl @@ -2,11 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2023 VMware, Inc. or its affiliates. +%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc %% The feature flag controller is responsible for synchronization and managing @@ -49,7 +49,8 @@ running_nodes/0, collect_inventory_on_nodes/1, collect_inventory_on_nodes/2, mark_as_enabled_on_nodes/4, - wait_for_task_and_stop/0]). + wait_for_task_and_stop/0, + is_running/0]). %% gen_statem callbacks. -export([callback_mode/0, @@ -79,7 +80,13 @@ start_link() -> gen_statem:start_link({local, ?LOCAL_NAME}, ?MODULE, none, []). wait_for_task_and_stop() -> - gen_statem:stop(?LOCAL_NAME). + case erlang:whereis(rabbit_sup) of + undefined -> gen_statem:stop(?LOCAL_NAME); + _ -> rabbit_sup:stop_child(?LOCAL_NAME) + end. + +is_running() -> + is_pid(erlang:whereis(?LOCAL_NAME)). is_supported(FeatureNames) -> is_supported(FeatureNames, ?TIMEOUT). @@ -176,6 +183,10 @@ callback_mode() -> state_functions. init(_Args) -> + ?LOG_DEBUG( + "Feature flags: controller standing by", + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + process_flag(trap_exit, true), {ok, standing_by, none}. standing_by( @@ -295,6 +306,14 @@ terminate(_Reason, _State, _Data) -> ok. wait_for_in_flight_operations() -> + case global:whereis_name(?GLOBAL_NAME) of + Pid when Pid == self() -> + ok; + _ -> + wait_for_in_flight_operations0() + end. + +wait_for_in_flight_operations0() -> case register_globally() of yes -> %% We don't unregister so the controller holds the lock until it @@ -1065,17 +1084,20 @@ post_enable(#{states_per_node := _}, FeatureName, Nodes, Enabled) -> -ifndef(TEST). all_nodes() -> - lists:usort([node() | rabbit_nodes:list_members()]). + lists:sort(rabbit_nodes:list_members()). running_nodes() -> lists:usort([node() | rabbit_nodes:list_running()]). -else. all_nodes() -> - RemoteNodes = case rabbit_feature_flags:get_overriden_nodes() of - undefined -> rabbit_nodes:list_members(); - Nodes -> Nodes - end, - lists:usort([node() | RemoteNodes]). 
+ AllNodes = case rabbit_feature_flags:get_overriden_nodes() of + undefined -> + rabbit_nodes:list_members(); + Nodes -> + ?assert(lists:member(node(), Nodes)), + Nodes + end, + lists:sort(AllNodes). running_nodes() -> RemoteNodes = case rabbit_feature_flags:get_overriden_running_nodes() of @@ -1106,6 +1128,8 @@ collect_inventory_on_nodes(Nodes, Timeout) -> Rets = inventory_rpcs(Nodes, Timeout), maps:fold( fun + (_Node, init_required, {ok, Inventory}) -> + {ok, Inventory}; (Node, #{feature_flags := FeatureFlags1, applications := ScannedApps, @@ -1130,12 +1154,20 @@ collect_inventory_on_nodes(Nodes, Timeout) -> end, {ok, Inventory0}, Rets). inventory_rpcs(Nodes, Timeout) -> - %% We must use `rabbit_ff_registry_wrapper' if it is available to avoid - %% any deadlock with the Code server. If it is unavailable, we fall back - %% to `rabbit_ff_registry'. + %% In the past, the feature flag registry in `rabbit_ff_registry' was + %% implemented with a module which was dynamically regenerated and + %% reloaded. To avoid deadlocks with the Code server we need to first call + %% into `rabbit_ff_registry_wrapper' if it is available. If it is + %% unavailable, we fall back to `rabbit_ff_registry'. %% %% See commit aacfa1978e24bcacd8de7d06a7c3c5d9d8bd098e and pull request %% #8155. + %% + %% In the long run, when compatibility with nodes that use module creation + %% for `rabbit_ff_registry' is no longer required, this block can be + %% replaced with a call of: + %% + %% rpc_calls(Nodes, rabbit_ff_registry, inventory, []). Rets0 = rpc_calls( Nodes, rabbit_ff_registry_wrapper, inventory, [], Timeout), @@ -1270,20 +1302,29 @@ list_feature_flags_enabled_somewhere( FeatureName :: rabbit_feature_flags:feature_name(). list_deprecated_features_that_cant_be_denied( - #{states_per_node := StatesPerNode}) -> + #{feature_flags := FeatureFlags, + states_per_node := StatesPerNode}) -> ThisNode = node(), States = maps:get(ThisNode, StatesPerNode), maps:fold( fun (FeatureName, true, Acc) -> - #{ThisNode := IsUsed} = run_callback( - [ThisNode], FeatureName, - is_feature_used, #{}, infinity), - case IsUsed of - true -> [FeatureName | Acc]; - false -> Acc; - _Error -> Acc + FeatureProps = maps:get(FeatureName, FeatureFlags), + Stability = rabbit_feature_flags:get_stability(FeatureProps), + case Stability of + required -> + Acc; + _ -> + #{ThisNode := IsUsed} = run_callback( + [ThisNode], FeatureName, + is_feature_used, #{}, + infinity), + case IsUsed of + true -> [FeatureName | Acc]; + false -> Acc; + _Error -> [FeatureName | Acc] + end end; (_FeatureName, false, Acc) -> Acc @@ -1349,32 +1390,9 @@ this_node_first(Nodes) -> Ret :: term() | {error, term()}. rpc_call(Node, Module, Function, Args, Timeout) -> - SleepBetweenRetries = 5000, - T0 = erlang:monotonic_time(), try erpc:call(Node, Module, Function, Args, Timeout) catch - %% In case of `noconnection' with `Timeout'=infinity, we don't retry - %% at all. This is because the infinity "timeout" is used to run - %% callbacks on remote node and they can last an indefinite amount of - %% time, for instance, if there is a lot of data to migrate. 
- error:{erpc, noconnection} = Reason - when is_integer(Timeout) andalso Timeout > SleepBetweenRetries -> - ?LOG_WARNING( - "Feature flags: no connection to node `~ts`; " - "retrying in ~b milliseconds", - [Node, SleepBetweenRetries], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - timer:sleep(SleepBetweenRetries), - T1 = erlang:monotonic_time(), - TDiff = erlang:convert_time_unit(T1 - T0, native, millisecond), - Remaining = Timeout - TDiff, - Timeout1 = erlang:max(Remaining, 0), - case Timeout1 of - 0 -> {error, Reason}; - _ -> rpc_call(Node, Module, Function, Args, Timeout1) - end; - Class:Reason:Stacktrace -> Message0 = erl_error:format_exception(Class, Reason, Stacktrace), Message1 = lists:flatten(Message0), diff --git a/deps/rabbit/src/rabbit_ff_extra.erl b/deps/rabbit/src/rabbit_ff_extra.erl index 37a59b2c8871..9eba72185936 100644 --- a/deps/rabbit/src/rabbit_ff_extra.erl +++ b/deps/rabbit/src/rabbit_ff_extra.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% @copyright 2018-2023 VMware, Inc. or its affiliates. +%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc %% This module provides extra functions unused by the feature flags diff --git a/deps/rabbit/src/rabbit_ff_registry.erl b/deps/rabbit/src/rabbit_ff_registry.erl index 50f7ac2f13bb..864ff564dc64 100644 --- a/deps/rabbit/src/rabbit_ff_registry.erl +++ b/deps/rabbit/src/rabbit_ff_registry.erl @@ -2,11 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2018-2023 VMware, Inc. or its affiliates. +%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc %% This module exposes the API of the {@link rabbit_feature_flags} @@ -24,6 +24,9 @@ -include_lib("rabbit_common/include/logging.hrl"). +-include("src/rabbit_feature_flags.hrl"). +-include("src/rabbit_ff_registry.hrl"). + -export([get/1, list/1, states/0, @@ -33,39 +36,6 @@ is_registry_written_to_disk/0, inventory/0]). --ifdef(TEST). --on_load(on_load/0). --endif. - -%% In this registry stub, most functions want to return `init_required' to let -%% {@link rabbit_ff_registry_wrapper} do the first time initialization. -%% -%% The inner case statement is here to convince Dialyzer that the function -%% could return values of type `__ReturnedIfUninitialized' or -%% `__NeverReturned'. -%% -%% If the function was only calling itself (`Call'), Dialyzer would consider -%% that it would never return. With the outer case, Dialyzer would conclude -%% that `__ReturnedIfUninitialized' is always returned and other values will -%% never be returned and there is no point in expecting them. -%% -%% In the end, `Call' is never executed because {@link -%% rabbit_ff_registry_wrapper} is responsible for calling the registry -%% function again after initialization. -%% -%% With both cases in place, it seems that we can convince Dialyzer that the -%% function returns values matching its spec. 
--define(convince_dialyzer(__Call, __ReturnedIfUninitialized, __NeverReturned), - case always_return_true() of - false -> - __Call; - true -> - case always_return_true() of - true -> __ReturnedIfUninitialized; - false -> __NeverReturned - end - end). - -spec get(FeatureName) -> Ret when FeatureName :: rabbit_feature_flags:feature_name(), Ret :: FeatureProps | init_required, @@ -82,26 +52,15 @@ %% @returns the properties of the specified feature flag. get(FeatureName) -> - ?convince_dialyzer( - ?MODULE:get(FeatureName), - init_required, - lists:nth( - rand:uniform(2), - [#{name => feature_flag, - provided_by => rabbit}, - #{name => deprecated_feature, - deprecation_phase => - lists:nth( - 4, - [permitted_by_default, - denied_by_default, - disconnected, - removed]), - messages => #{}, - provided_by => rabbit}])). + case inventory() of + init_required -> + init_required; + #{feature_flags := FeatureFlags} -> + maps:get(FeatureName, FeatureFlags, undefined) + end. -spec list(Which) -> Ret when - Which :: all | enabled | disabled, + Which :: all | enabled | disabled | state_changing, Ret :: FeatureFlags | init_required, FeatureFlags :: rabbit_feature_flags:feature_flags(). %% @doc @@ -114,8 +73,49 @@ get(FeatureName) -> %% `disabled'. %% @returns A map of selected feature flags. -list(Which) -> - ?convince_dialyzer(?MODULE:list(Which), init_required, #{}). +list(all) -> + case inventory() of + init_required -> + init_required; + #{feature_flags := AllFeatureFlags} -> + AllFeatureFlags + end; +list(enabled) -> + case inventory() of + init_required -> + init_required; + #{feature_flags := AllFeatureFlags, states := FeatureStates} -> + maps:filter( + fun(FeatureName, _FeatureProps) -> + maps:is_key(FeatureName, FeatureStates) + andalso + maps:get(FeatureName, FeatureStates) =:= true + end, AllFeatureFlags) + end; +list(disabled) -> + case inventory() of + init_required -> + init_required; + #{feature_flags := AllFeatureFlags, states := FeatureStates} -> + maps:filter( + fun(FeatureName, _FeatureProps) -> + not maps:is_key(FeatureName, FeatureStates) + orelse + maps:get(FeatureName, FeatureStates) =:= false + end, AllFeatureFlags) + end; +list(state_changing) -> + case inventory() of + init_required -> + init_required; + #{feature_flags := AllFeatureFlags, states := FeatureStates} -> + maps:filter( + fun(FeatureName, _FeatureProps) -> + maps:is_key(FeatureName, FeatureStates) + andalso + maps:get(FeatureName, FeatureStates) =:= state_changing + end, AllFeatureFlags) + end. -spec states() -> Ret when Ret :: FeatureStates | init_required, @@ -129,7 +129,12 @@ list(Which) -> %% @returns A map of feature flag states. states() -> - ?convince_dialyzer(?MODULE:states(), init_required, #{}). + case inventory() of + init_required -> + init_required; + #{states := FeatureStates} -> + FeatureStates + end. -spec is_supported(FeatureName) -> Ret when FeatureName :: rabbit_feature_flags:feature_name(), @@ -146,7 +151,12 @@ states() -> %% otherwise. is_supported(FeatureName) -> - ?convince_dialyzer(?MODULE:is_supported(FeatureName), init_required, true). + case inventory() of + init_required -> + init_required; + #{feature_flags := FeatureFlags} -> + maps:is_key(FeatureName, FeatureFlags) + end. -spec is_enabled(FeatureName) -> Ret when FeatureName :: rabbit_feature_flags:feature_name(), @@ -163,7 +173,12 @@ is_supported(FeatureName) -> %% its state is transient, or `false' otherwise. is_enabled(FeatureName) -> - ?convince_dialyzer(?MODULE:is_enabled(FeatureName), init_required, true). 
+ case inventory() of + init_required -> + init_required; + #{states := FeatureStates} -> + maps:get(FeatureName, FeatureStates, false) + end. -spec is_registry_initialized() -> IsInitialized when IsInitialized :: boolean(). @@ -178,7 +193,7 @@ is_enabled(FeatureName) -> %% source code. is_registry_initialized() -> - always_return_false(). + inventory() =/= init_required. -spec is_registry_written_to_disk() -> WrittenToDisk when WrittenToDisk :: boolean(). @@ -196,45 +211,16 @@ is_registry_initialized() -> %% flags state on restart. is_registry_written_to_disk() -> - always_return_true(). + case inventory() of + init_required -> + false; + #{written_to_disk := IsWrittenToDisk} -> + IsWrittenToDisk + end. -spec inventory() -> Ret when Ret :: Inventory | init_required, Inventory :: rabbit_feature_flags:inventory(). inventory() -> - Inventory = #{applications => [], - feature_flags => #{}, - states => #{}}, - ?convince_dialyzer(?MODULE:inventory(), init_required, Inventory). - -always_return_true() -> - %% This function is here to trick Dialyzer. We want some functions - %% in this initial on-disk registry to always return `true` or - %% `false`. However the generated registry will return actual - %% booleans. The `-spec()` correctly advertises a return type of - %% `boolean()`. But in the meantime, Dialyzer only knows about this - %% copy which, without the trick below, would always return either - %% `true` (e.g. in is_registry_written_to_disk/0) or `false` (e.g. - %% is_registry_initialized/0). This obviously causes some warnings - %% where the registry functions are used: Dialyzer believes that - %% e.g. matching the return value of is_registry_initialized/0 - %% against `true` will never succeed. - %% - %% That's why this function makes a call which we know the result, - %% but not Dialyzer, to "create" that hard-coded `true` return - %% value. - erlang:get({?MODULE, always_undefined}) =:= undefined. - -always_return_false() -> - not always_return_true(). - --ifdef(TEST). -on_load() -> - _ = (catch ?LOG_DEBUG( - "Feature flags: Loading initial (uninitialized) registry " - "module (~tp)", - [self()], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS})), - ok. --endif. + persistent_term:get(?PT_INVENTORY_KEY, init_required). diff --git a/deps/rabbit/src/rabbit_ff_registry.hrl b/deps/rabbit/src/rabbit_ff_registry.hrl new file mode 100644 index 000000000000..7306009f5b8f --- /dev/null +++ b/deps/rabbit/src/rabbit_ff_registry.hrl @@ -0,0 +1,8 @@ +-define(PT_INVENTORY_KEY, rabbit_ff_registry). +%% The `persistent_term' key used to hold the feature flag inventory. +%% +%% `persistent_term:get(?PT_INVENTORY_KEY)' should return a value with the type +%% `rabbit_feature_flags:inventory()' if the registry is initialized. +%% +%% Rather than fetching this key directly, use the functions in the +%% `rabbit_ff_registry' module. diff --git a/deps/rabbit/src/rabbit_ff_registry_factory.erl b/deps/rabbit/src/rabbit_ff_registry_factory.erl index 01b47644acfc..0d91a7b64955 100644 --- a/deps/rabbit/src/rabbit_ff_registry_factory.erl +++ b/deps/rabbit/src/rabbit_ff_registry_factory.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ff_registry_factory). 
@@ -13,6 +13,7 @@ -include_lib("rabbit_common/include/logging.hrl"). -include("src/rabbit_feature_flags.hrl"). +-include("src/rabbit_ff_registry.hrl"). -export([initialize_registry/0, initialize_registry/1, @@ -22,15 +23,12 @@ reset_registry/0]). -ifdef(TEST). --export([registry_loading_lock/0, - purge_old_registry/1]). +-export([registry_loading_lock/0]). -endif. -define(FF_STATE_CHANGE_LOCK, {feature_flags_state_change, self()}). -define(FF_REGISTRY_LOADING_LOCK, {feature_flags_registry_loading, self()}). --type registry_vsn() :: term(). - -spec acquire_state_change_lock() -> ok. acquire_state_change_lock() -> @@ -171,19 +169,23 @@ initialize_registry(NewSupportedFeatureFlags, NewFeatureStates, WrittenToDisk) -> try - Ret = maybe_initialize_registry(NewSupportedFeatureFlags, - NewFeatureStates, - WrittenToDisk), - case Ret of - ok -> ok; - restart -> initialize_registry(NewSupportedFeatureFlags, - NewFeatureStates, - WrittenToDisk); - Error1 -> Error1 - end + true = global:set_lock(?FF_REGISTRY_LOADING_LOCK, [node()]), + ?LOG_DEBUG( + "Feature flags: acquired lock before initializing registry (~tp)", + [self()], + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + ok = maybe_initialize_registry(NewSupportedFeatureFlags, + NewFeatureStates, + WrittenToDisk) catch throw:{error, _} = Error2 -> Error2 + after + ?LOG_DEBUG( + "Feature flags: releasing lock after initializing registry (~tp)", + [self()], + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + true = global:del_lock(?FF_REGISTRY_LOADING_LOCK, [node()]) end. -spec maybe_initialize_registry(FeatureFlags, @@ -192,17 +194,11 @@ initialize_registry(NewSupportedFeatureFlags, FeatureFlags :: rabbit_feature_flags:feature_flags(), FeatureStates :: rabbit_feature_flags:feature_states(), WrittenToDisk :: boolean(), - Ret :: ok | restart | {error, any()} | no_return(). + Ret :: ok | no_return(). maybe_initialize_registry(NewSupportedFeatureFlags, NewFeatureStates, WrittenToDisk) -> - %% We save the version of the current registry before computing - %% the new one. This is used when we do the actual reload: if the - %% current registry was reloaded in the meantime, we need to restart - %% the computation to make sure we don't loose data. - RegistryVsn = registry_vsn(), - %% We take the feature flags already registered. RegistryInitialized = rabbit_ff_registry:is_registry_initialized(), KnownFeatureFlags1 = case RegistryInitialized of @@ -328,12 +324,11 @@ maybe_initialize_registry(NewSupportedFeatureFlags, %% known by this node or not, and decide if a missing feature flag is %% unknown or unsupported. 
Inventory = #{applications => ScannedApps, - feature_flags => KnownFeatureFlags2, - states => FeatureStates}, + feature_flags => AllFeatureFlags, + states => FeatureStates, + written_to_disk => WrittenToDisk}, - Proceed = does_registry_need_refresh(AllFeatureFlags, - FeatureStates, - WrittenToDisk), + Proceed = does_registry_need_refresh(Inventory), case Proceed of true -> @@ -341,16 +336,12 @@ maybe_initialize_registry(NewSupportedFeatureFlags, "Feature flags: (re)initialize registry (~tp)", [self()], #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - T0 = erlang:timestamp(), - Ret = do_initialize_registry(RegistryVsn, - AllFeatureFlags, - FeatureStates, - Inventory, - WrittenToDisk), - T1 = erlang:timestamp(), + T0 = erlang:monotonic_time(), + Ret = do_initialize_registry(Inventory), + T1 = erlang:monotonic_time(), ?LOG_DEBUG( "Feature flags: time to regen registry: ~tp us", - [timer:now_diff(T1, T0)], + [erlang:convert_time_unit(T1 - T0, native, microsecond)], #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), Ret; false -> @@ -360,27 +351,20 @@ maybe_initialize_registry(NewSupportedFeatureFlags, ok end. --spec does_registry_need_refresh(FeatureFlags, - FeatureStates, - WrittenToDisk) -> Ret when - FeatureFlags :: rabbit_feature_flags:feature_flags(), - FeatureStates :: rabbit_feature_flags:feature_states(), - WrittenToDisk :: boolean(), +-spec does_registry_need_refresh(Inventory) -> Ret when + Inventory :: rabbit_feature_flags:inventory(), Ret :: boolean(). -does_registry_need_refresh(AllFeatureFlags, - FeatureStates, - WrittenToDisk) -> - case rabbit_ff_registry:is_registry_initialized() of - true -> +does_registry_need_refresh(#{feature_flags := AllFeatureFlags, + states := FeatureStates, + written_to_disk := WrittenToDisk}) -> + case rabbit_ff_registry:inventory() of + #{feature_flags := CurrentAllFeatureFlags, + states := CurrentFeatureStates, + written_to_disk := CurrentWrittenToDisk} -> %% Before proceeding with the actual %% (re)initialization, let's see if there are any %% changes. - CurrentAllFeatureFlags = rabbit_ff_registry_wrapper:list(all), - CurrentFeatureStates = rabbit_ff_registry_wrapper:states(), - CurrentWrittenToDisk = - rabbit_ff_registry:is_registry_written_to_disk(), - if AllFeatureFlags =/= CurrentAllFeatureFlags -> ?LOG_DEBUG( @@ -406,7 +390,7 @@ does_registry_need_refresh(AllFeatureFlags, #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), false end; - false -> + init_required -> ?LOG_DEBUG( "Feature flags: registry refresh needed: " "yes, first-time initialization", @@ -447,24 +431,15 @@ enable_deprecated_features_required_by_enabled_feature_flags( FeatureFlags, FeatureStates1) end. --spec do_initialize_registry(Vsn, - FeatureFlags, - FeatureStates, - Inventory, - WrittenToDisk) -> Ret when - Vsn :: registry_vsn(), - FeatureFlags :: rabbit_feature_flags:feature_flags(), - FeatureStates :: rabbit_feature_flags:feature_states(), +-spec do_initialize_registry(Inventory) -> Ret when Inventory :: rabbit_feature_flags:inventory(), - WrittenToDisk :: boolean(), - Ret :: ok | restart | {error, any()} | no_return(). + Ret :: ok. %% @private -do_initialize_registry(RegistryVsn, - AllFeatureFlags, - FeatureStates, - #{applications := ScannedApps} = Inventory, - WrittenToDisk) -> +do_initialize_registry(#{feature_flags := AllFeatureFlags, + states := FeatureStates, + applications := ScannedApps, + written_to_disk := WrittenToDisk} = Inventory) -> %% We log the state of those feature flags. 
?LOG_DEBUG( lists:flatten( @@ -501,338 +476,12 @@ do_initialize_registry(RegistryVsn, #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS} ), - %% We request the registry to be regenerated and reloaded with the - %% new state. - regen_registry_mod(RegistryVsn, - AllFeatureFlags, - FeatureStates, - Inventory, - WrittenToDisk). - --spec regen_registry_mod( - RegistryVsn, AllFeatureFlags, FeatureStates, Inventory, - WrittenToDisk) -> Ret when - RegistryVsn :: registry_vsn(), - AllFeatureFlags :: rabbit_feature_flags:feature_flags(), - FeatureStates :: rabbit_feature_flags:feature_states(), - Inventory :: rabbit_feature_flags:inventory(), - WrittenToDisk :: boolean(), - Ret :: ok | restart | {error, any()} | no_return(). -%% @private - -regen_registry_mod(RegistryVsn, - AllFeatureFlags, - FeatureStates, - Inventory, - WrittenToDisk) -> - %% Here, we recreate the source code of the `rabbit_ff_registry` - %% module from scratch. - %% - %% IMPORTANT: We want both modules to have the exact same public - %% API in order to simplify the life of developers and their tools - %% (Dialyzer, completion, and so on). - - %% -module(rabbit_ff_registry). - ModuleAttr = erl_syntax:attribute( - erl_syntax:atom(module), - [erl_syntax:atom(rabbit_ff_registry)]), - ModuleForm = erl_syntax:revert(ModuleAttr), - %% -export([...]). - ExportAttr = erl_syntax:attribute( - erl_syntax:atom(export), - [erl_syntax:list( - [erl_syntax:arity_qualifier( - erl_syntax:atom(F), - erl_syntax:integer(A)) - || {F, A} <- [{get, 1}, - {list, 1}, - {states, 0}, - {is_supported, 1}, - {is_enabled, 1}, - {is_registry_initialized, 0}, - {is_registry_written_to_disk, 0}, - {inventory, 0}]] - ) - ] - ), - ExportForm = erl_syntax:revert(ExportAttr), - %% get(_) -> ... - GetClauses = [erl_syntax:clause( - [erl_syntax:atom(FeatureName)], - [], - [erl_syntax:abstract(maps:get(FeatureName, - AllFeatureFlags))]) - || FeatureName <- maps:keys(AllFeatureFlags) - ], - GetUnknownClause = erl_syntax:clause( - [erl_syntax:variable("_")], - [], - [erl_syntax:atom(undefined)]), - GetFun = erl_syntax:function( - erl_syntax:atom(get), - GetClauses ++ [GetUnknownClause]), - GetFunForm = erl_syntax:revert(GetFun), - %% list(_) -> ... 
- ListAllBody = erl_syntax:abstract(AllFeatureFlags), - ListAllClause = erl_syntax:clause([erl_syntax:atom(all)], - [], - [ListAllBody]), - EnabledFeatureFlags = maps:filter( - fun(FeatureName, _) -> - maps:is_key(FeatureName, - FeatureStates) - andalso - maps:get(FeatureName, FeatureStates) - =:= - true - end, AllFeatureFlags), - ListEnabledBody = erl_syntax:abstract(EnabledFeatureFlags), - ListEnabledClause = erl_syntax:clause( - [erl_syntax:atom(enabled)], - [], - [ListEnabledBody]), - DisabledFeatureFlags = maps:filter( - fun(FeatureName, _) -> - not maps:is_key(FeatureName, - FeatureStates) - orelse - maps:get(FeatureName, FeatureStates) - =:= - false - end, AllFeatureFlags), - ListDisabledBody = erl_syntax:abstract(DisabledFeatureFlags), - ListDisabledClause = erl_syntax:clause( - [erl_syntax:atom(disabled)], - [], - [ListDisabledBody]), - StateChangingFeatureFlags = maps:filter( - fun(FeatureName, _) -> - maps:is_key(FeatureName, - FeatureStates) - andalso - maps:get(FeatureName, FeatureStates) - =:= - state_changing - end, AllFeatureFlags), - ListStateChangingBody = erl_syntax:abstract(StateChangingFeatureFlags), - ListStateChangingClause = erl_syntax:clause( - [erl_syntax:atom(state_changing)], - [], - [ListStateChangingBody]), - ListFun = erl_syntax:function( - erl_syntax:atom(list), - [ListAllClause, - ListEnabledClause, - ListDisabledClause, - ListStateChangingClause]), - ListFunForm = erl_syntax:revert(ListFun), - %% states() -> ... - StatesBody = erl_syntax:abstract(FeatureStates), - StatesClause = erl_syntax:clause([], [], [StatesBody]), - StatesFun = erl_syntax:function( - erl_syntax:atom(states), - [StatesClause]), - StatesFunForm = erl_syntax:revert(StatesFun), - %% is_supported(_) -> ... - IsSupportedClauses = [erl_syntax:clause( - [erl_syntax:atom(FeatureName)], - [], - [erl_syntax:atom(true)]) - || FeatureName <- maps:keys(AllFeatureFlags) - ], - NotSupportedClause = erl_syntax:clause( - [erl_syntax:variable("_")], - [], - [erl_syntax:atom(false)]), - IsSupportedFun = erl_syntax:function( - erl_syntax:atom(is_supported), - IsSupportedClauses ++ [NotSupportedClause]), - IsSupportedFunForm = erl_syntax:revert(IsSupportedFun), - %% is_enabled(_) -> ... - IsEnabledClauses = [erl_syntax:clause( - [erl_syntax:atom(FeatureName)], - [], - [case maps:is_key(FeatureName, FeatureStates) of - true -> - erl_syntax:atom( - maps:get(FeatureName, FeatureStates)); - false -> - erl_syntax:atom(false) - end]) - || FeatureName <- maps:keys(AllFeatureFlags) - ], - NotEnabledClause = erl_syntax:clause( - [erl_syntax:variable("_")], - [], - [erl_syntax:atom(false)]), - IsEnabledFun = erl_syntax:function( - erl_syntax:atom(is_enabled), - IsEnabledClauses ++ [NotEnabledClause]), - IsEnabledFunForm = erl_syntax:revert(IsEnabledFun), - %% is_registry_initialized() -> ... - IsInitializedClauses = [erl_syntax:clause( - [], - [], - [erl_syntax:atom(true)]) - ], - IsInitializedFun = erl_syntax:function( - erl_syntax:atom(is_registry_initialized), - IsInitializedClauses), - IsInitializedFunForm = erl_syntax:revert(IsInitializedFun), - %% is_registry_written_to_disk() -> ... - IsWrittenToDiskClauses = [erl_syntax:clause( - [], - [], - [erl_syntax:atom(WrittenToDisk)]) - ], - IsWrittenToDiskFun = erl_syntax:function( - erl_syntax:atom(is_registry_written_to_disk), - IsWrittenToDiskClauses), - IsWrittenToDiskFunForm = erl_syntax:revert(IsWrittenToDiskFun), - %% inventory() -> ... 
- InventoryBody = erl_syntax:abstract(Inventory), - InventoryClause = erl_syntax:clause([], [], [InventoryBody]), - InventoryFun = erl_syntax:function( - erl_syntax:atom(inventory), - [InventoryClause]), - InventoryFunForm = erl_syntax:revert(InventoryFun), - %% Compilation! - Forms = [ModuleForm, - ExportForm, - GetFunForm, - ListFunForm, - StatesFunForm, - IsSupportedFunForm, - IsEnabledFunForm, - IsInitializedFunForm, - IsWrittenToDiskFunForm, - InventoryFunForm], - maybe_log_registry_source_code(Forms), - CompileOpts = [return_errors, - return_warnings], - case compile:forms(Forms, CompileOpts) of - {ok, Mod, Bin, _} -> - load_registry_mod(RegistryVsn, Mod, Bin); - {error, Errors, Warnings} -> - ?LOG_ERROR( - "Feature flags: registry compilation failure:~n" - "Errors: ~tp~n" - "Warnings: ~tp", - [Errors, Warnings], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - {error, {compilation_failure, Errors, Warnings}}; - error -> - ?LOG_ERROR( - "Feature flags: registry compilation failure", - [], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - {error, {compilation_failure, [], []}} - end. - -maybe_log_registry_source_code(Forms) -> - case rabbit_prelaunch:get_context() of - #{log_feature_flags_registry := true} -> - ?LOG_DEBUG( - "== FEATURE FLAGS REGISTRY ==~n" - "~ts~n" - "== END ==~n", - [erl_prettypr:format(erl_syntax:form_list(Forms))], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - ok; - _ -> - ok - end. + persistent_term:put(?PT_INVENTORY_KEY, Inventory). -ifdef(TEST). registry_loading_lock() -> ?FF_REGISTRY_LOADING_LOCK. -endif. --spec load_registry_mod(Vsn, Mod, Bin) -> Ret when - Vsn :: registry_vsn(), - Mod :: module(), - Bin :: binary(), - Ret :: ok | restart | no_return(). -%% @private - -load_registry_mod(RegistryVsn, Mod, Bin) -> - ?LOG_DEBUG( - "Feature flags: registry module ready, loading it (~tp)...", - [self()], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - FakeFilename = "Compiled and loaded by " ?MODULE_STRING, - %% Time to load the new registry, replacing the old one. We use a - %% lock here to synchronize concurrent reloads. - global:set_lock(?FF_REGISTRY_LOADING_LOCK, [node()]), - ?LOG_DEBUG( - "Feature flags: acquired lock before reloading registry module (~tp)", - [self()], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - %% We want to make sure that the old registry (not the one being - %% currently in use) is purged by the code server. It means no - %% process lingers on that old code. - %% - %% We use code:soft_purge() for that (meaning no process is killed) - %% and we wait in an infinite loop for that to succeed. - ok = purge_old_registry(Mod), - %% Now we can replace the currently loaded registry by the new one. - %% The code server takes care of marking the current registry as old - %% and load the new module in an atomic operation. - %% - %% Therefore there is no chance of a window where there is no - %% registry module available, causing the one on disk to be - %% reloaded. 
- Ret = case registry_vsn() of - RegistryVsn -> code:load_binary(Mod, FakeFilename, Bin); - OtherVsn -> {error, {restart, RegistryVsn, OtherVsn}} - end, - ?LOG_DEBUG( - "Feature flags: releasing lock after reloading registry module (~tp)", - [self()], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - global:del_lock(?FF_REGISTRY_LOADING_LOCK, [node()]), - case Ret of - {module, _} -> - ?LOG_DEBUG( - "Feature flags: registry module loaded (vsn: ~tp -> ~tp)", - [RegistryVsn, registry_vsn()], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - ok; - {error, {restart, Expected, Current}} -> - ?LOG_DEBUG( - "Feature flags: another registry module was loaded in the " - "meantime (expected old vsn: ~tp, current vsn: ~tp); " - "restarting the regen", - [Expected, Current], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - restart; - {error, Reason} -> - ?LOG_ERROR( - "Feature flags: failed to load registry module: ~tp", - [Reason], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - throw({feature_flag_registry_reload_failure, Reason}) - end. - --spec registry_vsn() -> Vsn when - Vsn :: registry_vsn(). -%% @private - -registry_vsn() -> - Attrs = rabbit_ff_registry:module_info(attributes), - proplists:get_value(vsn, Attrs, undefined). - -purge_old_registry(Mod) -> - case code:is_loaded(Mod) of - {file, _} -> do_purge_old_registry(Mod); - false -> ok - end. - -do_purge_old_registry(Mod) -> - case code:soft_purge(Mod) of - true -> ok; - false -> do_purge_old_registry(Mod) - end. - -spec reset_registry() -> ok. reset_registry() -> @@ -840,7 +489,6 @@ reset_registry() -> "Feature flags: resetting loaded registry", [], #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - _ = code:purge(rabbit_ff_registry), - _ = code:delete(rabbit_ff_registry), + persistent_term:erase(?PT_INVENTORY_KEY), ?assertNot(rabbit_ff_registry:is_registry_initialized()), ok. diff --git a/deps/rabbit/src/rabbit_ff_registry_wrapper.erl b/deps/rabbit/src/rabbit_ff_registry_wrapper.erl index 5d8555e84a0f..beef32f657cf 100644 --- a/deps/rabbit/src/rabbit_ff_registry_wrapper.erl +++ b/deps/rabbit/src/rabbit_ff_registry_wrapper.erl @@ -2,11 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2023 VMware, Inc. or its affiliates. +%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc %% This module sits in front of {@link rabbit_ff_registry}. 
@@ -52,7 +52,7 @@ get(FeatureName) -> case rabbit_ff_registry:get(FeatureName) of init_required -> - _ = rabbit_ff_registry_factory:initialize_registry(), + initialize_registry(), get(FeatureName); Ret -> Ret @@ -74,7 +74,7 @@ get(FeatureName) -> list(Which) -> case rabbit_ff_registry:list(Which) of init_required -> - _ = rabbit_ff_registry_factory:initialize_registry(), + initialize_registry(), list(Which); Ret -> Ret @@ -93,7 +93,7 @@ list(Which) -> states() -> case rabbit_ff_registry:states() of init_required -> - _ = rabbit_ff_registry_factory:initialize_registry(), + initialize_registry(), states(); Ret -> Ret @@ -115,7 +115,7 @@ states() -> is_supported(FeatureName) -> case rabbit_ff_registry:is_supported(FeatureName) of init_required -> - _ = rabbit_ff_registry_factory:initialize_registry(), + initialize_registry(), is_supported(FeatureName); Ret -> Ret @@ -137,7 +137,7 @@ is_supported(FeatureName) -> is_enabled(FeatureName) -> case rabbit_ff_registry:is_enabled(FeatureName) of init_required -> - _ = rabbit_ff_registry_factory:initialize_registry(), + initialize_registry(), is_enabled(FeatureName); Ret -> Ret @@ -150,8 +150,22 @@ is_enabled(FeatureName) -> inventory() -> case rabbit_ff_registry:inventory() of init_required -> - _ = rabbit_ff_registry_factory:initialize_registry(), + initialize_registry(), inventory(); Ret -> Ret end. + +initialize_registry() -> + %% We acquire the feature flags registry reload lock here to make sure we + %% don't reload the registry in the middle of a cluster join. Indeed, the + %% registry is reset and feature flags states are copied from a remote + %% node. Therefore, there is a small window where the registry is not + %% loaded and the states on disk do not reflect the intent. + rabbit_ff_registry_factory:acquire_state_change_lock(), + try + _ = rabbit_ff_registry_factory:initialize_registry(), + ok + after + rabbit_ff_registry_factory:release_state_change_lock() + end. diff --git a/deps/rabbit/src/rabbit_fhc_helpers.erl b/deps/rabbit/src/rabbit_fhc_helpers.erl index 9bc2bf05efee..70014f2fc260 100644 --- a/deps/rabbit/src/rabbit_fhc_helpers.erl +++ b/deps/rabbit/src/rabbit_fhc_helpers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_fhc_helpers). @@ -30,9 +30,8 @@ clear_queue_read_cache([]) -> ok; clear_queue_read_cache([Q | Rest]) when ?is_amqqueue(Q) -> MPid = amqqueue:get_pid(Q), - SPids = amqqueue:get_slave_pids(Q), %% Limit the action to the current node. - Pids = [P || P <- [MPid | SPids], node(P) =:= node()], + Pids = [P || P <- [MPid], node(P) =:= node()], %% This function is executed in the context of the backing queue %% process because the read buffer is stored in the process %% dictionary. diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index c170716087ae..0c981b543ad9 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -2,9 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
- -%% before post gc 1M msg: 203MB, after recovery + gc: 203MB +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_fifo). @@ -16,7 +14,28 @@ -dialyzer(no_improper_lists). -include("rabbit_fifo.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). + +-define(STATE, ?MODULE). + +-define(CONSUMER_PID(Pid), #consumer{cfg = #consumer_cfg{pid = Pid}}). +-define(CONSUMER_PRIORITY(P), #consumer{cfg = #consumer_cfg{priority = P}}). +-define(CONSUMER_TAG_PID(Tag, Pid), + #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid}}). + +-ifdef(TEST). +-define(SIZE(Msg), + case mc:is(Msg) of + true -> + mc:size(Msg); + false when is_binary(Msg) -> + {0, byte_size(Msg)}; + false -> + {0, erts_debug:size(Msg)} + end). +-else. +-define(SIZE(Msg), mc:size(Msg)). +-endif. -export([ %% ra_machine callbacks @@ -32,7 +51,7 @@ which_module/1, %% aux init_aux/1, - handle_aux/6, + handle_aux/5, % queries query_messages_ready/1, query_messages_checked_out/1, @@ -49,12 +68,12 @@ query_peek/2, query_notify_decorators_info/1, usage/1, + is_v4/0, %% misc - dehydrate_state/1, - normalize/1, get_msg_header/1, get_header/2, + annotate_msg/2, get_msg/1, %% protocol helpers @@ -63,8 +82,10 @@ make_checkout/3, make_settle/2, make_return/2, + is_return/1, make_discard/2, make_credit/4, + make_modify/5, make_purge/0, make_purge_nodes/1, make_update_config/1, @@ -73,14 +94,23 @@ -ifdef(TEST). -export([update_header/4, - chunk_disk_msgs/3]). + chunk_disk_msgs/3, + smallest_raft_index/1, + make_requeue/4]). -endif. +-import(serial_number, [add/2, diff/2]). +-define(ENQ_V2, e). + %% command records representing all the protocol actions that are supported -record(enqueue, {pid :: option(pid()), seq :: option(msg_seqno()), msg :: raw_msg()}). --record(requeue, {consumer_id :: consumer_id(), +-record(?ENQ_V2, {seq :: option(msg_seqno()), + msg :: raw_msg(), + size :: {MetadataSize :: non_neg_integer(), + PayloadSize :: non_neg_integer()}}). +-record(requeue, {consumer_key :: consumer_key(), msg_id :: msg_id(), index :: ra:index(), header :: msg_header(), @@ -89,23 +119,30 @@ -record(checkout, {consumer_id :: consumer_id(), spec :: checkout_spec(), meta :: consumer_meta()}). --record(settle, {consumer_id :: consumer_id(), +-record(settle, {consumer_key :: consumer_key(), msg_ids :: [msg_id()]}). --record(return, {consumer_id :: consumer_id(), +-record(return, {consumer_key :: consumer_key(), msg_ids :: [msg_id()]}). --record(discard, {consumer_id :: consumer_id(), +-record(discard, {consumer_key :: consumer_key(), msg_ids :: [msg_id()]}). --record(credit, {consumer_id :: consumer_id(), +-record(credit, {consumer_key :: consumer_key(), credit :: non_neg_integer(), - delivery_count :: non_neg_integer(), + delivery_count :: rabbit_queue_type:delivery_count(), drain :: boolean()}). +-record(modify, {consumer_key :: consumer_key(), + msg_ids :: [msg_id()], + delivery_failed :: boolean(), + undeliverable_here :: boolean(), + annotations :: mc:annotations()}). -record(purge, {}). -record(purge_nodes, {nodes :: [node()]}). -record(update_config, {config :: config()}). -record(garbage_collection, {}). +% -record(eval_consumer_timeouts, {consumer_keys :: [consumer_key()]}). 
-opaque protocol() :: #enqueue{} | + #?ENQ_V2{} | #requeue{} | #register_enqueuer{} | #checkout{} | @@ -113,6 +150,7 @@ #return{} | #discard{} | #credit{} | + #modify{} | #purge{} | #purge_nodes{} | #update_config{} | @@ -126,15 +164,15 @@ -type client_msg() :: delivery(). %% the messages `rabbit_fifo' can send to consumers. --opaque state() :: #?MODULE{}. +-opaque state() :: #?STATE{}. -export_type([protocol/0, delivery/0, command/0, credit_mode/0, - consumer_tag/0, consumer_meta/0, consumer_id/0, + consumer_key/0, client_msg/0, msg/0, msg_id/0, @@ -148,13 +186,12 @@ -spec init(config()) -> state(). init(#{name := Name, queue_resource := Resource} = Conf) -> - update_config(Conf, #?MODULE{cfg = #cfg{name = Name, - resource = Resource}}). + update_config(Conf, #?STATE{cfg = #cfg{name = Name, + resource = Resource}}). update_config(Conf, State) -> DLH = maps:get(dead_letter_handler, Conf, undefined), BLH = maps:get(become_leader_handler, Conf, undefined), - RCI = maps:get(release_cursor_interval, Conf, ?RELEASE_CURSOR_EVERY), Overflow = maps:get(overflow_strategy, Conf, drop_head), MaxLength = maps:get(max_length, Conf, undefined), MaxBytes = maps:get(max_bytes, Conf, undefined), @@ -167,39 +204,40 @@ update_config(Conf, State) -> false -> competing end, - Cfg = State#?MODULE.cfg, - RCISpec = {RCI, RCI}, + Cfg = State#?STATE.cfg, LastActive = maps:get(created, Conf, undefined), - State#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCISpec, - dead_letter_handler = DLH, - become_leader_handler = BLH, - overflow_strategy = Overflow, - max_length = MaxLength, - max_bytes = MaxBytes, - consumer_strategy = ConsumerStrategy, - delivery_limit = DeliveryLimit, - expires = Expires, - msg_ttl = MsgTTL}, - last_active = LastActive}. + State#?STATE{cfg = Cfg#cfg{dead_letter_handler = DLH, + become_leader_handler = BLH, + overflow_strategy = Overflow, + max_length = MaxLength, + max_bytes = MaxBytes, + consumer_strategy = ConsumerStrategy, + delivery_limit = DeliveryLimit, + expires = Expires, + msg_ttl = MsgTTL}, + last_active = LastActive}. % msg_ids are scoped per consumer % ra_indexes holds all raft indexes for enqueues currently on queue -spec apply(ra_machine:command_meta_data(), command(), state()) -> - {state(), Reply :: term(), ra_machine:effects()} | - {state(), Reply :: term()}. + {state(), ra_machine:reply(), ra_machine:effects() | ra_machine:effect()} | + {state(), ra_machine:reply()}. 
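Since the revised spec above lets apply/3 reply with a list of effects, a single effect, or no effects at all, callers (typically tests) tend to normalise the shape before asserting on it. A minimal sketch, with the helper name purely illustrative and not part of this patch:

%% Illustrative helper (not part of this patch): normalise the reply
%% shapes permitted by the apply/3 spec above into {State, Reply, Effects}.
apply_cmd(Meta, Cmd, State0) ->
    case rabbit_fifo:apply(Meta, Cmd, State0) of
        {State, Reply, Effects} when is_list(Effects) ->
            {State, Reply, Effects};
        {State, Reply, Effect} ->
            %% a single effect tuple is also allowed by the spec
            {State, Reply, [Effect]};
        {State, Reply} ->
            {State, Reply, []}
    end.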
apply(Meta, #enqueue{pid = From, seq = Seq, msg = RawMsg}, State00) -> - apply_enqueue(Meta, From, Seq, RawMsg, State00); + apply_enqueue(Meta, From, Seq, RawMsg, message_size(RawMsg), State00); +apply(#{reply_mode := {notify, _Corr, EnqPid}} = Meta, + #?ENQ_V2{seq = Seq, msg = RawMsg, size = Size}, State00) -> + apply_enqueue(Meta, EnqPid, Seq, RawMsg, Size, State00); apply(_Meta, #register_enqueuer{pid = Pid}, - #?MODULE{enqueuers = Enqueuers0, - cfg = #cfg{overflow_strategy = Overflow}} = State0) -> + #?STATE{enqueuers = Enqueuers0, + cfg = #cfg{overflow_strategy = Overflow}} = State0) -> State = case maps:is_key(Pid, Enqueuers0) of true -> %% if the enqueuer exits just echo the overflow state State0; false -> - State0#?MODULE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}} + State0#?STATE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}} end, Res = case is_over_limit(State) of true when Overflow == reject_publish -> @@ -208,201 +246,198 @@ apply(_Meta, #register_enqueuer{pid = Pid}, ok end, {State, Res, [{monitor, process, Pid}]}; -apply(Meta, - #settle{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0} = State) -> - case Cons0 of - #{ConsumerId := Con0} -> - complete_and_checkout(Meta, MsgIds, ConsumerId, +apply(Meta, #settle{msg_ids = MsgIds, + consumer_key = Key}, + #?STATE{consumers = Consumers} = State) -> + case find_consumer(Key, Consumers) of + {ConsumerKey, Con0} -> + %% find_consumer/2 returns the actual consumer key even if + %% if id was passed instead for example + complete_and_checkout(Meta, MsgIds, ConsumerKey, Con0, [], State); _ -> {State, ok} end; -apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons, - dlx = DlxState0, - cfg = #cfg{dead_letter_handler = DLH}} = State0) -> - case Cons of - #{ConsumerId := #consumer{checked_out = Checked} = Con} -> - % Publishing to dead-letter exchange must maintain same order as messages got rejected. 
- DiscardMsgs = lists:filtermap(fun(Id) -> - case maps:get(Id, Checked, undefined) of - undefined -> - false; - Msg -> - {true, Msg} - end - end, MsgIds), - {DlxState, Effects} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, DLH, DlxState0), - State = State0#?MODULE{dlx = DlxState}, - complete_and_checkout(Meta, MsgIds, ConsumerId, Con, Effects, State); +apply(Meta, #discard{consumer_key = ConsumerKey, + msg_ids = MsgIds}, + #?STATE{consumers = Consumers } = State0) -> + case find_consumer(ConsumerKey, Consumers) of + {ConsumerKey, #consumer{} = Con} -> + discard(Meta, MsgIds, ConsumerKey, Con, true, #{}, State0); _ -> {State0, ok} end; -apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0} = State) -> - case Cons0 of - #{ConsumerId := #consumer{checked_out = Checked0}} -> - Returned = maps:with(MsgIds, Checked0), - return(Meta, ConsumerId, Returned, [], State); +apply(Meta, #return{consumer_key = ConsumerKey, + msg_ids = MsgIds}, + #?STATE{consumers = Cons} = State) -> + case find_consumer(ConsumerKey, Cons) of + {ActualConsumerKey, #consumer{checked_out = Checked}} -> + return(Meta, ActualConsumerKey, MsgIds, false, + #{}, Checked, [], State); + _ -> + {State, ok} + end; +apply(Meta, #modify{consumer_key = ConsumerKey, + delivery_failed = DelFailed, + undeliverable_here = Undel, + annotations = Anns, + msg_ids = MsgIds}, + #?STATE{consumers = Cons} = State) -> + case find_consumer(ConsumerKey, Cons) of + {ConsumerKey, #consumer{checked_out = Checked}} + when Undel == false -> + return(Meta, ConsumerKey, MsgIds, DelFailed, + Anns, Checked, [], State); + {ConsumerKey, #consumer{} = Con} + when Undel == true -> + discard(Meta, MsgIds, ConsumerKey, Con, DelFailed, Anns, State); _ -> {State, ok} end; apply(#{index := Idx} = Meta, - #requeue{consumer_id = ConsumerId, + #requeue{consumer_key = ConsumerKey, msg_id = MsgId, index = OldIdx, - header = Header0, - msg = _Msg}, - #?MODULE{consumers = Cons0, - messages = Messages, - ra_indexes = Indexes0, - enqueue_count = EnqCount} = State00) -> - case Cons0 of - #{ConsumerId := #consumer{checked_out = Checked0} = Con0} + header = Header0}, + #?STATE{consumers = Cons, + messages = Messages, + ra_indexes = Indexes0, + enqueue_count = EnqCount} = State00) -> + %% the actual consumer key was looked up in the aux handler so we + %% dont need to use find_consumer/2 here + case Cons of + #{ConsumerKey := #consumer{checked_out = Checked0} = Con0} when is_map_key(MsgId, Checked0) -> %% construct a message with the current raft index - %% and update delivery count before adding it to the message queue - Header = update_header(delivery_count, fun incr/1, 1, Header0), + %% and update acquired count before adding it to the message queue + Header = update_header(acquired_count, fun incr/1, 1, Header0), State0 = add_bytes_return(Header, State00), Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked0), - credit = increase_credit(Meta, Con0, 1)}, - State1 = State0#?MODULE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), - messages = lqueue:in(?MSG(Idx, Header), Messages), - enqueue_count = EnqCount + 1}, - State2 = update_or_remove_sub(Meta, ConsumerId, Con, State1), - {State, Ret, Effs} = checkout(Meta, State0, State2, []), - update_smallest_raft_index(Idx, Ret, - maybe_store_release_cursor(Idx, State), - Effs); + credit = increase_credit(Con0, 1)}, + State1 = State0#?STATE{ra_indexes = rabbit_fifo_index:delete(OldIdx, + Indexes0), + messages = rabbit_fifo_q:in(no, + ?MSG(Idx, Header), + Messages), + 
enqueue_count = EnqCount + 1}, + State2 = update_or_remove_con(Meta, ConsumerKey, Con, State1), + checkout(Meta, State0, State2, []); _ -> {State00, ok, []} end; -apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, - drain = Drain, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0, - service_queue = ServiceQueue0, - waiting_consumers = Waiting0} = State0) -> - case Cons0 of - #{ConsumerId := #consumer{delivery_count = DelCnt} = Con0} -> - %% this can go below 0 when credit is reduced - C = max(0, RemoteDelCnt + NewCredit - DelCnt), - %% grant the credit - Con1 = Con0#consumer{credit = C}, - ServiceQueue = maybe_queue_consumer(ConsumerId, Con1, - ServiceQueue0), - Cons = maps:put(ConsumerId, Con1, Cons0), - {State1, ok, Effects} = - checkout(Meta, State0, - State0#?MODULE{service_queue = ServiceQueue, - consumers = Cons}, []), - Response = {send_credit_reply, messages_ready(State1)}, - %% by this point all checkouts for the updated credit value - %% should be processed so we can evaluate the drain - case Drain of - false -> - %% just return the result of the checkout - {State1, Response, Effects}; - true -> - Con = #consumer{credit = PostCred} = - maps:get(ConsumerId, State1#?MODULE.consumers), - %% add the outstanding credit to the delivery count - DeliveryCount = Con#consumer.delivery_count + PostCred, - Consumers = maps:put(ConsumerId, - Con#consumer{delivery_count = DeliveryCount, - credit = 0}, - State1#?MODULE.consumers), - Drained = Con#consumer.credit, - {CTag, _} = ConsumerId, - {State1#?MODULE{consumers = Consumers}, - %% returning a multi response with two client actions - %% for the channel to execute - {multi, [Response, {send_drained, {CTag, Drained}}]}, - Effects} - end; - _ when Waiting0 /= [] -> - %% there are waiting consuemrs - case lists:keytake(ConsumerId, 1, Waiting0) of - {value, {_, Con0 = #consumer{delivery_count = DelCnt}}, Waiting} -> - %% the consumer is a waiting one - %% grant the credit - C = max(0, RemoteDelCnt + NewCredit - DelCnt), - Con = Con0#consumer{credit = C}, - State = State0#?MODULE{waiting_consumers = - [{ConsumerId, Con} | Waiting]}, - {State, {send_credit_reply, messages_ready(State)}}; - false -> - {State0, ok} - end; +apply(Meta, #credit{consumer_key = ConsumerKey} = Credit, + #?STATE{consumers = Cons} = State) -> + case Cons of + #{ConsumerKey := Con} -> + credit_active_consumer(Credit, Con, Meta, State); _ -> - %% credit for unknown consumer - just ignore - {State0, ok} + case lists:keytake(ConsumerKey, 1, State#?STATE.waiting_consumers) of + {value, {_, Con}, Waiting} -> + credit_inactive_consumer(Credit, Con, Waiting, State); + false -> + %% credit for unknown consumer - just ignore + {State, ok} + end end; apply(_, #checkout{spec = {dequeue, _}}, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}} = State0) -> + #?STATE{cfg = #cfg{consumer_strategy = single_active}} = State0) -> {State0, {error, {unsupported, single_active_consumer}}}; apply(#{index := Index, system_time := Ts, from := From} = Meta, #checkout{spec = {dequeue, Settlement}, meta = ConsumerMeta, consumer_id = ConsumerId}, - #?MODULE{consumers = Consumers} = State00) -> + #?STATE{consumers = Consumers} = State00) -> %% dequeue always updates last_active - State0 = State00#?MODULE{last_active = Ts}, + State0 = State00#?STATE{last_active = Ts}, %% all dequeue operations result in keeping the queue from expiring - Exists = maps:is_key(ConsumerId, Consumers), + Exists = find_consumer(ConsumerId, Consumers) /= undefined, case messages_ready(State0) of 
0 -> - update_smallest_raft_index(Index, {dequeue, empty}, State0, []); + {State0, {dequeue, empty}, []}; _ when Exists -> %% a dequeue using the same consumer_id isn't possible at this point {State0, {dequeue, empty}}; _ -> - {_, State1} = update_consumer(Meta, ConsumerId, ConsumerMeta, - {once, 1, simple_prefetch}, 0, + {_, State1} = update_consumer(Meta, ConsumerId, ConsumerId, ConsumerMeta, + {once, {simple_prefetch, 1}}, 0, State0), case checkout_one(Meta, false, State1, []) of - {success, _, MsgId, ?MSG(RaftIdx, Header), ExpiredMsg, State2, Effects0} -> - {State4, Effects1} = case Settlement of - unsettled -> - {_, Pid} = ConsumerId, - {State2, [{monitor, process, Pid} | Effects0]}; - settled -> - %% immediately settle the checkout - {State3, _, SettleEffects} = - apply(Meta, make_settle(ConsumerId, [MsgId]), - State2), - {State3, SettleEffects ++ Effects0} - end, - Effects2 = [reply_log_effect(RaftIdx, MsgId, Header, messages_ready(State4), From) | Effects1], - {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, - Effects2), - Reply = '$ra_no_reply', - case {DroppedMsg, ExpiredMsg} of - {false, false} -> - {State, Reply, Effects}; - _ -> - update_smallest_raft_index(Index, Reply, State, Effects) - end; + {success, _, MsgId, + ?MSG(RaftIdx, Header), _ExpiredMsg, State2, Effects0} -> + {State4, Effects1} = + case Settlement of + unsettled -> + {_, Pid} = ConsumerId, + {State2, [{monitor, process, Pid} | Effects0]}; + settled -> + %% immediately settle the checkout + {State3, _, SettleEffects} = + apply(Meta, make_settle(ConsumerId, [MsgId]), + State2), + {State3, SettleEffects ++ Effects0} + end, + Effects2 = [reply_log_effect(RaftIdx, MsgId, Header, + messages_ready(State4), From) + | Effects1], + {State, _DroppedMsg, Effects} = + evaluate_limit(Index, false, State0, State4, Effects2), + {State, '$ra_no_reply', Effects}; {nochange, _ExpiredMsg = true, State2, Effects0} -> %% All ready messages expired. 
- State3 = State2#?MODULE{consumers = maps:remove(ConsumerId, State2#?MODULE.consumers)}, - {State, _, Effects} = evaluate_limit(Index, false, State0, State3, Effects0), - update_smallest_raft_index(Index, {dequeue, empty}, State, Effects) + State3 = State2#?STATE{consumers = + maps:remove(ConsumerId, + State2#?STATE.consumers)}, + {State, _, Effects} = evaluate_limit(Index, false, State0, + State3, Effects0), + {State, {dequeue, empty}, Effects} end end; +apply(#{index := _Idx} = Meta, + #checkout{spec = Spec, + consumer_id = ConsumerId}, State0) + when Spec == cancel orelse + Spec == remove -> + case consumer_key_from_id(ConsumerId, State0) of + {ok, ConsumerKey} -> + {State1, Effects1} = activate_next_consumer( + cancel_consumer(Meta, ConsumerKey, State0, [], + Spec)), + Reply = {ok, consumer_cancel_info(ConsumerKey, State1)}, + {State, _, Effects} = checkout(Meta, State0, State1, Effects1), + {State, Reply, Effects}; + error -> + {State0, {error, consumer_not_found}, []} + end; apply(#{index := Idx} = Meta, - #checkout{spec = cancel, - consumer_id = ConsumerId}, State0) -> - {State1, Effects1} = cancel_consumer(Meta, ConsumerId, State0, [], - consumer_cancel), - {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), - update_smallest_raft_index(Idx, Reply, State, Effects); -apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, - consumer_id = {_, Pid} = ConsumerId}, State0) -> - Priority = get_priority_from_args(ConsumerMeta), - {Consumer, State1} = update_consumer(Meta, ConsumerId, ConsumerMeta, - Spec, Priority, State0), + #checkout{spec = Spec0, + meta = ConsumerMeta, + consumer_id = {_, Pid} = ConsumerId}, State0) -> + %% might be better to check machine_version + IsV4 = tuple_size(Spec0) == 2, + %% normalise spec format + Spec = case Spec0 of + {_, _} -> + Spec0; + {Life, Prefetch, simple_prefetch} -> + {Life, {simple_prefetch, Prefetch}}; + {Life, _Credit, credited} -> + {Life, credited} + end, + Priority = get_priority(ConsumerMeta), + ConsumerKey = case consumer_key_from_id(ConsumerId, State0) of + {ok, K} -> + K; + error when IsV4 -> + %% if the consumer does not already exist use the + %% raft index as it's unique identifier in future + %% settle, credit, return and discard operations + Idx; + error -> + ConsumerId + end, + {Consumer, State1} = update_consumer(Meta, ConsumerKey, ConsumerId, + ConsumerMeta, Spec, Priority, State0), {State2, Effs} = activate_next_consumer(State1, []), #consumer{checked_out = Checked, credit = Credit, @@ -412,90 +447,88 @@ apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, %% reply with a consumer summary Reply = {ok, #{next_msg_id => NextMsgId, credit => Credit, + key => ConsumerKey, delivery_count => DeliveryCount, + is_active => is_active(ConsumerKey, State2), num_checked_out => map_size(Checked)}}, checkout(Meta, State0, State2, [{monitor, process, Pid} | Effs], Reply); apply(#{index := Index}, #purge{}, - #?MODULE{messages_total = Total, - returns = Returns, - ra_indexes = Indexes0 - } = State0) -> + #?STATE{messages_total = Total, + returns = Returns, + ra_indexes = Indexes0 + } = State0) -> NumReady = messages_ready(State0), Indexes = case Total of NumReady -> - %% All messages are either in 'messages' queue or 'returns' queue. + %% All messages are either in 'messages' queue or + %% 'returns' queue. %% No message is awaiting acknowledgement. %% Optimization: empty all 'ra_indexes'. rabbit_fifo_index:empty(); _ -> - %% Some messages are checked out to consumers awaiting acknowledgement. 
+ %% Some messages are checked out to consumers + %% awaiting acknowledgement. %% Therefore we cannot empty all 'ra_indexes'. - %% We only need to delete the indexes from the 'returns' queue because - %% messages of the 'messages' queue are not part of the 'ra_indexes'. + %% We only need to delete the indexes from the 'returns' + %% queue because messages of the 'messages' queue are + %% not part of the 'ra_indexes'. lqueue:fold(fun(?MSG(I, _), Acc) -> rabbit_fifo_index:delete(I, Acc) end, Indexes0, Returns) end, - State1 = State0#?MODULE{ra_indexes = Indexes, - messages = lqueue:new(), - messages_total = Total - NumReady, - returns = lqueue:new(), - msg_bytes_enqueue = 0 - }, - Effects0 = [garbage_collection], + State1 = State0#?STATE{ra_indexes = Indexes, + messages = rabbit_fifo_q:new(), + messages_total = Total - NumReady, + returns = lqueue:new(), + msg_bytes_enqueue = 0 + }, + Effects0 = [{aux, force_checkpoint}, garbage_collection], Reply = {purge, NumReady}, {State, _, Effects} = evaluate_limit(Index, false, State0, State1, Effects0), - update_smallest_raft_index(Index, Reply, State, Effects); -apply(#{index := Idx}, #garbage_collection{}, State) -> - update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); + {State, Reply, Effects}; +apply(#{index := _Idx}, #garbage_collection{}, State) -> + {State, ok, [{aux, garbage_collection}]}; apply(Meta, {timeout, expire_msgs}, State) -> checkout(Meta, State, State, []); -apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, +apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, - #?MODULE{consumers = Cons0, - cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = Waiting0, - enqueuers = Enqs0} = State0) -> + #?STATE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0, + enqueuers = Enqs0} = State0) -> Node = node(Pid), %% if the pid refers to an active or cancelled consumer, %% mark it as suspected and return it to the waiting queue {State1, Effects0} = - maps:fold(fun({_, P} = Cid, C0, {S0, E0}) - when node(P) =:= Node -> - %% the consumer should be returned to waiting - %% and checked out messages should be returned - Effs = consumer_update_active_effects( - S0, Cid, C0, false, suspected_down, E0), - C1 = case MachineVersion of - V when V >= 3 -> - C0; - 2 -> - Checked = C0#consumer.checked_out, - Credit = increase_credit(Meta, C0, maps:size(Checked)), - C0#consumer{credit = Credit} - end, - {St, Effs1} = return_all(Meta, S0, Effs, Cid, C1), - %% if the consumer was cancelled there is a chance it got - %% removed when returning hence we need to be defensive here - Waiting = case St#?MODULE.consumers of - #{Cid := C} -> - Waiting0 ++ [{Cid, C}]; - _ -> - Waiting0 - end, - {St#?MODULE{consumers = maps:remove(Cid, St#?MODULE.consumers), - waiting_consumers = Waiting, - last_active = Ts}, - Effs1}; - (_, _, S) -> - S - end, {State0, []}, Cons0), + maps:fold( + fun(CKey, ?CONSUMER_PID(P) = C0, {S0, E0}) + when node(P) =:= Node -> + %% the consumer should be returned to waiting + %% and checked out messages should be returned + Effs = consumer_update_active_effects( + S0, C0, false, suspected_down, E0), + {St, Effs1} = return_all(Meta, S0, Effs, CKey, C0, true), + %% if the consumer was cancelled there is a chance it got + %% removed when returning hence we need to be defensive here + Waiting = case St#?STATE.consumers of + #{CKey := C} -> + Waiting0 ++ [{CKey, C}]; + _ -> + Waiting0 + end, + {St#?STATE{consumers = maps:remove(CKey, 
St#?STATE.consumers), + waiting_consumers = Waiting, + last_active = Ts}, + Effs1}; + (_, _, S) -> + S + end, {State0, []}, Cons0), WaitingConsumers = update_waiting_consumer_status(Node, State1, suspected_down), %% select a new consumer from the waiting queue and run a checkout - State2 = State1#?MODULE{waiting_consumers = WaitingConsumers}, + State2 = State1#?STATE{waiting_consumers = WaitingConsumers}, {State, Effects1} = activate_next_consumer(State2, Effects0), %% mark any enquers as suspected @@ -504,10 +537,10 @@ apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, (_, E) -> E end, Enqs0), Effects = [{monitor, node, Node} | Effects1], - checkout(Meta, State0, State#?MODULE{enqueuers = Enqs}, Effects); -apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, + checkout(Meta, State0, State#?STATE{enqueuers = Enqs}, Effects); +apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, - #?MODULE{consumers = Cons0, + #?STATE{consumers = Cons0, enqueuers = Enqs0} = State0) -> %% A node has been disconnected. This doesn't necessarily mean that %% any processes on this node are down, they _may_ come back so here @@ -521,19 +554,12 @@ apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, {State, Effects1} = maps:fold( - fun({_, P} = Cid, #consumer{checked_out = Checked0, - status = up} = C0, + fun(CKey, #consumer{cfg = #consumer_cfg{pid = P}, + status = up} = C0, {St0, Eff}) when node(P) =:= Node -> - C = case MachineVersion of - V when V >= 3 -> - C0#consumer{status = suspected_down}; - 2 -> - Credit = increase_credit(Meta, C0, map_size(Checked0)), - C0#consumer{status = suspected_down, - credit = Credit} - end, - {St, Eff0} = return_all(Meta, St0, Eff, Cid, C), - Eff1 = consumer_update_active_effects(St, Cid, C, false, + C = C0#consumer{status = suspected_down}, + {St, Eff0} = return_all(Meta, St0, Eff, CKey, C, true), + Eff1 = consumer_update_active_effects(St, C, false, suspected_down, Eff0), {St, Eff1}; (_, _, {St, Eff}) -> @@ -549,15 +575,14 @@ apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, % these processes Effects = [{monitor, node, Node} | Effects1], - checkout(Meta, State0, State#?MODULE{enqueuers = Enqs, - last_active = Ts}, Effects); -apply(#{index := Idx} = Meta, {down, Pid, _Info}, State0) -> - {State1, Effects1} = handle_down(Meta, Pid, State0), - {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), - update_smallest_raft_index(Idx, Reply, State, Effects); -apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0, - enqueuers = Enqs0, - service_queue = _SQ0} = State0) -> + checkout(Meta, State0, State#?STATE{enqueuers = Enqs, + last_active = Ts}, Effects); +apply(Meta, {down, Pid, _Info}, State0) -> + {State1, Effects1} = activate_next_consumer(handle_down(Meta, Pid, State0)), + checkout(Meta, State0, State1, Effects1); +apply(Meta, {nodeup, Node}, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + service_queue = _SQ0} = State0) -> %% A node we are monitoring has come back. 
%% If we have suspected any processes of being %% down we should now re-issue the monitors for them to detect if they're @@ -572,329 +597,220 @@ apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0, ConsumerUpdateActiveFun = consumer_active_flag_update_function(State0), %% mark all consumers as up {State1, Effects1} = - maps:fold(fun({_, P} = ConsumerId, C, {SAcc, EAcc}) + maps:fold(fun(ConsumerKey, ?CONSUMER_PID(P) = C, {SAcc, EAcc}) when (node(P) =:= Node) and (C#consumer.status =/= cancelled) -> - EAcc1 = ConsumerUpdateActiveFun(SAcc, ConsumerId, + EAcc1 = ConsumerUpdateActiveFun(SAcc, ConsumerKey, C, true, up, EAcc), - {update_or_remove_sub(Meta, ConsumerId, + {update_or_remove_con(Meta, ConsumerKey, C#consumer{status = up}, SAcc), EAcc1}; (_, _, Acc) -> Acc end, {State0, Monitors}, Cons0), Waiting = update_waiting_consumer_status(Node, State1, up), - State2 = State1#?MODULE{enqueuers = Enqs1, - waiting_consumers = Waiting}, + State2 = State1#?STATE{enqueuers = Enqs1, + waiting_consumers = Waiting}, {State, Effects} = activate_next_consumer(State2, Effects1), checkout(Meta, State0, State, Effects); apply(_, {nodedown, _Node}, State) -> {State, ok}; -apply(#{index := Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> +apply(#{index := _Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> {State, Effects} = lists:foldl(fun(Node, {S, E}) -> purge_node(Meta, Node, S, E) end, {State0, []}, Nodes), - update_smallest_raft_index(Idx, ok, State, Effects); -apply(#{index := Idx} = Meta, + {State, ok, Effects}; +apply(#{index := _Idx} = Meta, #update_config{config = #{dead_letter_handler := NewDLH} = Conf}, - #?MODULE{cfg = #cfg{dead_letter_handler = OldDLH, - resource = QRes}, - dlx = DlxState0} = State0) -> - {DlxState, Effects0} = rabbit_fifo_dlx:update_config(OldDLH, NewDLH, QRes, DlxState0), - State1 = update_config(Conf, State0#?MODULE{dlx = DlxState}), - {State, Reply, Effects} = checkout(Meta, State0, State1, Effects0), - update_smallest_raft_index(Idx, Reply, State, Effects); -apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> - State = convert(FromVersion, ToVersion, V0State), + #?STATE{cfg = #cfg{dead_letter_handler = OldDLH, + resource = QRes}, + dlx = DlxState0} = State0) -> + {DlxState, Effects0} = rabbit_fifo_dlx:update_config(OldDLH, NewDLH, QRes, + DlxState0), + State1 = update_config(Conf, State0#?STATE{dlx = DlxState}), + checkout(Meta, State0, State1, Effects0); +apply(Meta, {machine_version, FromVersion, ToVersion}, V0State) -> + State = convert(Meta, FromVersion, ToVersion, V0State), {State, ok, [{aux, {dlx, setup}}]}; -apply(#{index := IncomingRaftIdx} = Meta, {dlx, _} = Cmd, - #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, +apply(#{index := _IncomingRaftIdx} = Meta, {dlx, _} = Cmd, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, dlx = DlxState0} = State0) -> {DlxState, Effects0} = rabbit_fifo_dlx:apply(Meta, Cmd, DLH, DlxState0), - State1 = State0#?MODULE{dlx = DlxState}, - {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), - update_smallest_raft_index(IncomingRaftIdx, State, Effects); + State1 = State0#?STATE{dlx = DlxState}, + checkout(Meta, State0, State1, Effects0); apply(_Meta, Cmd, State) -> %% handle unhandled commands gracefully rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), {State, ok, []}. 
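To make the consumer_key-based protocol above concrete, here is a hedged sketch of settling one delivered message in a test: ConsumerKey is the key field returned in the #checkout{} reply shown earlier, and the Meta keys mirror the ones the apply/3 clauses match on; the helper name and arguments are placeholders, not part of this patch.

%% Sketch only: settle a message using the consumer key handed back by
%% the checkout reply (key => ConsumerKey in the reply map above).
settle_one(State0, ConsumerKey, MsgId, Idx) ->
    Meta = #{index => Idx,
             system_time => erlang:system_time(millisecond)},
    Settle = rabbit_fifo:make_settle(ConsumerKey, [MsgId]),
    {State, ok, _Effects} = rabbit_fifo:apply(Meta, Settle, State0),
    State.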
-convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) -> - ?MSG(RaftIdx, Header); -convert_msg({RaftIdx, {Header, _Msg}}) when is_integer(RaftIdx) -> - ?MSG(RaftIdx, Header); -convert_msg({'$empty_msg', Header}) -> - %% dummy index - ?MSG(undefined, Header); -convert_msg({'$prefix_msg', Header}) -> - %% dummy index - ?MSG(undefined, Header); -convert_msg({Header, empty}) -> - convert_msg(Header); -convert_msg(Header) when ?IS_HEADER(Header) -> - ?MSG(undefined, Header). - -convert_consumer_v1_to_v2({ConsumerTag, Pid}, CV1) -> - Meta = element(2, CV1), - CheckedOut = element(3, CV1), - NextMsgId = element(4, CV1), - Credit = element(5, CV1), - DeliveryCount = element(6, CV1), - CreditMode = element(7, CV1), - LifeTime = element(8, CV1), - Status = element(9, CV1), - Priority = element(10, CV1), - #consumer{cfg = #consumer_cfg{tag = ConsumerTag, - pid = Pid, - meta = Meta, - credit_mode = CreditMode, - lifetime = LifeTime, - priority = Priority}, - credit = Credit, - status = Status, - delivery_count = DeliveryCount, - next_msg_id = NextMsgId, - checked_out = maps:map( - fun (_, {Tag, _} = Msg) when is_atom(Tag) -> - convert_msg(Msg); - (_, {_Seq, Msg}) -> - convert_msg(Msg) - end, CheckedOut) - }. - -convert_v1_to_v2(V1State0) -> - V1State = rabbit_fifo_v1:enqueue_all_pending(V1State0), - IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), - ReturnsV1 = rabbit_fifo_v1:get_field(returns, V1State), - MessagesV1 = rabbit_fifo_v1:get_field(messages, V1State), - ConsumersV1 = rabbit_fifo_v1:get_field(consumers, V1State), - WaitingConsumersV1 = rabbit_fifo_v1:get_field(waiting_consumers, V1State), - %% remove all raft idx in messages from index - {_, PrefReturns, _, PrefMsgs} = rabbit_fifo_v1:get_field(prefix_msgs, V1State), - V2PrefMsgs = lists:foldl(fun(Hdr, Acc) -> - lqueue:in(convert_msg(Hdr), Acc) - end, lqueue:new(), PrefMsgs), - V2PrefReturns = lists:foldl(fun(Hdr, Acc) -> - lqueue:in(convert_msg(Hdr), Acc) - end, lqueue:new(), PrefReturns), - MessagesV2 = lqueue:fold(fun ({_, Msg}, Acc) -> - lqueue:in(convert_msg(Msg), Acc) - end, V2PrefMsgs, MessagesV1), - ReturnsV2 = lqueue:fold(fun ({_SeqId, Msg}, Acc) -> - lqueue:in(convert_msg(Msg), Acc) - end, V2PrefReturns, ReturnsV1), - ConsumersV2 = maps:map( - fun (ConsumerId, CV1) -> - convert_consumer_v1_to_v2(ConsumerId, CV1) - end, ConsumersV1), - WaitingConsumersV2 = lists:map( - fun ({ConsumerId, CV1}) -> - {ConsumerId, convert_consumer_v1_to_v2(ConsumerId, CV1)} - end, WaitingConsumersV1), - EnqueuersV1 = rabbit_fifo_v1:get_field(enqueuers, V1State), - EnqueuersV2 = maps:map(fun (_EnqPid, Enq) -> - Enq#enqueuer{unused = undefined} - end, EnqueuersV1), - - %% do after state conversion - %% The (old) format of dead_letter_handler in RMQ < v3.10 is: - %% {Module, Function, Args} - %% The (new) format of dead_letter_handler in RMQ >= v3.10 is: - %% undefined | {at_most_once, {Module, Function, Args}} | at_least_once - %% - %% Note that the conversion must convert both from old format to new format - %% as well as from new format to new format. 
The latter is because quorum queues - %% created in RMQ >= v3.10 are still initialised with rabbit_fifo_v0 as described in - %% https://github.com/rabbitmq/ra/blob/e0d1e6315a45f5d3c19875d66f9d7bfaf83a46e3/src/ra_machine.erl#L258-L265 - DLH = case rabbit_fifo_v1:get_cfg_field(dead_letter_handler, V1State) of - {_M, _F, _A = [_DLX = undefined|_]} -> - %% queue was declared in RMQ < v3.10 and no DLX configured - undefined; - {_M, _F, _A} = MFA -> - %% queue was declared in RMQ < v3.10 and DLX configured - {at_most_once, MFA}; - Other -> - Other - end, - - Cfg = #cfg{name = rabbit_fifo_v1:get_cfg_field(name, V1State), - resource = rabbit_fifo_v1:get_cfg_field(resource, V1State), - release_cursor_interval = rabbit_fifo_v1:get_cfg_field(release_cursor_interval, V1State), - dead_letter_handler = DLH, - become_leader_handler = rabbit_fifo_v1:get_cfg_field(become_leader_handler, V1State), - %% TODO: what if policy enabling reject_publish was applied before conversion? - overflow_strategy = rabbit_fifo_v1:get_cfg_field(overflow_strategy, V1State), - max_length = rabbit_fifo_v1:get_cfg_field(max_length, V1State), - max_bytes = rabbit_fifo_v1:get_cfg_field(max_bytes, V1State), - consumer_strategy = rabbit_fifo_v1:get_cfg_field(consumer_strategy, V1State), - delivery_limit = rabbit_fifo_v1:get_cfg_field(delivery_limit, V1State), - expires = rabbit_fifo_v1:get_cfg_field(expires, V1State) - }, - - MessagesConsumersV2 = maps:fold(fun(_ConsumerId, #consumer{checked_out = Checked}, Acc) -> - Acc + maps:size(Checked) - end, 0, ConsumersV2), - MessagesWaitingConsumersV2 = lists:foldl(fun({_ConsumerId, #consumer{checked_out = Checked}}, Acc) -> - Acc + maps:size(Checked) - end, 0, WaitingConsumersV2), - MessagesTotal = lqueue:len(MessagesV2) + - lqueue:len(ReturnsV2) + - MessagesConsumersV2 + - MessagesWaitingConsumersV2, - - #?MODULE{cfg = Cfg, - messages = MessagesV2, - messages_total = MessagesTotal, - returns = ReturnsV2, - enqueue_count = rabbit_fifo_v1:get_field(enqueue_count, V1State), - enqueuers = EnqueuersV2, - ra_indexes = IndexesV1, - release_cursors = rabbit_fifo_v1:get_field(release_cursors, V1State), - consumers = ConsumersV2, - service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), - msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), - msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), - waiting_consumers = WaitingConsumersV2, - last_active = rabbit_fifo_v1:get_field(last_active, V1State) - }. - -convert_v2_to_v3(#rabbit_fifo{consumers = ConsumersV2} = StateV2) -> - ConsumersV3 = maps:map(fun(_, C) -> - convert_consumer_v2_to_v3(C) - end, ConsumersV2), - StateV2#rabbit_fifo{consumers = ConsumersV3}. - -convert_consumer_v2_to_v3(C = #consumer{cfg = Cfg = #consumer_cfg{credit_mode = simple_prefetch, - meta = #{prefetch := Prefetch}}}) -> - C#consumer{cfg = Cfg#consumer_cfg{credit_mode = {simple_prefetch, Prefetch}}}; -convert_consumer_v2_to_v3(C) -> - C. 
+convert_v3_to_v4(#{} = _Meta, StateV3) -> + %% TODO: consider emitting release cursors as checkpoints + Messages0 = rabbit_fifo_v3:get_field(messages, StateV3), + Returns0 = lqueue:to_list(rabbit_fifo_v3:get_field(returns, StateV3)), + Consumers0 = rabbit_fifo_v3:get_field(consumers, StateV3), + Consumers = maps:map( + fun (_, #consumer{checked_out = Ch0} = C) -> + Ch = maps:map( + fun (_, ?MSG(I, #{delivery_count := DC} = H)) -> + ?MSG(I, H#{acquired_count => DC}); + (_, Msg) -> + Msg + end, Ch0), + C#consumer{checked_out = Ch} + end, Consumers0), + Returns = lqueue:from_list( + lists:map(fun (?MSG(I, #{delivery_count := DC} = H)) -> + ?MSG(I, H#{acquired_count => DC}); + (Msg) -> + Msg + end, Returns0)), + + Messages = rabbit_fifo_q:from_lqueue(Messages0), + Cfg = rabbit_fifo_v3:get_field(cfg, StateV3), + #?STATE{cfg = Cfg#cfg{unused_1 = ?NIL}, + messages = Messages, + messages_total = rabbit_fifo_v3:get_field(messages_total, StateV3), + returns = Returns, + enqueue_count = rabbit_fifo_v3:get_field(enqueue_count, StateV3), + enqueuers = rabbit_fifo_v3:get_field(enqueuers, StateV3), + ra_indexes = rabbit_fifo_v3:get_field(ra_indexes, StateV3), + consumers = Consumers, + service_queue = rabbit_fifo_v3:get_field(service_queue, StateV3), + dlx = rabbit_fifo_v3:get_field(dlx, StateV3), + msg_bytes_enqueue = rabbit_fifo_v3:get_field(msg_bytes_enqueue, StateV3), + msg_bytes_checkout = rabbit_fifo_v3:get_field(msg_bytes_checkout, StateV3), + waiting_consumers = rabbit_fifo_v3:get_field(waiting_consumers, StateV3), + last_active = rabbit_fifo_v3:get_field(last_active, StateV3), + msg_cache = rabbit_fifo_v3:get_field(msg_cache, StateV3), + unused_1 = []}. purge_node(Meta, Node, State, Effects) -> lists:foldl(fun(Pid, {S0, E0}) -> {S, E} = handle_down(Meta, Pid, S0), {S, E0 ++ E} - end, {State, Effects}, all_pids_for(Node, State)). + end, {State, Effects}, + all_pids_for(Node, State)). -%% any downs that re not noconnection -handle_down(Meta, Pid, #?MODULE{consumers = Cons0, - enqueuers = Enqs0} = State0) -> +%% any downs that are not noconnection +handle_down(Meta, Pid, #?STATE{consumers = Cons0, + enqueuers = Enqs0} = State0) -> % Remove any enqueuer for the down pid - State1 = State0#?MODULE{enqueuers = maps:remove(Pid, Enqs0)}, + State1 = State0#?STATE{enqueuers = maps:remove(Pid, Enqs0)}, {Effects1, State2} = handle_waiting_consumer_down(Pid, State1), % return checked out messages to main queue % Find the consumers for the down pid - DownConsumers = maps:keys( - maps:filter(fun({_, P}, _) -> P =:= Pid end, Cons0)), - lists:foldl(fun(ConsumerId, {S, E}) -> - cancel_consumer(Meta, ConsumerId, S, E, down) + DownConsumers = maps:keys(maps:filter(fun(_CKey, ?CONSUMER_PID(P)) -> + P =:= Pid + end, Cons0)), + lists:foldl(fun(ConsumerKey, {S, E}) -> + cancel_consumer(Meta, ConsumerKey, S, E, down) end, {State2, Effects1}, DownConsumers). consumer_active_flag_update_function( - #?MODULE{cfg = #cfg{consumer_strategy = competing}}) -> - fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) -> - consumer_update_active_effects(State, ConsumerId, Consumer, Active, + #?STATE{cfg = #cfg{consumer_strategy = competing}}) -> + fun(State, _ConsumerKey, Consumer, Active, ActivityStatus, Effects) -> + consumer_update_active_effects(State, Consumer, Active, ActivityStatus, Effects) end; consumer_active_flag_update_function( - #?MODULE{cfg = #cfg{consumer_strategy = single_active}}) -> + #?STATE{cfg = #cfg{consumer_strategy = single_active}}) -> fun(_, _, _, _, _, Effects) -> Effects end. 
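For orientation, a sketch of how the upgrade path above is exercised: ra hands the machine a {machine_version, From, To} command, the corresponding apply/3 clause runs convert/4 (which for 3 -> 4 uses convert_v3_to_v4/2 above, carrying delivery_count over as acquired_count) and emits the dlx setup aux effect. The wrapper name, Idx and V3State are placeholders.

%% Hypothetical upgrade snippet matching the {machine_version, _, _}
%% clause of apply/3: the converted v4 state is returned together with
%% the {aux, {dlx, setup}} effect.
upgrade_v3_to_v4(Idx, V3State) ->
    Meta = #{index => Idx,
             system_time => erlang:system_time(millisecond)},
    {V4State, ok, [{aux, {dlx, setup}}]} =
        rabbit_fifo:apply(Meta, {machine_version, 3, 4}, V3State),
    V4State.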
handle_waiting_consumer_down(_Pid, - #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State) -> + #?STATE{cfg = #cfg{consumer_strategy = competing}} + = State) -> {[], State}; handle_waiting_consumer_down(_Pid, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = []} = State) -> + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State) -> {[], State}; handle_waiting_consumer_down(Pid, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = WaitingConsumers0} = State0) -> + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = WaitingConsumers0} + = State0) -> % get cancel effects for down waiting consumers - Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end, + Down = lists:filter(fun({_, ?CONSUMER_PID(P)}) -> P =:= Pid end, WaitingConsumers0), - Effects = lists:foldl(fun ({ConsumerId, _}, Effects) -> + Effects = lists:foldl(fun ({_ConsumerKey, Consumer}, Effects) -> + ConsumerId = consumer_id(Consumer), cancel_consumer_effects(ConsumerId, State0, Effects) end, [], Down), % update state to have only up waiting consumers - StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end, + StillUp = lists:filter(fun({_CKey, ?CONSUMER_PID(P)}) -> + P =/= Pid + end, WaitingConsumers0), - State = State0#?MODULE{waiting_consumers = StillUp}, + State = State0#?STATE{waiting_consumers = StillUp}, {Effects, State}. update_waiting_consumer_status(Node, - #?MODULE{waiting_consumers = WaitingConsumers}, + #?STATE{waiting_consumers = WaitingConsumers}, Status) -> - [begin - case node(Pid) of - Node -> - {ConsumerId, Consumer#consumer{status = Status}}; - _ -> - {ConsumerId, Consumer} - end - end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers, - Consumer#consumer.status =/= cancelled]. + sort_waiting( + [case node(Pid) of + Node -> + {ConsumerKey, Consumer#consumer{status = Status}}; + _ -> + {ConsumerKey, Consumer} + end || {ConsumerKey, ?CONSUMER_PID(Pid) = Consumer} + <- WaitingConsumers, Consumer#consumer.status =/= cancelled]). -spec state_enter(ra_server:ra_state() | eol, state()) -> ra_machine:effects(). -state_enter(RaState, #?MODULE{cfg = #cfg{dead_letter_handler = DLH, - resource = QRes}, - dlx = DlxState} = State) -> +state_enter(RaState, #?STATE{cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}, + dlx = DlxState} = State) -> Effects = rabbit_fifo_dlx:state_enter(RaState, QRes, DLH, DlxState), state_enter0(RaState, State, Effects). 
-state_enter0(leader, #?MODULE{consumers = Cons, - enqueuers = Enqs, - waiting_consumers = WaitingConsumers, - cfg = #cfg{name = Name, - resource = Resource, - become_leader_handler = BLH} - } = State, +state_enter0(leader, #?STATE{consumers = Cons, + enqueuers = Enqs, + waiting_consumers = WaitingConsumers, + cfg = #cfg{name = Name, + resource = Resource, + become_leader_handler = BLH} + } = State, Effects0) -> TimerEffs = timer_effect(erlang:system_time(millisecond), State, Effects0), % return effects to monitor all current consumers and enqueuers Pids = lists:usort(maps:keys(Enqs) - ++ [P || {_, P} <- maps:keys(Cons)] - ++ [P || {{_, P}, _} <- WaitingConsumers]), + ++ [P || ?CONSUMER_PID(P) <- maps:values(Cons)] + ++ [P || {_, ?CONSUMER_PID(P)} <- WaitingConsumers]), Mons = [{monitor, process, P} || P <- Pids], Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids], NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), - FHReservation = [{mod_call, rabbit_quorum_queue, - file_handle_leader_reservation, [Resource]}], NotifyDecs = notify_decorators_startup(Resource), - Effects = TimerEffs ++ Mons ++ Nots ++ NodeMons ++ FHReservation ++ [NotifyDecs], + Effects = TimerEffs ++ Mons ++ Nots ++ NodeMons ++ [NotifyDecs], case BLH of undefined -> Effects; {Mod, Fun, Args} -> [{mod_call, Mod, Fun, Args ++ [Name]} | Effects] end; -state_enter0(eol, #?MODULE{enqueuers = Enqs, - consumers = Custs0, - waiting_consumers = WaitingConsumers0}, +state_enter0(eol, #?STATE{enqueuers = Enqs, + consumers = Cons0, + waiting_consumers = WaitingConsumers0}, Effects) -> - Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0), - WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end, - #{}, WaitingConsumers0), + Custs = maps:fold(fun(_K, ?CONSUMER_PID(P) = V, S) -> + S#{P => V} + end, #{}, Cons0), + WaitingConsumers1 = lists:foldl(fun({_, ?CONSUMER_PID(P) = V}, Acc) -> + Acc#{P => V} + end, #{}, WaitingConsumers0), AllConsumers = maps:merge(Custs, WaitingConsumers1), [{send_msg, P, eol, ra_event} || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ - [{aux, eol}, - {mod_call, rabbit_quorum_queue, file_handle_release_reservation, []} | Effects]; -state_enter0(State, #?MODULE{cfg = #cfg{resource = _Resource}}, Effects) - when State =/= leader -> - FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}, - [FHReservation | Effects]; + [{aux, eol} + | Effects]; state_enter0(_, _, Effects) -> %% catch all as not handling all states Effects. -spec tick(non_neg_integer(), state()) -> ra_machine:effects(). -tick(Ts, #?MODULE{cfg = #cfg{name = _Name, - resource = QName}} = State) -> +tick(Ts, #?STATE{cfg = #cfg{resource = QName}} = State) -> case is_expired(Ts, State) of true -> [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]; @@ -903,18 +819,18 @@ tick(Ts, #?MODULE{cfg = #cfg{name = _Name, end. -spec overview(state()) -> map(). 
-overview(#?MODULE{consumers = Cons, - enqueuers = Enqs, - release_cursors = Cursors, - enqueue_count = EnqCount, - msg_bytes_enqueue = EnqueueBytes, - msg_bytes_checkout = CheckoutBytes, - cfg = Cfg, - dlx = DlxState, - waiting_consumers = WaitingConsumers} = State) -> +overview(#?STATE{consumers = Cons, + enqueuers = Enqs, + enqueue_count = EnqCount, + msg_bytes_enqueue = EnqueueBytes, + msg_bytes_checkout = CheckoutBytes, + cfg = Cfg, + dlx = DlxState, + messages = Messages, + returns = Returns, + waiting_consumers = WaitingConsumers} = State) -> Conf = #{name => Cfg#cfg.name, resource => Cfg#cfg.resource, - release_cursor_interval => Cfg#cfg.release_cursor_interval, dead_lettering_enabled => undefined =/= Cfg#cfg.dead_letter_handler, max_length => Cfg#cfg.max_length, max_bytes => Cfg#cfg.max_bytes, @@ -924,38 +840,47 @@ overview(#?MODULE{consumers = Cons, delivery_limit => Cfg#cfg.delivery_limit }, SacOverview = case active_consumer(Cons) of - {SacConsumerId, _} -> + {SacConsumerKey, SacCon} -> + SacConsumerId = consumer_id(SacCon), NumWaiting = length(WaitingConsumers), #{single_active_consumer_id => SacConsumerId, + single_active_consumer_key => SacConsumerKey, single_active_num_waiting_consumers => NumWaiting}; _ -> #{} end, - Overview = #{type => ?MODULE, + MsgsRet = lqueue:len(Returns), + #{num_hi := MsgsHi, + num_no := MsgsNo} = rabbit_fifo_q:overview(Messages), + + Overview = #{type => ?STATE, config => Conf, num_consumers => map_size(Cons), num_active_consumers => query_consumer_count(State), num_checked_out => num_checked_out(State), num_enqueuers => maps:size(Enqs), num_ready_messages => messages_ready(State), - num_in_memory_ready_messages => 0, %% backwards compat + num_ready_messages_high => MsgsHi, + num_ready_messages_normal => MsgsNo, + num_ready_messages_return => MsgsRet, num_messages => messages_total(State), - num_release_cursors => lqueue:len(Cursors), - release_cursors => [I || {_, I, _} <- lqueue:to_list(Cursors)], - release_cursor_enqueue_counter => EnqCount, + num_release_cursors => 0, %% backwards compat enqueue_message_bytes => EnqueueBytes, checkout_message_bytes => CheckoutBytes, + release_cursors => [], %% backwards compat in_memory_message_bytes => 0, %% backwards compat + num_in_memory_ready_messages => 0, %% backwards compat + release_cursor_enqueue_counter => EnqCount, smallest_raft_index => smallest_raft_index(State) }, DlxOverview = rabbit_fifo_dlx:overview(DlxState), maps:merge(maps:merge(Overview, DlxOverview), SacOverview). --spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) -> +-spec get_checked_out(consumer_key(), msg_id(), msg_id(), state()) -> [delivery_msg()]. -get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> - case Consumers of - #{Cid := #consumer{checked_out = Checked}} -> +get_checked_out(CKey, From, To, #?STATE{consumers = Consumers}) -> + case find_consumer(CKey, Consumers) of + {_CKey, #consumer{checked_out = Checked}} -> [begin ?MSG(I, H) = maps:get(K, Checked), {K, {I, H}} @@ -965,15 +890,22 @@ get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> end. -spec version() -> pos_integer(). -version() -> 3. +version() -> 4. which_module(0) -> rabbit_fifo_v0; which_module(1) -> rabbit_fifo_v1; -which_module(2) -> ?MODULE; -which_module(3) -> ?MODULE. - --define(AUX, aux_v2). - +which_module(2) -> rabbit_fifo_v3; +which_module(3) -> rabbit_fifo_v3; +which_module(4) -> ?MODULE. + +-define(AUX, aux_v3). 
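A short, assumption-laden sketch of the new ready-message breakdown exposed by overview/1 above; it presumes that the num_hi/num_no counters from rabbit_fifo_q:overview/1 partition the main message queue (only the 'no' priority tag is visible in this hunk), so the helper name and the stated relation are inferences rather than guarantees.

%% Sketch only: read the new overview/1 keys added above.
ready_breakdown(State) ->
    #{num_ready_messages := Ready,
      num_ready_messages_high := Hi,
      num_ready_messages_normal := No,
      num_ready_messages_return := Returned} = rabbit_fifo:overview(State),
    %% Ready is expected to equal Hi + No + Returned, since
    %% messages_ready/1 sums the main queue and the returns queue.
    {Ready, Hi, No, Returned}.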
+ +-record(checkpoint, {index :: ra:index(), + timestamp :: milliseconds(), + smallest_index :: undefined | ra:index(), + messages_total :: non_neg_integer(), + indexes = ?CHECK_MIN_INDEXES :: non_neg_integer(), + unused_1 = ?NIL}). -record(aux_gc, {last_raft_idx = 0 :: ra:index()}). -record(aux, {name :: atom(), capacity :: term(), @@ -982,58 +914,100 @@ which_module(3) -> ?MODULE. last_decorators_state :: term(), capacity :: term(), gc = #aux_gc{} :: #aux_gc{}, - tick_pid, - unused2}). + tick_pid :: undefined | pid(), + cache = #{} :: map(), + last_checkpoint :: #checkpoint{}}). init_aux(Name) when is_atom(Name) -> %% TODO: catch specific exception throw if table already exists ok = ra_machine_ets:create_table(rabbit_fifo_usage, [named_table, set, public, {write_concurrency, true}]), - Now = erlang:monotonic_time(micro_seconds), + Now = erlang:monotonic_time(microsecond), #?AUX{name = Name, - capacity = {inactive, Now, 1, 1.0}}. + capacity = {inactive, Now, 1, 1.0}, + last_checkpoint = #checkpoint{index = 0, + timestamp = erlang:system_time(millisecond), + messages_total = 0, + unused_1 = ?NIL}}. handle_aux(RaftState, Tag, Cmd, #aux{name = Name, capacity = Cap, - gc = Gc}, Log, MacState) -> + gc = Gc}, RaAux) -> %% convert aux state to new version - Aux = #?AUX{name = Name, - capacity = Cap, - gc = Gc}, - handle_aux(RaftState, Tag, Cmd, Aux, Log, MacState); -handle_aux(leader, _, garbage_collection, Aux, Log, MacState) -> - {no_reply, force_eval_gc(Log, MacState, Aux), Log}; -handle_aux(follower, _, garbage_collection, Aux, Log, MacState) -> - {no_reply, force_eval_gc(Log, MacState, Aux), Log}; -handle_aux(_RaftState, cast, {#return{msg_ids = MsgIds, - consumer_id = ConsumerId}, Corr, Pid}, - Aux0, Log0, #?MODULE{cfg = #cfg{delivery_limit = undefined}, - consumers = Consumers}) -> - case Consumers of - #{ConsumerId := #consumer{checked_out = Checked}} -> - {Log, ToReturn} = - maps:fold( - fun (MsgId, ?MSG(Idx, Header), {L0, Acc}) -> - %% it is possible this is not found if the consumer - %% crashed and the message got removed - case ra_log:fetch(Idx, L0) of - {{_, _, {_, _, Cmd, _}}, L} -> - Msg = get_msg(Cmd), - {L, [{MsgId, Idx, Header, Msg} | Acc]}; - {undefined, L} -> - {L, Acc} - end - end, {Log0, []}, maps:with(MsgIds, Checked)), + AuxV2 = init_aux(Name), + Aux = AuxV2#?AUX{capacity = Cap, + gc = Gc}, + handle_aux(RaftState, Tag, Cmd, Aux, RaAux); +handle_aux(RaftState, Tag, Cmd, AuxV2, RaAux) + when element(1, AuxV2) == aux_v2 -> + Name = element(2, AuxV2), + AuxV3 = init_aux(Name), + handle_aux(RaftState, Tag, Cmd, AuxV3, RaAux); +handle_aux(leader, cast, eval, + #?AUX{last_decorators_state = LastDec, + last_checkpoint = Check0} = Aux0, + RaAux) -> + #?STATE{cfg = #cfg{resource = QName}} = MacState = + ra_aux:machine_state(RaAux), - Appends = make_requeue(ConsumerId, {notify, Corr, Pid}, - lists:sort(ToReturn), []), - {no_reply, Aux0, Log, Appends}; + Ts = erlang:system_time(millisecond), + {Check, Effects0} = do_checkpoints(Ts, Check0, RaAux, false), + + %% this is called after each batch of commands have been applied + %% set timer for message expire + %% should really be the last applied index ts but this will have to do + Effects1 = timer_effect(Ts, MacState, Effects0), + case query_notify_decorators_info(MacState) of + LastDec -> + {no_reply, Aux0#?AUX{last_checkpoint = Check}, RaAux, Effects1}; + {MaxActivePriority, IsEmpty} = NewLast -> + Effects = [notify_decorators_effect(QName, MaxActivePriority, IsEmpty) + | Effects1], + {no_reply, Aux0#?AUX{last_checkpoint = Check, + 
last_decorators_state = NewLast}, RaAux, Effects} + end; +handle_aux(_RaftState, cast, eval, + #?AUX{last_checkpoint = Check0} = Aux0, + RaAux) -> + Ts = erlang:system_time(millisecond), + {Check, Effects} = do_checkpoints(Ts, Check0, RaAux, false), + {no_reply, Aux0#?AUX{last_checkpoint = Check}, RaAux, Effects}; +handle_aux(_RaftState, cast, {#return{msg_ids = MsgIds, + consumer_key = Key} = Ret, Corr, Pid}, + Aux0, RaAux0) -> + case ra_aux:machine_state(RaAux0) of + #?STATE{cfg = #cfg{delivery_limit = undefined}, + consumers = Consumers} -> + case find_consumer(Key, Consumers) of + {ConsumerKey, #consumer{checked_out = Checked}} -> + {RaAux, ToReturn} = + maps:fold( + fun (MsgId, ?MSG(Idx, Header), {RA0, Acc}) -> + %% it is possible this is not found if the consumer + %% crashed and the message got removed + case ra_aux:log_fetch(Idx, RA0) of + {{_Term, _Meta, Cmd}, RA} -> + Msg = get_msg(Cmd), + {RA, [{MsgId, Idx, Header, Msg} | Acc]}; + {undefined, RA} -> + {RA, Acc} + end + end, {RaAux0, []}, maps:with(MsgIds, Checked)), + + Appends = make_requeue(ConsumerKey, {notify, Corr, Pid}, + lists:sort(ToReturn), []), + {no_reply, Aux0, RaAux, Appends}; + _ -> + {no_reply, Aux0, RaAux0} + end; _ -> - {no_reply, Aux0, Log0} + %% for returns with a delivery limit set we can just return as before + {no_reply, Aux0, RaAux0, [{append, Ret, {notify, Corr, Pid}}]} end; -handle_aux(leader, _, {handle_tick, [QName, Overview, Nodes]}, - #?AUX{tick_pid = Pid} = Aux, Log, _) -> +handle_aux(leader, _, {handle_tick, [QName, Overview0, Nodes]}, + #?AUX{tick_pid = Pid} = Aux, RaAux) -> + Overview = Overview0#{members_info => ra_aux:members_info(RaAux)}, NewPid = case process_is_alive(Pid) of false -> @@ -1044,99 +1018,105 @@ handle_aux(leader, _, {handle_tick, [QName, Overview, Nodes]}, %% Active TICK pid, do nothing Pid end, - {no_reply, Aux#?AUX{tick_pid = NewPid}, Log}; -handle_aux(_, _, {get_checked_out, ConsumerId, MsgIds}, - Aux0, Log0, #?MODULE{cfg = #cfg{}, - consumers = Consumers}) -> + + %% TODO: check consumer timeouts + {no_reply, Aux#?AUX{tick_pid = NewPid}, RaAux, []}; +handle_aux(_, _, {get_checked_out, ConsumerKey, MsgIds}, Aux0, RaAux0) -> + #?STATE{cfg = #cfg{}, + consumers = Consumers} = ra_aux:machine_state(RaAux0), case Consumers of - #{ConsumerId := #consumer{checked_out = Checked}} -> - {Log, IdMsgs} = + #{ConsumerKey := #consumer{checked_out = Checked}} -> + {RaState, IdMsgs} = maps:fold( - fun (MsgId, ?MSG(Idx, Header), {L0, Acc}) -> + fun (MsgId, ?MSG(Idx, Header), {S0, Acc}) -> %% it is possible this is not found if the consumer %% crashed and the message got removed - case ra_log:fetch(Idx, L0) of - {{_, _, {_, _, Cmd, _}}, L} -> + case ra_aux:log_fetch(Idx, S0) of + {{_Term, _Meta, Cmd}, S} -> Msg = get_msg(Cmd), - {L, [{MsgId, {Header, Msg}} | Acc]}; - {undefined, L} -> - {L, Acc} + {S, [{MsgId, {Header, Msg}} | Acc]}; + {undefined, S} -> + {S, Acc} end - end, {Log0, []}, maps:with(MsgIds, Checked)), - {reply, {ok, IdMsgs}, Aux0, Log}; + end, {RaAux0, []}, maps:with(MsgIds, Checked)), + {reply, {ok, IdMsgs}, Aux0, RaState}; _ -> - {reply, {error, consumer_not_found}, Aux0, Log0} - end; -handle_aux(leader, cast, {#return{} = Ret, Corr, Pid}, - Aux0, Log, #?MODULE{}) -> - %% for returns with a delivery limit set we can just return as before - {no_reply, Aux0, Log, [{append, Ret, {notify, Corr, Pid}}]}; -handle_aux(leader, cast, eval, #?AUX{last_decorators_state = LastDec} = Aux0, - Log, #?MODULE{cfg = #cfg{resource = QName}} = MacState) -> - %% this is called after each batch of 
commands have been applied - %% set timer for message expire - %% should really be the last applied index ts but this will have to do - Ts = erlang:system_time(millisecond), - Effects0 = timer_effect(Ts, MacState, []), - case query_notify_decorators_info(MacState) of - LastDec -> - {no_reply, Aux0, Log, Effects0}; - {MaxActivePriority, IsEmpty} = NewLast -> - Effects = [notify_decorators_effect(QName, MaxActivePriority, IsEmpty) - | Effects0], - {no_reply, Aux0#?AUX{last_decorators_state = NewLast}, Log, Effects} + {reply, {error, consumer_not_found}, Aux0, RaAux0} end; -handle_aux(_RaftState, cast, eval, Aux0, Log, _MacState) -> - {no_reply, Aux0, Log}; -handle_aux(_RaState, cast, Cmd, #?AUX{capacity = Use0} = Aux0, - Log, _MacState) +handle_aux(_RaState, cast, Cmd, #?AUX{capacity = Use0} = Aux0, RaAux) when Cmd == active orelse Cmd == inactive -> - {no_reply, Aux0#?AUX{capacity = update_use(Use0, Cmd)}, Log}; + {no_reply, Aux0#?AUX{capacity = update_use(Use0, Cmd)}, RaAux}; handle_aux(_RaState, cast, tick, #?AUX{name = Name, capacity = Use0} = State0, - Log, MacState) -> + RaAux) -> true = ets:insert(rabbit_fifo_usage, {Name, capacity(Use0)}), - Aux = eval_gc(Log, MacState, State0), - {no_reply, Aux, Log}; -handle_aux(_RaState, cast, eol, #?AUX{name = Name} = Aux, Log, _) -> + Aux = eval_gc(RaAux, ra_aux:machine_state(RaAux), State0), + Effs = case smallest_raft_index(ra_aux:machine_state(RaAux)) of + undefined -> + [{release_cursor, ra_aux:last_applied(RaAux)}]; + Smallest -> + [{release_cursor, Smallest}] + end, + {no_reply, Aux, RaAux, Effs}; +handle_aux(_RaState, cast, eol, #?AUX{name = Name} = Aux, RaAux) -> ets:delete(rabbit_fifo_usage, Name), - {no_reply, Aux, Log}; -handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, Aux, - Log, #?MODULE{} = State) -> - Ts = case smallest_raft_index(State) of - %% if there are no entries, we return current timestamp - %% so that any previously obtained entries are considered older than this - undefined -> - erlang:system_time(millisecond); - Idx when is_integer(Idx) -> - %% TODO: make more defensive to avoid potential crash - {{_, _, {_, Meta, _, _}}, _Log1} = ra_log:fetch(Idx, Log), - #{ts := Timestamp} = Meta, - Timestamp - end, - {reply, {ok, Ts}, Aux, Log}; + {no_reply, Aux, RaAux}; +handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, + #?AUX{cache = Cache} = Aux0, RaAux0) -> + {CachedIdx, CachedTs} = maps:get(oldest_entry, Cache, + {undefined, undefined}), + case smallest_raft_index(ra_aux:machine_state(RaAux0)) of + %% if there are no entries, we return current timestamp + %% so that any previously obtained entries are considered + %% older than this + undefined -> + Aux1 = Aux0#?AUX{cache = maps:remove(oldest_entry, Cache)}, + {reply, {ok, erlang:system_time(millisecond)}, Aux1, RaAux0}; + CachedIdx -> + %% cache hit + {reply, {ok, CachedTs}, Aux0, RaAux0}; + Idx when is_integer(Idx) -> + case ra_aux:log_fetch(Idx, RaAux0) of + {{_Term, #{ts := Timestamp}, _Cmd}, RaAux} -> + Aux1 = Aux0#?AUX{cache = Cache#{oldest_entry => + {Idx, Timestamp}}}, + {reply, {ok, Timestamp}, Aux1, RaAux}; + {undefined, RaAux} -> + %% fetch failed + {reply, {error, failed_to_get_timestamp}, Aux0, RaAux} + end + end; handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, - Log0, MacState) -> - case rabbit_fifo:query_peek(Pos, MacState) of + RaAux0) -> + MacState = ra_aux:machine_state(RaAux0), + case query_peek(Pos, MacState) of {ok, ?MSG(Idx, Header)} -> %% need to re-hydrate from the log - {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), 
+ {{_, _, Cmd}, RaAux} = ra_aux:log_fetch(Idx, RaAux0), Msg = get_msg(Cmd), - {reply, {ok, {Header, Msg}}, Aux0, Log}; + {reply, {ok, {Header, Msg}}, Aux0, RaAux}; Err -> - {reply, Err, Aux0, Log0} + {reply, Err, Aux0, RaAux0} end; -handle_aux(RaState, _, {dlx, _} = Cmd, Aux0, Log, - #?MODULE{dlx = DlxState, - cfg = #cfg{dead_letter_handler = DLH, - resource = QRes}}) -> +handle_aux(_, _, garbage_collection, Aux, RaAux) -> + {no_reply, force_eval_gc(RaAux, Aux), RaAux}; +handle_aux(_RaState, _, force_checkpoint, + #?AUX{last_checkpoint = Check0} = Aux, RaAux) -> + Ts = erlang:system_time(millisecond), + {Check, Effects} = do_checkpoints(Ts, Check0, RaAux, true), + {no_reply, Aux#?AUX{last_checkpoint= Check}, RaAux, Effects}; +handle_aux(RaState, _, {dlx, _} = Cmd, Aux0, RaAux) -> + #?STATE{dlx = DlxState, + cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}} = ra_aux:machine_state(RaAux), Aux = rabbit_fifo_dlx:handle_aux(RaState, Cmd, Aux0, QRes, DLH, DlxState), - {no_reply, Aux, Log}. + {no_reply, Aux, RaAux}. -eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, +eval_gc(RaAux, MacState, #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> - {Idx, _} = ra_log:last_index_term(Log), + {Idx, _} = ra_aux:log_last_index_term(RaAux), + #?STATE{cfg = #cfg{resource = QR}} = ra_aux:machine_state(RaAux), {memory, Mem} = erlang:process_info(self(), memory), case messages_total(MacState) of 0 when Idx > LastGcIdx andalso @@ -1151,9 +1131,10 @@ eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, AuxState end. -force_eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}}, +force_eval_gc(RaAux, #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> - {Idx, _} = ra_log:last_index_term(Log), + {Idx, _} = ra_aux:log_last_index_term(RaAux), + #?STATE{cfg = #cfg{resource = QR}} = ra_aux:machine_state(RaAux), {memory, Mem} = erlang:process_info(self(), memory), case Idx > LastGcIdx of true -> @@ -1176,7 +1157,7 @@ process_is_alive(_) -> query_messages_ready(State) -> messages_ready(State). -query_messages_checked_out(#?MODULE{consumers = Consumers}) -> +query_messages_checked_out(#?STATE{consumers = Consumers}) -> maps:fold(fun (_, #consumer{checked_out = C}, S) -> maps:size(C) + S end, 0, Consumers). @@ -1184,32 +1165,34 @@ query_messages_checked_out(#?MODULE{consumers = Consumers}) -> query_messages_total(State) -> messages_total(State). -query_processes(#?MODULE{enqueuers = Enqs, consumers = Cons0}) -> - Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0), +query_processes(#?STATE{enqueuers = Enqs, consumers = Cons0}) -> + Cons = maps:fold(fun(_, ?CONSUMER_PID(P) = V, S) -> + S#{P => V} + end, #{}, Cons0), maps:keys(maps:merge(Enqs, Cons)). -query_ra_indexes(#?MODULE{ra_indexes = RaIndexes}) -> +query_ra_indexes(#?STATE{ra_indexes = RaIndexes}) -> RaIndexes. -query_waiting_consumers(#?MODULE{waiting_consumers = WaitingConsumers}) -> +query_waiting_consumers(#?STATE{waiting_consumers = WaitingConsumers}) -> WaitingConsumers. -query_consumer_count(#?MODULE{consumers = Consumers, - waiting_consumers = WaitingConsumers}) -> - Up = maps:filter(fun(_ConsumerId, #consumer{status = Status}) -> +query_consumer_count(#?STATE{consumers = Consumers, + waiting_consumers = WaitingConsumers}) -> + Up = maps:filter(fun(_ConsumerKey, #consumer{status = Status}) -> Status =/= suspected_down end, Consumers), maps:size(Up) + length(WaitingConsumers). 
-query_consumers(#?MODULE{consumers = Consumers, - waiting_consumers = WaitingConsumers, - cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) -> +query_consumers(#?STATE{consumers = Consumers, + waiting_consumers = WaitingConsumers, + cfg = #cfg{consumer_strategy = ConsumerStrategy}} + = State) -> ActiveActivityStatusFun = - case ConsumerStrategy of + case ConsumerStrategy of competing -> - fun(_ConsumerId, - #consumer{status = Status}) -> + fun(_ConsumerKey, #consumer{status = Status}) -> case Status of suspected_down -> {false, Status}; @@ -1219,7 +1202,7 @@ query_consumers(#?MODULE{consumers = Consumers, end; single_active -> SingleActiveConsumer = query_single_active_consumer(State), - fun({Tag, Pid} = _Consumer, _) -> + fun(_, ?CONSUMER_TAG_PID(Tag, Pid)) -> case SingleActiveConsumer of {value, {Tag, Pid}} -> {true, single_active}; @@ -1231,12 +1214,14 @@ query_consumers(#?MODULE{consumers = Consumers, FromConsumers = maps:fold(fun (_, #consumer{status = cancelled}, Acc) -> Acc; - ({Tag, Pid}, - #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer, + (Key, + #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + meta = Meta}} = Consumer, Acc) -> {Active, ActivityStatus} = - ActiveActivityStatusFun({Tag, Pid}, Consumer), - maps:put({Tag, Pid}, + ActiveActivityStatusFun(Key, Consumer), + maps:put(Key, {Pid, Tag, maps:get(ack, Meta, undefined), maps:get(prefetch, Meta, undefined), @@ -1246,46 +1231,49 @@ query_consumers(#?MODULE{consumers = Consumers, maps:get(username, Meta, undefined)}, Acc) end, #{}, Consumers), - FromWaitingConsumers = - lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) -> - Acc; - ({{Tag, Pid}, - #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer}, - Acc) -> - {Active, ActivityStatus} = - ActiveActivityStatusFun({Tag, Pid}, Consumer), - maps:put({Tag, Pid}, - {Pid, Tag, - maps:get(ack, Meta, undefined), - maps:get(prefetch, Meta, undefined), - Active, - ActivityStatus, - maps:get(args, Meta, []), - maps:get(username, Meta, undefined)}, - Acc) - end, #{}, WaitingConsumers), - maps:merge(FromConsumers, FromWaitingConsumers). - - -query_single_active_consumer( - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - consumers = Consumers}) -> + FromWaitingConsumers = + lists:foldl( + fun ({_, #consumer{status = cancelled}}, + Acc) -> + Acc; + ({Key, + #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + meta = Meta}} = Consumer}, + Acc) -> + {Active, ActivityStatus} = + ActiveActivityStatusFun(Key, Consumer), + maps:put(Key, + {Pid, Tag, + maps:get(ack, Meta, undefined), + maps:get(prefetch, Meta, undefined), + Active, + ActivityStatus, + maps:get(args, Meta, []), + maps:get(username, Meta, undefined)}, + Acc) + end, #{}, WaitingConsumers), + maps:merge(FromConsumers, FromWaitingConsumers). + + +query_single_active_consumer(#?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Consumers}) -> case active_consumer(Consumers) of undefined -> {error, no_value}; - {ActiveCid, _} -> - {value, ActiveCid} + {_CKey, ?CONSUMER_TAG_PID(Tag, Pid)} -> + {value, {Tag, Pid}} end; query_single_active_consumer(_) -> disabled. -query_stat(#?MODULE{consumers = Consumers} = State) -> +query_stat(#?STATE{consumers = Consumers} = State) -> {messages_ready(State), maps:size(Consumers)}. -query_in_memory_usage(#?MODULE{ }) -> +query_in_memory_usage(#?STATE{ }) -> {0, 0}. -query_stat_dlx(#?MODULE{dlx = DlxState}) -> +query_stat_dlx(#?STATE{dlx = DlxState}) -> rabbit_fifo_dlx:stat(DlxState). 
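
The query_* functions above run against the machine state rather than the aux state, so callers evaluate them through Ra's read path. A minimal sketch, assuming ra:local_query/2 and that query_messages_ready/1 and query_consumer_count/1 are exported for this purpose; the wrapper name is invented.

    ready_and_consumer_count_example(ServerId) ->
        QueryFun = fun (MacState) ->
                           {rabbit_fifo:query_messages_ready(MacState),
                            rabbit_fifo:query_consumer_count(MacState)}
                   end,
        case ra:local_query(ServerId, QueryFun) of
            {ok, {_IdxTerm, {Ready, ConsumerCount}}, _Leader} ->
                {ok, Ready, ConsumerCount};
            Err ->
                Err
        end.
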
query_peek(Pos, State0) when Pos > 0 -> @@ -1299,7 +1287,7 @@ query_peek(Pos, State0) when Pos > 0 -> query_peek(Pos-1, State) end. -query_notify_decorators_info(#?MODULE{consumers = Consumers} = State) -> +query_notify_decorators_info(#?STATE{consumers = Consumers} = State) -> MaxActivePriority = maps:fold( fun(_, #consumer{credit = C, status = up, @@ -1323,14 +1311,19 @@ usage(Name) when is_atom(Name) -> [{_, Use}] -> Use end. +-spec is_v4() -> boolean(). +is_v4() -> + %% Quorum queue v4 is introduced in RabbitMQ 4.0.0 + rabbit_feature_flags:is_enabled('rabbitmq_4.0.0'). + %%% Internal -messages_ready(#?MODULE{messages = M, - returns = R}) -> - lqueue:len(M) + lqueue:len(R). +messages_ready(#?STATE{messages = M, + returns = R}) -> + rabbit_fifo_q:len(M) + lqueue:len(R). -messages_total(#?MODULE{messages_total = Total, - dlx = DlxState}) -> +messages_total(#?STATE{messages_total = Total, + dlx = DlxState}) -> {DlxTotal, _} = rabbit_fifo_dlx:stat(DlxState), Total + DlxTotal. @@ -1339,18 +1332,18 @@ update_use({inactive, _, _, _} = CUInfo, inactive) -> update_use({active, _, _} = CUInfo, active) -> CUInfo; update_use({active, Since, Avg}, inactive) -> - Now = erlang:monotonic_time(micro_seconds), + Now = erlang:monotonic_time(microsecond), {inactive, Now, Now - Since, Avg}; update_use({inactive, Since, Active, Avg}, active) -> - Now = erlang:monotonic_time(micro_seconds), + Now = erlang:monotonic_time(microsecond), {active, Now, use_avg(Active, Now - Since, Avg)}. capacity({active, Since, Avg}) -> - use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg); + use_avg(erlang:monotonic_time(microsecond) - Since, 0, Avg); capacity({inactive, _, 1, 1.0}) -> 1.0; capacity({inactive, Since, Active, Avg}) -> - use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg). + use_avg(Active, erlang:monotonic_time(microsecond) - Since, Avg). use_avg(0, 0, Avg) -> Avg; @@ -1364,119 +1357,161 @@ moving_average(Time, HalfLife, Next, Current) -> Weight = math:exp(Time * math:log(0.5) / HalfLife), Next * (1 - Weight) + Current * Weight. -num_checked_out(#?MODULE{consumers = Cons}) -> +num_checked_out(#?STATE{consumers = Cons}) -> maps:fold(fun (_, #consumer{checked_out = C}, Acc) -> maps:size(C) + Acc end, 0, Cons). 
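
The capacity figures above are an exponentially weighted moving average in which the previous value loses half of its weight every HalfLife microseconds. A self-contained numeric sketch of the same formula (all values invented):

    %% With Elapsed =:= HalfLife the decay weight is exp(ln 0.5), i.e. ~0.5,
    %% so the result lands halfway between the new sample and the old average.
    moving_average_example() ->
        HalfLife = 1_000_000,   %% microseconds
        Elapsed  = 1_000_000,
        Sample   = 1.0,         %% fully busy since the last update
        Previous = 0.0,         %% previous average
        Weight   = math:exp(Elapsed * math:log(0.5) / HalfLife),
        Sample * (1 - Weight) + Previous * Weight.   %% ~0.5
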
-cancel_consumer(Meta, ConsumerId, - #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State, +cancel_consumer(Meta, ConsumerKey, + #?STATE{cfg = #cfg{consumer_strategy = competing}} = State, Effects, Reason) -> - cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); -cancel_consumer(Meta, ConsumerId, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = []} = State, + cancel_consumer0(Meta, ConsumerKey, State, Effects, Reason); +cancel_consumer(Meta, ConsumerKey, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State, Effects, Reason) -> %% single active consumer on, no consumers are waiting - cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); -cancel_consumer(Meta, ConsumerId, - #?MODULE{consumers = Cons0, - cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = Waiting0} = State0, + cancel_consumer0(Meta, ConsumerKey, State, Effects, Reason); +cancel_consumer(Meta, ConsumerKey, + #?STATE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0} = State0, Effects0, Reason) -> %% single active consumer on, consumers are waiting case Cons0 of - #{ConsumerId := #consumer{status = _}} -> + #{ConsumerKey := #consumer{status = _}} -> % The active consumer is to be removed - {State1, Effects1} = cancel_consumer0(Meta, ConsumerId, State0, - Effects0, Reason), - activate_next_consumer(State1, Effects1); + cancel_consumer0(Meta, ConsumerKey, State0, + Effects0, Reason); _ -> % The cancelled consumer is not active or cancelled % Just remove it from idle_consumers - Waiting = lists:keydelete(ConsumerId, 1, Waiting0), - Effects = cancel_consumer_effects(ConsumerId, State0, Effects0), - % A waiting consumer isn't supposed to have any checked out messages, - % so nothing special to do here - {State0#?MODULE{waiting_consumers = Waiting}, Effects} + case lists:keyfind(ConsumerKey, 1, Waiting0) of + {_, ?CONSUMER_TAG_PID(T, P)} -> + Waiting = lists:keydelete(ConsumerKey, 1, Waiting0), + Effects = cancel_consumer_effects({T, P}, State0, Effects0), + % A waiting consumer isn't supposed to have any checked out messages, + % so nothing special to do here + {State0#?STATE{waiting_consumers = Waiting}, Effects}; + _ -> + {State0, Effects0} + end end. -consumer_update_active_effects(#?MODULE{cfg = #cfg{resource = QName}}, - ConsumerId, - #consumer{cfg = #consumer_cfg{meta = Meta}}, +consumer_update_active_effects(#?STATE{cfg = #cfg{resource = QName}}, + #consumer{cfg = #consumer_cfg{pid = CPid, + tag = CTag, + meta = Meta}}, Active, ActivityStatus, Effects) -> Ack = maps:get(ack, Meta, undefined), Prefetch = maps:get(prefetch, Meta, undefined), Args = maps:get(args, Meta, []), [{mod_call, rabbit_quorum_queue, update_consumer_handler, - [QName, ConsumerId, false, Ack, Prefetch, Active, ActivityStatus, Args]} + [QName, {CTag, CPid}, false, Ack, Prefetch, Active, ActivityStatus, Args]} | Effects]. -cancel_consumer0(Meta, ConsumerId, - #?MODULE{consumers = C0} = S0, Effects0, Reason) -> +cancel_consumer0(Meta, ConsumerKey, + #?STATE{consumers = C0} = S0, Effects0, Reason) -> case C0 of - #{ConsumerId := Consumer} -> - {S, Effects2} = maybe_return_all(Meta, ConsumerId, Consumer, + #{ConsumerKey := Consumer} -> + {S, Effects2} = maybe_return_all(Meta, ConsumerKey, Consumer, S0, Effects0, Reason), %% The effects are emitted before the consumer is actually removed %% if the consumer has unacked messages. 
This is a bit weird but %% in line with what classic queues do (from an external point of %% view) - Effects = cancel_consumer_effects(ConsumerId, S, Effects2), + Effects = cancel_consumer_effects(consumer_id(Consumer), S, Effects2), {S, Effects}; _ -> %% already removed: do nothing {S0, Effects0} end. -activate_next_consumer(#?MODULE{cfg = #cfg{consumer_strategy = competing}} = State0, - Effects0) -> - {State0, Effects0}; -activate_next_consumer(#?MODULE{consumers = Cons, - waiting_consumers = Waiting0} = State0, +activate_next_consumer({State, Effects}) -> + activate_next_consumer(State, Effects). + +activate_next_consumer(#?STATE{cfg = #cfg{consumer_strategy = competing}} = State, + Effects) -> + {State, Effects}; +activate_next_consumer(#?STATE{consumers = Cons0, + waiting_consumers = Waiting0} = State0, Effects0) -> - case has_active_consumer(Cons) of - false -> - case lists:filter(fun ({_, #consumer{status = Status}}) -> - Status == up - end, Waiting0) of - [{NextConsumerId, #consumer{cfg = NextCCfg} = NextConsumer} | _] -> - Remaining = lists:keydelete(NextConsumerId, 1, Waiting0), - Consumer = case maps:get(NextConsumerId, Cons, undefined) of - undefined -> - NextConsumer; - Existing -> - %% there was an exisiting non-active consumer - %% just update the existing cancelled consumer - %% with the new config - Existing#consumer{cfg = NextCCfg} - end, - #?MODULE{service_queue = ServiceQueue} = State0, - ServiceQueue1 = maybe_queue_consumer(NextConsumerId, - Consumer, - ServiceQueue), - State = State0#?MODULE{consumers = Cons#{NextConsumerId => Consumer}, - service_queue = ServiceQueue1, - waiting_consumers = Remaining}, - Effects = consumer_update_active_effects(State, NextConsumerId, - Consumer, true, - single_active, Effects0), - {State, Effects}; - [] -> - {State0, Effects0} - end; - true -> + %% invariant, the waiting list always need to be sorted by consumers that are + %% up - then by priority + NextConsumer = + case Waiting0 of + [{_, #consumer{status = up}} = Next | _] -> + Next; + _ -> + undefined + end, + + case {active_consumer(Cons0), NextConsumer} of + {undefined, {NextCKey, #consumer{cfg = NextCCfg} = NextC}} -> + Remaining = tl(Waiting0), + %% TODO: can this happen? 
+ Consumer = case maps:get(NextCKey, Cons0, undefined) of + undefined -> + NextC; + Existing -> + %% there was an exisiting non-active consumer + %% just update the existing cancelled consumer + %% with the new config + Existing#consumer{cfg = NextCCfg} + end, + #?STATE{service_queue = ServiceQueue} = State0, + ServiceQueue1 = maybe_queue_consumer(NextCKey, + Consumer, + ServiceQueue), + State = State0#?STATE{consumers = Cons0#{NextCKey => Consumer}, + service_queue = ServiceQueue1, + waiting_consumers = Remaining}, + Effects = consumer_update_active_effects(State, Consumer, + true, single_active, + Effects0), + {State, Effects}; + {{ActiveCKey, ?CONSUMER_PRIORITY(ActivePriority) = + #consumer{checked_out = ActiveChecked} = Active}, + {NextCKey, ?CONSUMER_PRIORITY(WaitingPriority) = Consumer}} + when WaitingPriority > ActivePriority andalso + map_size(ActiveChecked) == 0 -> + Remaining = tl(Waiting0), + %% the next consumer is a higher priority and should take over + %% and this consumer does not have any pending messages + #?STATE{service_queue = ServiceQueue} = State0, + ServiceQueue1 = maybe_queue_consumer(NextCKey, + Consumer, + ServiceQueue), + Cons1 = Cons0#{NextCKey => Consumer}, + Cons = maps:remove(ActiveCKey, Cons1), + Waiting = add_waiting({ActiveCKey, Active}, Remaining), + State = State0#?STATE{consumers = Cons, + service_queue = ServiceQueue1, + waiting_consumers = Waiting}, + Effects = consumer_update_active_effects(State, Consumer, + true, single_active, + Effects0), + {State, Effects}; + {{ActiveCKey, ?CONSUMER_PRIORITY(ActivePriority) = Active}, + {_NextCKey, ?CONSUMER_PRIORITY(WaitingPriority)}} + when WaitingPriority > ActivePriority -> + %% A higher priority consumer has attached but the current one has + %% pending messages + Cons = maps:update(ActiveCKey, + Active#consumer{status = quiescing}, + Cons0), + {State0#?STATE{consumers = Cons}, Effects0}; + _ -> + %% no activation {State0, Effects0} end. -has_active_consumer(Consumers) -> - active_consumer(Consumers) /= undefined. - -active_consumer({Cid, #consumer{status = up} = Consumer, _I}) -> - {Cid, Consumer}; -active_consumer({_Cid, #consumer{status = _}, I}) -> +active_consumer({CKey, #consumer{status = Status} = Consumer, _I}) + when Status == up orelse Status == quiescing -> + {CKey, Consumer}; +active_consumer({_CKey, #consumer{status = _}, I}) -> active_consumer(maps:next(I)); active_consumer(none) -> undefined; @@ -1484,68 +1519,63 @@ active_consumer(M) when is_map(M) -> I = maps:iterator(M), active_consumer(maps:next(I)). -maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, +is_active(_ConsumerKey, #?STATE{cfg = #cfg{consumer_strategy = competing}}) -> + %% all competing consumers are potentially active + true; +is_active(ConsumerKey, #?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Consumers}) -> + ConsumerKey == active_consumer(Consumers). 
+ +maybe_return_all(#{system_time := Ts} = Meta, ConsumerKey, #consumer{cfg = CCfg} = Consumer, S0, Effects0, Reason) -> case Reason of - consumer_cancel -> - {update_or_remove_sub( - Meta, ConsumerId, + cancel -> + {update_or_remove_con( + Meta, ConsumerKey, Consumer#consumer{cfg = CCfg#consumer_cfg{lifetime = once}, credit = 0, status = cancelled}, S0), Effects0}; - down -> - {S1, Effects1} = return_all(Meta, S0, Effects0, ConsumerId, Consumer), - {S1#?MODULE{consumers = maps:remove(ConsumerId, S1#?MODULE.consumers), - last_active = Ts}, - Effects1} + _ -> + {S1, Effects} = return_all(Meta, S0, Effects0, ConsumerKey, + Consumer, Reason == down), + {S1#?STATE{consumers = maps:remove(ConsumerKey, S1#?STATE.consumers), + last_active = Ts}, + Effects} end. apply_enqueue(#{index := RaftIdx, - system_time := Ts} = Meta, From, Seq, RawMsg, State0) -> - case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, [], State0) of + system_time := Ts} = Meta, From, + Seq, RawMsg, Size, State0) -> + case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, Size, [], State0) of {ok, State1, Effects1} -> - {State, ok, Effects} = checkout(Meta, State0, State1, Effects1), - {maybe_store_release_cursor(RaftIdx, State), ok, Effects}; + checkout(Meta, State0, State1, Effects1); {out_of_sequence, State, Effects} -> {State, not_enqueued, Effects}; {duplicate, State, Effects} -> {State, ok, Effects} end. -decr_total(#?MODULE{messages_total = Tot} = State) -> - State#?MODULE{messages_total = Tot - 1}. +decr_total(#?STATE{messages_total = Tot} = State) -> + State#?STATE{messages_total = Tot - 1}. -drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects) -> +drop_head(#?STATE{ra_indexes = Indexes0} = State0, Effects) -> case take_next_msg(State0) of {?MSG(Idx, Header) = Msg, State1} -> Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State2 = State1#?MODULE{ra_indexes = Indexes}, + State2 = State1#?STATE{ra_indexes = Indexes}, State3 = decr_total(add_bytes_drop(Header, State2)), - #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState} = State = State3, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState} = State = State3, {_, DlxEffects} = rabbit_fifo_dlx:discard([Msg], maxlen, DLH, DlxState), {State, DlxEffects ++ Effects}; empty -> {State0, Effects} end. -maybe_set_msg_ttl(#basic_message{content = #content{properties = none}}, - RaCmdTs, Header, - #?MODULE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> - update_expiry_header(RaCmdTs, PerQueueMsgTTL, Header); -maybe_set_msg_ttl(#basic_message{content = #content{properties = Props}}, - RaCmdTs, Header, - #?MODULE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> - %% rabbit_quorum_queue will leave the properties decoded if and only if - %% per message message TTL is set. - %% We already check in the channel that expiration must be valid. - {ok, PerMsgMsgTTL} = rabbit_basic:parse_expiration(Props), - TTL = min(PerMsgMsgTTL, PerQueueMsgTTL), - update_expiry_header(RaCmdTs, TTL, Header); maybe_set_msg_ttl(Msg, RaCmdTs, Header, - #?MODULE{cfg = #cfg{msg_ttl = MsgTTL}}) -> + #?STATE{cfg = #cfg{msg_ttl = MsgTTL}}) -> case mc:is(Msg) of true -> TTL = min(MsgTTL, mc:ttl(Msg)), @@ -1554,6 +1584,20 @@ maybe_set_msg_ttl(Msg, RaCmdTs, Header, Header end. +maybe_set_msg_delivery_count(Msg, Header) -> + case mc:is(Msg) of + true -> + case mc:get_annotation(delivery_count, Msg) of + undefined -> + Header; + DelCnt -> + update_header(delivery_count, fun (_) -> DelCnt end, + DelCnt, Header) + end; + false -> + Header + end. 
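
The new maybe_set_msg_delivery_count/2 above seeds the queue's per-message header from any delivery_count annotation already on the message container, so a message that arrives with prior delivery attempts (for example after being dead-lettered from another queue) keeps its count. A simplified sketch of the same idea, assuming the header is already in its map form (in the real code update_header/4 also handles the compact forms):

    seed_delivery_count_example(Msg, Header0) when is_map(Header0) ->
        case mc:is(Msg) of
            true ->
                case mc:get_annotation(delivery_count, Msg) of
                    DelCnt when is_integer(DelCnt) ->
                        Header0#{delivery_count => DelCnt};
                    undefined ->
                        Header0
                end;
            false ->
                Header0
        end.
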
+ update_expiry_header(_, undefined, Header) -> Header; update_expiry_header(RaCmdTs, 0, Header) -> @@ -1569,64 +1613,43 @@ update_expiry_header(RaCmdTs, TTL, Header) -> update_expiry_header(ExpiryTs, Header) -> update_header(expiry, fun(Ts) -> Ts end, ExpiryTs, Header). -maybe_store_release_cursor(RaftIdx, - #?MODULE{cfg = #cfg{release_cursor_interval = {Base, C}} = Cfg, - enqueue_count = EC, - release_cursors = Cursors0} = State0) - when EC >= C -> - case messages_total(State0) of - 0 -> - %% message must have been immediately dropped - State0#?MODULE{enqueue_count = 0}; - Total -> - Interval = case Base of - 0 -> 0; - _ -> - min(max(Total, Base), ?RELEASE_CURSOR_EVERY_MAX) - end, - State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = - {Base, Interval}}}, - Dehydrated = dehydrate_state(State), - Cursor = {release_cursor, RaftIdx, Dehydrated}, - Cursors = lqueue:in(Cursor, Cursors0), - State#?MODULE{enqueue_count = 0, - release_cursors = Cursors} - end; -maybe_store_release_cursor(_RaftIdx, State) -> - State. - -maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, - #?MODULE{msg_bytes_enqueue = Enqueue, - enqueue_count = EnqCount, - messages = Messages, - messages_total = Total} = State0) -> +maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, + {_MetaSize, BodySize}, + Effects, #?STATE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, + messages = Messages, + messages_total = Total} = State0) -> % direct enqueue without tracking - Size = message_size(RawMsg), - Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Size = BodySize, + Header0 = maybe_set_msg_ttl(RawMsg, Ts, BodySize, State0), + Header = maybe_set_msg_delivery_count(RawMsg, Header0), Msg = ?MSG(RaftIdx, Header), - State = State0#?MODULE{msg_bytes_enqueue = Enqueue + Size, - enqueue_count = EnqCount + 1, - messages_total = Total + 1, - messages = lqueue:in(Msg, Messages) - }, + PTag = priority_tag(RawMsg), + State = State0#?STATE{msg_bytes_enqueue = Enqueue + Size, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, + messages = rabbit_fifo_q:in(PTag, Msg, Messages) + }, {ok, State, Effects}; -maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, - #?MODULE{msg_bytes_enqueue = Enqueue, - enqueue_count = EnqCount, - enqueuers = Enqueuers0, - messages = Messages, - messages_total = Total} = State0) -> +maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, + {_MetaSize, BodySize} = Size, + Effects0, #?STATE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, + enqueuers = Enqueuers0, + messages = Messages, + messages_total = Total} = State0) -> case maps:get(From, Enqueuers0, undefined) of undefined -> - State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, + State1 = State0#?STATE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, {Res, State, Effects} = maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, - RawMsg, Effects0, State1), + RawMsg, Size, Effects0, + State1), {Res, State, [{monitor, process, From} | Effects]}; #enqueuer{next_seqno = MsgSeqNo} = Enq0 -> % it is the next expected seqno - Size = message_size(RawMsg), - Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Header0 = maybe_set_msg_ttl(RawMsg, Ts, BodySize, State0), + Header = maybe_set_msg_delivery_count(RawMsg, Header0), Msg = ?MSG(RaftIdx, Header), Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, MsgCache = case can_immediately_deliver(State0) of @@ -1635,13 +1658,14 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, false -> undefined end, - State = 
State0#?MODULE{msg_bytes_enqueue = Enqueue + Size, - enqueue_count = EnqCount + 1, - messages_total = Total + 1, - messages = lqueue:in(Msg, Messages), - enqueuers = Enqueuers0#{From => Enq}, - msg_cache = MsgCache - }, + PTag = priority_tag(RawMsg), + State = State0#?STATE{msg_bytes_enqueue = Enqueue + BodySize, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, + messages = rabbit_fifo_q:in(PTag, Msg, Messages), + enqueuers = Enqueuers0#{From => Enq}, + msg_cache = MsgCache + }, {ok, State, Effects0}; #enqueuer{next_seqno = Next} when MsgSeqNo > Next -> @@ -1652,52 +1676,53 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, {duplicate, State0, Effects0} end. -return(#{index := IncomingRaftIdx, machine_version := MachineVersion} = Meta, - ConsumerId, Returned, Effects0, State0) -> - {State1, Effects1} = maps:fold( - fun(MsgId, Msg, {S0, E0}) -> - return_one(Meta, MsgId, Msg, S0, E0, ConsumerId) - end, {State0, Effects0}, Returned), - State2 = - case State1#?MODULE.consumers of - #{ConsumerId := Con} - when MachineVersion >= 3 -> - update_or_remove_sub(Meta, ConsumerId, Con, State1); - #{ConsumerId := Con0} - when MachineVersion =:= 2 -> - Credit = increase_credit(Meta, Con0, map_size(Returned)), - Con = Con0#consumer{credit = Credit}, - update_or_remove_sub(Meta, ConsumerId, Con, State1); - _ -> - State1 - end, - {State, ok, Effects} = checkout(Meta, State0, State2, Effects1), - update_smallest_raft_index(IncomingRaftIdx, State, Effects). +return(#{} = Meta, ConsumerKey, MsgIds, IncrDelCount, Anns, + Checked, Effects0, State0) + when is_map(Anns) -> + %% We requeue in the same order as messages got returned by the client. + {State1, Effects1} = + lists:foldl( + fun(MsgId, Acc = {S0, E0}) -> + case Checked of + #{MsgId := Msg} -> + return_one(Meta, MsgId, Msg, IncrDelCount, Anns, + S0, E0, ConsumerKey); + #{} -> + Acc + end + end, {State0, Effects0}, MsgIds), + State2 = case State1#?STATE.consumers of + #{ConsumerKey := Con} -> + update_or_remove_con(Meta, ConsumerKey, Con, State1); + _ -> + State1 + end, + checkout(Meta, State0, State2, Effects1). 
% used to process messages that are finished -complete(Meta, ConsumerId, [DiscardedMsgId], +complete(Meta, ConsumerKey, [MsgId], #consumer{checked_out = Checked0} = Con0, - #?MODULE{ra_indexes = Indexes0, - msg_bytes_checkout = BytesCheckout, - messages_total = Tot} = State0) -> - case maps:take(DiscardedMsgId, Checked0) of + #?STATE{ra_indexes = Indexes0, + msg_bytes_checkout = BytesCheckout, + messages_total = Tot} = State0) -> + case maps:take(MsgId, Checked0) of {?MSG(Idx, Hdr), Checked} -> SettledSize = get_header(size, Hdr), Indexes = rabbit_fifo_index:delete(Idx, Indexes0), Con = Con0#consumer{checked_out = Checked, - credit = increase_credit(Meta, Con0, 1)}, - State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), - State1#?MODULE{ra_indexes = Indexes, - msg_bytes_checkout = BytesCheckout - SettledSize, - messages_total = Tot - 1}; + credit = increase_credit(Con0, 1)}, + State1 = update_or_remove_con(Meta, ConsumerKey, Con, State0), + State1#?STATE{ra_indexes = Indexes, + msg_bytes_checkout = BytesCheckout - SettledSize, + messages_total = Tot - 1}; error -> State0 end; -complete(Meta, ConsumerId, DiscardedMsgIds, +complete(Meta, ConsumerKey, MsgIds, #consumer{checked_out = Checked0} = Con0, - #?MODULE{ra_indexes = Indexes0, - msg_bytes_checkout = BytesCheckout, - messages_total = Tot} = State0) -> + #?STATE{ra_indexes = Indexes0, + msg_bytes_checkout = BytesCheckout, + messages_total = Tot} = State0) -> {SettledSize, Checked, Indexes} = lists:foldl( fun (MsgId, {S0, Ch0, Idxs}) -> @@ -1708,94 +1733,51 @@ complete(Meta, ConsumerId, DiscardedMsgIds, error -> {S0, Ch0, Idxs} end - end, {0, Checked0, Indexes0}, DiscardedMsgIds), + end, {0, Checked0, Indexes0}, MsgIds), Len = map_size(Checked0) - map_size(Checked), Con = Con0#consumer{checked_out = Checked, - credit = increase_credit(Meta, Con0, Len)}, - State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), - State1#?MODULE{ra_indexes = Indexes, - msg_bytes_checkout = BytesCheckout - SettledSize, - messages_total = Tot - Len}. - -increase_credit(_Meta, #consumer{cfg = #consumer_cfg{lifetime = once}, - credit = Credit}, _) -> + credit = increase_credit(Con0, Len)}, + State1 = update_or_remove_con(Meta, ConsumerKey, Con, State0), + State1#?STATE{ra_indexes = Indexes, + msg_bytes_checkout = BytesCheckout - SettledSize, + messages_total = Tot - Len}. + +increase_credit(#consumer{cfg = #consumer_cfg{lifetime = once}, + credit = Credit}, _) -> %% once consumers cannot increment credit Credit; -increase_credit(_Meta, #consumer{cfg = #consumer_cfg{lifetime = auto, - credit_mode = credited}, - credit = Credit}, _) -> +increase_credit(#consumer{cfg = #consumer_cfg{lifetime = auto, + credit_mode = credited}, + credit = Credit}, _) -> %% credit_mode: `credited' also doesn't automatically increment credit Credit; -increase_credit(#{machine_version := MachineVersion}, - #consumer{cfg = #consumer_cfg{credit_mode = {simple_prefetch, MaxCredit}}, +increase_credit(#consumer{cfg = #consumer_cfg{lifetime = auto, + credit_mode = {credited, _}}, + credit = Credit}, _) -> + %% credit_mode: `credited' also doesn't automatically increment credit + Credit; +increase_credit(#consumer{cfg = #consumer_cfg{credit_mode = + {simple_prefetch, MaxCredit}}, credit = Current}, Credit) - when MachineVersion >= 3 andalso MaxCredit > 0 -> + when MaxCredit > 0 -> min(MaxCredit, Current + Credit); -increase_credit(_Meta, #consumer{credit = Current}, Credit) -> +increase_credit(#consumer{credit = Current}, Credit) -> Current + Credit. 
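
One consequence of the credit handling above: a simple_prefetch consumer can never accumulate more credit than its configured prefetch, no matter how many messages are settled in one command. A tiny arithmetic illustration (values invented):

    %% MaxCredit = 10: settling 7 messages while already holding 5 credits
    %% tops the consumer out at 10, not 12.
    capped_credit_example() ->
        MaxCredit = 10,
        Current   = 5,
        Settled   = 7,
        10 = min(MaxCredit, Current + Settled),
        ok.
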
-complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, +complete_and_checkout(#{} = Meta, MsgIds, ConsumerKey, #consumer{} = Con0, Effects0, State0) -> - State1 = complete(Meta, ConsumerId, MsgIds, Con0, State0), - {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), - update_smallest_raft_index(IncomingRaftIdx, State, Effects). + State1 = complete(Meta, ConsumerKey, MsgIds, Con0, State0), + %% a completion could have removed the active/quiescing consumer + {State2, Effects1} = activate_next_consumer(State1, Effects0), + checkout(Meta, State0, State2, Effects1). cancel_consumer_effects(ConsumerId, - #?MODULE{cfg = #cfg{resource = QName}} = _State, - Effects) -> + #?STATE{cfg = #cfg{resource = QName}}, + Effects) when is_tuple(ConsumerId) -> [{mod_call, rabbit_quorum_queue, cancel_consumer_handler, [QName, ConsumerId]} | Effects]. -update_smallest_raft_index(Idx, State, Effects) -> - update_smallest_raft_index(Idx, ok, State, Effects). - -update_smallest_raft_index(IncomingRaftIdx, Reply, - #?MODULE{cfg = Cfg, - release_cursors = Cursors0} = State0, - Effects) -> - Total = messages_total(State0), - %% TODO: optimise - case smallest_raft_index(State0) of - undefined when Total == 0 -> - % there are no messages on queue anymore and no pending enqueues - % we can forward release_cursor all the way until - % the last received command, hooray - %% reset the release cursor interval - #cfg{release_cursor_interval = {Base, _}} = Cfg, - RCI = {Base, Base}, - State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCI}, - release_cursors = lqueue:new(), - enqueue_count = 0}, - {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]}; - undefined -> - {State0, Reply, Effects}; - Smallest when is_integer(Smallest) -> - case find_next_cursor(Smallest, Cursors0) of - empty -> - {State0, Reply, Effects}; - {Cursor, Cursors} -> - %% we can emit a release cursor when we've passed the smallest - %% release cursor available. - {State0#?MODULE{release_cursors = Cursors}, Reply, - Effects ++ [Cursor]} - end - end. - -find_next_cursor(Idx, Cursors) -> - find_next_cursor(Idx, Cursors, empty). - -find_next_cursor(Smallest, Cursors0, Potential) -> - case lqueue:out(Cursors0) of - {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest -> - %% we found one but it may not be the largest one - find_next_cursor(Smallest, Cursors, Cursor); - _ when Potential == empty -> - empty; - _ -> - {Potential, Cursors0} - end. - update_msg_header(Key, Fun, Def, ?MSG(Idx, Header)) -> ?MSG(Idx, update_header(Key, Fun, Def, Header)). @@ -1806,11 +1788,12 @@ update_header(Key, UpdateFun, Default, Size) when is_integer(Size) -> update_header(Key, UpdateFun, Default, #{size => Size}); update_header(Key, UpdateFun, Default, ?TUPLE(Size, Expiry)) - when is_integer(Size), is_integer(Expiry) -> + when is_integer(Size) andalso + is_integer(Expiry) -> update_header(Key, UpdateFun, Default, #{size => Size, expiry => Expiry}); update_header(Key, UpdateFun, Default, Header) - when is_map(Header), is_map_key(size, Header) -> + when is_map_key(size, Header) -> maps:update_with(Key, UpdateFun, Default, Header). get_msg_header(?MSG(_Idx, Header)) -> @@ -1835,76 +1818,87 @@ get_header(Key, Header) when is_map(Header) andalso is_map_key(size, Header) -> maps:get(Key, Header, undefined). 
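
As the update_header/4 clauses above show, the per-message header is kept in a compact shape until richer data is needed: a bare integer when only the size is tracked, the ?TUPLE(Size, Expiry) pair when an expiry is set, and a map once any other key (such as delivery_count or acquired_count) is added; the compact shapes are upgraded to a map on update. An illustrative walk-through with invented values, where ?TUPLE is written as a plain 2-tuple purely as an assumption for the sketch:

    header_shape_examples() ->
        SizeOnly   = 120,                          %% size only
        WithExpiry = {120, 1699999999999},         %% size + expiry pair
        AsMap      = #{size => 120,
                       expiry => 1699999999999,
                       delivery_count => 1},       %% after any further update
        {SizeOnly, WithExpiry, AsMap}.
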
-return_one(#{machine_version := MachineVersion} = Meta, - MsgId, Msg0, - #?MODULE{returns = Returns, - consumers = Consumers, - dlx = DlxState0, - cfg = #cfg{delivery_limit = DeliveryLimit, - dead_letter_handler = DLH}} = State0, - Effects0, ConsumerId) -> - #consumer{checked_out = Checked0} = Con0 = maps:get(ConsumerId, Consumers), - Msg = update_msg_header(delivery_count, fun incr/1, 1, Msg0), +annotate_msg(Header, Msg0) -> + case mc:is(Msg0) of + true when is_map(Header) -> + Msg = maps:fold(fun (K, V, Acc) -> + mc:set_annotation(K, V, Acc) + end, Msg0, maps:get(anns, Header, #{})), + case Header of + #{delivery_count := DelCount} -> + mc:set_annotation(delivery_count, DelCount, Msg); + _ -> + Msg + end; + _ -> + Msg0 + end. + +return_one(Meta, MsgId, ?MSG(_, _) = Msg0, DelivFailed, Anns, + #?STATE{returns = Returns, + consumers = Consumers, + dlx = DlxState0, + cfg = #cfg{delivery_limit = DeliveryLimit, + dead_letter_handler = DLH}} = State0, + Effects0, ConsumerKey) -> + #consumer{checked_out = Checked0} = Con0 = maps:get(ConsumerKey, Consumers), + Msg = incr_msg(Msg0, DelivFailed, Anns), Header = get_msg_header(Msg), - case get_header(delivery_count, Header) of - DeliveryCount when DeliveryCount > DeliveryLimit -> - {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], delivery_limit, DLH, DlxState0), - State1 = State0#?MODULE{dlx = DlxState}, - State = complete(Meta, ConsumerId, [MsgId], Con0, State1), + case get_header(acquired_count, Header) of + AcquiredCount when AcquiredCount > DeliveryLimit -> + {DlxState, DlxEffects} = + rabbit_fifo_dlx:discard([Msg], delivery_limit, DLH, DlxState0), + State1 = State0#?STATE{dlx = DlxState}, + State = complete(Meta, ConsumerKey, [MsgId], Con0, State1), {State, DlxEffects ++ Effects0}; _ -> Checked = maps:remove(MsgId, Checked0), - Con = case MachineVersion of - V when V >= 3 -> - Con0#consumer{checked_out = Checked, - credit = increase_credit(Meta, Con0, 1)}; - 2 -> - Con0#consumer{checked_out = Checked} - end, + Con = Con0#consumer{checked_out = Checked, + credit = increase_credit(Con0, 1)}, {add_bytes_return( Header, - State0#?MODULE{consumers = Consumers#{ConsumerId => Con}, - returns = lqueue:in(Msg, Returns)}), + State0#?STATE{consumers = Consumers#{ConsumerKey => Con}, + returns = lqueue:in(Msg, Returns)}), Effects0} end. -return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, - #consumer{checked_out = Checked} = Con) -> - State = State0#?MODULE{consumers = Cons#{ConsumerId => Con}}, +return_all(Meta, #?STATE{consumers = Cons} = State0, Effects0, ConsumerKey, + #consumer{checked_out = Checked} = Con, DelivFailed) -> + State = State0#?STATE{consumers = Cons#{ConsumerKey => Con}}, lists:foldl(fun ({MsgId, Msg}, {S, E}) -> - return_one(Meta, MsgId, Msg, S, E, ConsumerId) + return_one(Meta, MsgId, Msg, DelivFailed, #{}, + S, E, ConsumerKey) end, {State, Effects0}, lists:sort(maps:to_list(Checked))). checkout(Meta, OldState, State0, Effects0) -> checkout(Meta, OldState, State0, Effects0, ok). 
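
Note that return_one/8 above now checks the acquired_count header against the configured delivery limit and hands the message to the dead-letter machinery with reason delivery_limit once the limit is exceeded, instead of requeueing it again. A small numeric sketch (limit value invented):

    %% delivery_limit = 3: a message whose header already records
    %% acquired_count = 4 is dead-lettered rather than returned once more.
    over_delivery_limit_example() ->
        DeliveryLimit = 3,
        AcquiredCount = 4,
        true = AcquiredCount > DeliveryLimit,
        ok.
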
checkout(#{index := Index} = Meta, - #?MODULE{cfg = #cfg{resource = _QName}} = OldState, + #?STATE{} = OldState, State0, Effects0, Reply) -> - {#?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState0} = State1, ExpiredMsg, Effects1} = + {#?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State1, _ExpiredMsg, Effects1} = checkout0(Meta, checkout_one(Meta, false, State0, Effects0), #{}), {DlxState, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DLH, DlxState0), %% TODO: only update dlx state if it has changed? - State2 = State1#?MODULE{msg_cache = undefined, %% by this time the cache should be used - dlx = DlxState}, + %% by this time the cache should be used + State2 = State1#?STATE{msg_cache = undefined, + dlx = DlxState}, Effects2 = DlxDeliveryEffects ++ Effects1, case evaluate_limit(Index, false, OldState, State2, Effects2) of - {State, false, Effects} when ExpiredMsg == false -> - {State, Reply, Effects}; {State, _, Effects} -> - update_smallest_raft_index(Index, Reply, State, Effects) + {State, Reply, Effects} end. -checkout0(Meta, {success, ConsumerId, MsgId, - ?MSG(_RaftIdx, _Header) = Msg, ExpiredMsg, State, Effects}, +checkout0(Meta, {success, ConsumerKey, MsgId, + ?MSG(_, _) = Msg, ExpiredMsg, State, Effects}, SendAcc0) -> DelMsg = {MsgId, Msg}, - SendAcc = case maps:get(ConsumerId, SendAcc0, undefined) of + SendAcc = case maps:get(ConsumerKey, SendAcc0, undefined) of undefined -> - SendAcc0#{ConsumerId => [DelMsg]}; + SendAcc0#{ConsumerKey => [DelMsg]}; LogMsgs -> - SendAcc0#{ConsumerId => [DelMsg | LogMsgs]} + SendAcc0#{ConsumerKey => [DelMsg | LogMsgs]} end, checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); checkout0(_Meta, {_Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> @@ -1912,13 +1906,13 @@ checkout0(_Meta, {_Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> {State0, ExpiredMsg, lists:reverse(Effects)}. evaluate_limit(_Index, Result, _BeforeState, - #?MODULE{cfg = #cfg{max_length = undefined, - max_bytes = undefined}} = State, + #?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}} = State, Effects) -> {State, Result, Effects}; evaluate_limit(Index, Result, BeforeState, - #?MODULE{cfg = #cfg{overflow_strategy = Strategy}, - enqueuers = Enqs0} = State0, + #?STATE{cfg = #cfg{overflow_strategy = Strategy}, + enqueuers = Enqs0} = State0, Effects0) -> case is_over_limit(State0) of true when Strategy == drop_head -> @@ -1929,7 +1923,7 @@ evaluate_limit(Index, Result, BeforeState, %% they need to block {Enqs, Effects} = maps:fold( - fun (P, #enqueuer{blocked = undefined} = E0, {Enqs, Acc}) -> + fun (P, #enqueuer{blocked = undefined} = E0, {Enqs, Acc}) -> E = E0#enqueuer{blocked = Index}, {Enqs#{P => E}, [{send_msg, P, {queue_status, reject_publish}, @@ -1937,7 +1931,7 @@ evaluate_limit(Index, Result, BeforeState, (_P, _E, Acc) -> Acc end, {Enqs0, Effects0}, Enqs0), - {State0#?MODULE{enqueuers = Enqs}, Result, Effects}; + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; false when Strategy == reject_publish -> %% TODO: optimise as this case gets called for every command %% pretty much @@ -1955,7 +1949,7 @@ evaluate_limit(Index, Result, BeforeState, (_P, _E, Acc) -> Acc end, {Enqs0, Effects0}, Enqs0), - {State0#?MODULE{enqueuers = Enqs}, Result, Effects}; + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; _ -> {State0, Result, Effects0} end; @@ -1992,39 +1986,41 @@ add_delivery_effects(Effects0, AccMap, State) -> end, Efs, chunk_disk_msgs(DiskMsgs, 0, [[]])) end, Effects0, AccMap). 
-take_next_msg(#?MODULE{returns = Returns0, - messages = Messages0, - ra_indexes = Indexes0 - } = State) -> +take_next_msg(#?STATE{returns = Returns0, + messages = Messages0, + ra_indexes = Indexes0 + } = State) -> case lqueue:out(Returns0) of {{value, NextMsg}, Returns} -> - {NextMsg, State#?MODULE{returns = Returns}}; + {NextMsg, State#?STATE{returns = Returns}}; {empty, _} -> - case lqueue:out(Messages0) of - {empty, _} -> + case rabbit_fifo_q:out(Messages0) of + empty -> empty; - {{value, ?MSG(RaftIdx, _) = Msg}, Messages} -> + {?MSG(RaftIdx, _) = Msg, Messages} -> %% add index here Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), - {Msg, State#?MODULE{messages = Messages, - ra_indexes = Indexes}} + {Msg, State#?STATE{messages = Messages, + ra_indexes = Indexes}} end end. -get_next_msg(#?MODULE{returns = Returns0, - messages = Messages0}) -> +get_next_msg(#?STATE{returns = Returns0, + messages = Messages0}) -> case lqueue:get(Returns0, empty) of empty -> - lqueue:get(Messages0, empty); + rabbit_fifo_q:get(Messages0); Msg -> Msg end. -delivery_effect({CTag, CPid}, [{MsgId, ?MSG(Idx, Header)}], - #?MODULE{msg_cache = {Idx, RawMsg}}) -> +delivery_effect(ConsumerKey, [{MsgId, ?MSG(Idx, Header)}], + #?STATE{msg_cache = {Idx, RawMsg}} = State) -> + {CTag, CPid} = consumer_id(ConsumerKey, State), {send_msg, CPid, {delivery, CTag, [{MsgId, {Header, RawMsg}}]}, - [local, ra_event]}; -delivery_effect({CTag, CPid}, Msgs, _State) -> + ?DELIVERY_SEND_MSG_OPTS}; +delivery_effect(ConsumerKey, Msgs, State) -> + {CTag, CPid} = consumer_id(ConsumerKey, State), RaftIdxs = lists:foldr(fun ({_, ?MSG(I, _)}, Acc) -> [I | Acc] end, [], Msgs), @@ -2034,7 +2030,8 @@ delivery_effect({CTag, CPid}, Msgs, _State) -> fun (Cmd, {MsgId, ?MSG(_Idx, Header)}) -> {MsgId, {Header, get_msg(Cmd)}} end, Log, Msgs), - [{send_msg, CPid, {delivery, CTag, DelMsgs}, [local, ra_event]}] + [{send_msg, CPid, {delivery, CTag, DelMsgs}, + ?DELIVERY_SEND_MSG_OPTS}] end, {local, node(CPid)}}. @@ -2048,60 +2045,66 @@ reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> %% Before checking out any messsage to any consumer, %% first remove all expired messages from the head of the queue. 
- {ExpiredMsg, #?MODULE{service_queue = SQ0, - messages = Messages0, - msg_bytes_checkout = BytesCheckout, - msg_bytes_enqueue = BytesEnqueue, - consumers = Cons0} = InitState, Effects1} = + {ExpiredMsg, #?STATE{service_queue = SQ0, + messages = Messages0, + msg_bytes_checkout = BytesCheckout, + msg_bytes_enqueue = BytesEnqueue, + consumers = Cons0} = InitState, Effects1} = expire_msgs(Ts, ExpiredMsg0, InitState0, Effects0), case priority_queue:out(SQ0) of - {{value, ConsumerId}, SQ1} - when is_map_key(ConsumerId, Cons0) -> + {{value, ConsumerKey}, SQ1} + when is_map_key(ConsumerKey, Cons0) -> case take_next_msg(InitState) of - {ConsumerMsg, State0} -> + {Msg, State0} -> %% there are consumers waiting to be serviced %% process consumer checkout - case maps:get(ConsumerId, Cons0) of - #consumer{credit = 0} -> - %% no credit but was still on queue - %% can happen when draining - %% recurse without consumer on queue + case maps:get(ConsumerKey, Cons0) of + #consumer{credit = Credit, + status = Status} + when Credit =:= 0 orelse + Status =/= up -> + %% not an active consumer but still in the consumers + %% map - this can happen when draining + %% or when higher priority single active consumers + %% take over, recurse without consumer in service + %% queue checkout_one(Meta, ExpiredMsg, - InitState#?MODULE{service_queue = SQ1}, Effects1); - #consumer{status = cancelled} -> - checkout_one(Meta, ExpiredMsg, - InitState#?MODULE{service_queue = SQ1}, Effects1); - #consumer{status = suspected_down} -> - checkout_one(Meta, ExpiredMsg, - InitState#?MODULE{service_queue = SQ1}, Effects1); + InitState#?STATE{service_queue = SQ1}, + Effects1); #consumer{checked_out = Checked0, next_msg_id = Next, credit = Credit, - delivery_count = DelCnt} = Con0 -> - Checked = maps:put(Next, ConsumerMsg, Checked0), + delivery_count = DelCnt0, + cfg = Cfg} = Con0 -> + Checked = maps:put(Next, Msg, Checked0), + DelCnt = case credit_api_v2(Cfg) of + true -> add(DelCnt0, 1); + false -> DelCnt0 + 1 + end, Con = Con0#consumer{checked_out = Checked, next_msg_id = Next + 1, credit = Credit - 1, - delivery_count = DelCnt + 1}, - Size = get_header(size, get_msg_header(ConsumerMsg)), - State = update_or_remove_sub( - Meta, ConsumerId, Con, - State0#?MODULE{service_queue = SQ1, - msg_bytes_checkout = BytesCheckout + Size, - msg_bytes_enqueue = BytesEnqueue - Size}), - {success, ConsumerId, Next, ConsumerMsg, ExpiredMsg, + delivery_count = DelCnt}, + Size = get_header(size, get_msg_header(Msg)), + State1 = + State0#?STATE{service_queue = SQ1, + msg_bytes_checkout = BytesCheckout + Size, + msg_bytes_enqueue = BytesEnqueue - Size}, + State = update_or_remove_con( + Meta, ConsumerKey, Con, State1), + {success, ConsumerKey, Next, Msg, ExpiredMsg, State, Effects1} end; empty -> {nochange, ExpiredMsg, InitState, Effects1} end; {{value, _ConsumerId}, SQ1} -> - %% consumer did not exist but was queued, recurse + %% consumer was not active but was queued, recurse checkout_one(Meta, ExpiredMsg, - InitState#?MODULE{service_queue = SQ1}, Effects1); + InitState#?STATE{service_queue = SQ1}, Effects1); {empty, _} -> - case lqueue:len(Messages0) of + case rabbit_fifo_q:len(Messages0) of 0 -> {nochange, ExpiredMsg, InitState, Effects1}; _ -> @@ -2127,25 +2130,30 @@ expire_msgs(RaCmdTs, Result, State, Effects) -> expire(RaCmdTs, State0, Effects) -> {?MSG(Idx, Header) = Msg, - #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState0, - ra_indexes = Indexes0, - messages_total = Tot, - msg_bytes_enqueue = MsgBytesEnqueue} = State1} = 
take_next_msg(State0), - {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], expired, DLH, DlxState0), + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0, + ra_indexes = Indexes0, + messages_total = Tot, + msg_bytes_enqueue = MsgBytesEnqueue} = State1} = + take_next_msg(State0), + {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], expired, + DLH, DlxState0), Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State = State1#?MODULE{dlx = DlxState, - ra_indexes = Indexes, - messages_total = Tot - 1, - msg_bytes_enqueue = MsgBytesEnqueue - get_header(size, Header)}, + State = State1#?STATE{dlx = DlxState, + ra_indexes = Indexes, + messages_total = Tot - 1, + msg_bytes_enqueue = + MsgBytesEnqueue - get_header(size, Header)}, expire_msgs(RaCmdTs, true, State, DlxEffects ++ Effects). timer_effect(RaCmdTs, State, Effects) -> T = case get_next_msg(State) of ?MSG(_, ?TUPLE(Size, Expiry)) - when is_integer(Size), is_integer(Expiry) -> + when is_integer(Size) andalso + is_integer(Expiry) -> %% Next message contains 'expiry' header. - %% (Re)set timer so that mesage will be dropped or dead-lettered on time. + %% (Re)set timer so that message will be dropped or + %% dead-lettered on time. max(0, Expiry - RaCmdTs); ?MSG(_, #{expiry := Expiry}) when is_integer(Expiry) -> @@ -2157,31 +2165,42 @@ timer_effect(RaCmdTs, State, Effects) -> end, [{timer, expire_msgs, T} | Effects]. -update_or_remove_sub(Meta, ConsumerId, +update_or_remove_con(Meta, ConsumerKey, #consumer{cfg = #consumer_cfg{lifetime = once}, checked_out = Checked, credit = 0} = Con, - #?MODULE{consumers = Cons} = State) -> + #?STATE{consumers = Cons} = State) -> case map_size(Checked) of 0 -> #{system_time := Ts} = Meta, % we're done with this consumer - State#?MODULE{consumers = maps:remove(ConsumerId, Cons), - last_active = Ts}; + State#?STATE{consumers = maps:remove(ConsumerKey, Cons), + last_active = Ts}; _ -> % there are unsettled items so need to keep around - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)} + State#?STATE{consumers = maps:put(ConsumerKey, Con, Cons)} end; -update_or_remove_sub(_Meta, ConsumerId, - #consumer{cfg = #consumer_cfg{}} = Con, - #?MODULE{consumers = Cons, - service_queue = ServiceQueue} = State) -> - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), - service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}. - -uniq_queue_in(Key, #consumer{credit = Credit, - status = up, - cfg = #consumer_cfg{priority = P}}, ServiceQueue) +update_or_remove_con(_Meta, ConsumerKey, + #consumer{status = quiescing, + checked_out = Checked} = Con0, + #?STATE{consumers = Cons, + waiting_consumers = Waiting} = State) + when map_size(Checked) == 0 -> + Con = Con0#consumer{status = up}, + State#?STATE{consumers = maps:remove(ConsumerKey, Cons), + waiting_consumers = add_waiting({ConsumerKey, Con}, Waiting)}; +update_or_remove_con(_Meta, ConsumerKey, + #consumer{} = Con, + #?STATE{consumers = Cons, + service_queue = ServiceQueue} = State) -> + State#?STATE{consumers = maps:put(ConsumerKey, Con, Cons), + service_queue = maybe_queue_consumer(ConsumerKey, Con, + ServiceQueue)}. 
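
Tying the expiry handling above together: the head message's expiry header drives a single {timer, expire_msgs, T} effect, and when the timer fires the expired messages are dead-lettered with reason expired. A short arithmetic sketch of the timer value (timestamps in milliseconds, values invented):

    %% A message enqueued at T0 with a 5s TTL carries expiry = T0 + 5000.
    %% If the current Ra command timestamp is T0 + 1200, the timer is set
    %% to fire in 3800 ms, i.e. {timer, expire_msgs, 3800}.
    expiry_timer_example() ->
        T0     = 1_700_000_000_000,
        Expiry = T0 + 5000,
        Now    = T0 + 1200,
        3800   = max(0, Expiry - Now),
        ok.
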
+ +maybe_queue_consumer(Key, #consumer{credit = Credit, + status = up, + cfg = #consumer_cfg{priority = P}}, + ServiceQueue) when Credit > 0 -> % TODO: queue:member could surely be quite expensive, however the practical % number of unique consumers may not be large enough for it to matter @@ -2191,70 +2210,100 @@ uniq_queue_in(Key, #consumer{credit = Credit, false -> priority_queue:in(Key, P, ServiceQueue) end; -uniq_queue_in(_Key, _Consumer, ServiceQueue) -> +maybe_queue_consumer(_Key, _Consumer, ServiceQueue) -> ServiceQueue. -update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, - {Life, Credit, Mode0} = Spec, Priority, - #?MODULE{cfg = #cfg{consumer_strategy = competing}, - consumers = Cons0} = State0) -> +update_consumer(Meta, ConsumerKey, {Tag, Pid}, ConsumerMeta, + {Life, Mode} = Spec, Priority, + #?STATE{cfg = #cfg{consumer_strategy = competing}, + consumers = Cons0} = State0) -> Consumer = case Cons0 of - #{ConsumerId := #consumer{} = Consumer0} -> - merge_consumer(Meta, Consumer0, ConsumerMeta, Spec, Priority); + #{ConsumerKey := #consumer{} = Consumer0} -> + merge_consumer(Meta, Consumer0, ConsumerMeta, + Spec, Priority); _ -> - Mode = credit_mode(Meta, Credit, Mode0), + Credit = included_credit(Mode), + DeliveryCount = initial_delivery_count(Mode), #consumer{cfg = #consumer_cfg{tag = Tag, pid = Pid, lifetime = Life, meta = ConsumerMeta, priority = Priority, credit_mode = Mode}, - credit = Credit} + credit = Credit, + delivery_count = DeliveryCount} end, - {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; -update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, - {Life, Credit, Mode0} = Spec, Priority, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - consumers = Cons0, - waiting_consumers = Waiting, - service_queue = _ServiceQueue0} = State0) -> + {Consumer, update_or_remove_con(Meta, ConsumerKey, Consumer, State0)}; +update_consumer(Meta, ConsumerKey, {Tag, Pid}, ConsumerMeta, + {Life, Mode} = Spec, Priority, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Cons0, + waiting_consumers = Waiting0, + service_queue = _ServiceQueue0} = State) -> %% if it is the current active consumer, just update %% if it is a cancelled active consumer, add to waiting unless it is the only %% one, then merge case active_consumer(Cons0) of - {ConsumerId, #consumer{status = up} = Consumer0} -> + {ConsumerKey, #consumer{status = up} = Consumer0} -> Consumer = merge_consumer(Meta, Consumer0, ConsumerMeta, Spec, Priority), - {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; - undefined when is_map_key(ConsumerId, Cons0) -> + {Consumer, update_or_remove_con(Meta, ConsumerKey, Consumer, State)}; + undefined when is_map_key(ConsumerKey, Cons0) -> %% there is no active consumer and the current consumer is in the %% consumers map and thus must be cancelled, in this case we can just %% merge and effectively make this the current active one - Consumer0 = maps:get(ConsumerId, Cons0), + Consumer0 = maps:get(ConsumerKey, Cons0), Consumer = merge_consumer(Meta, Consumer0, ConsumerMeta, Spec, Priority), - {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; + {Consumer, update_or_remove_con(Meta, ConsumerKey, Consumer, State)}; _ -> %% add as a new waiting consumer - Mode = credit_mode(Meta, Credit, Mode0), + Credit = included_credit(Mode), + DeliveryCount = initial_delivery_count(Mode), Consumer = #consumer{cfg = #consumer_cfg{tag = Tag, pid = Pid, lifetime = Life, meta = ConsumerMeta, priority = Priority, 
credit_mode = Mode}, - credit = Credit}, - - {Consumer, - State0#?MODULE{waiting_consumers = - Waiting ++ [{ConsumerId, Consumer}]}} + credit = Credit, + delivery_count = DeliveryCount}, + Waiting = add_waiting({ConsumerKey, Consumer}, Waiting0), + {Consumer, State#?STATE{waiting_consumers = Waiting}} end. -merge_consumer(Meta, #consumer{cfg = CCfg, checked_out = Checked} = Consumer, - ConsumerMeta, {Life, Credit, Mode0}, Priority) -> +add_waiting({Key, _} = New, Waiting) -> + sort_waiting(lists:keystore(Key, 1, Waiting, New)). + +sort_waiting(Waiting) -> + lists:sort(fun + ({_, ?CONSUMER_PRIORITY(P1) = #consumer{status = up}}, + {_, ?CONSUMER_PRIORITY(P2) = #consumer{status = up}}) + when P1 =/= P2 -> + P2 =< P1; + ({C1, #consumer{status = up, + credit = Cr1}}, + {C2, #consumer{status = up, + credit = Cr2}}) -> + %% both are up, priority the same + if Cr1 == Cr2 -> + %% same credit + %% sort by key, first attached priority + C1 =< C2; + true -> + %% else sort by credit + Cr2 =< Cr1 + end; + (_, {_, #consumer{status = Status}}) -> + %% not up + Status /= up + end, Waiting). + +merge_consumer(_Meta, #consumer{cfg = CCfg, checked_out = Checked} = Consumer, + ConsumerMeta, {Life, Mode}, Priority) -> + Credit = included_credit(Mode), NumChecked = map_size(Checked), NewCredit = max(0, Credit - NumChecked), - Mode = credit_mode(Meta, Credit, Mode0), Consumer#consumer{cfg = CCfg#consumer_cfg{priority = Priority, meta = ConsumerMeta, credit_mode = Mode, @@ -2262,49 +2311,138 @@ merge_consumer(Meta, #consumer{cfg = CCfg, checked_out = Checked} = Consumer, status = up, credit = NewCredit}. -credit_mode(#{machine_version := Vsn}, Credit, simple_prefetch) - when Vsn >= 3 -> - {simple_prefetch, Credit}; -credit_mode(_, _, Mode) -> - Mode. +included_credit({simple_prefetch, Credit}) -> + Credit; +included_credit({credited, _}) -> + 0; +included_credit(credited) -> + 0. 
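
With the new {Life, Mode} checkout spec above, the credit granted at attach time comes from the mode alone: simple_prefetch consumers start with their prefetch, while credited consumers start at zero and are topped up later with #credit{} commands. A conceptual in-module sketch of included_credit/1 (it is not exported, so this only illustrates the mapping):

    initial_credit_examples() ->
        50 = included_credit({simple_prefetch, 50}),
        0  = included_credit({credited, 0}),   %% credit API v2, initial delivery-count 0
        0  = included_credit(credited),        %% credit API v1 form
        ok.
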
-maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, - ServiceQueue0) -> - case Credit > 0 of +credit_active_consumer( + #credit{credit = LinkCreditRcv, + delivery_count = DeliveryCountRcv, + drain = Drain, + consumer_key = ConsumerKey}, + #consumer{delivery_count = DeliveryCountSnd, + cfg = Cfg} = Con0, + Meta, + #?STATE{consumers = Cons0, + service_queue = ServiceQueue0} = State0) -> + LinkCreditSnd = link_credit_snd(DeliveryCountRcv, LinkCreditRcv, + DeliveryCountSnd, Cfg), + %% grant the credit + Con1 = Con0#consumer{credit = LinkCreditSnd}, + ServiceQueue = maybe_queue_consumer(ConsumerKey, Con1, ServiceQueue0), + State1 = State0#?STATE{service_queue = ServiceQueue, + consumers = maps:update(ConsumerKey, Con1, Cons0)}, + {State2, ok, Effects} = checkout(Meta, State0, State1, []), + + #?STATE{consumers = Cons1 = #{ConsumerKey := Con2}} = State2, + #consumer{cfg = #consumer_cfg{pid = CPid, + tag = CTag}, + credit = PostCred, + delivery_count = PostDeliveryCount} = Con2, + Available = messages_ready(State2), + case credit_api_v2(Cfg) of true -> - % consumer needs service - check if already on service queue - uniq_queue_in(ConsumerId, Con, ServiceQueue0); + {Credit, DeliveryCount, State} = + case Drain andalso PostCred > 0 of + true -> + AdvancedDeliveryCount = add(PostDeliveryCount, PostCred), + ZeroCredit = 0, + Con = Con2#consumer{delivery_count = AdvancedDeliveryCount, + credit = ZeroCredit}, + Cons = maps:update(ConsumerKey, Con, Cons1), + State3 = State2#?STATE{consumers = Cons}, + {ZeroCredit, AdvancedDeliveryCount, State3}; + false -> + {PostCred, PostDeliveryCount, State2} + end, + %% We must send the delivery effects to the queue client + %% before credit_reply such that session process can send to + %% AMQP 1.0 client TRANSFERs before FLOW. + {State, ok, Effects ++ [{send_msg, CPid, + {credit_reply, CTag, DeliveryCount, + Credit, Available, Drain}, + ?DELIVERY_SEND_MSG_OPTS}]}; false -> - ServiceQueue0 + %% We must always send a send_credit_reply because basic.credit + %% is synchronous. + %% Additionally, we keep the bug of credit API v1 that we + %% send to queue client the + %% send_drained reply before the delivery effects (resulting + %% in the wrong behaviour that the session process sends to + %% AMQP 1.0 client the FLOW before the TRANSFERs). + %% We have to keep this bug because old rabbit_fifo_client + %% implementations expect a send_drained Ra reply + %% (they can't handle such a Ra effect). + CreditReply = {send_credit_reply, Available}, + case Drain of + true -> + AdvancedDeliveryCount = PostDeliveryCount + PostCred, + Con = Con2#consumer{delivery_count = AdvancedDeliveryCount, + credit = 0}, + Cons = maps:update(ConsumerKey, Con, Cons1), + State = State2#?STATE{consumers = Cons}, + Reply = {multi, [CreditReply, + {send_drained, {CTag, PostCred}}]}, + {State, Reply, Effects}; + false -> + {State2, CreditReply, Effects} + end + end. + +credit_inactive_consumer( + #credit{credit = LinkCreditRcv, + delivery_count = DeliveryCountRcv, + drain = Drain, + consumer_key = ConsumerKey}, + #consumer{cfg = #consumer_cfg{pid = CPid, + tag = CTag} = Cfg, + delivery_count = DeliveryCountSnd} = Con0, + Waiting0, State0) -> + %% No messages are available for inactive consumers. + Available = 0, + LinkCreditSnd = link_credit_snd(DeliveryCountRcv, + LinkCreditRcv, + DeliveryCountSnd, + Cfg), + case credit_api_v2(Cfg) of + true -> + {Credit, DeliveryCount} = + case Drain of + true -> + %% By issuing drain=true, the client says "either send a transfer or a flow frame". 
+ %% Since there are no messages to send to an inactive consumer, we advance the + %% delivery-count consuming all link-credit and send a credit_reply with drain=true + %% to the session which causes the session to send a flow frame to the client. + AdvancedDeliveryCount = add(DeliveryCountSnd, LinkCreditSnd), + {0, AdvancedDeliveryCount}; + false -> + {LinkCreditSnd, DeliveryCountSnd} + end, + %% Grant the credit. + Con = Con0#consumer{credit = Credit, + delivery_count = DeliveryCount}, + Waiting = add_waiting({ConsumerKey, Con}, Waiting0), + State = State0#?STATE{waiting_consumers = Waiting}, + {State, ok, + {send_msg, CPid, + {credit_reply, CTag, DeliveryCount, Credit, Available, Drain}, + ?DELIVERY_SEND_MSG_OPTS}}; + false -> + %% Credit API v1 doesn't support draining an inactive consumer. + %% Grant the credit. + Con = Con0#consumer{credit = LinkCreditSnd}, + Waiting = add_waiting({ConsumerKey, Con}, Waiting0), + State = State0#?STATE{waiting_consumers = Waiting}, + {State, {send_credit_reply, Available}} end. -%% creates a dehydrated version of the current state to be cached and -%% potentially used to for a snaphot at a later point -dehydrate_state(#?MODULE{cfg = #cfg{}, - dlx = DlxState} = State) -> - % no messages are kept in memory, no need to - % overly mutate the current state apart from removing indexes and cursors - State#?MODULE{ra_indexes = rabbit_fifo_index:empty(), - release_cursors = lqueue:new(), - enqueue_count = 0, - msg_cache = undefined, - dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. - -%% make the state suitable for equality comparison -normalize(#?MODULE{ra_indexes = _Indexes, - returns = Returns, - messages = Messages, - release_cursors = Cursors, - dlx = DlxState} = State) -> - State#?MODULE{returns = lqueue:from_list(lqueue:to_list(Returns)), - messages = lqueue:from_list(lqueue:to_list(Messages)), - release_cursors = lqueue:from_list(lqueue:to_list(Cursors)), - dlx = rabbit_fifo_dlx:normalize(DlxState)}. - -is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined, +is_over_limit(#?STATE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> false; -is_over_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, +is_over_limit(#?STATE{cfg = #cfg{max_length = MaxLength, max_bytes = MaxBytes}, msg_bytes_enqueue = BytesEnq, dlx = DlxState} = State) -> @@ -2312,10 +2450,10 @@ is_over_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, (messages_ready(State) + NumDlx > MaxLength) orelse (BytesEnq + BytesDlx > MaxBytes). -is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = undefined, +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> false; -is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = MaxLength, max_bytes = MaxBytes}, msg_bytes_enqueue = BytesEnq, dlx = DlxState} = State) -> @@ -2328,40 +2466,82 @@ is_below(undefined, _Num) -> is_below(Val, Num) when is_integer(Val) andalso is_integer(Num) -> Num =< trunc(Val * ?LOW_LIMIT). --spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> protocol(). +-spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> + protocol(). make_enqueue(Pid, Seq, Msg) -> - #enqueue{pid = Pid, seq = Seq, msg = Msg}. + case is_v4() of + true when is_pid(Pid) andalso + is_integer(Seq) -> + %% more compact format + #?ENQ_V2{seq = Seq, + msg = Msg, + size = ?SIZE(Msg)}; + _ -> + #enqueue{pid = Pid, seq = Seq, msg = Msg} + end. -spec make_register_enqueuer(pid()) -> protocol(). 
make_register_enqueuer(Pid) -> #register_enqueuer{pid = Pid}. --spec make_checkout(consumer_id(), - checkout_spec(), consumer_meta()) -> protocol(). -make_checkout({_, _} = ConsumerId, Spec, Meta) -> +-spec make_checkout(consumer_id(), checkout_spec(), consumer_meta()) -> + protocol(). +make_checkout({_, _} = ConsumerId, Spec0, Meta) -> + Spec = case is_v4() of + false when Spec0 == remove -> + %% if v4 is not active, fall back to cancel spec + cancel; + _ -> + Spec0 + end, #checkout{consumer_id = ConsumerId, spec = Spec, meta = Meta}. --spec make_settle(consumer_id(), [msg_id()]) -> protocol(). -make_settle(ConsumerId, MsgIds) when is_list(MsgIds) -> - #settle{consumer_id = ConsumerId, msg_ids = MsgIds}. +-spec make_settle(consumer_key(), [msg_id()]) -> protocol(). +make_settle(ConsumerKey, MsgIds) when is_list(MsgIds) -> + #settle{consumer_key = ConsumerKey, msg_ids = MsgIds}. -spec make_return(consumer_id(), [msg_id()]) -> protocol(). -make_return(ConsumerId, MsgIds) -> - #return{consumer_id = ConsumerId, msg_ids = MsgIds}. +make_return(ConsumerKey, MsgIds) -> + #return{consumer_key = ConsumerKey, msg_ids = MsgIds}. + +-spec is_return(protocol()) -> boolean(). +is_return(Command) -> + is_record(Command, return). -spec make_discard(consumer_id(), [msg_id()]) -> protocol(). -make_discard(ConsumerId, MsgIds) -> - #discard{consumer_id = ConsumerId, msg_ids = MsgIds}. +make_discard(ConsumerKey, MsgIds) -> + #discard{consumer_key = ConsumerKey, msg_ids = MsgIds}. --spec make_credit(consumer_id(), non_neg_integer(), non_neg_integer(), - boolean()) -> protocol(). -make_credit(ConsumerId, Credit, DeliveryCount, Drain) -> - #credit{consumer_id = ConsumerId, +-spec make_credit(consumer_key(), rabbit_queue_type:credit(), + non_neg_integer(), boolean()) -> protocol(). +make_credit(Key, Credit, DeliveryCount, Drain) -> + #credit{consumer_key = Key, credit = Credit, delivery_count = DeliveryCount, drain = Drain}. +-spec make_modify(consumer_key(), [msg_id()], + boolean(), boolean(), mc:annotations()) -> protocol(). +make_modify(ConsumerKey, MsgIds, DeliveryFailed, UndeliverableHere, Anns) + when is_list(MsgIds) andalso + is_boolean(DeliveryFailed) andalso + is_boolean(UndeliverableHere) andalso + is_map(Anns) -> + case is_v4() of + true -> + #modify{consumer_key = ConsumerKey, + msg_ids = MsgIds, + delivery_failed = DeliveryFailed, + undeliverable_here = UndeliverableHere, + annotations = Anns}; + false when UndeliverableHere -> + make_discard(ConsumerKey, MsgIds); + false -> + make_return(ConsumerKey, MsgIds) + end. + + -spec make_purge() -> protocol(). make_purge() -> #purge{}. @@ -2377,52 +2557,47 @@ make_update_config(Config) -> #update_config{config = Config}. add_bytes_drop(Header, - #?MODULE{msg_bytes_enqueue = Enqueue} = State) -> + #?STATE{msg_bytes_enqueue = Enqueue} = State) -> Size = get_header(size, Header), - State#?MODULE{msg_bytes_enqueue = Enqueue - Size}. + State#?STATE{msg_bytes_enqueue = Enqueue - Size}. add_bytes_return(Header, - #?MODULE{msg_bytes_checkout = Checkout, + #?STATE{msg_bytes_checkout = Checkout, msg_bytes_enqueue = Enqueue} = State) -> Size = get_header(size, Header), - State#?MODULE{msg_bytes_checkout = Checkout - Size, - msg_bytes_enqueue = Enqueue + Size}. + State#?STATE{msg_bytes_checkout = Checkout - Size, + msg_bytes_enqueue = Enqueue + Size}. 
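Usage sketch for make_modify/5 above (module and function names below are illustrative, not part of the patch): on a v4 machine it builds a #modify{} command carrying per-message annotations, while on an older machine the same call silently degrades to a discard when the message is undeliverable here, otherwise to a return, so callers do not have to branch on the machine version themselves.

-module(modify_usage_sketch).
-export([release/2, dead_letter/2]).

%% Release the message back to the queue, recording a failed delivery.
release(ConsumerKey, MsgId) ->
    rabbit_fifo:make_modify(ConsumerKey, [MsgId],
                            _DeliveryFailed = true,
                            _UndeliverableHere = false,
                            #{}).

%% Reject the message so it is dead-lettered (or dropped) by the queue.
dead_letter(ConsumerKey, MsgId) ->
    rabbit_fifo:make_modify(ConsumerKey, [MsgId],
                            _DeliveryFailed = true,
                            _UndeliverableHere = true,
                            #{}).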
-message_size(#basic_message{content = Content}) -> - #content{payload_fragments_rev = PFR} = Content, - iolist_size(PFR); message_size(B) when is_binary(B) -> byte_size(B); message_size(Msg) -> case mc:is(Msg) of true -> - {_, PayloadSize} = mc:size(Msg), - PayloadSize; + mc:size(Msg); false -> %% probably only hit this for testing so ok to use erts_debug - erts_debug:size(Msg) + {0, erts_debug:size(Msg)} end. - -all_nodes(#?MODULE{consumers = Cons0, - enqueuers = Enqs0, - waiting_consumers = WaitingConsumers0}) -> - Nodes0 = maps:fold(fun({_, P}, _, Acc) -> +all_nodes(#?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Nodes0 = maps:fold(fun(_, ?CONSUMER_PID(P), Acc) -> Acc#{node(P) => ok} end, #{}, Cons0), Nodes1 = maps:fold(fun(P, _, Acc) -> Acc#{node(P) => ok} end, Nodes0, Enqs0), maps:keys( - lists:foldl(fun({{_, P}, _}, Acc) -> + lists:foldl(fun({_, ?CONSUMER_PID(P)}, Acc) -> Acc#{node(P) => ok} end, Nodes1, WaitingConsumers0)). -all_pids_for(Node, #?MODULE{consumers = Cons0, - enqueuers = Enqs0, - waiting_consumers = WaitingConsumers0}) -> - Cons = maps:fold(fun({_, P}, _, Acc) +all_pids_for(Node, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun(_, ?CONSUMER_PID(P), Acc) when node(P) =:= Node -> [P | Acc]; (_, _, Acc) -> Acc @@ -2432,17 +2607,18 @@ all_pids_for(Node, #?MODULE{consumers = Cons0, [P | Acc]; (_, _, Acc) -> Acc end, Cons, Enqs0), - lists:foldl(fun({{_, P}, _}, Acc) + lists:foldl(fun({_, ?CONSUMER_PID(P)}, Acc) when node(P) =:= Node -> [P | Acc]; (_, Acc) -> Acc end, Enqs, WaitingConsumers0). -suspected_pids_for(Node, #?MODULE{consumers = Cons0, - enqueuers = Enqs0, - waiting_consumers = WaitingConsumers0}) -> - Cons = maps:fold(fun({_, P}, - #consumer{status = suspected_down}, +suspected_pids_for(Node, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun(_Key, + #consumer{cfg = #consumer_cfg{pid = P}, + status = suspected_down}, Acc) when node(P) =:= Node -> [P | Acc]; @@ -2453,16 +2629,17 @@ suspected_pids_for(Node, #?MODULE{consumers = Cons0, [P | Acc]; (_, _, Acc) -> Acc end, Cons, Enqs0), - lists:foldl(fun({{_, P}, - #consumer{status = suspected_down}}, Acc) + lists:foldl(fun({_Key, + #consumer{cfg = #consumer_cfg{pid = P}, + status = suspected_down}}, Acc) when node(P) =:= Node -> [P | Acc]; (_, Acc) -> Acc end, Enqs, WaitingConsumers0). -is_expired(Ts, #?MODULE{cfg = #cfg{expires = Expires}, - last_active = LastActive, - consumers = Consumers}) +is_expired(Ts, #?STATE{cfg = #cfg{expires = Expires}, + last_active = LastActive, + consumers = Consumers}) when is_number(LastActive) andalso is_number(Expires) -> %% TODO: should it be active consumers? Active = maps:filter(fun (_, #consumer{status = suspected_down}) -> @@ -2475,13 +2652,17 @@ is_expired(Ts, #?MODULE{cfg = #cfg{expires = Expires}, is_expired(_Ts, _State) -> false. -get_priority_from_args(#{args := Args}) -> +get_priority(#{priority := Priority}) -> + Priority; +get_priority(#{args := Args}) -> + %% fallback, v3 option case rabbit_misc:table_lookup(Args, <<"x-priority">>) of - {_Key, Value} -> + {_Type, Value} -> Value; - _ -> 0 + _ -> + 0 end; -get_priority_from_args(_) -> +get_priority(_) -> 0. notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> @@ -2492,41 +2673,38 @@ notify_decorators_startup(QName) -> {mod_call, rabbit_quorum_queue, spawn_notify_decorators, [QName, startup, []]}. 
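get_priority/1 above now reads the consumer priority from a dedicated key in the consumer meta map, falling back to the legacy x-priority consumer argument for older clients. A consumer meta map as a v4 client might pass it (all values below are illustrative); get_priority/1 would return 10 here, whereas a pre-v4 client would instead carry the priority inside args:

-module(consumer_meta_sketch).
-export([example_meta/0]).

%% Illustrative consumer meta; the priority key takes precedence over any
%% x-priority entry in the args list.
example_meta() ->
    #{ack => true,
      username => <<"guest">>,
      prefetch => 100,
      args => [],
      priority => 10}.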
-convert(To, To, State) -> +convert(_Meta, To, To, State) -> State; -convert(0, To, State) -> - convert(1, To, rabbit_fifo_v1:convert_v0_to_v1(State)); -convert(1, To, State) -> - convert(2, To, convert_v1_to_v2(State)); -convert(2, To, State) -> - convert(3, To, convert_v2_to_v3(State)). - -smallest_raft_index(#?MODULE{messages = Messages, - ra_indexes = Indexes, - dlx = DlxState}) -> +convert(Meta, 0, To, State) -> + convert(Meta, 1, To, rabbit_fifo_v1:convert_v0_to_v1(State)); +convert(Meta, 1, To, State) -> + convert(Meta, 2, To, rabbit_fifo_v3:convert_v1_to_v2(State)); +convert(Meta, 2, To, State) -> + convert(Meta, 3, To, rabbit_fifo_v3:convert_v2_to_v3(State)); +convert(Meta, 3, To, State) -> + convert(Meta, 4, To, convert_v3_to_v4(Meta, State)). + +smallest_raft_index(#?STATE{messages = Messages, + ra_indexes = Indexes, + dlx = DlxState}) -> SmallestDlxRaIdx = rabbit_fifo_dlx:smallest_raft_index(DlxState), - SmallestMsgsRaIdx = case lqueue:get(Messages, undefined) of - ?MSG(I, _) when is_integer(I) -> - I; - _ -> - undefined - end, + SmallestMsgsRaIdx = rabbit_fifo_q:get_lowest_index(Messages), SmallestRaIdx = rabbit_fifo_index:smallest(Indexes), lists:min([SmallestDlxRaIdx, SmallestMsgsRaIdx, SmallestRaIdx]). -make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg}], Acc) -> +make_requeue(ConsumerKey, Notify, [{MsgId, Idx, Header, Msg}], Acc) -> lists:reverse([{append, - #requeue{consumer_id = ConsumerId, + #requeue{consumer_key = ConsumerKey, index = Idx, header = Header, msg_id = MsgId, msg = Msg}, Notify} | Acc]); -make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg} | Rem], Acc) -> - make_requeue(ConsumerId, Notify, Rem, +make_requeue(ConsumerKey, Notify, [{MsgId, Idx, Header, Msg} | Rem], Acc) -> + make_requeue(ConsumerKey, Notify, Rem, [{append, - #requeue{consumer_id = ConsumerId, + #requeue{consumer_key = ConsumerKey, index = Idx, header = Header, msg_id = MsgId, @@ -2536,8 +2714,8 @@ make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg} | Rem], Acc) -> make_requeue(_ConsumerId, _Notify, [], []) -> []. -can_immediately_deliver(#?MODULE{service_queue = SQ, - consumers = Consumers} = State) -> +can_immediately_deliver(#?STATE{service_queue = SQ, + consumers = Consumers} = State) -> case messages_ready(State) of 0 when map_size(Consumers) > 0 -> %% TODO: is is probably good enough but to be 100% we'd need to @@ -2550,7 +2728,213 @@ can_immediately_deliver(#?MODULE{service_queue = SQ, incr(I) -> I + 1. +get_msg(#?ENQ_V2{msg = M}) -> + M; get_msg(#enqueue{msg = M}) -> M; get_msg(#requeue{msg = M}) -> M. + +initial_delivery_count({credited, Count}) -> + %% credit API v2 + Count; +initial_delivery_count(_) -> + %% credit API v1 + 0. + +credit_api_v2(#consumer_cfg{credit_mode = {credited, _}}) -> + true; +credit_api_v2(_) -> + false. + +link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd, ConsumerCfg) -> + case credit_api_v2(ConsumerCfg) of + true -> + amqp10_util:link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd); + false -> + C = DeliveryCountRcv + LinkCreditRcv - DeliveryCountSnd, + %% C can be negative when receiver decreases credits while messages are in flight. + max(0, C) + end. + +consumer_id(#consumer{cfg = Cfg}) -> + {Cfg#consumer_cfg.tag, Cfg#consumer_cfg.pid}. + +consumer_id(Key, #?STATE{consumers = Consumers}) + when is_integer(Key) -> + consumer_id(maps:get(Key, Consumers)); +consumer_id({_, _} = ConsumerId, _State) -> + ConsumerId. 
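The link_credit_snd/4 helper above follows the AMQP 1.0 flow-control relation (section 2.6.7): the credit available to the sender is the receiver's delivery-count plus its granted credit minus the sender's own delivery-count. A hedged sketch with plain integers; the credit API v2 path delegates to amqp10_util:link_credit_snd/3, which additionally handles 32-bit serial-number wrap-around:

-module(link_credit_sketch).
-export([link_credit_snd/3]).

%% DeliveryCountRcv/LinkCreditRcv come from the receiver's flow frame,
%% DeliveryCountSnd is the queue's current delivery-count. Clamp at zero:
%% the receiver may lower its credit while messages are still in flight.
link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd) ->
    max(0, DeliveryCountRcv + LinkCreditRcv - DeliveryCountSnd).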
+ + +consumer_key_from_id(ConsumerId, #?STATE{consumers = Consumers}) + when is_map_key(ConsumerId, Consumers) -> + {ok, ConsumerId}; +consumer_key_from_id(ConsumerId, #?STATE{consumers = Consumers, + waiting_consumers = Waiting}) -> + case consumer_key_from_id(ConsumerId, maps:next(maps:iterator(Consumers))) of + {ok, _} = Res -> + Res; + error -> + %% scan the waiting consumers + case lists:search(fun ({_K, ?CONSUMER_TAG_PID(T, P)}) -> + {T, P} == ConsumerId + end, Waiting) of + {value, {K, _}} -> + {ok, K}; + false -> + error + end + end; +consumer_key_from_id({CTag, CPid}, {Key, ?CONSUMER_TAG_PID(T, P), _I}) + when T == CTag andalso P == CPid -> + {ok, Key}; +consumer_key_from_id(ConsumerId, {_, _, I}) -> + consumer_key_from_id(ConsumerId, maps:next(I)); +consumer_key_from_id(_ConsumerId, none) -> + error. + +consumer_cancel_info(ConsumerKey, #?STATE{consumers = Consumers}) -> + case Consumers of + #{ConsumerKey := #consumer{checked_out = Checked}} -> + #{key => ConsumerKey, + num_checked_out => map_size(Checked)}; + _ -> + #{} + end. + +find_consumer(Key, Consumers) -> + case Consumers of + #{Key := Con} -> + {Key, Con}; + _ when is_tuple(Key) -> + %% sometimes rabbit_fifo_client may send a settle, return etc + %% by it's ConsumerId even if it was created with an integer key + %% as it may have lost it's state after a consumer cancel + maps_search(fun (_K, ?CONSUMER_TAG_PID(Tag, Pid)) -> + Key == {Tag, Pid} + end, Consumers); + _ -> + undefined + end. + +maps_search(_Pred, none) -> + undefined; +maps_search(Pred, {K, V, I}) -> + case Pred(K, V) of + true -> + {K, V}; + false -> + maps_search(Pred, maps:next(I)) + end; +maps_search(Pred, Map) when is_map(Map) -> + maps_search(Pred, maps:next(maps:iterator(Map))). + +priority_tag(Msg) -> + case mc:is(Msg) of + true -> + case mc:priority(Msg) of + P when is_integer(P) andalso + P > 4 -> + hi; + _ -> + no + end; + false -> + no + end. + + +do_checkpoints(Ts, + #checkpoint{index = ChIdx, + timestamp = ChTime, + smallest_index = LastSmallest, + indexes = MinIndexes} = Check0, RaAux, Force) -> + LastAppliedIdx = ra_aux:last_applied(RaAux), + IndexesSince = LastAppliedIdx - ChIdx, + #?STATE{} = MacState = ra_aux:machine_state(RaAux), + TimeSince = Ts - ChTime, + NewSmallest = case smallest_raft_index(MacState) of + undefined -> + LastAppliedIdx; + Smallest -> + Smallest + end, + MsgsTot = messages_total(MacState), + {CheckMinInterval, CheckMinIndexes, CheckMaxIndexes} = + persistent_term:get(quorum_queue_checkpoint_config, + {?CHECK_MIN_INTERVAL_MS, ?CHECK_MIN_INDEXES, + ?CHECK_MAX_INDEXES}), + EnoughTimeHasPassed = TimeSince > CheckMinInterval, + + %% enough time has passed and enough indexes have been committed + case (IndexesSince > MinIndexes andalso + EnoughTimeHasPassed) orelse + %% the queue is empty and some commands have been + %% applied since the last checkpoint + (MsgsTot == 0 andalso + IndexesSince > CheckMinIndexes andalso + EnoughTimeHasPassed) orelse + Force of + true -> + %% take fewer checkpoints the more messages there are on queue + NextIndexes = min(max(MsgsTot, CheckMinIndexes), CheckMaxIndexes), + %% take a checkpoint; + {#checkpoint{index = LastAppliedIdx, + timestamp = Ts, + smallest_index = NewSmallest, + messages_total = MsgsTot, + indexes = NextIndexes}, + [{checkpoint, LastAppliedIdx, MacState} | + release_cursor(LastSmallest, NewSmallest)]}; + false -> + {Check0#checkpoint{smallest_index = NewSmallest}, + release_cursor(LastSmallest, NewSmallest)} + end. 
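A condensed sketch of the checkpoint policy implemented by do_checkpoints/4 above, ignoring the adaptive scaling of the next index target: a checkpoint is taken when enough indexes have been applied and enough time has passed since the last one, when the queue has drained and a minimum number of commands has been applied, or when one is forced.

-module(checkpoint_sketch).
-export([should_checkpoint/7]).

%% Illustrative only; TargetIndexes corresponds to the adaptive target held
%% in the #checkpoint{} record, MinIndexes/MinIntervalMs to the configured
%% ?CHECK_MIN_INDEXES / ?CHECK_MIN_INTERVAL_MS values.
should_checkpoint(IndexesSinceLast, MsSinceLast, MsgsTotal,
                  TargetIndexes, MinIndexes, MinIntervalMs, Force) ->
    EnoughTime = MsSinceLast > MinIntervalMs,
    (IndexesSinceLast > TargetIndexes andalso EnoughTime)
        orelse (MsgsTotal == 0 andalso
                IndexesSinceLast > MinIndexes andalso
                EnoughTime)
        orelse Force.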
+ +release_cursor(LastSmallest, Smallest) + when is_integer(LastSmallest) andalso + is_integer(Smallest) andalso + Smallest > LastSmallest -> + [{release_cursor, Smallest}]; +release_cursor(_, _) -> + []. + +discard(Meta, MsgIds, ConsumerKey, + #consumer{checked_out = Checked} = Con, + DelFailed, Anns, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State0) -> + %% We publish to dead-letter exchange in the same order + %% as messages got rejected by the client. + DiscardMsgs = lists:filtermap( + fun(Id) -> + case maps:get(Id, Checked, undefined) of + undefined -> + false; + Msg0 -> + {true, incr_msg(Msg0, DelFailed, Anns)} + end + end, MsgIds), + {DlxState, Effects} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, + DLH, DlxState0), + State = State0#?STATE{dlx = DlxState}, + complete_and_checkout(Meta, MsgIds, ConsumerKey, Con, Effects, State). + +incr_msg(Msg0, DelFailed, Anns) -> + Msg1 = update_msg_header(acquired_count, fun incr/1, 1, Msg0), + Msg2 = case map_size(Anns) > 0 of + true -> + update_msg_header(anns, fun(A) -> + maps:merge(A, Anns) + end, Anns, + Msg1); + false -> + Msg1 + end, + case DelFailed of + true -> + update_msg_header(delivery_count, fun incr/1, 1, Msg2); + false -> + Msg2 + end. diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index bfb0fcfbf907..f88893374f75 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% macros for memory optimised tuple structures %% [A|B] saves 1 byte compared to {A,B} @@ -12,11 +12,15 @@ %% Raw message data is always stored on disk. -define(MSG(Index, Header), ?TUPLE(Index, Header)). +-define(NIL, []). + -define(IS_HEADER(H), (is_integer(H) andalso H >= 0) orelse is_list(H) orelse (is_map(H) andalso is_map_key(size, H))). +-define(DELIVERY_SEND_MSG_OPTS, [local, ra_event]). + -type optimised_tuple(A, B) :: nonempty_improper_list(A, B). -type option(T) :: undefined | T. @@ -37,12 +41,14 @@ -type msg_header() :: msg_size() | optimised_tuple(msg_size(), Expiry :: milliseconds()) | #{size := msg_size(), + acquired_count => non_neg_integer(), delivery_count => non_neg_integer(), expiry => milliseconds()}. %% The message header: %% size: The size of the message payload in bytes. -%% delivery_count: the number of unsuccessful delivery attempts. +%% delivery_count: The number of unsuccessful delivery attempts. %% A non-zero value indicates a previous attempt. +%% return_count: The number of explicit returns. %% expiry: Epoch time in ms when a message expires. Set during enqueue. %% Value is determined by per-queue or per-message message TTL. %% If it contains only the size it can be condensed to an integer. @@ -51,44 +57,52 @@ -type msg_size() :: non_neg_integer(). %% the size in bytes of the msg payload --type msg() :: optimised_tuple(option(ra:index()), msg_header()). +-type msg() :: optimised_tuple(ra:index(), msg_header()). -type delivery_msg() :: {msg_id(), {msg_header(), raw_msg()}}. %% A tuple consisting of the message id, and the headered message. --type consumer_tag() :: binary(). 
-%% An arbitrary binary tag used to distinguish between different consumers -%% set up by the same process. See: {@link rabbit_fifo_client:checkout/3.} - --type delivery() :: {delivery, consumer_tag(), [delivery_msg()]}. +-type delivery() :: {delivery, rabbit_types:ctag(), [delivery_msg()]}. %% Represents the delivery of one or more rabbit_fifo messages. --type consumer_id() :: {consumer_tag(), pid()}. +-type consumer_id() :: {rabbit_types:ctag(), pid()}. %% The entity that receives messages. Uniquely identifies a consumer. --type credit_mode() :: credited | - %% machine_version 2 - simple_prefetch | - %% machine_version 3 - {simple_prefetch, MaxCredit :: non_neg_integer()}. +-type consumer_idx() :: ra:index(). +%% v4 can reference consumers by the raft index they were added at. +%% The entity that receives messages. Uniquely identifies a consumer. +-type consumer_key() :: consumer_id() | consumer_idx(). + +-type credit_mode() :: + {credited, InitialDeliveryCount :: rabbit_queue_type:delivery_count()} | + %% machine_version 2 + {simple_prefetch, MaxCredit :: non_neg_integer()}. %% determines how credit is replenished --type checkout_spec() :: {once | auto, Num :: non_neg_integer(), - credit_mode()} | +-type checkout_spec() :: {once | auto, + Num :: non_neg_integer(), + credited | simple_prefetch} | + {dequeue, settled | unsettled} | - cancel. + cancel | remove | + %% new v4 format + {once | auto, credit_mode()}. -type consumer_meta() :: #{ack => boolean(), username => binary(), prefetch => non_neg_integer(), - args => list()}. + args => list(), + priority => non_neg_integer() + }. %% static meta data associated with a consumer -type applied_mfa() :: {module(), atom(), list()}. % represents a partially applied module call --define(RELEASE_CURSOR_EVERY, 2048). --define(RELEASE_CURSOR_EVERY_MAX, 3_200_000). +-define(CHECK_MIN_INTERVAL_MS, 1000). +-define(CHECK_MIN_INDEXES, 4096). +-define(CHECK_MAX_INDEXES, 666_667). + -define(USE_AVG_HALF_LIFE, 10000.0). %% an average QQ without any message uses about 100KB so setting this limit %% to ~10 times that should be relatively safe. @@ -98,57 +112,55 @@ -define(LOW_LIMIT, 0.8). -define(DELIVERY_CHUNK_LIMIT_B, 128_000). +-type milliseconds() :: non_neg_integer(). -record(consumer_cfg, {meta = #{} :: consumer_meta(), pid :: pid(), - tag :: consumer_tag(), + tag :: rabbit_types:ctag(), %% the mode of how credit is incremented %% simple_prefetch: credit is re-filled as deliveries are settled %% or returned. %% credited: credit can only be changed by receiving a consumer_credit - %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}' - credit_mode :: credit_mode(), % part of snapshot data + %% command: `{credit, ReceiverDeliveryCount, Credit}' + credit_mode :: credited | credit_mode(), lifetime = once :: once | auto, - priority = 0 :: non_neg_integer()}). + priority = 0 :: integer()}). -record(consumer, {cfg = #consumer_cfg{}, - status = up :: up | suspected_down | cancelled | waiting, - next_msg_id = 0 :: msg_id(), % part of snapshot data + status = up :: up | suspected_down | cancelled | quiescing, + next_msg_id = 0 :: msg_id(), checked_out = #{} :: #{msg_id() => msg()}, %% max number of messages that can be sent %% decremented for each delivery - credit = 0 : non_neg_integer(), - %% total number of checked out messages - ever - %% incremented for each delivery - delivery_count = 0 :: non_neg_integer() + credit = 0 :: non_neg_integer(), + %% AMQP 1.0 §2.6.7 + delivery_count :: rabbit_queue_type:delivery_count() }). 
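The msg_header map and the #consumer record above distinguish two counters: acquired_count grows every time a message is handed out or requeued (including explicit returns), while delivery_count only grows for failed deliveries and is what feeds the AMQP 1.0 delivery-count. A sketch of that bookkeeping with a plain map standing in for the optimised header representation:

-module(header_counter_sketch).
-export([on_requeue/2]).

%% Header is a map such as #{size => Bytes}; counters are created lazily.
on_requeue(Header0, DeliveryFailed) ->
    Header = maps:update_with(acquired_count, fun(C) -> C + 1 end, 1, Header0),
    case DeliveryFailed of
        true ->
            maps:update_with(delivery_count, fun(C) -> C + 1 end, 1, Header);
        false ->
            Header
    end.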
-type consumer() :: #consumer{}. -type consumer_strategy() :: competing | single_active. --type milliseconds() :: non_neg_integer(). - -type dead_letter_handler() :: option({at_most_once, applied_mfa()} | at_least_once). -record(enqueuer, {next_seqno = 1 :: msg_seqno(), % out of order enqueues - sorted list - unused, + unused = ?NIL, status = up :: up | suspected_down, %% it is useful to have a record of when this was blocked %% so that we can retry sending the block effect if %% the publisher did not receive the initial one blocked :: option(ra:index()), - unused_1, - unused_2 + unused_1 = ?NIL, + unused_2 = ?NIL }). -record(cfg, {name :: atom(), resource :: rabbit_types:r('queue'), - release_cursor_interval :: option({non_neg_integer(), non_neg_integer()}), + unused_1 = ?NIL, dead_letter_handler :: dead_letter_handler(), become_leader_handler :: option(applied_mfa()), overflow_strategy = drop_head :: drop_head | reject_publish, @@ -160,18 +172,14 @@ delivery_limit :: option(non_neg_integer()), expires :: option(milliseconds()), msg_ttl :: option(milliseconds()), - unused_1, - unused_2 + unused_2 = ?NIL, + unused_3 = ?NIL }). --type prefix_msgs() :: {list(), list()} | - {non_neg_integer(), list(), - non_neg_integer(), list()}. - -record(rabbit_fifo, {cfg :: #cfg{}, % unassigned messages - messages = lqueue:new() :: lqueue:lqueue(msg()), + messages = rabbit_fifo_q:new() :: rabbit_fifo_q:state(), messages_total = 0 :: non_neg_integer(), % queue of returned msg_in_ids - when checking out it picks from returns = lqueue:new() :: lqueue:lqueue(term()), @@ -187,36 +195,31 @@ % index when there are large gaps but should be faster than gb_trees % for normal appending operations as it's backed by a map ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), - %% A release cursor is essentially a snapshot for a past raft index. - %% Working assumption: Messages are consumed in a FIFO-ish order because - %% the log is truncated only until the oldest message. - release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor, - ra:index(), #rabbit_fifo{}}), + unused_1 = ?NIL, % consumers need to reflect consumer state at time of snapshot - consumers = #{} :: #{consumer_id() => consumer()}, + consumers = #{} :: #{consumer_key() => consumer()}, % consumers that require further service are queued here service_queue = priority_queue:new() :: priority_queue:q(), %% state for at-least-once dead-lettering dlx = rabbit_fifo_dlx:init() :: rabbit_fifo_dlx:state(), msg_bytes_enqueue = 0 :: non_neg_integer(), msg_bytes_checkout = 0 :: non_neg_integer(), - %% waiting consumers, one is picked active consumer is cancelled or dies + %% one is picked if active consumer is cancelled or dies %% used only when single active consumer is on - waiting_consumers = [] :: [{consumer_id(), consumer()}], + waiting_consumers = [] :: [{consumer_key(), consumer()}], last_active :: option(non_neg_integer()), msg_cache :: option({ra:index(), raw_msg()}), - unused_2 + unused_2 = ?NIL }). 
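The messages field above is now a rabbit_fifo_q rather than an lqueue, which (as priority_tag/1 earlier in this patch suggests) lets the machine separate high-priority messages from normal ones. A sketch of the classification rule only; the internal layout of rabbit_fifo_q is not shown in this diff:

-module(priority_sketch).
-export([classify_priority/1]).

%% Mirrors priority_tag/1: AMQP priority 5 or above is treated as "hi",
%% everything else (including non-mc messages) as normal.
classify_priority(Priority) when is_integer(Priority), Priority > 4 ->
    hi;
classify_priority(_) ->
    no.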
-type config() :: #{name := atom(), queue_resource := rabbit_types:r('queue'), dead_letter_handler => dead_letter_handler(), become_leader_handler => applied_mfa(), - release_cursor_interval => non_neg_integer(), + checkpoint_min_indexes => non_neg_integer(), + checkpoint_max_indexes => non_neg_integer(), max_length => non_neg_integer(), max_bytes => non_neg_integer(), - max_in_memory_length => non_neg_integer(), - max_in_memory_bytes => non_neg_integer(), overflow_strategy => drop_head | reject_publish, single_active_consumer_on => boolean(), delivery_limit => non_neg_integer(), diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index 8e19e6a29303..20d57d89577f 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc Provides an easy to consume API for interacting with the {@link rabbit_fifo.} @@ -14,15 +14,17 @@ -export([ init/1, init/2, - checkout/5, - cancel_checkout/2, + checkout/4, + cancel_checkout/3, enqueue/3, enqueue/4, dequeue/4, settle/3, return/3, discard/3, - credit/4, + modify/6, + credit_v1/4, + credit/5, handle_ra_event/4, untracked_enqueue/2, purge/1, @@ -37,17 +39,18 @@ -define(SOFT_LIMIT, 32). -define(TIMER_TIME, 10000). -define(COMMAND_TIMEOUT, 30000). +-define(UNLIMITED_PREFETCH_COUNT, 2000). %% something large for ra -type seq() :: non_neg_integer(). --type action() :: {send_credit_reply, Available :: non_neg_integer()} | - {send_drained, CTagCredit :: - {rabbit_fifo:consumer_tag(), non_neg_integer()}} | - rabbit_queue_type:action(). --type actions() :: [action()]. --record(consumer, {last_msg_id :: seq() | -1 | undefined, +-record(consumer, {key :: rabbit_fifo:consumer_key(), + % status = up :: up | cancelled, + last_msg_id :: seq() | -1 | undefined, ack = false :: boolean(), - delivery_count = 0 :: non_neg_integer()}). + %% Remove this field when feature flag rabbitmq_4.0.0 becomes required. + delivery_count :: {credit_api_v1, rabbit_queue_type:delivery_count()} | + credit_api_v2 + }). -record(cfg, {servers = [] :: [ra:server_id()], soft_limit = ?SOFT_LIMIT :: non_neg_integer(), @@ -61,22 +64,17 @@ next_enqueue_seq = 1 :: seq(), %% indicates that we've exceeded the soft limit slow = false :: boolean(), - unsent_commands = #{} :: #{rabbit_fifo:consumer_id() => + unsent_commands = #{} :: #{rabbit_fifo:consumer_key() => {[seq()], [seq()], [seq()]}}, pending = #{} :: #{seq() => {term(), rabbit_fifo:command()}}, - consumer_deliveries = #{} :: #{rabbit_fifo:consumer_tag() => - #consumer{}}, + consumers = #{} :: #{rabbit_types:ctag() => #consumer{}}, timer_state :: term() }). -opaque state() :: #state{}. --export_type([ - state/0, - actions/0 - ]). - +-export_type([state/0]). %% @doc Create the initial state for a new rabbit_fifo sessions. A state is needed %% to interact with a rabbit_fifo queue using @module. @@ -111,13 +109,16 @@ init(Servers, SoftLimit) -> %% by the {@link handle_ra_event/2. handle_ra_event/2} function. -spec enqueue(rabbit_amqqueue:name(), Correlation :: term(), Msg :: term(), State :: state()) -> - {ok, state(), actions()} | {reject_publish, state()}. 
+ {ok, state(), rabbit_queue_type:actions()} | {reject_publish, state()}. enqueue(QName, Correlation, Msg, #state{queue_status = undefined, next_enqueue_seq = 1, cfg = #cfg{servers = Servers, timeout = Timeout}} = State0) -> %% the first publish, register and enqueuer for this process. + %% TODO: we _only_ need to pre-register an enqueuer to discover if the + %% queue overflow is `reject_publish` and the queue can accept new messages + %% if the queue does not have `reject_publish` set we can skip this step Reg = rabbit_fifo:make_register_enqueuer(self()), case ra:process_command(Servers, Reg, Timeout) of {ok, reject_publish, Leader} -> @@ -141,7 +142,7 @@ enqueue(_QName, _Correlation, _Msg, cfg = #cfg{}} = State) -> {reject_publish, State}; enqueue(QName, Correlation, Msg, - #state{slow = Slow, + #state{slow = WasSlow, pending = Pending, queue_status = go, next_seq = Seq, @@ -151,19 +152,15 @@ enqueue(QName, Correlation, Msg, % by default there is no correlation id Cmd = rabbit_fifo:make_enqueue(self(), EnqueueSeq, Msg), ok = ra:pipeline_command(ServerId, Cmd, Seq, low), - Tag = case map_size(Pending) >= SftLmt of - true -> slow; - false -> ok - end, + IsSlow = map_size(Pending) >= SftLmt, State = State0#state{pending = Pending#{Seq => {Correlation, Cmd}}, next_seq = Seq + 1, next_enqueue_seq = EnqueueSeq + 1, - slow = Tag == slow}, - case Tag of - slow when not Slow -> - {ok, set_timer(QName, State), [{block, cluster_name(State)}]}; - _ -> - {ok, State, []} + slow = IsSlow}, + if IsSlow andalso not WasSlow -> + {ok, set_timer(QName, State), [{block, cluster_name(State)}]}; + true -> + {ok, State, []} end. %% @doc Enqueues a message. @@ -177,7 +174,7 @@ enqueue(QName, Correlation, Msg, %% by the {@link handle_ra_event/2. handle_ra_event/2} function. %% -spec enqueue(rabbit_amqqueue:name(), Msg :: term(), State :: state()) -> - {ok, state(), actions()} | {reject_publish, state()}. + {ok, state(), rabbit_queue_type:actions()} | {reject_publish, state()}. enqueue(QName, Msg, State) -> enqueue(QName, undefined, Msg, State). @@ -193,13 +190,15 @@ enqueue(QName, Msg, State) -> %% @param State The {@module} state. %% %% @returns `{ok, IdMsg, State}' or `{error | timeout, term()}' --spec dequeue(rabbit_amqqueue:name(), rabbit_fifo:consumer_tag(), +-spec dequeue(rabbit_amqqueue:name(), rabbit_types:ctag(), Settlement :: settled | unsettled, state()) -> {ok, non_neg_integer(), term(), non_neg_integer()} | {empty, state()} | {error | timeout, term()}. dequeue(QueueName, ConsumerTag, Settlement, #state{cfg = #cfg{timeout = Timeout}} = State0) -> ServerId = pick_server(State0), + %% dequeue never really needs to assign a consumer key so we just use + %% the old ConsumerId format here ConsumerId = consumer_id(ConsumerTag), case ra:process_command(ServerId, rabbit_fifo:make_checkout(ConsumerId, @@ -209,14 +208,9 @@ dequeue(QueueName, ConsumerTag, Settlement, {ok, {dequeue, empty}, Leader} -> {empty, State0#state{leader = Leader}}; {ok, {dequeue, {MsgId, {MsgHeader, Msg0}}, MsgsReady}, Leader} -> - Count = case MsgHeader of - #{delivery_count := C} -> C; - _ -> 0 - end, - IsDelivered = Count > 0, - Msg = add_delivery_count_header(Msg0, Count), + {Msg, Redelivered} = add_delivery_count_header(Msg0, MsgHeader), {ok, MsgsReady, - {QueueName, qref(Leader), MsgId, IsDelivered, Msg}, + {QueueName, qref(Leader), MsgId, Redelivered, Msg}, State0#state{leader = Leader}}; {ok, {error, _} = Err, _Leader} -> Err; @@ -224,33 +218,44 @@ dequeue(QueueName, ConsumerTag, Settlement, Err end. 
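dequeue/4 above now derives the redelivered flag (and the x-delivery-count annotation) via add_delivery_count_header/2, replaced in the next hunk. The rule, sketched below under the assumption that the header is already in map form: prefer acquired_count, which includes returns, and fall back to the v3 delivery_count for messages written by an older machine version.

-module(redelivered_sketch).
-export([was_redelivered/1]).

%% Sketch only; the real helper also copies the count into the message
%% container.
was_redelivered(#{acquired_count := N}) when is_integer(N) -> N > 0;
was_redelivered(#{delivery_count := N}) when is_integer(N) -> N > 0;
was_redelivered(_Header) -> false.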
-add_delivery_count_header(Msg, Count) -> - case mc:is(Msg) of - true when is_integer(Count) andalso - Count > 0 -> - mc:set_annotation(<<"x-delivery-count">>, Count, Msg); - _ -> - Msg - end. - +add_delivery_count_header(Msg0, #{acquired_count := AcqCount} = Header) + when is_integer(AcqCount) -> + Msg = case mc:is(Msg0) of + true -> + Msg1 = mc:set_annotation(<<"x-delivery-count">>, AcqCount, Msg0), + %% the "delivery-count" header in the AMQP spec does not include + %% returns (released outcomes) + rabbit_fifo:annotate_msg(Header, Msg1); + false -> + Msg0 + end, + Redelivered = AcqCount > 0, + {Msg, Redelivered}; +add_delivery_count_header(Msg, #{delivery_count := DC} = Header) -> + %% there was a delivery count but no acquired count, this means the message + %% was delivered from a quorum queue running v3 so we patch this up here + add_delivery_count_header(Msg, Header#{acquired_count => DC}); +add_delivery_count_header(Msg, _Header) -> + {Msg, false}. %% @doc Settle a message. Permanently removes message from the queue. %% @param ConsumerTag the tag uniquely identifying the consumer. %% @param MsgIds the message ids received with the {@link rabbit_fifo:delivery/0.} %% @param State the {@module} state %% --spec settle(rabbit_fifo:consumer_tag(), [rabbit_fifo:msg_id()], state()) -> +-spec settle(rabbit_types:ctag(), [rabbit_fifo:msg_id()], state()) -> {state(), list()}. settle(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) -> + ConsumerKey = consumer_key(ConsumerTag, State0), ServerId = pick_server(State0), - Cmd = rabbit_fifo:make_settle(consumer_id(ConsumerTag), MsgIds), + Cmd = rabbit_fifo:make_settle(ConsumerKey, MsgIds), {send_command(ServerId, undefined, Cmd, normal, State0), []}; settle(ConsumerTag, [_|_] = MsgIds, #state{unsent_commands = Unsent0} = State0) -> - ConsumerId = consumer_id(ConsumerTag), + ConsumerKey = consumer_key(ConsumerTag, State0), %% we've reached the soft limit so will stash the command to be %% sent once we have seen enough notifications - Unsent = maps:update_with(ConsumerId, + Unsent = maps:update_with(ConsumerKey, fun ({Settles, Returns, Discards}) -> %% MsgIds has fewer elements than Settles. %% Therefore put it on the left side of the ++ operator. @@ -267,19 +272,19 @@ settle(ConsumerTag, [_|_] = MsgIds, %% @returns %% `{State, list()}' if the command was successfully sent. %% --spec return(rabbit_fifo:consumer_tag(), [rabbit_fifo:msg_id()], state()) -> +-spec return(rabbit_types:ctag(), [rabbit_fifo:msg_id()], state()) -> {state(), list()}. 
return(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) -> + ConsumerKey = consumer_key(ConsumerTag, State0), ServerId = pick_server(State0), - % TODO: make rabbit_fifo return support lists of message ids - Cmd = rabbit_fifo:make_return(consumer_id(ConsumerTag), MsgIds), + Cmd = rabbit_fifo:make_return(ConsumerKey, MsgIds), {send_command(ServerId, undefined, Cmd, normal, State0), []}; return(ConsumerTag, [_|_] = MsgIds, #state{unsent_commands = Unsent0} = State0) -> - ConsumerId = consumer_id(ConsumerTag), + ConsumerKey = consumer_key(ConsumerTag, State0), %% we've reached the soft limit so will stash the command to be %% sent once we have seen enough notifications - Unsent = maps:update_with(ConsumerId, + Unsent = maps:update_with(ConsumerKey, fun ({Settles, Returns, Discards}) -> {Settles, Returns ++ MsgIds, Discards} end, {[], MsgIds, []}, Unsent0), @@ -292,23 +297,38 @@ return(ConsumerTag, [_|_] = MsgIds, %% @param MsgIds the message ids to discard %% from {@link rabbit_fifo:delivery/0.} %% @param State the {@module} state --spec discard(rabbit_fifo:consumer_tag(), [rabbit_fifo:msg_id()], state()) -> +-spec discard(rabbit_types:ctag(), [rabbit_fifo:msg_id()], state()) -> {state(), list()}. discard(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) -> + ConsumerKey = consumer_key(ConsumerTag, State0), ServerId = pick_server(State0), - Cmd = rabbit_fifo:make_discard(consumer_id(ConsumerTag), MsgIds), + Cmd = rabbit_fifo:make_discard(ConsumerKey, MsgIds), {send_command(ServerId, undefined, Cmd, normal, State0), []}; discard(ConsumerTag, [_|_] = MsgIds, #state{unsent_commands = Unsent0} = State0) -> - ConsumerId = consumer_id(ConsumerTag), + ConsumerKey = consumer_key(ConsumerTag, State0), %% we've reached the soft limit so will stash the command to be %% sent once we have seen enough notifications - Unsent = maps:update_with(ConsumerId, + Unsent = maps:update_with(ConsumerKey, fun ({Settles, Returns, Discards}) -> {Settles, Returns, Discards ++ MsgIds} end, {[], [], MsgIds}, Unsent0), {State0#state{unsent_commands = Unsent}, []}. +-spec modify(rabbit_types:ctag(), [rabbit_fifo:msg_id()], + boolean(), boolean(), mc:annotations(), state()) -> + {state(), list()}. +modify(ConsumerTag, [_|_] = MsgIds, DelFailed, Undel, Anns, + #state{} = State0) -> + ConsumerKey = consumer_key(ConsumerTag, State0), + %% we need to send any pending settles, discards or returns before we + %% send the modify as this cannot be batched + %% as it contains message specific annotations + State1 = send_pending(ConsumerKey, State0), + ServerId = pick_server(State1), + Cmd = rabbit_fifo:make_modify(ConsumerKey, MsgIds, DelFailed, Undel, Anns), + {send_command(ServerId, undefined, Cmd, normal, State1), []}. + %% @doc Register with the rabbit_fifo queue to "checkout" messages as they %% become available. %% @@ -325,30 +345,46 @@ discard(ConsumerTag, [_|_] = MsgIds, %% @param State The {@module} state. %% %% @returns `{ok, State}' or `{error | timeout, term()}' --spec checkout(rabbit_fifo:consumer_tag(), - NumUnsettled :: non_neg_integer(), +-spec checkout(rabbit_types:ctag(), CreditMode :: rabbit_fifo:credit_mode(), Meta :: rabbit_fifo:consumer_meta(), - state()) -> {ok, state()} | {error | timeout, term()}. -checkout(ConsumerTag, NumUnsettled, CreditMode, Meta, - #state{consumer_deliveries = CDels0} = State0) -> + state()) -> + {ok, ConsumerInfos :: map(), state()} | + {error | timeout, term()}. 
+checkout(ConsumerTag, CreditMode, #{} = Meta, + #state{consumers = CDels0} = State0) + when is_binary(ConsumerTag) andalso + is_tuple(CreditMode) -> Servers = sorted_servers(State0), - ConsumerId = {ConsumerTag, self()}, - Cmd = rabbit_fifo:make_checkout(ConsumerId, - {auto, NumUnsettled, CreditMode}, - Meta), + ConsumerId = consumer_id(ConsumerTag), + Spec = case rabbit_fifo:is_v4() of + true -> + case CreditMode of + {simple_prefetch, 0} -> + {auto, {simple_prefetch, + ?UNLIMITED_PREFETCH_COUNT}}; + _ -> + {auto, CreditMode} + end; + false -> + case CreditMode of + {credited, _} -> + {auto, 0, credited}; + {simple_prefetch, 0} -> + {auto, ?UNLIMITED_PREFETCH_COUNT, simple_prefetch}; + {simple_prefetch, Num} -> + {auto, Num, simple_prefetch} + end + end, + Cmd = rabbit_fifo:make_checkout(ConsumerId, Spec, Meta), %% ??? Ack = maps:get(ack, Meta, true), case try_process_command(Servers, Cmd, State0) of - {ok, Reply, Leader} -> + {ok, {ok, Reply}, Leader} -> LastMsgId = case Reply of - ok -> - %% this is the pre 3.11.1 / 3.10.9 - %% reply format - -1; - {ok, #{num_checked_out := NumChecked, - next_msg_id := NextMsgId}} -> + #{num_checked_out := NumChecked, + next_msg_id := NextMsgId} -> case NumChecked > 0 of true -> %% we cannot know if the pending messages @@ -362,12 +398,21 @@ checkout(ConsumerTag, NumUnsettled, CreditMode, Meta, NextMsgId - 1 end end, + DeliveryCount = case rabbit_fifo:is_v4() of + true -> credit_api_v2; + false -> {credit_api_v1, 0} + end, + ConsumerKey = maps:get(key, Reply, ConsumerId), SDels = maps:update_with( - ConsumerTag, fun (C) -> C#consumer{ack = Ack} end, - #consumer{last_msg_id = LastMsgId, - ack = Ack}, CDels0), - {ok, State0#state{leader = Leader, - consumer_deliveries = SDels}}; + ConsumerTag, + fun (C) -> C#consumer{ack = Ack} end, + #consumer{key = ConsumerKey, + last_msg_id = LastMsgId, + ack = Ack, + delivery_count = DeliveryCount}, + CDels0), + {ok, Reply, State0#state{leader = Leader, + consumers = SDels}}; Err -> Err end. @@ -385,31 +430,38 @@ query_single_active_consumer(#state{leader = Leader}) -> Err end. +-spec credit_v1(rabbit_types:ctag(), + Credit :: non_neg_integer(), + Drain :: boolean(), + state()) -> + {state(), rabbit_queue_type:actions()}. +credit_v1(ConsumerTag, Credit, Drain, + #state{consumers = CDels} = State) -> + #consumer{delivery_count = {credit_api_v1, Count}} = maps:get(ConsumerTag, CDels), + credit(ConsumerTag, Count, Credit, Drain, State). + %% @doc Provide credit to the queue %% %% This only has an effect if the consumer uses credit mode: credited %% @param ConsumerTag a unique tag to identify this particular consumer. -%% @param Credit the amount of credit to provide to theq queue +%% @param Credit the amount of credit to provide to the queue %% @param Drain tells the queue to use up any credit that cannot be immediately %% fulfilled. (i.e. there are not enough messages on queue to use up all the %% provided credit). --spec credit(rabbit_fifo:consumer_tag(), - Credit :: non_neg_integer(), +%% @param Reply true if the queue client requests a credit_reply queue action +-spec credit(rabbit_types:ctag(), + rabbit_queue_type:delivery_count(), + rabbit_queue_type:credit(), Drain :: boolean(), state()) -> - {state(), actions()}. 
-credit(ConsumerTag, Credit, Drain, - #state{consumer_deliveries = CDels} = State0) -> - ConsumerId = consumer_id(ConsumerTag), - %% the last received msgid provides us with the delivery count if we - %% add one as it is 0 indexed - C = maps:get(ConsumerTag, CDels, #consumer{last_msg_id = -1}), - ServerId = pick_server(State0), - Cmd = rabbit_fifo:make_credit(ConsumerId, Credit, - C#consumer.last_msg_id + 1, Drain), - {send_command(ServerId, undefined, Cmd, normal, State0), []}. - -%% @doc Cancels a checkout with the rabbit_fifo queue for the consumer tag + {state(), rabbit_queue_type:actions()}. +credit(ConsumerTag, DeliveryCount, Credit, Drain, State) -> + ConsumerKey = consumer_key(ConsumerTag, State), + ServerId = pick_server(State), + Cmd = rabbit_fifo:make_credit(ConsumerKey, Credit, DeliveryCount, Drain), + {send_command(ServerId, undefined, Cmd, normal, State), []}. + +%% @doc Cancels a checkout with the rabbit_fifo queue for the consumer tag %% %% This is a synchronous call. I.e. the call will block until the command %% has been accepted by the ra process or it times out. @@ -418,18 +470,29 @@ credit(ConsumerTag, Credit, Drain, %% @param State The {@module} state. %% %% @returns `{ok, State}' or `{error | timeout, term()}' --spec cancel_checkout(rabbit_fifo:consumer_tag(), state()) -> +-spec cancel_checkout(rabbit_types:ctag(), rabbit_queue_type:cancel_reason(), state()) -> {ok, state()} | {error | timeout, term()}. -cancel_checkout(ConsumerTag, #state{consumer_deliveries = CDels} = State0) -> - Servers = sorted_servers(State0), - ConsumerId = {ConsumerTag, self()}, - Cmd = rabbit_fifo:make_checkout(ConsumerId, cancel, #{}), - State = State0#state{consumer_deliveries = maps:remove(ConsumerTag, CDels)}, - case try_process_command(Servers, Cmd, State) of - {ok, _, Leader} -> - {ok, State#state{leader = Leader}}; - Err -> - Err +cancel_checkout(ConsumerTag, Reason, + #state{consumers = Consumers} = State0) + when is_atom(Reason) -> + case Consumers of + #{ConsumerTag := #consumer{key = Cid}} -> + Servers = sorted_servers(State0), + ConsumerId = {ConsumerTag, self()}, + State1 = send_pending(Cid, State0), + Cmd = rabbit_fifo:make_checkout(ConsumerId, Reason, #{}), + State = State1#state{consumers = maps:remove(ConsumerTag, Consumers)}, + case try_process_command(Servers, Cmd, State) of + {ok, _, Leader} -> + {ok, State#state{leader = Leader}}; + Err -> + Err + end; + _ -> + %% TODO: when we implement the `delete' checkout spec we could + %% fallback to that to make sure there is little chance a consumer + %% sticks around in the machine + {ok, State0} end. %% @doc Purges all the messages from a rabbit_fifo queue and returns the number @@ -521,27 +584,27 @@ update_machine_state(Server, Conf) -> %% with them. -spec handle_ra_event(rabbit_amqqueue:name(), ra:server_id(), ra_server_proc:ra_event_body(), state()) -> - {internal, Correlators :: [term()], actions(), state()} | - {rabbit_fifo:client_msg(), state()} | {eol, actions()}. + {internal, Correlators :: [term()], rabbit_queue_type:actions(), state()} | + {rabbit_fifo:client_msg(), state()} | {eol, rabbit_queue_type:actions()}. 
handle_ra_event(QName, From, {applied, Seqs}, #state{cfg = #cfg{soft_limit = SftLmt}} = State0) -> - {Corrs, Actions0, State1} = lists:foldl(fun seq_applied/2, - {[], [], State0#state{leader = From}}, - Seqs), + {Corrs, ActionsRev, State1} = lists:foldl(fun seq_applied/2, + {[], [], State0#state{leader = From}}, + Seqs), + Actions0 = lists:reverse(ActionsRev), Actions = case Corrs of [] -> - lists:reverse(Actions0); + Actions0; _ -> %%TODO consider using lists:foldr/3 above because %% Corrs is returned in the wrong order here. %% The wrong order does not matter much because the channel sorts the %% sequence numbers before confirming to the client. But rabbit_fifo_client %% is sequence numer agnostic: it handles any correlation terms. - [{settled, QName, Corrs} - | lists:reverse(Actions0)] + [{settled, QName, Corrs} | Actions0] end, - case maps:size(State1#state.pending) < SftLmt of + case map_size(State1#state.pending) < SftLmt of true when State1#state.slow == true -> % we have exited soft limit state % send any unsent commands and cancel the time as @@ -572,6 +635,10 @@ handle_ra_event(QName, From, {applied, Seqs}, end; handle_ra_event(QName, From, {machine, {delivery, _ConsumerTag, _} = Del}, State0) -> handle_delivery(QName, From, Del, State0); +handle_ra_event(_QName, _From, {machine, Action}, State) + when element(1, Action) =:= credit_reply orelse + element(1, Action) =:= credit_reply_v1 -> + {ok, State, [Action]}; handle_ra_event(_QName, _, {machine, {queue_status, Status}}, #state{} = State) -> %% just set the queue status @@ -654,7 +721,7 @@ seq_applied({Seq, Response}, when Response /= not_enqueued -> {[Corr | Corrs], Actions, State#state{pending = Pending}}; _ -> - {Corrs, Actions, State#state{}} + {Corrs, Actions, State} end; seq_applied(_Seq, Acc) -> Acc. @@ -667,14 +734,12 @@ maybe_add_action({multi, Actions}, Acc0, State0) -> lists:foldl(fun (Act, {Acc, State}) -> maybe_add_action(Act, Acc, State) end, {Acc0, State0}, Actions); -maybe_add_action({send_drained, {Tag, Credit}} = Action, Acc, - #state{consumer_deliveries = CDels} = State) -> - %% add credit to consumer delivery_count - C = maps:get(Tag, CDels), - {[Action | Acc], - State#state{consumer_deliveries = - update_consumer(Tag, C#consumer.last_msg_id, - Credit, C, CDels)}}; +maybe_add_action({send_drained, {Tag, Credit}}, Acc, State0) -> + %% This function clause should be deleted when + %% feature flag rabbitmq_4.0.0 becomes required. + State = add_delivery_count(Credit, Tag, State0), + Action = {credit_reply_v1, Tag, Credit, _Avail = 0, _Drain = true}, + {[Action | Acc], State}; maybe_add_action(Action, Acc, State) -> %% anything else is assumed to be an action {[Action | Acc], State}. @@ -703,7 +768,7 @@ maybe_auto_ack(false, {deliver, Tag, _Ack, Msgs} = Deliver, State0) -> {ok, State, [Deliver] ++ Actions}. 
handle_delivery(QName, Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs}, - #state{consumer_deliveries = CDels0} = State0) + #state{consumers = CDels0} = State0) when is_map_key(Tag, CDels0) -> QRef = qref(Leader), {LastId, _} = lists:last(IdMsgs), @@ -719,7 +784,7 @@ handle_delivery(QName, Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs}, %% In this case we can't reliably know what the next expected message %% id should be so have to accept whatever message comes next maybe_auto_ack(Ack, Del, - State0#state{consumer_deliveries = + State0#state{consumers = update_consumer(Tag, LastId, length(IdMsgs), C, CDels0)}); @@ -739,7 +804,7 @@ handle_delivery(QName, Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs}, XDel = {deliver, Tag, Ack, transform_msgs(QName, QRef, Missing ++ IdMsgs)}, maybe_auto_ack(Ack, XDel, - State0#state{consumer_deliveries = + State0#state{consumers = update_consumer(Tag, LastId, length(IdMsgs) + NumMissing, C, CDels0)}) @@ -755,14 +820,14 @@ handle_delivery(QName, Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs}, C when FstId =:= 0 -> % the very first delivery maybe_auto_ack(Ack, Del, - State0#state{consumer_deliveries = + State0#state{consumers = update_consumer(Tag, LastId, length(IdMsgs), C#consumer{last_msg_id = LastId}, CDels0)}) end; handle_delivery(_QName, _Leader, {delivery, Tag, [_ | _] = IdMsgs}, - #state{consumer_deliveries = CDels0} = State0) + #state{consumers = CDels0} = State0) when not is_map_key(Tag, CDels0) -> %% Note: %% https://github.com/rabbitmq/rabbitmq-server/issues/3729 @@ -775,30 +840,31 @@ handle_delivery(_QName, _Leader, {delivery, Tag, [_ | _] = IdMsgs}, transform_msgs(QName, QRef, Msgs) -> lists:map( fun({MsgId, {MsgHeader, Msg0}}) -> - {Msg, Redelivered} = case MsgHeader of - #{delivery_count := C} -> - {add_delivery_count_header(Msg0, C), true}; - _ -> - {Msg0, false} - end, - + {Msg, Redelivered} = add_delivery_count_header(Msg0, MsgHeader), {QName, QRef, MsgId, Redelivered, Msg} end, Msgs). -update_consumer(Tag, LastId, DelCntIncr, - #consumer{delivery_count = D} = C, Consumers) -> - maps:put(Tag, - C#consumer{last_msg_id = LastId, - delivery_count = D + DelCntIncr}, - Consumers). - +update_consumer(Tag, LastId, DelCntIncr, Consumer, Consumers) -> + D = case Consumer#consumer.delivery_count of + credit_api_v2 -> credit_api_v2; + {credit_api_v1, Count} -> {credit_api_v1, Count + DelCntIncr} + end, + maps:update(Tag, + Consumer#consumer{last_msg_id = LastId, + delivery_count = D}, + Consumers). + +add_delivery_count(DelCntIncr, Tag, #state{consumers = CDels0} = State) -> + Con = #consumer{last_msg_id = LastMsgId} = maps:get(Tag, CDels0), + CDels = update_consumer(Tag, LastMsgId, DelCntIncr, Con, CDels0), + State#state{consumers = CDels}. get_missing_deliveries(State, From, To, ConsumerTag) -> %% find local server - ConsumerId = consumer_id(ConsumerTag), - rabbit_log:debug("get_missing_deliveries for ~w from ~b to ~b", - [ConsumerId, From, To]), - Cmd = {get_checked_out, ConsumerId, lists:seq(From, To)}, + ConsumerKey = consumer_key(ConsumerTag, State), + rabbit_log:debug("get_missing_deliveries for consumer '~s' from ~b to ~b", + [ConsumerTag, From, To]), + Cmd = {get_checked_out, ConsumerKey, lists:seq(From, To)}, ServerId = find_local_or_leader(State), case ra:aux_command(ServerId, Cmd) of {ok, Missing} -> @@ -826,35 +892,32 @@ sorted_servers(#state{leader = Leader, cfg = #cfg{servers = Servers}}) -> [Leader | lists:delete(Leader, Servers)]. 
-consumer_id(ConsumerTag) -> +consumer_key(ConsumerTag, #state{consumers = Consumers}) -> + case Consumers of + #{ConsumerTag := #consumer{key = Key}} -> + Key; + _ -> + %% if no consumer found fall back to using the ConsumerId + consumer_id(ConsumerTag) + end. + +consumer_id(ConsumerTag) when is_binary(ConsumerTag) -> {ConsumerTag, self()}. -send_command(Server, Correlation, Command, _Priority, - #state{pending = Pending, - next_seq = Seq, - cfg = #cfg{soft_limit = SftLmt}} = State) - when element(1, Command) == return -> - %% returns are sent to the aux machine for pre-evaluation - ok = ra:cast_aux_command(Server, {Command, Seq, self()}), - Tag = case map_size(Pending) >= SftLmt of - true -> slow; - false -> ok - end, - State#state{pending = Pending#{Seq => {Correlation, Command}}, - next_seq = Seq + 1, - slow = Tag == slow}; send_command(Server, Correlation, Command, Priority, #state{pending = Pending, next_seq = Seq, cfg = #cfg{soft_limit = SftLmt}} = State) -> - ok = ra:pipeline_command(Server, Command, Seq, Priority), - Tag = case map_size(Pending) >= SftLmt of - true -> slow; - false -> ok - end, + ok = case rabbit_fifo:is_return(Command) of + true -> + %% returns are sent to the aux machine for pre-evaluation + ra:cast_aux_command(Server, {Command, Seq, self()}); + _ -> + ra:pipeline_command(Server, Command, Seq, Priority) + end, State#state{pending = Pending#{Seq => {Correlation, Command}}, next_seq = Seq + 1, - slow = Tag == slow}. + slow = map_size(Pending) >= SftLmt}. resend_command(ServerId, Correlation, Command, #state{pending = Pending, @@ -923,3 +986,21 @@ qref(Ref) -> Ref. atom(). cluster_name(#state{cfg = #cfg{servers = [{Name, _Node} | _]}}) -> Name. + +send_pending(Cid, #state{unsent_commands = Unsent} = State0) -> + Commands = case Unsent of + #{Cid := {Settled, Returns, Discards}} -> + add_command(Cid, settle, Settled, + add_command(Cid, return, Returns, + add_command(Cid, discard, + Discards, []))); + _ -> + [] + end, + ServerId = pick_server(State0), + %% send all the settlements, discards and returns + State1 = lists:foldl(fun (C, S0) -> + send_command(ServerId, undefined, C, + normal, S0) + end, State0, Commands), + State1#state{unsent_commands = maps:remove(Cid, Unsent)}. diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 4ac50c2d60d8..4e787172d1a4 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_fifo_dlx). 
@@ -23,7 +23,6 @@ state_enter/4, handle_aux/6, dehydrate/1, - normalize/1, stat/1, update_config/4, smallest_raft_index/1 @@ -160,21 +159,20 @@ discard(Msgs0, Reason, {at_most_once, {Mod, Fun, Args}}, State) -> Lookup = maps:from_list(lists:zip(Idxs, Log)), Msgs = [begin Cmd = maps:get(Idx, Lookup), - rabbit_fifo:get_msg(Cmd) - end || ?MSG(Idx, _) <- Msgs0], + %% ensure header delivery count + %% is copied to the message container + annotate_msg(H, rabbit_fifo:get_msg(Cmd)) + end || ?MSG(Idx, H) <- Msgs0], [{mod_call, Mod, Fun, Args ++ [Reason, Msgs]}] end}, {State, [Effect]}; discard(Msgs, Reason, at_least_once, State0) when Reason =/= maxlen -> - State = lists:foldl(fun(?MSG(Idx, _) = Msg0, + State = lists:foldl(fun(?MSG(Idx, _) = Msg, #?MODULE{discards = D0, msg_bytes = B0, ra_indexes = I0} = S0) -> - MsgSize = size_in_bytes(Msg0), - %% Condense header to an integer representing the message size. - %% We need neither delivery_count nor expiry anymore. - Msg = ?MSG(Idx, MsgSize), + MsgSize = size_in_bytes(Msg), D = lqueue:in(?TUPLE(Reason, Msg), D0), B = B0 + MsgSize, I = rabbit_fifo_index:append(Idx, I0), @@ -192,8 +190,8 @@ checkout(at_least_once, #?MODULE{consumer = #dlx_consumer{}} = State) -> checkout(_, State) -> {State, []}. -checkout0({success, MsgId, ?TUPLE(Reason, ?MSG(Idx, _)), State}, SendAcc) -> - DelMsg = {Idx, {Reason, MsgId}}, +checkout0({success, MsgId, ?TUPLE(Reason, ?MSG(Idx, H)), State}, SendAcc) -> + DelMsg = {Idx, {Reason, H, MsgId}}, checkout0(checkout_one(State), [DelMsg | SendAcc]); checkout0(#?MODULE{consumer = #dlx_consumer{pid = Pid}} = State, SendAcc) -> Effects = delivery_effects(Pid, SendAcc), @@ -233,9 +231,11 @@ delivery_effects(CPid, Msgs0) -> {RaftIdxs, RsnIds} = lists:unzip(Msgs1), [{log, RaftIdxs, fun(Log) -> - Msgs = lists:zipwith(fun (Cmd, {Reason, MsgId}) -> - {MsgId, {Reason, rabbit_fifo:get_msg(Cmd)}} - end, Log, RsnIds), + Msgs = lists:zipwith( + fun (Cmd, {Reason, H, MsgId}) -> + {MsgId, {Reason, + annotate_msg(H, rabbit_fifo:get_msg(Cmd))}} + end, Log, RsnIds), [{send_msg, CPid, {dlx_event, self(), {dlx_delivery, Msgs}}, [cast]}] end}]. @@ -357,14 +357,10 @@ handle_aux(_, _, Aux, _, _, _) -> dehydrate(State) -> State#?MODULE{ra_indexes = rabbit_fifo_index:empty()}. --spec normalize(state()) -> - state(). -normalize(#?MODULE{discards = Discards, - ra_indexes = Indexes} = State) -> - State#?MODULE{discards = lqueue:from_list(lqueue:to_list(Discards)), - ra_indexes = rabbit_fifo_index:normalize(Indexes)}. - -spec smallest_raft_index(state()) -> option(non_neg_integer()). smallest_raft_index(#?MODULE{ra_indexes = Indexes}) -> rabbit_fifo_index:smallest(Indexes). + +annotate_msg(H, Msg) -> + rabbit_fifo:annotate_msg(H, Msg). diff --git a/deps/rabbit/src/rabbit_fifo_dlx.hrl b/deps/rabbit/src/rabbit_fifo_dlx.hrl index b63071f6d926..44bebc8942ea 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.hrl +++ b/deps/rabbit/src/rabbit_fifo_dlx.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
-record(dlx_consumer, {pid :: pid(), diff --git a/deps/rabbit/src/rabbit_fifo_dlx_client.erl b/deps/rabbit/src/rabbit_fifo_dlx_client.erl index a21327881b4c..f1a90f2d95ab 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_client.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_client.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_fifo_dlx_client). diff --git a/deps/rabbit/src/rabbit_fifo_dlx_sup.erl b/deps/rabbit/src/rabbit_fifo_dlx_sup.erl index af7621f3f9b4..ff02257f55fa 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_sup.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_fifo_dlx_sup). diff --git a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl index 65813b68c5bb..e998d27ecebb 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx_worker.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx_worker.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% One rabbit_fifo_dlx_worker process exists per (source) quorum queue that has at-least-once dead lettering %% enabled. The rabbit_fifo_dlx_worker process is co-located on the quorum queue leader node. @@ -33,7 +33,7 @@ %% gen_server callbacks -export([init/1, terminate/2, handle_continue/2, handle_cast/2, handle_call/3, handle_info/2, - code_change/3, format_status/2]). + code_change/3, format_status/1]). -define(HIBERNATE_AFTER, 4*60*1000). 
@@ -46,6 +46,8 @@ consumed_msg_id :: non_neg_integer(), delivery :: mc:state(), reason :: rabbit_dead_letter:reason(), + %% routing keys (including CC keys) the message was published with to the source quorum queue + original_routing_keys :: [rabbit_types:routing_key(),...], %% target queues for which publisher confirm has not been received yet unsettled = [] :: [rabbit_amqqueue:name()], %% target queues for which publisher rejection was received recently @@ -315,23 +317,28 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, Now = os:system_time(millisecond), #resource{name = SourceQName} = ConsumedQRef, #resource{name = DLXName} = DLXRef, + OriginalRoutingKeys = mc:routing_keys(ConsumedMsg), DLRKeys = case RKey of undefined -> - mc:get_annotation(routing_keys, ConsumedMsg); + OriginalRoutingKeys; _ -> [RKey] end, - Msg0 = mc:record_death(Reason, SourceQName, ConsumedMsg), + Env = case rabbit_feature_flags:is_enabled(?FF_MC_DEATHS_V2) of + true -> #{}; + false -> #{?FF_MC_DEATHS_V2 => false} + end, + Msg0 = mc:record_death(Reason, SourceQName, ConsumedMsg, Env), Msg1 = mc:set_ttl(undefined, Msg0), - Msg2 = mc:set_annotation(routing_keys, DLRKeys, Msg1), - Msg = mc:set_annotation(exchange, DLXName, Msg2), + Msg2 = mc:set_annotation(?ANN_ROUTING_KEYS, DLRKeys, Msg1), + Msg = mc:set_annotation(?ANN_EXCHANGE, DLXName, Msg2), {TargetQs, State3} = case DLX of not_found -> {[], State0}; _ -> RouteToQs0 = rabbit_exchange:route(DLX, Msg), - {RouteToQs1, Cycles} = rabbit_dead_letter:detect_cycles( + {Cycles, RouteToQs1} = rabbit_dead_letter:detect_cycles( Reason, Msg, RouteToQs0), State1 = log_cycles(Cycles, [RKey], State0), RouteToQs2 = rabbit_amqqueue:lookup_many(RouteToQs1), @@ -347,7 +354,8 @@ forward(ConsumedMsg, ConsumedMsgId, ConsumedQRef, DLX, Reason, Pend0 = #pending{consumed_msg_id = ConsumedMsgId, consumed_at = Now, delivery = Msg, - reason = Reason}, + reason = Reason, + original_routing_keys = OriginalRoutingKeys}, case TargetQs of [] -> %% We can't deliver this message since there is no target queue we can route to. @@ -453,15 +461,18 @@ redeliver_messages(#state{pendings = Pendings, end, State, Pendings) end. -redeliver(#pending{delivery = Msg} = Pend, - DLX, OutSeq, #state{routing_key = undefined} = State) -> +redeliver(#pending{original_routing_keys = RKeys} = Pend, + DLX, + OutSeq, + #state{routing_key = undefined} = State) -> %% No dead-letter-routing-key defined for source quorum queue. %% Therefore use all of messages's original routing keys (which can include CC and BCC recipients). %% This complies with the behaviour of the rabbit_dead_letter module. - %% We stored these original routing keys in the 1st (i.e. most recent) x-death entry. - {_, #death{routing_keys = Routes}} = mc:last_death(Msg), - redeliver0(Pend, DLX, Routes, OutSeq, State); -redeliver(Pend, DLX, OutSeq, #state{routing_key = DLRKey} = State) -> + redeliver0(Pend, DLX, RKeys, OutSeq, State); +redeliver(Pend, + DLX, + OutSeq, + #state{routing_key = DLRKey} = State) -> redeliver0(Pend, DLX, [DLRKey], OutSeq, State). 
redeliver0(#pending{delivery = Msg0, @@ -478,8 +489,8 @@ redeliver0(#pending{delivery = Msg0, when is_list(DLRKeys) -> #resource{name = DLXName} = DLXRef, Msg1 = mc:set_ttl(undefined, Msg0), - Msg2 = mc:set_annotation(routing_keys, DLRKeys, Msg1), - Msg = mc:set_annotation(exchange, DLXName, Msg2), + Msg2 = mc:set_annotation(?ANN_ROUTING_KEYS, DLRKeys, Msg1), + Msg = mc:set_annotation(?ANN_EXCHANGE, DLXName, Msg2), %% Because of implicit default bindings rabbit_exchange:route/2 can route to target %% queues that do not exist. Therefore, filter out non-existent target queues. RouteToQs0 = queue_names( @@ -501,7 +512,7 @@ redeliver0(#pending{delivery = Msg0, %% Note that a quorum queue client does not redeliver on our behalf if it previously %% rejected the message. This is why we always redeliver rejected messages here. RouteToQs1 = Unsettled -- clients_redeliver(Unsettled0, QTypeState), - {RouteToQs, Cycles} = rabbit_dead_letter:detect_cycles(Reason, Msg, RouteToQs1), + {Cycles, RouteToQs} = rabbit_dead_letter:detect_cycles(Reason, Msg, RouteToQs1), State1 = log_cycles(Cycles, DLRKeys, State0), case RouteToQs of [] -> @@ -570,35 +581,37 @@ queue_names(Qs) when is_list(Qs) -> lists:map(fun amqqueue:get_name/1, Qs). -format_status(_Opt, [_PDict, #state{ - queue_ref = QueueRef, - exchange_ref = ExchangeRef, - routing_key = RoutingKey, - dlx_client_state = DlxClientState, - queue_type_state = QueueTypeState, - pendings = Pendings, - settled_ids = SettledIds, - next_out_seq = NextOutSeq, - settle_timeout = SettleTimeout, - timer = Timer, - logged = Logged - }]) -> - S = #{queue_ref => QueueRef, - exchange_ref => ExchangeRef, - routing_key => RoutingKey, - dlx_client_state => rabbit_fifo_dlx_client:overview(DlxClientState), - queue_type_state => QueueTypeState, - pendings => maps:map(fun(_, P) -> format_pending(P) end, Pendings), - settled_ids => SettledIds, - next_out_seq => NextOutSeq, - settle_timeout => SettleTimeout, - timer_is_active => Timer =/= undefined, - logged => Logged}, - [{data, [{"State", S}]}]. +format_status(#{state := #state{ + queue_ref = QueueRef, + exchange_ref = ExchangeRef, + routing_key = RoutingKey, + dlx_client_state = DlxClientState, + queue_type_state = QueueTypeState, + pendings = Pendings, + settled_ids = SettledIds, + next_out_seq = NextOutSeq, + settle_timeout = SettleTimeout, + timer = Timer, + logged = Logged + }} = Status) -> + Status#{state := + #{queue_ref => QueueRef, + exchange_ref => ExchangeRef, + routing_key => RoutingKey, + dlx_client_state => rabbit_fifo_dlx_client:overview(DlxClientState), + queue_type_state => QueueTypeState, + pendings => maps:map(fun(_, P) -> format_pending(P) end, Pendings), + settled_ids => SettledIds, + next_out_seq => NextOutSeq, + settle_timeout => SettleTimeout, + timer_is_active => Timer =/= undefined, + logged => Logged} + }. 
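%% A minimal sketch of the OTP 25+ gen_server format_status/1 contract adopted
%% above in place of the older format_status/2: the callback receives a status
%% map and returns it, typically with the state key replaced by a sanitised
%% term before it is rendered by sys:get_status/1 or in termination reports.
%% Hypothetical record and field names, for illustration only:
%%
%%   format_status(#{state := #conn{password = _} = C} = Status) ->
%%       Status#{state := C#conn{password = redacted}}.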
format_pending(#pending{consumed_msg_id = ConsumedMsgId, delivery = _DoNotLogLargeBinary, reason = Reason, + original_routing_keys = OriginalRoutingKeys, unsettled = Unsettled, rejected = Rejected, settled = Settled, @@ -607,6 +620,7 @@ format_pending(#pending{consumed_msg_id = ConsumedMsgId, consumed_at = ConsumedAt}) -> #{consumed_msg_id => ConsumedMsgId, reason => Reason, + original_routing_keys => OriginalRoutingKeys, unsettled => Unsettled, rejected => Rejected, settled => Settled, diff --git a/deps/rabbit/src/rabbit_fifo_index.erl b/deps/rabbit/src/rabbit_fifo_index.erl index b20604386b8d..8a8fbbdb9e07 100644 --- a/deps/rabbit/src/rabbit_fifo_index.erl +++ b/deps/rabbit/src/rabbit_fifo_index.erl @@ -7,8 +7,7 @@ delete/2, size/1, smallest/1, - map/2, - normalize/1 + map/2 ]). -compile({no_auto_import, [size/1]}). @@ -105,10 +104,6 @@ find_next(Next, Last, Map) -> find_next(Next+1, Last, Map) end. --spec normalize(state()) -> state(). -normalize(State) -> - State#?MODULE{largest = undefined}. - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). diff --git a/deps/rabbit/src/rabbit_fifo_q.erl b/deps/rabbit/src/rabbit_fifo_q.erl new file mode 100644 index 000000000000..3ddf165a03bc --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_q.erl @@ -0,0 +1,152 @@ +-module(rabbit_fifo_q). + +-include("rabbit_fifo.hrl"). +-export([ + new/0, + in/3, + out/1, + get/1, + len/1, + from_lqueue/1, + get_lowest_index/1, + overview/1 + ]). + +-define(WEIGHT, 2). +-define(NON_EMPTY, {_, [_|_]}). +-define(EMPTY, {[], []}). + +%% a weighted priority queue with only two priorities + +-record(?MODULE, {hi = ?EMPTY :: {list(msg()), list(msg())}, %% high + no = ?EMPTY :: {list(msg()), list(msg())}, %% normal + len = 0 :: non_neg_integer(), + dequeue_counter = 0 :: non_neg_integer()}). + +-opaque state() :: #?MODULE{}. + +-export_type([state/0]). + +-spec new() -> state(). +new() -> + #?MODULE{}. + +-spec in(hi | no, msg(), state()) -> state(). +in(hi, Item, #?MODULE{hi = Hi, len = Len} = State) -> + State#?MODULE{hi = in(Item, Hi), + len = Len + 1}; +in(no, Item, #?MODULE{no = No, len = Len} = State) -> + State#?MODULE{no = in(Item, No), + len = Len + 1}. + +-spec out(state()) -> + empty | {msg(), state()}. +out(#?MODULE{len = 0}) -> + empty; +out(#?MODULE{hi = Hi0, + no = No0, + len = Len, + dequeue_counter = C0} = State) -> + C = case C0 of + ?WEIGHT -> + 0; + _ -> + C0 + 1 + end, + case next(State) of + {hi, Msg} -> + {Msg, State#?MODULE{hi = drop(Hi0), + dequeue_counter = C, + len = Len - 1}}; + {no, Msg} -> + {Msg, State#?MODULE{no = drop(No0), + dequeue_counter = C, + len = Len - 1}} + end. + +-spec get(state()) -> empty | msg(). +get(#?MODULE{len = 0}) -> + empty; +get(#?MODULE{} = State) -> + {_, Msg} = next(State), + Msg. + +-spec len(state()) -> non_neg_integer(). +len(#?MODULE{len = Len}) -> + Len. + +-spec from_lqueue(lqueue:lqueue(msg())) -> state(). +from_lqueue(LQ) -> + lqueue:fold(fun (Item, Acc) -> + in(no, Item, Acc) + end, new(), LQ). + +-spec get_lowest_index(state()) -> undefined | ra:index(). +get_lowest_index(#?MODULE{len = 0}) -> + undefined; +get_lowest_index(#?MODULE{hi = Hi, no = No}) -> + case peek(Hi) of + empty -> + ?MSG(NoIdx, _) = peek(No), + NoIdx; + ?MSG(HiIdx, _) -> + case peek(No) of + ?MSG(NoIdx, _) -> + min(HiIdx, NoIdx); + empty -> + HiIdx + end + end. + +-spec overview(state()) -> + #{len := non_neg_integer(), + num_hi := non_neg_integer(), + num_no := non_neg_integer(), + lowest_index := ra:index()}. 
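%% A rough usage sketch of this two-priority queue, assuming the
%% ?MSG(RaftIndex, Header) macro from rabbit_fifo.hrl and arbitrary integer
%% headers:
%%
%%   Q = lists:foldl(fun({P, Idx}, Acc) ->
%%                           rabbit_fifo_q:in(P, ?MSG(Idx, 1), Acc)
%%                   end, rabbit_fifo_q:new(),
%%                   [{no, 1}, {no, 2}, {hi, 3}, {hi, 4}, {hi, 5}, {hi, 6}]),
%%
%% With ?WEIGHT = 2, successive out/1 calls yield raft indexes 3, 4, 1, 5, 6, 2:
%% 'hi' messages are served first, but every third dequeue an older (lower
%% index) 'no' message is let through, so the normal priority is not starved.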
+overview(#?MODULE{len = Len, + hi = {Hi1, Hi2}, + no = _} = State) -> + %% TODO: this could be very slow with large backlogs, + %% consider keeping a separate counter for 'hi', 'no' messages + NumHi = length(Hi1) + length(Hi2), + #{len => Len, + num_hi => NumHi, + num_no => Len - NumHi, + lowest_index => get_lowest_index(State)}. + +%% internals + +next(#?MODULE{hi = ?NON_EMPTY = Hi, + no = ?NON_EMPTY = No, + dequeue_counter = ?WEIGHT}) -> + ?MSG(HiIdx, _) = HiMsg = peek(Hi), + ?MSG(NoIdx, _) = NoMsg = peek(No), + %% always favour hi priority messages when it is safe to do so, + %% i.e. the index is lower than the next index for the 'no' queue + case HiIdx < NoIdx of + true -> + {hi, HiMsg}; + false -> + {no, NoMsg} + end; +next(#?MODULE{hi = ?NON_EMPTY = Hi}) -> + {hi, peek(Hi)}; +next(#?MODULE{no = No}) -> + {no, peek(No)}. + +%% invariant, if the queue is non empty so is the Out (right) list. +in(X, ?EMPTY) -> + {[], [X]}; +in(X, {In, Out}) -> + {[X | In], Out}. + +peek(?EMPTY) -> + empty; +peek({_, [H | _]}) -> + H. + +drop({In, [_]}) -> + %% the last Out one + {[], lists:reverse(In)}; +drop({In, [_ | Out]}) -> + {In, Out}. diff --git a/deps/rabbit/src/rabbit_fifo_v0.erl b/deps/rabbit/src/rabbit_fifo_v0.erl index 8a7a4acd8c8c..3ada7f56b23f 100644 --- a/deps/rabbit/src/rabbit_fifo_v0.erl +++ b/deps/rabbit/src/rabbit_fifo_v0.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_fifo_v0). @@ -548,7 +548,6 @@ state_enter(leader, #?STATE{consumers = Cons, enqueuers = Enqs, waiting_consumers = WaitingConsumers, cfg = #cfg{name = Name, - resource = Resource, become_leader_handler = BLH}, prefix_msgs = {0, [], 0, []} }) -> @@ -559,8 +558,7 @@ state_enter(leader, #?STATE{consumers = Cons, Mons = [{monitor, process, P} || P <- Pids], Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids], NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), - FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}], - Effects = Mons ++ Nots ++ NodeMons ++ FHReservation, + Effects = Mons ++ Nots ++ NodeMons, case BLH of undefined -> Effects; @@ -575,11 +573,7 @@ state_enter(eol, #?STATE{enqueuers = Enqs, #{}, WaitingConsumers0), AllConsumers = maps:merge(Custs, WaitingConsumers1), [{send_msg, P, eol, ra_event} - || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ - [{mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}]; -state_enter(State, #?STATE{cfg = #cfg{resource = _Resource}}) when State =/= leader -> - FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}, - [FHReservation]; + || P <- maps:keys(maps:merge(Enqs, AllConsumers))]; state_enter(_, _) -> %% catch all as not handling all states []. diff --git a/deps/rabbit/src/rabbit_fifo_v1.erl b/deps/rabbit/src/rabbit_fifo_v1.erl index 046c0937d5cd..98b762b08520 100644 --- a/deps/rabbit/src/rabbit_fifo_v1.erl +++ b/deps/rabbit/src/rabbit_fifo_v1.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. 
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_fifo_v1). @@ -676,7 +676,6 @@ state_enter(leader, #?STATE{consumers = Cons, enqueuers = Enqs, waiting_consumers = WaitingConsumers, cfg = #cfg{name = Name, - resource = Resource, become_leader_handler = BLH}, prefix_msgs = {0, [], 0, []} }) -> @@ -687,8 +686,7 @@ state_enter(leader, #?STATE{consumers = Cons, Mons = [{monitor, process, P} || P <- Pids], Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids], NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), - FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}], - Effects = Mons ++ Nots ++ NodeMons ++ FHReservation, + Effects = Mons ++ Nots ++ NodeMons, case BLH of undefined -> Effects; @@ -704,11 +702,7 @@ state_enter(eol, #?STATE{enqueuers = Enqs, AllConsumers = maps:merge(Custs, WaitingConsumers1), [{send_msg, P, eol, ra_event} || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ - [{aux, eol}, - {mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}]; -state_enter(State, #?STATE{cfg = #cfg{resource = _Resource}}) when State =/= leader -> - FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}, - [FHReservation]; + [{aux, eol}]; state_enter(_, _) -> %% catch all as not handling all states []. diff --git a/deps/rabbit/src/rabbit_fifo_v3.erl b/deps/rabbit/src/rabbit_fifo_v3.erl new file mode 100644 index 000000000000..60ee6be9dc4b --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_v3.erl @@ -0,0 +1,2574 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +-module(rabbit_fifo_v3). + +-behaviour(ra_machine). + +-compile(inline_list_funcs). +-compile(inline). +-compile({no_auto_import, [apply/3]}). +-dialyzer(no_improper_lists). + +-include("rabbit_fifo_v3.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-define(STATE, rabbit_fifo). + +-export([ + %% ra_machine callbacks + init/1, + apply/3, + state_enter/2, + tick/2, + overview/1, + + get_checked_out/4, + %% versioning + version/0, + which_module/1, + %% aux + init_aux/1, + handle_aux/6, + % queries + query_messages_ready/1, + query_messages_checked_out/1, + query_messages_total/1, + query_processes/1, + query_ra_indexes/1, + query_waiting_consumers/1, + query_consumer_count/1, + query_consumers/1, + query_stat/1, + query_stat_dlx/1, + query_single_active_consumer/1, + query_in_memory_usage/1, + query_peek/2, + query_notify_decorators_info/1, + usage/1, + + %% misc + dehydrate_state/1, + get_msg_header/1, + get_header/2, + get_msg/1, + + %% protocol helpers + make_enqueue/3, + make_register_enqueuer/1, + make_checkout/3, + make_settle/2, + make_return/2, + make_discard/2, + make_credit/4, + make_purge/0, + make_purge_nodes/1, + make_update_config/1, + make_garbage_collection/0, + convert_v1_to_v2/1, + convert_v2_to_v3/1, + + get_field/2 + ]). + +-ifdef(TEST). +-export([update_header/4, + chunk_disk_msgs/3]). +-endif. + +%% command records representing all the protocol actions that are supported +-record(enqueue, {pid :: option(pid()), + seq :: option(msg_seqno()), + msg :: raw_msg()}). 
+-record(requeue, {consumer_id :: consumer_id(), + msg_id :: msg_id(), + index :: ra:index(), + header :: msg_header(), + msg :: raw_msg()}). +-record(register_enqueuer, {pid :: pid()}). +-record(checkout, {consumer_id :: consumer_id(), + spec :: checkout_spec(), + meta :: consumer_meta()}). +-record(settle, {consumer_id :: consumer_id(), + msg_ids :: [msg_id()]}). +-record(return, {consumer_id :: consumer_id(), + msg_ids :: [msg_id()]}). +-record(discard, {consumer_id :: consumer_id(), + msg_ids :: [msg_id()]}). +-record(credit, {consumer_id :: consumer_id(), + credit :: non_neg_integer(), + delivery_count :: non_neg_integer(), + drain :: boolean()}). +-record(purge, {}). +-record(purge_nodes, {nodes :: [node()]}). +-record(update_config, {config :: config()}). +-record(garbage_collection, {}). + +-opaque protocol() :: + #enqueue{} | + #requeue{} | + #register_enqueuer{} | + #checkout{} | + #settle{} | + #return{} | + #discard{} | + #credit{} | + #purge{} | + #purge_nodes{} | + #update_config{} | + #garbage_collection{}. + +-type command() :: protocol() | + rabbit_fifo_dlx:protocol() | + ra_machine:builtin_command(). +%% all the command types supported by ra fifo + +-type client_msg() :: delivery(). +%% the messages `rabbit_fifo' can send to consumers. + +-opaque state() :: #?STATE{}. + +-export_type([protocol/0, + delivery/0, + command/0, + credit_mode/0, + consumer_tag/0, + consumer_meta/0, + consumer_id/0, + client_msg/0, + msg/0, + msg_id/0, + msg_seqno/0, + delivery_msg/0, + state/0, + config/0]). + +%% This function is never called since only rabbit_fifo_v0:init/1 is called. +%% See https://github.com/rabbitmq/ra/blob/e0d1e6315a45f5d3c19875d66f9d7bfaf83a46e3/src/ra_machine.erl#L258-L265 +-spec init(config()) -> state(). +init(#{name := Name, + queue_resource := Resource} = Conf) -> + update_config(Conf, #?STATE{cfg = #cfg{name = Name, + resource = Resource}}). + +update_config(Conf, State) -> + DLH = maps:get(dead_letter_handler, Conf, undefined), + BLH = maps:get(become_leader_handler, Conf, undefined), + RCI = maps:get(release_cursor_interval, Conf, ?RELEASE_CURSOR_EVERY), + Overflow = maps:get(overflow_strategy, Conf, drop_head), + MaxLength = maps:get(max_length, Conf, undefined), + MaxBytes = maps:get(max_bytes, Conf, undefined), + DeliveryLimit = maps:get(delivery_limit, Conf, undefined), + Expires = maps:get(expires, Conf, undefined), + MsgTTL = maps:get(msg_ttl, Conf, undefined), + ConsumerStrategy = case maps:get(single_active_consumer_on, Conf, false) of + true -> + single_active; + false -> + competing + end, + Cfg = State#?STATE.cfg, + RCISpec = {RCI, RCI}, + + LastActive = maps:get(created, Conf, undefined), + State#?STATE{cfg = Cfg#cfg{release_cursor_interval = RCISpec, + dead_letter_handler = DLH, + become_leader_handler = BLH, + overflow_strategy = Overflow, + max_length = MaxLength, + max_bytes = MaxBytes, + consumer_strategy = ConsumerStrategy, + delivery_limit = DeliveryLimit, + expires = Expires, + msg_ttl = MsgTTL}, + last_active = LastActive}. + +% msg_ids are scoped per consumer +% ra_indexes holds all raft indexes for enqueues currently on queue +-spec apply(ra_machine:command_meta_data(), command(), state()) -> + {state(), Reply :: term(), ra_machine:effects()} | + {state(), Reply :: term()}. 
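%% As a rough illustration of how these protocol records are used (ServerId,
%% ConsumerId and MsgId being placeholders for a resolved ra server id, a
%% consumer identity and a delivered message id), a client appends commands
%% built with the make_* helpers, e.g. settling a delivered message:
%%
%%   {ok, _Reply, _Leader} =
%%       ra:process_command(ServerId,
%%                          rabbit_fifo_v3:make_settle(ConsumerId, [MsgId])).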
+apply(Meta, #enqueue{pid = From, seq = Seq, + msg = RawMsg}, State00) -> + apply_enqueue(Meta, From, Seq, RawMsg, State00); +apply(_Meta, #register_enqueuer{pid = Pid}, + #?STATE{enqueuers = Enqueuers0, + cfg = #cfg{overflow_strategy = Overflow}} = State0) -> + State = case maps:is_key(Pid, Enqueuers0) of + true -> + %% if the enqueuer exits just echo the overflow state + State0; + false -> + State0#?STATE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}} + end, + Res = case is_over_limit(State) of + true when Overflow == reject_publish -> + reject_publish; + _ -> + ok + end, + {State, Res, [{monitor, process, Pid}]}; +apply(Meta, + #settle{msg_ids = MsgIds, consumer_id = ConsumerId}, + #?STATE{consumers = Cons0} = State) -> + case Cons0 of + #{ConsumerId := Con0} -> + complete_and_checkout(Meta, MsgIds, ConsumerId, + Con0, [], State); + _ -> + {State, ok} + end; +apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, + #?STATE{consumers = Cons, + dlx = DlxState0, + cfg = #cfg{dead_letter_handler = DLH}} = State0) -> + case Cons of + #{ConsumerId := #consumer{checked_out = Checked} = Con} -> + % Publishing to dead-letter exchange must maintain same order as messages got rejected. + DiscardMsgs = lists:filtermap(fun(Id) -> + case maps:get(Id, Checked, undefined) of + undefined -> + false; + Msg -> + {true, Msg} + end + end, MsgIds), + {DlxState, Effects} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, DLH, DlxState0), + State = State0#?STATE{dlx = DlxState}, + complete_and_checkout(Meta, MsgIds, ConsumerId, Con, Effects, State); + _ -> + {State0, ok} + end; +apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, + #?STATE{consumers = Cons0} = State) -> + case Cons0 of + #{ConsumerId := #consumer{checked_out = Checked0}} -> + Returned = maps:with(MsgIds, Checked0), + return(Meta, ConsumerId, Returned, [], State); + _ -> + {State, ok} + end; +apply(#{index := Idx} = Meta, + #requeue{consumer_id = ConsumerId, + msg_id = MsgId, + index = OldIdx, + header = Header0, + msg = _Msg}, + #?STATE{consumers = Cons0, + messages = Messages, + ra_indexes = Indexes0, + enqueue_count = EnqCount} = State00) -> + case Cons0 of + #{ConsumerId := #consumer{checked_out = Checked0} = Con0} + when is_map_key(MsgId, Checked0) -> + %% construct a message with the current raft index + %% and update delivery count before adding it to the message queue + Header = update_header(delivery_count, fun incr/1, 1, Header0), + State0 = add_bytes_return(Header, State00), + Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked0), + credit = increase_credit(Meta, Con0, 1)}, + State1 = State0#?STATE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), + messages = lqueue:in(?MSG(Idx, Header), Messages), + enqueue_count = EnqCount + 1}, + State2 = update_or_remove_sub(Meta, ConsumerId, Con, State1), + {State, Ret, Effs} = checkout(Meta, State0, State2, []), + update_smallest_raft_index(Idx, Ret, + maybe_store_release_cursor(Idx, State), + Effs); + _ -> + {State00, ok, []} + end; +apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, + drain = Drain, consumer_id = ConsumerId}, + #?STATE{consumers = Cons0, + service_queue = ServiceQueue0, + waiting_consumers = Waiting0} = State0) -> + case Cons0 of + #{ConsumerId := #consumer{delivery_count = DelCnt} = Con0} -> + %% this can go below 0 when credit is reduced + C = max(0, RemoteDelCnt + NewCredit - DelCnt), + %% grant the credit + Con1 = Con0#consumer{credit = C}, + ServiceQueue = maybe_queue_consumer(ConsumerId, Con1, + ServiceQueue0), + 
Cons = maps:put(ConsumerId, Con1, Cons0), + {State1, ok, Effects} = + checkout(Meta, State0, + State0#?STATE{service_queue = ServiceQueue, + consumers = Cons}, []), + Response = {send_credit_reply, messages_ready(State1)}, + %% by this point all checkouts for the updated credit value + %% should be processed so we can evaluate the drain + case Drain of + false -> + %% just return the result of the checkout + {State1, Response, Effects}; + true -> + Con = #consumer{credit = PostCred} = + maps:get(ConsumerId, State1#?STATE.consumers), + %% add the outstanding credit to the delivery count + DeliveryCount = Con#consumer.delivery_count + PostCred, + Consumers = maps:put(ConsumerId, + Con#consumer{delivery_count = DeliveryCount, + credit = 0}, + State1#?STATE.consumers), + Drained = Con#consumer.credit, + {CTag, _} = ConsumerId, + {State1#?STATE{consumers = Consumers}, + %% returning a multi response with two client actions + %% for the channel to execute + {multi, [Response, {send_drained, {CTag, Drained}}]}, + Effects} + end; + _ when Waiting0 /= [] -> + %% there are waiting consuemrs + case lists:keytake(ConsumerId, 1, Waiting0) of + {value, {_, Con0 = #consumer{delivery_count = DelCnt}}, Waiting} -> + %% the consumer is a waiting one + %% grant the credit + C = max(0, RemoteDelCnt + NewCredit - DelCnt), + Con = Con0#consumer{credit = C}, + State = State0#?STATE{waiting_consumers = + [{ConsumerId, Con} | Waiting]}, + {State, {send_credit_reply, messages_ready(State)}}; + false -> + {State0, ok} + end; + _ -> + %% credit for unknown consumer - just ignore + {State0, ok} + end; +apply(_, #checkout{spec = {dequeue, _}}, + #?STATE{cfg = #cfg{consumer_strategy = single_active}} = State0) -> + {State0, {error, {unsupported, single_active_consumer}}}; +apply(#{index := Index, + system_time := Ts, + from := From} = Meta, #checkout{spec = {dequeue, Settlement}, + meta = ConsumerMeta, + consumer_id = ConsumerId}, + #?STATE{consumers = Consumers} = State00) -> + %% dequeue always updates last_active + State0 = State00#?STATE{last_active = Ts}, + %% all dequeue operations result in keeping the queue from expiring + Exists = maps:is_key(ConsumerId, Consumers), + case messages_ready(State0) of + 0 -> + update_smallest_raft_index(Index, {dequeue, empty}, State0, []); + _ when Exists -> + %% a dequeue using the same consumer_id isn't possible at this point + {State0, {dequeue, empty}}; + _ -> + {_, State1} = update_consumer(Meta, ConsumerId, ConsumerMeta, + {once, 1, simple_prefetch}, 0, + State0), + case checkout_one(Meta, false, State1, []) of + {success, _, MsgId, ?MSG(RaftIdx, Header), ExpiredMsg, State2, Effects0} -> + {State4, Effects1} = case Settlement of + unsettled -> + {_, Pid} = ConsumerId, + {State2, [{monitor, process, Pid} | Effects0]}; + settled -> + %% immediately settle the checkout + {State3, _, SettleEffects} = + apply(Meta, make_settle(ConsumerId, [MsgId]), + State2), + {State3, SettleEffects ++ Effects0} + end, + Effects2 = [reply_log_effect(RaftIdx, MsgId, Header, messages_ready(State4), From) | Effects1], + {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, + Effects2), + Reply = '$ra_no_reply', + case {DroppedMsg, ExpiredMsg} of + {false, false} -> + {State, Reply, Effects}; + _ -> + update_smallest_raft_index(Index, Reply, State, Effects) + end; + {nochange, _ExpiredMsg = true, State2, Effects0} -> + %% All ready messages expired. 
+ State3 = State2#?STATE{consumers = maps:remove(ConsumerId, State2#?STATE.consumers)}, + {State, _, Effects} = evaluate_limit(Index, false, State0, State3, Effects0), + update_smallest_raft_index(Index, {dequeue, empty}, State, Effects) + end + end; +apply(#{index := Idx} = Meta, + #checkout{spec = cancel, + consumer_id = ConsumerId}, State0) -> + {State1, Effects1} = cancel_consumer(Meta, ConsumerId, State0, [], + consumer_cancel), + {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), + update_smallest_raft_index(Idx, Reply, State, Effects); +apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, + consumer_id = {_, Pid} = ConsumerId}, State0) -> + Priority = get_priority_from_args(ConsumerMeta), + {Consumer, State1} = update_consumer(Meta, ConsumerId, ConsumerMeta, + Spec, Priority, State0), + {State2, Effs} = activate_next_consumer(State1, []), + #consumer{checked_out = Checked, + credit = Credit, + delivery_count = DeliveryCount, + next_msg_id = NextMsgId} = Consumer, + + %% reply with a consumer summary + Reply = {ok, #{next_msg_id => NextMsgId, + credit => Credit, + delivery_count => DeliveryCount, + num_checked_out => map_size(Checked)}}, + checkout(Meta, State0, State2, [{monitor, process, Pid} | Effs], Reply); +apply(#{index := Index}, #purge{}, + #?STATE{messages_total = Total, + returns = Returns, + ra_indexes = Indexes0 + } = State0) -> + NumReady = messages_ready(State0), + Indexes = case Total of + NumReady -> + %% All messages are either in 'messages' queue or 'returns' queue. + %% No message is awaiting acknowledgement. + %% Optimization: empty all 'ra_indexes'. + rabbit_fifo_index:empty(); + _ -> + %% Some messages are checked out to consumers awaiting acknowledgement. + %% Therefore we cannot empty all 'ra_indexes'. + %% We only need to delete the indexes from the 'returns' queue because + %% messages of the 'messages' queue are not part of the 'ra_indexes'. 
+ lqueue:fold(fun(?MSG(I, _), Acc) -> + rabbit_fifo_index:delete(I, Acc) + end, Indexes0, Returns) + end, + State1 = State0#?STATE{ra_indexes = Indexes, + messages = lqueue:new(), + messages_total = Total - NumReady, + returns = lqueue:new(), + msg_bytes_enqueue = 0 + }, + Effects0 = [garbage_collection], + Reply = {purge, NumReady}, + {State, _, Effects} = evaluate_limit(Index, false, State0, + State1, Effects0), + update_smallest_raft_index(Index, Reply, State, Effects); +apply(#{index := Idx}, #garbage_collection{}, State) -> + update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); +apply(Meta, {timeout, expire_msgs}, State) -> + checkout(Meta, State, State, []); +apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, + {down, Pid, noconnection}, + #?STATE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0, + enqueuers = Enqs0} = State0) -> + Node = node(Pid), + %% if the pid refers to an active or cancelled consumer, + %% mark it as suspected and return it to the waiting queue + {State1, Effects0} = + maps:fold(fun({_, P} = Cid, C0, {S0, E0}) + when node(P) =:= Node -> + %% the consumer should be returned to waiting + %% and checked out messages should be returned + Effs = consumer_update_active_effects( + S0, Cid, C0, false, suspected_down, E0), + C1 = case MachineVersion of + V when V >= 3 -> + C0; + 2 -> + Checked = C0#consumer.checked_out, + Credit = increase_credit(Meta, C0, maps:size(Checked)), + C0#consumer{credit = Credit} + end, + {St, Effs1} = return_all(Meta, S0, Effs, Cid, C1), + %% if the consumer was cancelled there is a chance it got + %% removed when returning hence we need to be defensive here + Waiting = case St#?STATE.consumers of + #{Cid := C} -> + Waiting0 ++ [{Cid, C}]; + _ -> + Waiting0 + end, + {St#?STATE{consumers = maps:remove(Cid, St#?STATE.consumers), + waiting_consumers = Waiting, + last_active = Ts}, + Effs1}; + (_, _, S) -> + S + end, {State0, []}, Cons0), + WaitingConsumers = update_waiting_consumer_status(Node, State1, + suspected_down), + + %% select a new consumer from the waiting queue and run a checkout + State2 = State1#?STATE{waiting_consumers = WaitingConsumers}, + {State, Effects1} = activate_next_consumer(State2, Effects0), + + %% mark any enquers as suspected + Enqs = maps:map(fun(P, E) when node(P) =:= Node -> + E#enqueuer{status = suspected_down}; + (_, E) -> E + end, Enqs0), + Effects = [{monitor, node, Node} | Effects1], + checkout(Meta, State0, State#?STATE{enqueuers = Enqs}, Effects); +apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, + {down, Pid, noconnection}, + #?STATE{consumers = Cons0, + enqueuers = Enqs0} = State0) -> + %% A node has been disconnected. 
This doesn't necessarily mean that + %% any processes on this node are down, they _may_ come back so here + %% we just mark them as suspected (effectively deactivated) + %% and return all checked out messages to the main queue for delivery to any + %% live consumers + %% + %% all pids for the disconnected node will be marked as suspected not just + %% the one we got the `down' command for + Node = node(Pid), + + {State, Effects1} = + maps:fold( + fun({_, P} = Cid, #consumer{checked_out = Checked0, + status = up} = C0, + {St0, Eff}) when node(P) =:= Node -> + C = case MachineVersion of + V when V >= 3 -> + C0#consumer{status = suspected_down}; + 2 -> + Credit = increase_credit(Meta, C0, map_size(Checked0)), + C0#consumer{status = suspected_down, + credit = Credit} + end, + {St, Eff0} = return_all(Meta, St0, Eff, Cid, C), + Eff1 = consumer_update_active_effects(St, Cid, C, false, + suspected_down, Eff0), + {St, Eff1}; + (_, _, {St, Eff}) -> + {St, Eff} + end, {State0, []}, Cons0), + Enqs = maps:map(fun(P, E) when node(P) =:= Node -> + E#enqueuer{status = suspected_down}; + (_, E) -> E + end, Enqs0), + + % Monitor the node so that we can "unsuspect" these processes when the node + % comes back, then re-issue all monitors and discover the final fate of + % these processes + + Effects = [{monitor, node, Node} | Effects1], + checkout(Meta, State0, State#?STATE{enqueuers = Enqs, + last_active = Ts}, Effects); +apply(#{index := Idx} = Meta, {down, Pid, _Info}, State0) -> + {State1, Effects1} = handle_down(Meta, Pid, State0), + {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), + update_smallest_raft_index(Idx, Reply, State, Effects); +apply(Meta, {nodeup, Node}, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + service_queue = _SQ0} = State0) -> + %% A node we are monitoring has come back. 
+ %% If we have suspected any processes of being + %% down we should now re-issue the monitors for them to detect if they're + %% actually down or not + Monitors = [{monitor, process, P} + || P <- suspected_pids_for(Node, State0)], + + Enqs1 = maps:map(fun(P, E) when node(P) =:= Node -> + E#enqueuer{status = up}; + (_, E) -> E + end, Enqs0), + ConsumerUpdateActiveFun = consumer_active_flag_update_function(State0), + %% mark all consumers as up + {State1, Effects1} = + maps:fold(fun({_, P} = ConsumerId, C, {SAcc, EAcc}) + when (node(P) =:= Node) and + (C#consumer.status =/= cancelled) -> + EAcc1 = ConsumerUpdateActiveFun(SAcc, ConsumerId, + C, true, up, EAcc), + {update_or_remove_sub(Meta, ConsumerId, + C#consumer{status = up}, + SAcc), EAcc1}; + (_, _, Acc) -> + Acc + end, {State0, Monitors}, Cons0), + Waiting = update_waiting_consumer_status(Node, State1, up), + State2 = State1#?STATE{enqueuers = Enqs1, + waiting_consumers = Waiting}, + {State, Effects} = activate_next_consumer(State2, Effects1), + checkout(Meta, State0, State, Effects); +apply(_, {nodedown, _Node}, State) -> + {State, ok}; +apply(#{index := Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> + {State, Effects} = lists:foldl(fun(Node, {S, E}) -> + purge_node(Meta, Node, S, E) + end, {State0, []}, Nodes), + update_smallest_raft_index(Idx, ok, State, Effects); +apply(#{index := Idx} = Meta, + #update_config{config = #{dead_letter_handler := NewDLH} = Conf}, + #?STATE{cfg = #cfg{dead_letter_handler = OldDLH, + resource = QRes}, + dlx = DlxState0} = State0) -> + {DlxState, Effects0} = rabbit_fifo_dlx:update_config(OldDLH, NewDLH, QRes, DlxState0), + State1 = update_config(Conf, State0#?STATE{dlx = DlxState}), + {State, Reply, Effects} = checkout(Meta, State0, State1, Effects0), + update_smallest_raft_index(Idx, Reply, State, Effects); +apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> + State = convert(FromVersion, ToVersion, V0State), + {State, ok, [{aux, {dlx, setup}}]}; +apply(#{index := IncomingRaftIdx} = Meta, {dlx, _} = Cmd, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State0) -> + {DlxState, Effects0} = rabbit_fifo_dlx:apply(Meta, Cmd, DLH, DlxState0), + State1 = State0#?STATE{dlx = DlxState}, + {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), + update_smallest_raft_index(IncomingRaftIdx, State, Effects); +apply(_Meta, Cmd, State) -> + %% handle unhandled commands gracefully + rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), + {State, ok, []}. + +convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) -> + ?MSG(RaftIdx, Header); +convert_msg({RaftIdx, {Header, _Msg}}) when is_integer(RaftIdx) -> + ?MSG(RaftIdx, Header); +convert_msg({'$empty_msg', Header}) -> + %% dummy index + ?MSG(undefined, Header); +convert_msg({'$prefix_msg', Header}) -> + %% dummy index + ?MSG(undefined, Header); +convert_msg({Header, empty}) -> + convert_msg(Header); +convert_msg(Header) when ?IS_HEADER(Header) -> + ?MSG(undefined, Header). 
+ +convert_consumer_v1_to_v2({ConsumerTag, Pid}, CV1) -> + Meta = element(2, CV1), + CheckedOut = element(3, CV1), + NextMsgId = element(4, CV1), + Credit = element(5, CV1), + DeliveryCount = element(6, CV1), + CreditMode = element(7, CV1), + LifeTime = element(8, CV1), + Status = element(9, CV1), + Priority = element(10, CV1), + #consumer{cfg = #consumer_cfg{tag = ConsumerTag, + pid = Pid, + meta = Meta, + credit_mode = CreditMode, + lifetime = LifeTime, + priority = Priority}, + credit = Credit, + status = Status, + delivery_count = DeliveryCount, + next_msg_id = NextMsgId, + checked_out = maps:map( + fun (_, {Tag, _} = Msg) when is_atom(Tag) -> + convert_msg(Msg); + (_, {_Seq, Msg}) -> + convert_msg(Msg) + end, CheckedOut) + }. + +convert_v1_to_v2(V1State0) -> + V1State = rabbit_fifo_v1:enqueue_all_pending(V1State0), + IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), + ReturnsV1 = rabbit_fifo_v1:get_field(returns, V1State), + MessagesV1 = rabbit_fifo_v1:get_field(messages, V1State), + ConsumersV1 = rabbit_fifo_v1:get_field(consumers, V1State), + WaitingConsumersV1 = rabbit_fifo_v1:get_field(waiting_consumers, V1State), + %% remove all raft idx in messages from index + {_, PrefReturns, _, PrefMsgs} = rabbit_fifo_v1:get_field(prefix_msgs, V1State), + V2PrefMsgs = lists:foldl(fun(Hdr, Acc) -> + lqueue:in(convert_msg(Hdr), Acc) + end, lqueue:new(), PrefMsgs), + V2PrefReturns = lists:foldl(fun(Hdr, Acc) -> + lqueue:in(convert_msg(Hdr), Acc) + end, lqueue:new(), PrefReturns), + MessagesV2 = lqueue:fold(fun ({_, Msg}, Acc) -> + lqueue:in(convert_msg(Msg), Acc) + end, V2PrefMsgs, MessagesV1), + ReturnsV2 = lqueue:fold(fun ({_SeqId, Msg}, Acc) -> + lqueue:in(convert_msg(Msg), Acc) + end, V2PrefReturns, ReturnsV1), + ConsumersV2 = maps:map( + fun (ConsumerId, CV1) -> + convert_consumer_v1_to_v2(ConsumerId, CV1) + end, ConsumersV1), + WaitingConsumersV2 = lists:map( + fun ({ConsumerId, CV1}) -> + {ConsumerId, convert_consumer_v1_to_v2(ConsumerId, CV1)} + end, WaitingConsumersV1), + EnqueuersV1 = rabbit_fifo_v1:get_field(enqueuers, V1State), + EnqueuersV2 = maps:map(fun (_EnqPid, Enq) -> + Enq#enqueuer{unused = undefined} + end, EnqueuersV1), + + %% do after state conversion + %% The (old) format of dead_letter_handler in RMQ < v3.10 is: + %% {Module, Function, Args} + %% The (new) format of dead_letter_handler in RMQ >= v3.10 is: + %% undefined | {at_most_once, {Module, Function, Args}} | at_least_once + %% + %% Note that the conversion must convert both from old format to new format + %% as well as from new format to new format. The latter is because quorum queues + %% created in RMQ >= v3.10 are still initialised with rabbit_fifo_v0 as described in + %% https://github.com/rabbitmq/ra/blob/e0d1e6315a45f5d3c19875d66f9d7bfaf83a46e3/src/ra_machine.erl#L258-L265 + DLH = case rabbit_fifo_v1:get_cfg_field(dead_letter_handler, V1State) of + {_M, _F, _A = [_DLX = undefined|_]} -> + %% queue was declared in RMQ < v3.10 and no DLX configured + undefined; + {_M, _F, _A} = MFA -> + %% queue was declared in RMQ < v3.10 and DLX configured + {at_most_once, MFA}; + Other -> + Other + end, + + Cfg = #cfg{name = rabbit_fifo_v1:get_cfg_field(name, V1State), + resource = rabbit_fifo_v1:get_cfg_field(resource, V1State), + release_cursor_interval = rabbit_fifo_v1:get_cfg_field(release_cursor_interval, V1State), + dead_letter_handler = DLH, + become_leader_handler = rabbit_fifo_v1:get_cfg_field(become_leader_handler, V1State), + %% TODO: what if policy enabling reject_publish was applied before conversion? 
+ overflow_strategy = rabbit_fifo_v1:get_cfg_field(overflow_strategy, V1State), + max_length = rabbit_fifo_v1:get_cfg_field(max_length, V1State), + max_bytes = rabbit_fifo_v1:get_cfg_field(max_bytes, V1State), + consumer_strategy = rabbit_fifo_v1:get_cfg_field(consumer_strategy, V1State), + delivery_limit = rabbit_fifo_v1:get_cfg_field(delivery_limit, V1State), + expires = rabbit_fifo_v1:get_cfg_field(expires, V1State) + }, + + MessagesConsumersV2 = maps:fold(fun(_ConsumerId, #consumer{checked_out = Checked}, Acc) -> + Acc + maps:size(Checked) + end, 0, ConsumersV2), + MessagesWaitingConsumersV2 = lists:foldl(fun({_ConsumerId, #consumer{checked_out = Checked}}, Acc) -> + Acc + maps:size(Checked) + end, 0, WaitingConsumersV2), + MessagesTotal = lqueue:len(MessagesV2) + + lqueue:len(ReturnsV2) + + MessagesConsumersV2 + + MessagesWaitingConsumersV2, + + #?STATE{cfg = Cfg, + messages = MessagesV2, + messages_total = MessagesTotal, + returns = ReturnsV2, + enqueue_count = rabbit_fifo_v1:get_field(enqueue_count, V1State), + enqueuers = EnqueuersV2, + ra_indexes = IndexesV1, + release_cursors = rabbit_fifo_v1:get_field(release_cursors, V1State), + consumers = ConsumersV2, + service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), + msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), + msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), + waiting_consumers = WaitingConsumersV2, + last_active = rabbit_fifo_v1:get_field(last_active, V1State) + }. + +convert_v2_to_v3(#rabbit_fifo{consumers = ConsumersV2} = StateV2) -> + ConsumersV3 = maps:map(fun(_, C) -> + convert_consumer_v2_to_v3(C) + end, ConsumersV2), + StateV2#rabbit_fifo{consumers = ConsumersV3}. + +get_field(Field, State) -> + Fields = record_info(fields, ?STATE), + Index = record_index_of(Field, Fields), + element(Index, State). + +record_index_of(F, Fields) -> + index_of(2, F, Fields). + +index_of(_, F, []) -> + exit({field_not_found, F}); +index_of(N, F, [F | _]) -> + N; +index_of(N, F, [_ | T]) -> + index_of(N+1, F, T). + +convert_consumer_v2_to_v3(C = #consumer{cfg = Cfg = #consumer_cfg{credit_mode = simple_prefetch, + meta = #{prefetch := Prefetch}}}) -> + C#consumer{cfg = Cfg#consumer_cfg{credit_mode = {simple_prefetch, Prefetch}}}; +convert_consumer_v2_to_v3(C) -> + C. + +purge_node(Meta, Node, State, Effects) -> + lists:foldl(fun(Pid, {S0, E0}) -> + {S, E} = handle_down(Meta, Pid, S0), + {S, E0 ++ E} + end, {State, Effects}, all_pids_for(Node, State)). + +%% any downs that re not noconnection +handle_down(Meta, Pid, #?STATE{consumers = Cons0, + enqueuers = Enqs0} = State0) -> + % Remove any enqueuer for the down pid + State1 = State0#?STATE{enqueuers = maps:remove(Pid, Enqs0)}, + {Effects1, State2} = handle_waiting_consumer_down(Pid, State1), + % return checked out messages to main queue + % Find the consumers for the down pid + DownConsumers = maps:keys( + maps:filter(fun({_, P}, _) -> P =:= Pid end, Cons0)), + lists:foldl(fun(ConsumerId, {S, E}) -> + cancel_consumer(Meta, ConsumerId, S, E, down) + end, {State2, Effects1}, DownConsumers). + +consumer_active_flag_update_function( + #?STATE{cfg = #cfg{consumer_strategy = competing}}) -> + fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) -> + consumer_update_active_effects(State, ConsumerId, Consumer, Active, + ActivityStatus, Effects) + end; +consumer_active_flag_update_function( + #?STATE{cfg = #cfg{consumer_strategy = single_active}}) -> + fun(_, _, _, _, _, Effects) -> + Effects + end. 
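%% For context on the branches above: consumer_strategy comes from the machine
%% config (see update_config/2), roughly
%%
%%   #{single_active_consumer_on := true}  -> consumer_strategy = single_active
%%   otherwise                             -> consumer_strategy = competing
%%
%% which corresponds to the queue having been declared with the
%% x-single-active-consumer argument; with single_active only one consumer is
%% served at a time and the rest wait in waiting_consumers.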
+ +handle_waiting_consumer_down(_Pid, + #?STATE{cfg = #cfg{consumer_strategy = competing}} = State) -> + {[], State}; +handle_waiting_consumer_down(_Pid, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State) -> + {[], State}; +handle_waiting_consumer_down(Pid, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = WaitingConsumers0} = State0) -> + % get cancel effects for down waiting consumers + Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end, + WaitingConsumers0), + Effects = lists:foldl(fun ({ConsumerId, _}, Effects) -> + cancel_consumer_effects(ConsumerId, State0, + Effects) + end, [], Down), + % update state to have only up waiting consumers + StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end, + WaitingConsumers0), + State = State0#?STATE{waiting_consumers = StillUp}, + {Effects, State}. + +update_waiting_consumer_status(Node, + #?STATE{waiting_consumers = WaitingConsumers}, + Status) -> + [begin + case node(Pid) of + Node -> + {ConsumerId, Consumer#consumer{status = Status}}; + _ -> + {ConsumerId, Consumer} + end + end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers, + Consumer#consumer.status =/= cancelled]. + +-spec state_enter(ra_server:ra_state() | eol, state()) -> + ra_machine:effects(). +state_enter(RaState, #?STATE{cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}, + dlx = DlxState} = State) -> + Effects = rabbit_fifo_dlx:state_enter(RaState, QRes, DLH, DlxState), + state_enter0(RaState, State, Effects). + +state_enter0(leader, #?STATE{consumers = Cons, + enqueuers = Enqs, + waiting_consumers = WaitingConsumers, + cfg = #cfg{name = Name, + resource = Resource, + become_leader_handler = BLH} + } = State, + Effects0) -> + TimerEffs = timer_effect(erlang:system_time(millisecond), State, Effects0), + % return effects to monitor all current consumers and enqueuers + Pids = lists:usort(maps:keys(Enqs) + ++ [P || {_, P} <- maps:keys(Cons)] + ++ [P || {{_, P}, _} <- WaitingConsumers]), + Mons = [{monitor, process, P} || P <- Pids], + Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids], + NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), + FHReservation = [{mod_call, rabbit_quorum_queue, + file_handle_leader_reservation, [Resource]}], + NotifyDecs = notify_decorators_startup(Resource), + Effects = TimerEffs ++ Mons ++ Nots ++ NodeMons ++ FHReservation ++ [NotifyDecs], + case BLH of + undefined -> + Effects; + {Mod, Fun, Args} -> + [{mod_call, Mod, Fun, Args ++ [Name]} | Effects] + end; +state_enter0(eol, #?STATE{enqueuers = Enqs, + consumers = Custs0, + waiting_consumers = WaitingConsumers0}, + Effects) -> + Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0), + WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end, + #{}, WaitingConsumers0), + AllConsumers = maps:merge(Custs, WaitingConsumers1), + [{send_msg, P, eol, ra_event} + || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ + [{aux, eol}, + {mod_call, rabbit_quorum_queue, file_handle_release_reservation, []} | Effects]; +state_enter0(State, #?STATE{cfg = #cfg{resource = _Resource}}, Effects) + when State =/= leader -> + FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}, + [FHReservation | Effects]; +state_enter0(_, _, Effects) -> + %% catch all as not handling all states + Effects. + +-spec tick(non_neg_integer(), state()) -> ra_machine:effects(). 
+tick(Ts, #?STATE{cfg = #cfg{name = _Name, + resource = QName}} = State) -> + case is_expired(Ts, State) of + true -> + [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]; + false -> + [{aux, {handle_tick, [QName, overview(State), all_nodes(State)]}}] + end. + +-spec overview(state()) -> map(). +overview(#?STATE{consumers = Cons, + enqueuers = Enqs, + release_cursors = Cursors, + enqueue_count = EnqCount, + msg_bytes_enqueue = EnqueueBytes, + msg_bytes_checkout = CheckoutBytes, + cfg = Cfg, + dlx = DlxState, + waiting_consumers = WaitingConsumers} = State) -> + Conf = #{name => Cfg#cfg.name, + resource => Cfg#cfg.resource, + release_cursor_interval => Cfg#cfg.release_cursor_interval, + dead_lettering_enabled => undefined =/= Cfg#cfg.dead_letter_handler, + max_length => Cfg#cfg.max_length, + max_bytes => Cfg#cfg.max_bytes, + consumer_strategy => Cfg#cfg.consumer_strategy, + expires => Cfg#cfg.expires, + msg_ttl => Cfg#cfg.msg_ttl, + delivery_limit => Cfg#cfg.delivery_limit + }, + SacOverview = case active_consumer(Cons) of + {SacConsumerId, _} -> + NumWaiting = length(WaitingConsumers), + #{single_active_consumer_id => SacConsumerId, + single_active_num_waiting_consumers => NumWaiting}; + _ -> + #{} + end, + Overview = #{type => ?STATE, + config => Conf, + num_consumers => map_size(Cons), + num_active_consumers => query_consumer_count(State), + num_checked_out => num_checked_out(State), + num_enqueuers => maps:size(Enqs), + num_ready_messages => messages_ready(State), + num_in_memory_ready_messages => 0, %% backwards compat + num_messages => messages_total(State), + num_release_cursors => lqueue:len(Cursors), + release_cursors => [I || {_, I, _} <- lqueue:to_list(Cursors)], + release_cursor_enqueue_counter => EnqCount, + enqueue_message_bytes => EnqueueBytes, + checkout_message_bytes => CheckoutBytes, + in_memory_message_bytes => 0, %% backwards compat + smallest_raft_index => smallest_raft_index(State) + }, + DlxOverview = rabbit_fifo_dlx:overview(DlxState), + maps:merge(maps:merge(Overview, DlxOverview), SacOverview). + +-spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) -> + [delivery_msg()]. +get_checked_out(Cid, From, To, #?STATE{consumers = Consumers}) -> + case Consumers of + #{Cid := #consumer{checked_out = Checked}} -> + [begin + ?MSG(I, H) = maps:get(K, Checked), + {K, {I, H}} + end || K <- lists:seq(From, To), maps:is_key(K, Checked)]; + _ -> + [] + end. + +-spec version() -> pos_integer(). +version() -> 3. + +which_module(0) -> rabbit_fifo_v0; +which_module(1) -> rabbit_fifo_v1; +which_module(2) -> ?STATE; +which_module(3) -> ?STATE. + +-define(AUX, aux_v2). + +-record(aux_gc, {last_raft_idx = 0 :: ra:index()}). +-record(aux, {name :: atom(), + capacity :: term(), + gc = #aux_gc{} :: #aux_gc{}}). +-record(?AUX, {name :: atom(), + last_decorators_state :: term(), + capacity :: term(), + gc = #aux_gc{} :: #aux_gc{}, + tick_pid, + cache = #{} :: map()}). + +init_aux(Name) when is_atom(Name) -> + %% TODO: catch specific exception throw if table already exists + ok = ra_machine_ets:create_table(rabbit_fifo_usage, + [named_table, set, public, + {write_concurrency, true}]), + Now = erlang:monotonic_time(micro_seconds), + #?AUX{name = Name, + capacity = {inactive, Now, 1, 1.0}}. 
+ +handle_aux(RaftState, Tag, Cmd, #aux{name = Name, + capacity = Cap, + gc = Gc}, Log, MacState) -> + %% convert aux state to new version + Aux = #?AUX{name = Name, + capacity = Cap, + gc = Gc}, + handle_aux(RaftState, Tag, Cmd, Aux, Log, MacState); +handle_aux(leader, _, garbage_collection, Aux, Log, MacState) -> + {no_reply, force_eval_gc(Log, MacState, Aux), Log}; +handle_aux(follower, _, garbage_collection, Aux, Log, MacState) -> + {no_reply, force_eval_gc(Log, MacState, Aux), Log}; +handle_aux(_RaftState, cast, {#return{msg_ids = MsgIds, + consumer_id = ConsumerId}, Corr, Pid}, + Aux0, Log0, #?STATE{cfg = #cfg{delivery_limit = undefined}, + consumers = Consumers}) -> + case Consumers of + #{ConsumerId := #consumer{checked_out = Checked}} -> + {Log, ToReturn} = + maps:fold( + fun (MsgId, ?MSG(Idx, Header), {L0, Acc}) -> + %% it is possible this is not found if the consumer + %% crashed and the message got removed + case ra_log:fetch(Idx, L0) of + {{_, _, {_, _, Cmd, _}}, L} -> + Msg = get_msg(Cmd), + {L, [{MsgId, Idx, Header, Msg} | Acc]}; + {undefined, L} -> + {L, Acc} + end + end, {Log0, []}, maps:with(MsgIds, Checked)), + + Appends = make_requeue(ConsumerId, {notify, Corr, Pid}, + lists:sort(ToReturn), []), + {no_reply, Aux0, Log, Appends}; + _ -> + {no_reply, Aux0, Log0} + end; +handle_aux(leader, _, {handle_tick, [QName, Overview, Nodes]}, + #?AUX{tick_pid = Pid} = Aux, Log, _) -> + NewPid = + case process_is_alive(Pid) of + false -> + %% No active TICK pid + %% this function spawns and returns the tick process pid + rabbit_quorum_queue:handle_tick(QName, Overview, Nodes); + true -> + %% Active TICK pid, do nothing + Pid + end, + {no_reply, Aux#?AUX{tick_pid = NewPid}, Log}; +handle_aux(_, _, {get_checked_out, ConsumerId, MsgIds}, + Aux0, Log0, #?STATE{cfg = #cfg{}, + consumers = Consumers}) -> + case Consumers of + #{ConsumerId := #consumer{checked_out = Checked}} -> + {Log, IdMsgs} = + maps:fold( + fun (MsgId, ?MSG(Idx, Header), {L0, Acc}) -> + %% it is possible this is not found if the consumer + %% crashed and the message got removed + case ra_log:fetch(Idx, L0) of + {{_, _, {_, _, Cmd, _}}, L} -> + Msg = get_msg(Cmd), + {L, [{MsgId, {Header, Msg}} | Acc]}; + {undefined, L} -> + {L, Acc} + end + end, {Log0, []}, maps:with(MsgIds, Checked)), + {reply, {ok, IdMsgs}, Aux0, Log}; + _ -> + {reply, {error, consumer_not_found}, Aux0, Log0} + end; +handle_aux(leader, cast, {#return{} = Ret, Corr, Pid}, + Aux0, Log, #?STATE{}) -> + %% for returns with a delivery limit set we can just return as before + {no_reply, Aux0, Log, [{append, Ret, {notify, Corr, Pid}}]}; +handle_aux(leader, cast, eval, #?AUX{last_decorators_state = LastDec} = Aux0, + Log, #?STATE{cfg = #cfg{resource = QName}} = MacState) -> + %% this is called after each batch of commands have been applied + %% set timer for message expire + %% should really be the last applied index ts but this will have to do + Ts = erlang:system_time(millisecond), + Effects0 = timer_effect(Ts, MacState, []), + case query_notify_decorators_info(MacState) of + LastDec -> + {no_reply, Aux0, Log, Effects0}; + {MaxActivePriority, IsEmpty} = NewLast -> + Effects = [notify_decorators_effect(QName, MaxActivePriority, IsEmpty) + | Effects0], + {no_reply, Aux0#?AUX{last_decorators_state = NewLast}, Log, Effects} + end; +handle_aux(_RaftState, cast, eval, Aux0, Log, _MacState) -> + {no_reply, Aux0, Log}; +handle_aux(_RaState, cast, Cmd, #?AUX{capacity = Use0} = Aux0, + Log, _MacState) + when Cmd == active orelse Cmd == inactive -> + {no_reply, 
Aux0#?AUX{capacity = update_use(Use0, Cmd)}, Log}; +handle_aux(_RaState, cast, tick, #?AUX{name = Name, + capacity = Use0} = State0, + Log, MacState) -> + true = ets:insert(rabbit_fifo_usage, + {Name, capacity(Use0)}), + Aux = eval_gc(Log, MacState, State0), + {no_reply, Aux, Log}; +handle_aux(_RaState, cast, eol, #?AUX{name = Name} = Aux, Log, _) -> + ets:delete(rabbit_fifo_usage, Name), + {no_reply, Aux, Log}; +handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, + #?AUX{cache = Cache} = Aux0, + Log0, #?STATE{} = State) -> + {CachedIdx, CachedTs} = maps:get(oldest_entry, Cache, {undefined, undefined}), + case smallest_raft_index(State) of + %% if there are no entries, we return current timestamp + %% so that any previously obtained entries are considered + %% older than this + undefined -> + Aux1 = Aux0#?AUX{cache = maps:remove(oldest_entry, Cache)}, + {reply, {ok, erlang:system_time(millisecond)}, Aux1, Log0}; + CachedIdx -> + %% cache hit + {reply, {ok, CachedTs}, Aux0, Log0}; + Idx when is_integer(Idx) -> + case ra_log:fetch(Idx, Log0) of + {{_, _, {_, #{ts := Timestamp}, _, _}}, Log1} -> + Aux1 = Aux0#?AUX{cache = Cache#{oldest_entry => + {Idx, Timestamp}}}, + {reply, {ok, Timestamp}, Aux1, Log1}; + {undefined, Log1} -> + %% fetch failed + {reply, {error, failed_to_get_timestamp}, Aux0, Log1} + end + end; +handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, + Log0, MacState) -> + case query_peek(Pos, MacState) of + {ok, ?MSG(Idx, Header)} -> + %% need to re-hydrate from the log + {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), + Msg = get_msg(Cmd), + {reply, {ok, {Header, Msg}}, Aux0, Log}; + Err -> + {reply, Err, Aux0, Log0} + end; +handle_aux(RaState, _, {dlx, _} = Cmd, Aux0, Log, + #?STATE{dlx = DlxState, + cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}}) -> + Aux = rabbit_fifo_dlx:handle_aux(RaState, Cmd, Aux0, QRes, DLH, DlxState), + {no_reply, Aux, Log}. + +eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState, + #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> + {Idx, _} = ra_log:last_index_term(Log), + {memory, Mem} = erlang:process_info(self(), memory), + case messages_total(MacState) of + 0 when Idx > LastGcIdx andalso + Mem > ?GC_MEM_LIMIT_B -> + garbage_collect(), + {memory, MemAfter} = erlang:process_info(self(), memory), + rabbit_log:debug("~ts: full GC sweep complete. " + "Process memory changed from ~.2fMB to ~.2fMB.", + [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), + AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; + _ -> + AuxState + end. + +force_eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}}, + #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> + {Idx, _} = ra_log:last_index_term(Log), + {memory, Mem} = erlang:process_info(self(), memory), + case Idx > LastGcIdx of + true -> + garbage_collect(), + {memory, MemAfter} = erlang:process_info(self(), memory), + rabbit_log:debug("~ts: full GC sweep complete. " + "Process memory changed from ~.2fMB to ~.2fMB.", + [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), + AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; + false -> + AuxState + end. + +process_is_alive(Pid) when is_pid(Pid) -> + is_process_alive(Pid); +process_is_alive(_) -> + false. +%%% Queries + +query_messages_ready(State) -> + messages_ready(State). + +query_messages_checked_out(#?STATE{consumers = Consumers}) -> + maps:fold(fun (_, #consumer{checked_out = C}, S) -> + maps:size(C) + S + end, 0, Consumers). + +query_messages_total(State) -> + messages_total(State). 
+ +query_processes(#?STATE{enqueuers = Enqs, consumers = Cons0}) -> + Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0), + maps:keys(maps:merge(Enqs, Cons)). + + +query_ra_indexes(#?STATE{ra_indexes = RaIndexes}) -> + RaIndexes. + +query_waiting_consumers(#?STATE{waiting_consumers = WaitingConsumers}) -> + WaitingConsumers. + +query_consumer_count(#?STATE{consumers = Consumers, + waiting_consumers = WaitingConsumers}) -> + Up = maps:filter(fun(_ConsumerId, #consumer{status = Status}) -> + Status =/= suspected_down + end, Consumers), + maps:size(Up) + length(WaitingConsumers). + +query_consumers(#?STATE{consumers = Consumers, + waiting_consumers = WaitingConsumers, + cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) -> + ActiveActivityStatusFun = + case ConsumerStrategy of + competing -> + fun(_ConsumerId, + #consumer{status = Status}) -> + case Status of + suspected_down -> + {false, Status}; + _ -> + {true, Status} + end + end; + single_active -> + SingleActiveConsumer = query_single_active_consumer(State), + fun({Tag, Pid} = _Consumer, _) -> + case SingleActiveConsumer of + {value, {Tag, Pid}} -> + {true, single_active}; + _ -> + {false, waiting} + end + end + end, + FromConsumers = + maps:fold(fun (_, #consumer{status = cancelled}, Acc) -> + Acc; + ({Tag, Pid}, + #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer, + Acc) -> + {Active, ActivityStatus} = + ActiveActivityStatusFun({Tag, Pid}, Consumer), + maps:put({Tag, Pid}, + {Pid, Tag, + maps:get(ack, Meta, undefined), + maps:get(prefetch, Meta, undefined), + Active, + ActivityStatus, + maps:get(args, Meta, []), + maps:get(username, Meta, undefined)}, + Acc) + end, #{}, Consumers), + FromWaitingConsumers = + lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) -> + Acc; + ({{Tag, Pid}, + #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer}, + Acc) -> + {Active, ActivityStatus} = + ActiveActivityStatusFun({Tag, Pid}, Consumer), + maps:put({Tag, Pid}, + {Pid, Tag, + maps:get(ack, Meta, undefined), + maps:get(prefetch, Meta, undefined), + Active, + ActivityStatus, + maps:get(args, Meta, []), + maps:get(username, Meta, undefined)}, + Acc) + end, #{}, WaitingConsumers), + maps:merge(FromConsumers, FromWaitingConsumers). + + +query_single_active_consumer( + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Consumers}) -> + case active_consumer(Consumers) of + undefined -> + {error, no_value}; + {ActiveCid, _} -> + {value, ActiveCid} + end; +query_single_active_consumer(_) -> + disabled. + +query_stat(#?STATE{consumers = Consumers} = State) -> + {messages_ready(State), maps:size(Consumers)}. + +query_in_memory_usage(#?STATE{ }) -> + {0, 0}. + +query_stat_dlx(#?STATE{dlx = DlxState}) -> + rabbit_fifo_dlx:stat(DlxState). + +query_peek(Pos, State0) when Pos > 0 -> + case take_next_msg(State0) of + empty -> + {error, no_message_at_pos}; + {Msg, _State} + when Pos == 1 -> + {ok, Msg}; + {_Msg, State} -> + query_peek(Pos-1, State) + end. + +query_notify_decorators_info(#?STATE{consumers = Consumers} = State) -> + MaxActivePriority = maps:fold( + fun(_, #consumer{credit = C, + status = up, + cfg = #consumer_cfg{priority = P}}, + MaxP) when C > 0 -> + case MaxP of + empty -> P; + MaxP when MaxP > P -> MaxP; + _ -> P + end; + (_, _, MaxP) -> + MaxP + end, empty, Consumers), + IsEmpty = (messages_ready(State) == 0), + {MaxActivePriority, IsEmpty}. + +-spec usage(atom()) -> float(). 
+usage(Name) when is_atom(Name) -> + case ets:lookup(rabbit_fifo_usage, Name) of + [] -> 0.0; + [{_, Use}] -> Use + end. + +%%% Internal + +messages_ready(#?STATE{messages = M, + returns = R}) -> + lqueue:len(M) + lqueue:len(R). + +messages_total(#?STATE{messages_total = Total, + dlx = DlxState}) -> + {DlxTotal, _} = rabbit_fifo_dlx:stat(DlxState), + Total + DlxTotal. + +update_use({inactive, _, _, _} = CUInfo, inactive) -> + CUInfo; +update_use({active, _, _} = CUInfo, active) -> + CUInfo; +update_use({active, Since, Avg}, inactive) -> + Now = erlang:monotonic_time(micro_seconds), + {inactive, Now, Now - Since, Avg}; +update_use({inactive, Since, Active, Avg}, active) -> + Now = erlang:monotonic_time(micro_seconds), + {active, Now, use_avg(Active, Now - Since, Avg)}. + +capacity({active, Since, Avg}) -> + use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg); +capacity({inactive, _, 1, 1.0}) -> + 1.0; +capacity({inactive, Since, Active, Avg}) -> + use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg). + +use_avg(0, 0, Avg) -> + Avg; +use_avg(Active, Inactive, Avg) -> + Time = Inactive + Active, + moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg). + +moving_average(_Time, _, Next, undefined) -> + Next; +moving_average(Time, HalfLife, Next, Current) -> + Weight = math:exp(Time * math:log(0.5) / HalfLife), + Next * (1 - Weight) + Current * Weight. + +num_checked_out(#?STATE{consumers = Cons}) -> + maps:fold(fun (_, #consumer{checked_out = C}, Acc) -> + maps:size(C) + Acc + end, 0, Cons). + +cancel_consumer(Meta, ConsumerId, + #?STATE{cfg = #cfg{consumer_strategy = competing}} = State, + Effects, Reason) -> + cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); +cancel_consumer(Meta, ConsumerId, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State, + Effects, Reason) -> + %% single active consumer on, no consumers are waiting + cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); +cancel_consumer(Meta, ConsumerId, + #?STATE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0} = State0, + Effects0, Reason) -> + %% single active consumer on, consumers are waiting + case Cons0 of + #{ConsumerId := #consumer{status = _}} -> + % The active consumer is to be removed + {State1, Effects1} = cancel_consumer0(Meta, ConsumerId, State0, + Effects0, Reason), + activate_next_consumer(State1, Effects1); + _ -> + % The cancelled consumer is not active or cancelled + % Just remove it from idle_consumers + Waiting = lists:keydelete(ConsumerId, 1, Waiting0), + Effects = cancel_consumer_effects(ConsumerId, State0, Effects0), + % A waiting consumer isn't supposed to have any checked out messages, + % so nothing special to do here + {State0#?STATE{waiting_consumers = Waiting}, Effects} + end. + +consumer_update_active_effects(#?STATE{cfg = #cfg{resource = QName}}, + ConsumerId, + #consumer{cfg = #consumer_cfg{meta = Meta}}, + Active, ActivityStatus, + Effects) -> + Ack = maps:get(ack, Meta, undefined), + Prefetch = maps:get(prefetch, Meta, undefined), + Args = maps:get(args, Meta, []), + [{mod_call, rabbit_quorum_queue, update_consumer_handler, + [QName, ConsumerId, false, Ack, Prefetch, Active, ActivityStatus, Args]} + | Effects]. 
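The capacity tracking above (`update_use/2`, `capacity/1`, `use_avg/3`) maintains an exponentially weighted moving average whose decay is expressed as a half-life (`?USE_AVG_HALF_LIFE`). A small self-contained sketch, reusing the same formula with invented sample values, shows that after exactly one half-life the previous average and the new sample contribute equally:

```erlang
%% Standalone illustration of the half-life weighting used by
%% moving_average/4 above; module name and sample values are invented.
-module(ema_sketch).
-export([demo/0]).

moving_average(_Time, _HalfLife, Next, undefined) ->
    Next;
moving_average(Time, HalfLife, Next, Current) ->
    Weight = math:exp(Time * math:log(0.5) / HalfLife),
    Next * (1 - Weight) + Current * Weight.

demo() ->
    HalfLife = 10000.0,   %% in the same spirit as ?USE_AVG_HALF_LIFE
    %% One full half-life has elapsed, so the old value (0.0) and the new
    %% sample (1.0) are weighted roughly 50/50 and the result is ~0.5.
    Avg = moving_average(10000, HalfLife, 1.0, 0.0),
    true = abs(Avg - 0.5) < 1.0e-9,
    Avg.
```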
+ +cancel_consumer0(Meta, ConsumerId, + #?STATE{consumers = C0} = S0, Effects0, Reason) -> + case C0 of + #{ConsumerId := Consumer} -> + {S, Effects2} = maybe_return_all(Meta, ConsumerId, Consumer, + S0, Effects0, Reason), + + %% The effects are emitted before the consumer is actually removed + %% if the consumer has unacked messages. This is a bit weird but + %% in line with what classic queues do (from an external point of + %% view) + Effects = cancel_consumer_effects(ConsumerId, S, Effects2), + {S, Effects}; + _ -> + %% already removed: do nothing + {S0, Effects0} + end. + +activate_next_consumer(#?STATE{cfg = #cfg{consumer_strategy = competing}} = State0, + Effects0) -> + {State0, Effects0}; +activate_next_consumer(#?STATE{consumers = Cons, + waiting_consumers = Waiting0} = State0, + Effects0) -> + case has_active_consumer(Cons) of + false -> + case lists:filter(fun ({_, #consumer{status = Status}}) -> + Status == up + end, Waiting0) of + [{NextConsumerId, #consumer{cfg = NextCCfg} = NextConsumer} | _] -> + Remaining = lists:keydelete(NextConsumerId, 1, Waiting0), + Consumer = case maps:get(NextConsumerId, Cons, undefined) of + undefined -> + NextConsumer; + Existing -> + %% there was an exisiting non-active consumer + %% just update the existing cancelled consumer + %% with the new config + Existing#consumer{cfg = NextCCfg} + end, + #?STATE{service_queue = ServiceQueue} = State0, + ServiceQueue1 = maybe_queue_consumer(NextConsumerId, + Consumer, + ServiceQueue), + State = State0#?STATE{consumers = Cons#{NextConsumerId => Consumer}, + service_queue = ServiceQueue1, + waiting_consumers = Remaining}, + Effects = consumer_update_active_effects(State, NextConsumerId, + Consumer, true, + single_active, Effects0), + {State, Effects}; + [] -> + {State0, Effects0} + end; + true -> + {State0, Effects0} + end. + +has_active_consumer(Consumers) -> + active_consumer(Consumers) /= undefined. + +active_consumer({Cid, #consumer{status = up} = Consumer, _I}) -> + {Cid, Consumer}; +active_consumer({_Cid, #consumer{status = _}, I}) -> + active_consumer(maps:next(I)); +active_consumer(none) -> + undefined; +active_consumer(M) when is_map(M) -> + I = maps:iterator(M), + active_consumer(maps:next(I)). + +maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, + #consumer{cfg = CCfg} = Consumer, S0, + Effects0, Reason) -> + case Reason of + consumer_cancel -> + {update_or_remove_sub( + Meta, ConsumerId, + Consumer#consumer{cfg = CCfg#consumer_cfg{lifetime = once}, + credit = 0, + status = cancelled}, + S0), Effects0}; + down -> + {S1, Effects1} = return_all(Meta, S0, Effects0, ConsumerId, Consumer), + {S1#?STATE{consumers = maps:remove(ConsumerId, S1#?STATE.consumers), + last_active = Ts}, + Effects1} + end. + +apply_enqueue(#{index := RaftIdx, + system_time := Ts} = Meta, From, Seq, RawMsg, State0) -> + case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, [], State0) of + {ok, State1, Effects1} -> + {State, ok, Effects} = checkout(Meta, State0, State1, Effects1), + {maybe_store_release_cursor(RaftIdx, State), ok, Effects}; + {out_of_sequence, State, Effects} -> + {State, not_enqueued, Effects}; + {duplicate, State, Effects} -> + {State, ok, Effects} + end. + +decr_total(#?STATE{messages_total = Tot} = State) -> + State#?STATE{messages_total = Tot - 1}. 
+ +drop_head(#?STATE{ra_indexes = Indexes0} = State0, Effects) -> + case take_next_msg(State0) of + {?MSG(Idx, Header) = Msg, State1} -> + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State2 = State1#?STATE{ra_indexes = Indexes}, + State3 = decr_total(add_bytes_drop(Header, State2)), + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState} = State = State3, + {_, DlxEffects} = rabbit_fifo_dlx:discard([Msg], maxlen, DLH, DlxState), + {State, DlxEffects ++ Effects}; + empty -> + {State0, Effects} + end. + +maybe_set_msg_ttl(#basic_message{content = #content{properties = none}}, + RaCmdTs, Header, + #?STATE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> + update_expiry_header(RaCmdTs, PerQueueMsgTTL, Header); +maybe_set_msg_ttl(#basic_message{content = #content{properties = Props}}, + RaCmdTs, Header, + #?STATE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> + %% rabbit_quorum_queue will leave the properties decoded if and only if + %% per message message TTL is set. + %% We already check in the channel that expiration must be valid. + {ok, PerMsgMsgTTL} = rabbit_basic:parse_expiration(Props), + TTL = min(PerMsgMsgTTL, PerQueueMsgTTL), + update_expiry_header(RaCmdTs, TTL, Header); +maybe_set_msg_ttl(Msg, RaCmdTs, Header, + #?STATE{cfg = #cfg{msg_ttl = MsgTTL}}) -> + case mc:is(Msg) of + true -> + TTL = min(MsgTTL, mc:ttl(Msg)), + update_expiry_header(RaCmdTs, TTL, Header); + false -> + Header + end. + +update_expiry_header(_, undefined, Header) -> + Header; +update_expiry_header(RaCmdTs, 0, Header) -> + %% We do not comply exactly with the "TTL=0 models AMQP immediate flag" semantics + %% as done for classic queues where the message is discarded if it cannot be + %% consumed immediately. + %% Instead, we discard the message if it cannot be consumed within the same millisecond + %% when it got enqueued. This behaviour should be good enough. + update_expiry_header(RaCmdTs + 1, Header); +update_expiry_header(RaCmdTs, TTL, Header) -> + update_expiry_header(RaCmdTs + TTL, Header). + +update_expiry_header(ExpiryTs, Header) -> + update_header(expiry, fun(Ts) -> Ts end, ExpiryTs, Header). + +maybe_store_release_cursor(RaftIdx, + #?STATE{cfg = #cfg{release_cursor_interval = {Base, C}} = Cfg, + enqueue_count = EC, + release_cursors = Cursors0} = State0) + when EC >= C -> + case messages_total(State0) of + 0 -> + %% message must have been immediately dropped + State0#?STATE{enqueue_count = 0}; + Total -> + Interval = case Base of + 0 -> 0; + _ -> + min(max(Total, Base), ?RELEASE_CURSOR_EVERY_MAX) + end, + State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = + {Base, Interval}}}, + Dehydrated = dehydrate_state(State), + Cursor = {release_cursor, RaftIdx, Dehydrated}, + Cursors = lqueue:in(Cursor, Cursors0), + State#?STATE{enqueue_count = 0, + release_cursors = Cursors} + end; +maybe_store_release_cursor(_RaftIdx, State) -> + State. 
+ +maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, + #?STATE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, + messages = Messages, + messages_total = Total} = State0) -> + % direct enqueue without tracking + Size = message_size(RawMsg), + Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Msg = ?MSG(RaftIdx, Header), + State = State0#?STATE{msg_bytes_enqueue = Enqueue + Size, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, + messages = lqueue:in(Msg, Messages) + }, + {ok, State, Effects}; +maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, + #?STATE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, + enqueuers = Enqueuers0, + messages = Messages, + messages_total = Total} = State0) -> + + case maps:get(From, Enqueuers0, undefined) of + undefined -> + State1 = State0#?STATE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, + {Res, State, Effects} = maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, + RawMsg, Effects0, State1), + {Res, State, [{monitor, process, From} | Effects]}; + #enqueuer{next_seqno = MsgSeqNo} = Enq0 -> + % it is the next expected seqno + Size = message_size(RawMsg), + Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Msg = ?MSG(RaftIdx, Header), + Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, + MsgCache = case can_immediately_deliver(State0) of + true -> + {RaftIdx, RawMsg}; + false -> + undefined + end, + State = State0#?STATE{msg_bytes_enqueue = Enqueue + Size, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, + messages = lqueue:in(Msg, Messages), + enqueuers = Enqueuers0#{From => Enq}, + msg_cache = MsgCache + }, + {ok, State, Effects0}; + #enqueuer{next_seqno = Next} + when MsgSeqNo > Next -> + %% TODO: when can this happen? + {out_of_sequence, State0, Effects0}; + #enqueuer{next_seqno = Next} when MsgSeqNo =< Next -> + % duplicate delivery + {duplicate, State0, Effects0} + end. + +return(#{index := IncomingRaftIdx, machine_version := MachineVersion} = Meta, + ConsumerId, Returned, Effects0, State0) -> + {State1, Effects1} = maps:fold( + fun(MsgId, Msg, {S0, E0}) -> + return_one(Meta, MsgId, Msg, S0, E0, ConsumerId) + end, {State0, Effects0}, Returned), + State2 = + case State1#?STATE.consumers of + #{ConsumerId := Con} + when MachineVersion >= 3 -> + update_or_remove_sub(Meta, ConsumerId, Con, State1); + #{ConsumerId := Con0} + when MachineVersion =:= 2 -> + Credit = increase_credit(Meta, Con0, map_size(Returned)), + Con = Con0#consumer{credit = Credit}, + update_or_remove_sub(Meta, ConsumerId, Con, State1); + _ -> + State1 + end, + {State, ok, Effects} = checkout(Meta, State0, State2, Effects1), + update_smallest_raft_index(IncomingRaftIdx, State, Effects). 
+ +% used to process messages that are finished +complete(Meta, ConsumerId, [DiscardedMsgId], + #consumer{checked_out = Checked0} = Con0, + #?STATE{ra_indexes = Indexes0, + msg_bytes_checkout = BytesCheckout, + messages_total = Tot} = State0) -> + case maps:take(DiscardedMsgId, Checked0) of + {?MSG(Idx, Hdr), Checked} -> + SettledSize = get_header(size, Hdr), + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + Con = Con0#consumer{checked_out = Checked, + credit = increase_credit(Meta, Con0, 1)}, + State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), + State1#?STATE{ra_indexes = Indexes, + msg_bytes_checkout = BytesCheckout - SettledSize, + messages_total = Tot - 1}; + error -> + State0 + end; +complete(Meta, ConsumerId, DiscardedMsgIds, + #consumer{checked_out = Checked0} = Con0, + #?STATE{ra_indexes = Indexes0, + msg_bytes_checkout = BytesCheckout, + messages_total = Tot} = State0) -> + {SettledSize, Checked, Indexes} + = lists:foldl( + fun (MsgId, {S0, Ch0, Idxs}) -> + case maps:take(MsgId, Ch0) of + {?MSG(Idx, Hdr), Ch} -> + S = get_header(size, Hdr) + S0, + {S, Ch, rabbit_fifo_index:delete(Idx, Idxs)}; + error -> + {S0, Ch0, Idxs} + end + end, {0, Checked0, Indexes0}, DiscardedMsgIds), + Len = map_size(Checked0) - map_size(Checked), + Con = Con0#consumer{checked_out = Checked, + credit = increase_credit(Meta, Con0, Len)}, + State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), + State1#?STATE{ra_indexes = Indexes, + msg_bytes_checkout = BytesCheckout - SettledSize, + messages_total = Tot - Len}. + +increase_credit(_Meta, #consumer{cfg = #consumer_cfg{lifetime = once}, + credit = Credit}, _) -> + %% once consumers cannot increment credit + Credit; +increase_credit(_Meta, #consumer{cfg = #consumer_cfg{lifetime = auto, + credit_mode = credited}, + credit = Credit}, _) -> + %% credit_mode: `credited' also doesn't automatically increment credit + Credit; +increase_credit(#{machine_version := MachineVersion}, + #consumer{cfg = #consumer_cfg{credit_mode = {simple_prefetch, MaxCredit}}, + credit = Current}, Credit) + when MachineVersion >= 3 andalso MaxCredit > 0 -> + min(MaxCredit, Current + Credit); +increase_credit(_Meta, #consumer{credit = Current}, Credit) -> + Current + Credit. + +complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, + #consumer{} = Con0, + Effects0, State0) -> + State1 = complete(Meta, ConsumerId, MsgIds, Con0, State0), + {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), + update_smallest_raft_index(IncomingRaftIdx, State, Effects). + +cancel_consumer_effects(ConsumerId, + #?STATE{cfg = #cfg{resource = QName}} = _State, + Effects) -> + [{mod_call, rabbit_quorum_queue, + cancel_consumer_handler, [QName, ConsumerId]} | Effects]. + +update_smallest_raft_index(Idx, State, Effects) -> + update_smallest_raft_index(Idx, ok, State, Effects). 
+ +update_smallest_raft_index(IncomingRaftIdx, Reply, + #?STATE{cfg = Cfg, + release_cursors = Cursors0} = State0, + Effects) -> + Total = messages_total(State0), + %% TODO: optimise + case smallest_raft_index(State0) of + undefined when Total == 0 -> + % there are no messages on queue anymore and no pending enqueues + % we can forward release_cursor all the way until + % the last received command, hooray + %% reset the release cursor interval + #cfg{release_cursor_interval = {Base, _}} = Cfg, + RCI = {Base, Base}, + State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = RCI}, + release_cursors = lqueue:new(), + enqueue_count = 0}, + {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]}; + undefined -> + {State0, Reply, Effects}; + Smallest when is_integer(Smallest) -> + case find_next_cursor(Smallest, Cursors0) of + empty -> + {State0, Reply, Effects}; + {Cursor, Cursors} -> + %% we can emit a release cursor when we've passed the smallest + %% release cursor available. + {State0#?STATE{release_cursors = Cursors}, Reply, + Effects ++ [Cursor]} + end + end. + +find_next_cursor(Idx, Cursors) -> + find_next_cursor(Idx, Cursors, empty). + +find_next_cursor(Smallest, Cursors0, Potential) -> + case lqueue:out(Cursors0) of + {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest -> + %% we found one but it may not be the largest one + find_next_cursor(Smallest, Cursors, Cursor); + _ when Potential == empty -> + empty; + _ -> + {Potential, Cursors0} + end. + +update_msg_header(Key, Fun, Def, ?MSG(Idx, Header)) -> + ?MSG(Idx, update_header(Key, Fun, Def, Header)). + +update_header(expiry, _, Expiry, Size) + when is_integer(Size) -> + ?TUPLE(Size, Expiry); +update_header(Key, UpdateFun, Default, Size) + when is_integer(Size) -> + update_header(Key, UpdateFun, Default, #{size => Size}); +update_header(Key, UpdateFun, Default, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + update_header(Key, UpdateFun, Default, #{size => Size, + expiry => Expiry}); +update_header(Key, UpdateFun, Default, Header) + when is_map(Header), is_map_key(size, Header) -> + maps:update_with(Key, UpdateFun, Default, Header). + +get_msg_header(?MSG(_Idx, Header)) -> + Header. + +get_header(size, Size) + when is_integer(Size) -> + Size; +get_header(_Key, Size) + when is_integer(Size) -> + undefined; +get_header(size, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + Size; +get_header(expiry, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + Expiry; +get_header(_Key, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + undefined; +get_header(Key, Header) + when is_map(Header) andalso is_map_key(size, Header) -> + maps:get(Key, Header, undefined). 
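The accessors above exist because a message header can take three interchangeable shapes, chosen to keep per-message memory low: a bare integer when only the size is known, an improper list `[Size | Expiry]` when size and expiry are present, and a map once extra keys such as `delivery_count` are needed. A small sketch with invented values:

```erlang
%% Illustrative only: the three header shapes that get_header/2 and
%% update_header/4 above can work with.
header_shapes_sketch() ->
    JustSize   = 120,                           %% size-only header
    SizeExpiry = [120 | 1_700_000_010_000],     %% ?TUPLE(Size, Expiry)
    FullMap    = #{size => 120,
                   expiry => 1_700_000_010_000,
                   delivery_count => 2},        %% map once more keys appear
    %% get_header(size, H) yields 120 for each shape, while
    %% get_header(expiry, JustSize) yields 'undefined' because a bare
    %% integer header carries no expiry.
    {JustSize, SizeExpiry, FullMap}.
```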
+ +return_one(#{machine_version := MachineVersion} = Meta, + MsgId, Msg0, + #?STATE{returns = Returns, + consumers = Consumers, + dlx = DlxState0, + cfg = #cfg{delivery_limit = DeliveryLimit, + dead_letter_handler = DLH}} = State0, + Effects0, ConsumerId) -> + #consumer{checked_out = Checked0} = Con0 = maps:get(ConsumerId, Consumers), + Msg = update_msg_header(delivery_count, fun incr/1, 1, Msg0), + Header = get_msg_header(Msg), + case get_header(delivery_count, Header) of + DeliveryCount when DeliveryCount > DeliveryLimit -> + {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], delivery_limit, DLH, DlxState0), + State1 = State0#?STATE{dlx = DlxState}, + State = complete(Meta, ConsumerId, [MsgId], Con0, State1), + {State, DlxEffects ++ Effects0}; + _ -> + Checked = maps:remove(MsgId, Checked0), + Con = case MachineVersion of + V when V >= 3 -> + Con0#consumer{checked_out = Checked, + credit = increase_credit(Meta, Con0, 1)}; + 2 -> + Con0#consumer{checked_out = Checked} + end, + {add_bytes_return( + Header, + State0#?STATE{consumers = Consumers#{ConsumerId => Con}, + returns = lqueue:in(Msg, Returns)}), + Effects0} + end. + +return_all(Meta, #?STATE{consumers = Cons} = State0, Effects0, ConsumerId, + #consumer{checked_out = Checked} = Con) -> + State = State0#?STATE{consumers = Cons#{ConsumerId => Con}}, + lists:foldl(fun ({MsgId, Msg}, {S, E}) -> + return_one(Meta, MsgId, Msg, S, E, ConsumerId) + end, {State, Effects0}, lists:sort(maps:to_list(Checked))). + +checkout(Meta, OldState, State0, Effects0) -> + checkout(Meta, OldState, State0, Effects0, ok). + +checkout(#{index := Index} = Meta, + #?STATE{cfg = #cfg{resource = _QName}} = OldState, + State0, Effects0, Reply) -> + {#?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State1, ExpiredMsg, Effects1} = + checkout0(Meta, checkout_one(Meta, false, State0, Effects0), #{}), + {DlxState, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DLH, DlxState0), + %% TODO: only update dlx state if it has changed? + State2 = State1#?STATE{msg_cache = undefined, %% by this time the cache should be used + dlx = DlxState}, + Effects2 = DlxDeliveryEffects ++ Effects1, + case evaluate_limit(Index, false, OldState, State2, Effects2) of + {State, false, Effects} when ExpiredMsg == false -> + {State, Reply, Effects}; + {State, _, Effects} -> + update_smallest_raft_index(Index, Reply, State, Effects) + end. + +checkout0(Meta, {success, ConsumerId, MsgId, + ?MSG(_RaftIdx, _Header) = Msg, ExpiredMsg, State, Effects}, + SendAcc0) -> + DelMsg = {MsgId, Msg}, + SendAcc = case maps:get(ConsumerId, SendAcc0, undefined) of + undefined -> + SendAcc0#{ConsumerId => [DelMsg]}; + LogMsgs -> + SendAcc0#{ConsumerId => [DelMsg | LogMsgs]} + end, + checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); +checkout0(_Meta, {_Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> + Effects = add_delivery_effects(Effects0, SendAcc, State0), + {State0, ExpiredMsg, lists:reverse(Effects)}. 
+ +evaluate_limit(_Index, Result, _BeforeState, + #?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}} = State, + Effects) -> + {State, Result, Effects}; +evaluate_limit(Index, Result, BeforeState, + #?STATE{cfg = #cfg{overflow_strategy = Strategy}, + enqueuers = Enqs0} = State0, + Effects0) -> + case is_over_limit(State0) of + true when Strategy == drop_head -> + {State, Effects} = drop_head(State0, Effects0), + evaluate_limit(Index, true, BeforeState, State, Effects); + true when Strategy == reject_publish -> + %% generate send_msg effect for each enqueuer to let them know + %% they need to block + {Enqs, Effects} = + maps:fold( + fun (P, #enqueuer{blocked = undefined} = E0, {Enqs, Acc}) -> + E = E0#enqueuer{blocked = Index}, + {Enqs#{P => E}, + [{send_msg, P, {queue_status, reject_publish}, + [ra_event]} | Acc]}; + (_P, _E, Acc) -> + Acc + end, {Enqs0, Effects0}, Enqs0), + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; + false when Strategy == reject_publish -> + %% TODO: optimise as this case gets called for every command + %% pretty much + Before = is_below_soft_limit(BeforeState), + case {Before, is_below_soft_limit(State0)} of + {false, true} -> + %% we have moved below the lower limit + {Enqs, Effects} = + maps:fold( + fun (P, #enqueuer{} = E0, {Enqs, Acc}) -> + E = E0#enqueuer{blocked = undefined}, + {Enqs#{P => E}, + [{send_msg, P, {queue_status, go}, [ra_event]} + | Acc]}; + (_P, _E, Acc) -> + Acc + end, {Enqs0, Effects0}, Enqs0), + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; + _ -> + {State0, Result, Effects0} + end; + false -> + {State0, Result, Effects0} + end. + + +%% [6,5,4,3,2,1] -> [[1,2],[3,4],[5,6]] +chunk_disk_msgs([], _Bytes, [[] | Chunks]) -> + Chunks; +chunk_disk_msgs([], _Bytes, Chunks) -> + Chunks; +chunk_disk_msgs([{_MsgId, ?MSG(_RaftIdx, Header)} = Msg | Rem], + Bytes, Chunks) + when Bytes >= ?DELIVERY_CHUNK_LIMIT_B -> + Size = get_header(size, Header), + chunk_disk_msgs(Rem, Size, [[Msg] | Chunks]); +chunk_disk_msgs([{_MsgId, ?MSG(_RaftIdx, Header)} = Msg | Rem], Bytes, + [CurChunk | Chunks]) -> + Size = get_header(size, Header), + chunk_disk_msgs(Rem, Bytes + Size, [[Msg | CurChunk] | Chunks]). + +add_delivery_effects(Effects0, AccMap, _State) + when map_size(AccMap) == 0 -> + %% does this ever happen? + Effects0; +add_delivery_effects(Effects0, AccMap, State) -> + maps:fold(fun (C, DiskMsgs, Efs) + when is_list(DiskMsgs) -> + lists:foldl( + fun (Msgs, E) -> + [delivery_effect(C, Msgs, State) | E] + end, Efs, chunk_disk_msgs(DiskMsgs, 0, [[]])) + end, Effects0, AccMap). + +take_next_msg(#?STATE{returns = Returns0, + messages = Messages0, + ra_indexes = Indexes0 + } = State) -> + case lqueue:out(Returns0) of + {{value, NextMsg}, Returns} -> + {NextMsg, State#?STATE{returns = Returns}}; + {empty, _} -> + case lqueue:out(Messages0) of + {empty, _} -> + empty; + {{value, ?MSG(RaftIdx, _) = Msg}, Messages} -> + %% add index here + Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), + {Msg, State#?STATE{messages = Messages, + ra_indexes = Indexes}} + end + end. + +get_next_msg(#?STATE{returns = Returns0, + messages = Messages0}) -> + case lqueue:get(Returns0, empty) of + empty -> + lqueue:get(Messages0, empty); + Msg -> + Msg + end. 
+ +delivery_effect({CTag, CPid}, [{MsgId, ?MSG(Idx, Header)}], + #?STATE{msg_cache = {Idx, RawMsg}}) -> + {send_msg, CPid, {delivery, CTag, [{MsgId, {Header, RawMsg}}]}, + [local, ra_event]}; +delivery_effect({CTag, CPid}, Msgs, _State) -> + RaftIdxs = lists:foldr(fun ({_, ?MSG(I, _)}, Acc) -> + [I | Acc] + end, [], Msgs), + {log, RaftIdxs, + fun(Log) -> + DelMsgs = lists:zipwith( + fun (Cmd, {MsgId, ?MSG(_Idx, Header)}) -> + {MsgId, {Header, get_msg(Cmd)}} + end, Log, Msgs), + [{send_msg, CPid, {delivery, CTag, DelMsgs}, [local, ra_event]}] + end, + {local, node(CPid)}}. + +reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> + {log, [RaftIdx], + fun ([Cmd]) -> + [{reply, From, {wrap_reply, + {dequeue, {MsgId, {Header, get_msg(Cmd)}}, Ready}}}] + end}. + +checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> + %% Before checking out any messsage to any consumer, + %% first remove all expired messages from the head of the queue. + {ExpiredMsg, #?STATE{service_queue = SQ0, + messages = Messages0, + msg_bytes_checkout = BytesCheckout, + msg_bytes_enqueue = BytesEnqueue, + consumers = Cons0} = InitState, Effects1} = + expire_msgs(Ts, ExpiredMsg0, InitState0, Effects0), + + case priority_queue:out(SQ0) of + {{value, ConsumerId}, SQ1} + when is_map_key(ConsumerId, Cons0) -> + case take_next_msg(InitState) of + {ConsumerMsg, State0} -> + %% there are consumers waiting to be serviced + %% process consumer checkout + case maps:get(ConsumerId, Cons0) of + #consumer{credit = 0} -> + %% no credit but was still on queue + %% can happen when draining + %% recurse without consumer on queue + checkout_one(Meta, ExpiredMsg, + InitState#?STATE{service_queue = SQ1}, Effects1); + #consumer{status = cancelled} -> + checkout_one(Meta, ExpiredMsg, + InitState#?STATE{service_queue = SQ1}, Effects1); + #consumer{status = suspected_down} -> + checkout_one(Meta, ExpiredMsg, + InitState#?STATE{service_queue = SQ1}, Effects1); + #consumer{checked_out = Checked0, + next_msg_id = Next, + credit = Credit, + delivery_count = DelCnt} = Con0 -> + Checked = maps:put(Next, ConsumerMsg, Checked0), + Con = Con0#consumer{checked_out = Checked, + next_msg_id = Next + 1, + credit = Credit - 1, + delivery_count = DelCnt + 1}, + Size = get_header(size, get_msg_header(ConsumerMsg)), + State = update_or_remove_sub( + Meta, ConsumerId, Con, + State0#?STATE{service_queue = SQ1, + msg_bytes_checkout = BytesCheckout + Size, + msg_bytes_enqueue = BytesEnqueue - Size}), + {success, ConsumerId, Next, ConsumerMsg, ExpiredMsg, + State, Effects1} + end; + empty -> + {nochange, ExpiredMsg, InitState, Effects1} + end; + {{value, _ConsumerId}, SQ1} -> + %% consumer did not exist but was queued, recurse + checkout_one(Meta, ExpiredMsg, + InitState#?STATE{service_queue = SQ1}, Effects1); + {empty, _} -> + case lqueue:len(Messages0) of + 0 -> + {nochange, ExpiredMsg, InitState, Effects1}; + _ -> + {inactive, ExpiredMsg, InitState, Effects1} + end + end. + +%% dequeue all expired messages +expire_msgs(RaCmdTs, Result, State, Effects) -> + %% In the normal case, there are no expired messages. + %% Therefore, first lqueue:get/2 to check whether we need to lqueue:out/1 + %% because the latter can be much slower than the former. 
+ case get_next_msg(State) of + ?MSG(_, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry), RaCmdTs >= Expiry -> + expire(RaCmdTs, State, Effects); + ?MSG(_, #{expiry := Expiry}) + when is_integer(Expiry), RaCmdTs >= Expiry -> + expire(RaCmdTs, State, Effects); + _ -> + {Result, State, Effects} + end. + +expire(RaCmdTs, State0, Effects) -> + {?MSG(Idx, Header) = Msg, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0, + ra_indexes = Indexes0, + messages_total = Tot, + msg_bytes_enqueue = MsgBytesEnqueue} = State1} = take_next_msg(State0), + {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], expired, DLH, DlxState0), + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State = State1#?STATE{dlx = DlxState, + ra_indexes = Indexes, + messages_total = Tot - 1, + msg_bytes_enqueue = MsgBytesEnqueue - get_header(size, Header)}, + expire_msgs(RaCmdTs, true, State, DlxEffects ++ Effects). + +timer_effect(RaCmdTs, State, Effects) -> + T = case get_next_msg(State) of + ?MSG(_, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + %% Next message contains 'expiry' header. + %% (Re)set timer so that mesage will be dropped or dead-lettered on time. + max(0, Expiry - RaCmdTs); + ?MSG(_, #{expiry := Expiry}) + when is_integer(Expiry) -> + max(0, Expiry - RaCmdTs); + _ -> + %% Next message does not contain 'expiry' header. + %% Therefore, do not set timer or cancel timer if it was set. + infinity + end, + [{timer, expire_msgs, T} | Effects]. + +update_or_remove_sub(Meta, ConsumerId, + #consumer{cfg = #consumer_cfg{lifetime = once}, + checked_out = Checked, + credit = 0} = Con, + #?STATE{consumers = Cons} = State) -> + case map_size(Checked) of + 0 -> + #{system_time := Ts} = Meta, + % we're done with this consumer + State#?STATE{consumers = maps:remove(ConsumerId, Cons), + last_active = Ts}; + _ -> + % there are unsettled items so need to keep around + State#?STATE{consumers = maps:put(ConsumerId, Con, Cons)} + end; +update_or_remove_sub(_Meta, ConsumerId, + #consumer{cfg = #consumer_cfg{}} = Con, + #?STATE{consumers = Cons, + service_queue = ServiceQueue} = State) -> + State#?STATE{consumers = maps:put(ConsumerId, Con, Cons), + service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}. + +uniq_queue_in(Key, #consumer{credit = Credit, + status = up, + cfg = #consumer_cfg{priority = P}}, ServiceQueue) + when Credit > 0 -> + % TODO: queue:member could surely be quite expensive, however the practical + % number of unique consumers may not be large enough for it to matter + case priority_queue:member(Key, ServiceQueue) of + true -> + ServiceQueue; + false -> + priority_queue:in(Key, P, ServiceQueue) + end; +uniq_queue_in(_Key, _Consumer, ServiceQueue) -> + ServiceQueue. 
+ +update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, + {Life, Credit, Mode0} = Spec, Priority, + #?STATE{cfg = #cfg{consumer_strategy = competing}, + consumers = Cons0} = State0) -> + Consumer = case Cons0 of + #{ConsumerId := #consumer{} = Consumer0} -> + merge_consumer(Meta, Consumer0, ConsumerMeta, Spec, Priority); + _ -> + Mode = credit_mode(Meta, Credit, Mode0), + #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + lifetime = Life, + meta = ConsumerMeta, + priority = Priority, + credit_mode = Mode}, + credit = Credit} + end, + {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; +update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, + {Life, Credit, Mode0} = Spec, Priority, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Cons0, + waiting_consumers = Waiting, + service_queue = _ServiceQueue0} = State0) -> + %% if it is the current active consumer, just update + %% if it is a cancelled active consumer, add to waiting unless it is the only + %% one, then merge + case active_consumer(Cons0) of + {ConsumerId, #consumer{status = up} = Consumer0} -> + Consumer = merge_consumer(Meta, Consumer0, ConsumerMeta, + Spec, Priority), + {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; + undefined when is_map_key(ConsumerId, Cons0) -> + %% there is no active consumer and the current consumer is in the + %% consumers map and thus must be cancelled, in this case we can just + %% merge and effectively make this the current active one + Consumer0 = maps:get(ConsumerId, Cons0), + Consumer = merge_consumer(Meta, Consumer0, ConsumerMeta, + Spec, Priority), + {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; + _ -> + %% add as a new waiting consumer + Mode = credit_mode(Meta, Credit, Mode0), + Consumer = #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + lifetime = Life, + meta = ConsumerMeta, + priority = Priority, + credit_mode = Mode}, + credit = Credit}, + + {Consumer, + State0#?STATE{waiting_consumers = + Waiting ++ [{ConsumerId, Consumer}]}} + end. + +merge_consumer(Meta, #consumer{cfg = CCfg, checked_out = Checked} = Consumer, + ConsumerMeta, {Life, Credit, Mode0}, Priority) -> + NumChecked = map_size(Checked), + NewCredit = max(0, Credit - NumChecked), + Mode = credit_mode(Meta, Credit, Mode0), + Consumer#consumer{cfg = CCfg#consumer_cfg{priority = Priority, + meta = ConsumerMeta, + credit_mode = Mode, + lifetime = Life}, + status = up, + credit = NewCredit}. + +credit_mode(#{machine_version := Vsn}, Credit, simple_prefetch) + when Vsn >= 3 -> + {simple_prefetch, Credit}; +credit_mode(_, _, Mode) -> + Mode. + +maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, + ServiceQueue0) -> + case Credit > 0 of + true -> + % consumer needs service - check if already on service queue + uniq_queue_in(ConsumerId, Con, ServiceQueue0); + false -> + ServiceQueue0 + end. + +%% creates a dehydrated version of the current state to be cached and +%% potentially used to for a snaphot at a later point +dehydrate_state(#?STATE{cfg = #cfg{}, + dlx = DlxState} = State) -> + % no messages are kept in memory, no need to + % overly mutate the current state apart from removing indexes and cursors + State#?STATE{ra_indexes = rabbit_fifo_index:empty(), + release_cursors = lqueue:new(), + enqueue_count = 0, + msg_cache = undefined, + dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. 
+ +is_over_limit(#?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}}) -> + false; +is_over_limit(#?STATE{cfg = #cfg{max_length = MaxLength, + max_bytes = MaxBytes}, + msg_bytes_enqueue = BytesEnq, + dlx = DlxState} = State) -> + {NumDlx, BytesDlx} = rabbit_fifo_dlx:stat(DlxState), + (messages_ready(State) + NumDlx > MaxLength) orelse + (BytesEnq + BytesDlx > MaxBytes). + +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}}) -> + false; +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = MaxLength, + max_bytes = MaxBytes}, + msg_bytes_enqueue = BytesEnq, + dlx = DlxState} = State) -> + {NumDlx, BytesDlx} = rabbit_fifo_dlx:stat(DlxState), + is_below(MaxLength, messages_ready(State) + NumDlx) andalso + is_below(MaxBytes, BytesEnq + BytesDlx). + +is_below(undefined, _Num) -> + true; +is_below(Val, Num) when is_integer(Val) andalso is_integer(Num) -> + Num =< trunc(Val * ?LOW_LIMIT). + +-spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> protocol(). +make_enqueue(Pid, Seq, Msg) -> + #enqueue{pid = Pid, seq = Seq, msg = Msg}. + +-spec make_register_enqueuer(pid()) -> protocol(). +make_register_enqueuer(Pid) -> + #register_enqueuer{pid = Pid}. + +-spec make_checkout(consumer_id(), + checkout_spec(), consumer_meta()) -> protocol(). +make_checkout({_, _} = ConsumerId, Spec, Meta) -> + #checkout{consumer_id = ConsumerId, + spec = Spec, meta = Meta}. + +-spec make_settle(consumer_id(), [msg_id()]) -> protocol(). +make_settle(ConsumerId, MsgIds) when is_list(MsgIds) -> + #settle{consumer_id = ConsumerId, msg_ids = MsgIds}. + +-spec make_return(consumer_id(), [msg_id()]) -> protocol(). +make_return(ConsumerId, MsgIds) -> + #return{consumer_id = ConsumerId, msg_ids = MsgIds}. + +-spec make_discard(consumer_id(), [msg_id()]) -> protocol(). +make_discard(ConsumerId, MsgIds) -> + #discard{consumer_id = ConsumerId, msg_ids = MsgIds}. + +-spec make_credit(consumer_id(), non_neg_integer(), non_neg_integer(), + boolean()) -> protocol(). +make_credit(ConsumerId, Credit, DeliveryCount, Drain) -> + #credit{consumer_id = ConsumerId, + credit = Credit, + delivery_count = DeliveryCount, + drain = Drain}. + +-spec make_purge() -> protocol(). +make_purge() -> #purge{}. + +-spec make_garbage_collection() -> protocol(). +make_garbage_collection() -> #garbage_collection{}. + +-spec make_purge_nodes([node()]) -> protocol(). +make_purge_nodes(Nodes) -> + #purge_nodes{nodes = Nodes}. + +-spec make_update_config(config()) -> protocol(). +make_update_config(Config) -> + #update_config{config = Config}. + +add_bytes_drop(Header, + #?STATE{msg_bytes_enqueue = Enqueue} = State) -> + Size = get_header(size, Header), + State#?STATE{msg_bytes_enqueue = Enqueue - Size}. + + +add_bytes_return(Header, + #?STATE{msg_bytes_checkout = Checkout, + msg_bytes_enqueue = Enqueue} = State) -> + Size = get_header(size, Header), + State#?STATE{msg_bytes_checkout = Checkout - Size, + msg_bytes_enqueue = Enqueue + Size}. + +message_size(#basic_message{content = Content}) -> + #content{payload_fragments_rev = PFR} = Content, + iolist_size(PFR); +message_size(B) when is_binary(B) -> + byte_size(B); +message_size(Msg) -> + case mc:is(Msg) of + true -> + {_, PayloadSize} = mc:size(Msg), + PayloadSize; + false -> + %% probably only hit this for testing so ok to use erts_debug + erts_debug:size(Msg) + end. 
+ + +all_nodes(#?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Nodes0 = maps:fold(fun({_, P}, _, Acc) -> + Acc#{node(P) => ok} + end, #{}, Cons0), + Nodes1 = maps:fold(fun(P, _, Acc) -> + Acc#{node(P) => ok} + end, Nodes0, Enqs0), + maps:keys( + lists:foldl(fun({{_, P}, _}, Acc) -> + Acc#{node(P) => ok} + end, Nodes1, WaitingConsumers0)). + +all_pids_for(Node, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun({_, P}, _, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, [], Cons0), + Enqs = maps:fold(fun(P, _, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, Cons, Enqs0), + lists:foldl(fun({{_, P}, _}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, Acc) -> Acc + end, Enqs, WaitingConsumers0). + +suspected_pids_for(Node, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun({_, P}, + #consumer{status = suspected_down}, + Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, [], Cons0), + Enqs = maps:fold(fun(P, #enqueuer{status = suspected_down}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, Cons, Enqs0), + lists:foldl(fun({{_, P}, + #consumer{status = suspected_down}}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, Acc) -> Acc + end, Enqs, WaitingConsumers0). + +is_expired(Ts, #?STATE{cfg = #cfg{expires = Expires}, + last_active = LastActive, + consumers = Consumers}) + when is_number(LastActive) andalso is_number(Expires) -> + %% TODO: should it be active consumers? + Active = maps:filter(fun (_, #consumer{status = suspected_down}) -> + false; + (_, _) -> + true + end, Consumers), + + Ts > (LastActive + Expires) andalso maps:size(Active) == 0; +is_expired(_Ts, _State) -> + false. + +get_priority_from_args(#{args := Args}) -> + case rabbit_misc:table_lookup(Args, <<"x-priority">>) of + {_Key, Value} -> + Value; + _ -> 0 + end; +get_priority_from_args(_) -> + 0. + +notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> + {mod_call, rabbit_quorum_queue, spawn_notify_decorators, + [QName, consumer_state_changed, [MaxActivePriority, IsEmpty]]}. + +notify_decorators_startup(QName) -> + {mod_call, rabbit_quorum_queue, spawn_notify_decorators, + [QName, startup, []]}. + +convert(To, To, State) -> + State; +convert(0, To, State) -> + convert(1, To, rabbit_fifo_v1:convert_v0_to_v1(State)); +convert(1, To, State) -> + convert(2, To, convert_v1_to_v2(State)); +convert(2, To, State) -> + convert(3, To, convert_v2_to_v3(State)). + +smallest_raft_index(#?STATE{messages = Messages, + ra_indexes = Indexes, + dlx = DlxState}) -> + SmallestDlxRaIdx = rabbit_fifo_dlx:smallest_raft_index(DlxState), + SmallestMsgsRaIdx = case lqueue:get(Messages, undefined) of + ?MSG(I, _) when is_integer(I) -> + I; + _ -> + undefined + end, + SmallestRaIdx = rabbit_fifo_index:smallest(Indexes), + lists:min([SmallestDlxRaIdx, SmallestMsgsRaIdx, SmallestRaIdx]). 
+ +make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg}], Acc) -> + lists:reverse([{append, + #requeue{consumer_id = ConsumerId, + index = Idx, + header = Header, + msg_id = MsgId, + msg = Msg}, + Notify} + | Acc]); +make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg} | Rem], Acc) -> + make_requeue(ConsumerId, Notify, Rem, + [{append, + #requeue{consumer_id = ConsumerId, + index = Idx, + header = Header, + msg_id = MsgId, + msg = Msg}, + noreply} + | Acc]); +make_requeue(_ConsumerId, _Notify, [], []) -> + []. + +can_immediately_deliver(#?STATE{service_queue = SQ, + consumers = Consumers} = State) -> + case messages_ready(State) of + 0 when map_size(Consumers) > 0 -> + %% TODO: is is probably good enough but to be 100% we'd need to + %% scan all consumers and ensure at least one has credit + priority_queue:is_empty(SQ) == false; + _ -> + false + end. + +incr(I) -> + I + 1. + +get_msg(#enqueue{msg = M}) -> + M; +get_msg(#requeue{msg = M}) -> + M. diff --git a/deps/rabbit/src/rabbit_fifo_v3.hrl b/deps/rabbit/src/rabbit_fifo_v3.hrl new file mode 100644 index 000000000000..9b1078265dc6 --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_v3.hrl @@ -0,0 +1,226 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +%% macros for memory optimised tuple structures +%% [A|B] saves 1 byte compared to {A,B} +-define(TUPLE(A, B), [A | B]). + +%% We only hold Raft index and message header in memory. +%% Raw message data is always stored on disk. +-define(MSG(Index, Header), ?TUPLE(Index, Header)). + +-define(IS_HEADER(H), + (is_integer(H) andalso H >= 0) orelse + is_list(H) orelse + (is_map(H) andalso is_map_key(size, H))). + +-type optimised_tuple(A, B) :: nonempty_improper_list(A, B). + +-type option(T) :: undefined | T. + +-type raw_msg() :: term(). +%% The raw message. It is opaque to rabbit_fifo. + +-type msg_id() :: non_neg_integer(). +%% A consumer-scoped monotonically incrementing integer included with a +%% {@link delivery/0.}. Used to settle deliveries using +%% {@link rabbit_fifo_client:settle/3.} + +-type msg_seqno() :: non_neg_integer(). +%% A sender process scoped monotonically incrementing integer included +%% in enqueue messages. Used to ensure ordering of messages send from the +%% same process + +-type msg_header() :: msg_size() | + optimised_tuple(msg_size(), Expiry :: milliseconds()) | + #{size := msg_size(), + delivery_count => non_neg_integer(), + expiry => milliseconds()}. +%% The message header: +%% size: The size of the message payload in bytes. +%% delivery_count: the number of unsuccessful delivery attempts. +%% A non-zero value indicates a previous attempt. +%% expiry: Epoch time in ms when a message expires. Set during enqueue. +%% Value is determined by per-queue or per-message message TTL. +%% If it contains only the size it can be condensed to an integer. +%% If it contains only the size and expiry it can be condensed to an improper list. + +-type msg_size() :: non_neg_integer(). +%% the size in bytes of the msg payload + +-type msg() :: optimised_tuple(option(ra:index()), msg_header()). + +-type delivery_msg() :: {msg_id(), {msg_header(), raw_msg()}}. +%% A tuple consisting of the message id, and the headered message. + +-type consumer_tag() :: binary(). 
+%% An arbitrary binary tag used to distinguish between different consumers +%% set up by the same process. See: {@link rabbit_fifo_client:checkout/3.} + +-type delivery() :: {delivery, consumer_tag(), [delivery_msg()]}. +%% Represents the delivery of one or more rabbit_fifo messages. + +-type consumer_id() :: {consumer_tag(), pid()}. +%% The entity that receives messages. Uniquely identifies a consumer. + +-type credit_mode() :: credited | + %% machine_version 2 + simple_prefetch | + %% machine_version 3 + {simple_prefetch, MaxCredit :: non_neg_integer()}. +%% determines how credit is replenished + +-type checkout_spec() :: {once | auto, Num :: non_neg_integer(), + credit_mode()} | + {dequeue, settled | unsettled} | + cancel. + +-type consumer_meta() :: #{ack => boolean(), + username => binary(), + prefetch => non_neg_integer(), + args => list()}. +%% static meta data associated with a consumer + +-type applied_mfa() :: {module(), atom(), list()}. +% represents a partially applied module call + +-define(RELEASE_CURSOR_EVERY, 2048). +-define(RELEASE_CURSOR_EVERY_MAX, 3_200_000). +-define(USE_AVG_HALF_LIFE, 10000.0). +%% an average QQ without any message uses about 100KB so setting this limit +%% to ~10 times that should be relatively safe. +-define(GC_MEM_LIMIT_B, 2_000_000). + +-define(MB, 1_048_576). +-define(LOW_LIMIT, 0.8). +-define(DELIVERY_CHUNK_LIMIT_B, 128_000). + +-record(consumer_cfg, + {meta = #{} :: consumer_meta(), + pid :: pid(), + tag :: consumer_tag(), + %% the mode of how credit is incremented + %% simple_prefetch: credit is re-filled as deliveries are settled + %% or returned. + %% credited: credit can only be changed by receiving a consumer_credit + %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}' + credit_mode :: credit_mode(), % part of snapshot data + lifetime = once :: once | auto, + priority = 0 :: non_neg_integer()}). + +-record(consumer, + {cfg = #consumer_cfg{}, + status = up :: up | suspected_down | cancelled | waiting, + next_msg_id = 0 :: msg_id(), % part of snapshot data + checked_out = #{} :: #{msg_id() => msg()}, + %% max number of messages that can be sent + %% decremented for each delivery + credit = 0 :: non_neg_integer(), + %% total number of checked out messages - ever + %% incremented for each delivery + delivery_count = 0 :: non_neg_integer() + }). + +-type consumer() :: #consumer{}. + +-type consumer_strategy() :: competing | single_active. + +-type milliseconds() :: non_neg_integer(). + +-type dead_letter_handler() :: option({at_most_once, applied_mfa()} | at_least_once). + +-record(enqueuer, + {next_seqno = 1 :: msg_seqno(), + % out of order enqueues - sorted list + unused, + status = up :: up | suspected_down, + %% it is useful to have a record of when this was blocked + %% so that we can retry sending the block effect if + %% the publisher did not receive the initial one + blocked :: option(ra:index()), + unused_1, + unused_2 + }).
+ +-record(cfg, + {name :: atom(), + resource :: rabbit_types:r('queue'), + release_cursor_interval :: option({non_neg_integer(), non_neg_integer()}), + dead_letter_handler :: dead_letter_handler(), + become_leader_handler :: option(applied_mfa()), + overflow_strategy = drop_head :: drop_head | reject_publish, + max_length :: option(non_neg_integer()), + max_bytes :: option(non_neg_integer()), + %% whether single active consumer is on or not for this queue + consumer_strategy = competing :: consumer_strategy(), + %% the maximum number of unsuccessful delivery attempts permitted + delivery_limit :: option(non_neg_integer()), + expires :: option(milliseconds()), + msg_ttl :: option(milliseconds()), + unused_1, + unused_2 + }). + +-type prefix_msgs() :: {list(), list()} | + {non_neg_integer(), list(), + non_neg_integer(), list()}. + +-record(rabbit_fifo, + {cfg :: #cfg{}, + % unassigned messages + messages = lqueue:new() :: lqueue:lqueue(msg()), + messages_total = 0 :: non_neg_integer(), + % queue of returned msg_in_ids - when checking out it picks from + returns = lqueue:new() :: lqueue:lqueue(term()), + % a counter of enqueues - used to trigger shadow copy points + % reset to 0 when release_cursor gets stored + enqueue_count = 0 :: non_neg_integer(), + % a map containing all the live processes that have ever enqueued + % a message to this queue + enqueuers = #{} :: #{pid() => #enqueuer{}}, + % index of all messages that have been delivered at least once + % used to work out the smallest live raft index + % rabbit_fifo_index can be slow when calculating the smallest + % index when there are large gaps but should be faster than gb_trees + % for normal appending operations as it's backed by a map + ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), + %% A release cursor is essentially a snapshot for a past raft index. + %% Working assumption: Messages are consumed in a FIFO-ish order because + %% the log is truncated only until the oldest message. + release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor, + ra:index(), #rabbit_fifo{}}), + % consumers need to reflect consumer state at time of snapshot + consumers = #{} :: #{consumer_id() => consumer()}, + % consumers that require further service are queued here + service_queue = priority_queue:new() :: priority_queue:q(), + %% state for at-least-once dead-lettering + dlx = rabbit_fifo_dlx:init() :: rabbit_fifo_dlx:state(), + msg_bytes_enqueue = 0 :: non_neg_integer(), + msg_bytes_checkout = 0 :: non_neg_integer(), + %% waiting consumers, one is picked active consumer is cancelled or dies + %% used only when single active consumer is on + waiting_consumers = [] :: [{consumer_id(), consumer()}], + last_active :: option(non_neg_integer()), + msg_cache :: option({ra:index(), raw_msg()}), + unused_2 + }). + +-type config() :: #{name := atom(), + queue_resource := rabbit_types:r('queue'), + dead_letter_handler => dead_letter_handler(), + become_leader_handler => applied_mfa(), + release_cursor_interval => non_neg_integer(), + max_length => non_neg_integer(), + max_bytes => non_neg_integer(), + max_in_memory_length => non_neg_integer(), + max_in_memory_bytes => non_neg_integer(), + overflow_strategy => drop_head | reject_publish, + single_active_consumer_on => boolean(), + delivery_limit => non_neg_integer(), + expires => non_neg_integer(), + msg_ttl => non_neg_integer(), + created => non_neg_integer() + }. 
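To make the `config()` type above concrete, here is a hedged example of such a map; every value is invented and `rabbit_misc:r/3` is assumed to be an acceptable way to build the queue resource:

```erlang
%% Purely illustrative configuration map matching the config() type above.
example_config() ->
    #{name => example_qq,
      queue_resource => rabbit_misc:r(<<"/">>, queue, <<"example-qq">>),
      dead_letter_handler => at_least_once,
      release_cursor_interval => 2048,
      max_length => 10_000,
      overflow_strategy => reject_publish,
      single_active_consumer_on => false,
      delivery_limit => 20,
      expires => 1_800_000,
      msg_ttl => 30_000,
      created => erlang:system_time(millisecond)}.
```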
diff --git a/deps/rabbit/src/rabbit_file.erl b/deps/rabbit/src/rabbit_file.erl index 8115be6923df..1f7182611992 100644 --- a/deps/rabbit/src/rabbit_file.erl +++ b/deps/rabbit/src/rabbit_file.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_file). @@ -18,8 +18,6 @@ -export([filename_as_a_directory/1]). -export([filename_to_binary/1, binary_to_filename/1]). --import(file_handle_cache, [with_handle/1, with_handle/2]). - -define(TMP_EXT, ".tmp"). %%---------------------------------------------------------------------------- @@ -56,7 +54,7 @@ file_size(File) -> -spec ensure_dir((file:filename())) -> ok_or_error(). -ensure_dir(File) -> with_handle(fun () -> ensure_dir_internal(File) end). +ensure_dir(File) -> ensure_dir_internal(File). ensure_dir_internal("/") -> ok; @@ -81,16 +79,17 @@ wildcard(Pattern, Dir) -> -spec list_dir(file:filename()) -> rabbit_types:ok_or_error2([file:filename()], any()). -list_dir(Dir) -> with_handle(fun () -> prim_file:list_dir(Dir) end). +list_dir(Dir) -> prim_file:list_dir(Dir). read_file_info(File) -> - with_handle(fun () -> file:read_file_info(File, [raw]) end). + file:read_file_info(File, [raw]). -spec read_term_file (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any()). read_term_file(File) -> try + %% @todo OTP-27+ has file:read_file(File, [raw]). F = fun() -> {ok, FInfo} = file:read_file_info(File, [raw]), {ok, Fd} = file:open(File, [read, raw, binary]), @@ -100,7 +99,7 @@ read_term_file(File) -> file:close(Fd) end end, - {ok, Data} = with_handle(F), + {ok, Data} = F(), {ok, Tokens, _} = erl_scan:string(binary_to_list(Data)), TokenGroups = group_tokens(Tokens), {ok, [begin @@ -166,22 +165,19 @@ with_synced_copy(Path, Modes, Fun) -> true -> {error, append_not_supported, Path}; false -> - with_handle( - fun () -> - Bak = Path ++ ?TMP_EXT, - case prim_file:open(Bak, Modes) of - {ok, Hdl} -> - try - Result = Fun(Hdl), - ok = prim_file:sync(Hdl), - ok = prim_file:rename(Bak, Path), - Result - after - prim_file:close(Hdl) - end; - {error, _} = E -> E - end - end) + Bak = Path ++ ?TMP_EXT, + case prim_file:open(Bak, Modes) of + {ok, Hdl} -> + try + Result = Fun(Hdl), + ok = prim_file:sync(Hdl), + ok = prim_file:rename(Bak, Path), + Result + after + prim_file:close(Hdl) + end; + {error, _} = E -> E + end end. %% TODO the semantics of this function are rather odd. But see bug 25021. @@ -198,16 +194,12 @@ append_file(File, Suffix) -> append_file(_, _, "") -> ok; append_file(File, 0, Suffix) -> - with_handle(fun () -> - case prim_file:open([File, Suffix], [append]) of - {ok, Fd} -> prim_file:close(Fd); - Error -> Error - end - end); + case prim_file:open([File, Suffix], [append]) of + {ok, Fd} -> prim_file:close(Fd); + Error -> Error + end; append_file(File, _, Suffix) -> - case with_handle(2, fun () -> - file:copy(File, {[File, Suffix], [append]}) - end) of + case file:copy(File, {[File, Suffix], [append]}) of {ok, _BytesCopied} -> ok; Error -> Error end. @@ -223,21 +215,19 @@ ensure_parent_dirs_exist(Filename) -> -spec rename(file:filename(), file:filename()) -> ok_or_error(). -rename(Old, New) -> with_handle(fun () -> prim_file:rename(Old, New) end). +rename(Old, New) -> prim_file:rename(Old, New). 
-spec delete([file:filename()]) -> ok_or_error(). -delete(File) -> with_handle(fun () -> prim_file:delete(File) end). +delete(File) -> prim_file:delete(File). -spec recursive_delete([file:filename()]) -> rabbit_types:ok_or_error({file:filename(), any()}). recursive_delete(Files) -> - with_handle( - fun () -> lists:foldl(fun (Path, ok) -> recursive_delete1(Path); - (_Path, {error, _Err} = Error) -> Error - end, ok, Files) - end). + lists:foldl(fun (Path, ok) -> recursive_delete1(Path); + (_Path, {error, _Err} = Error) -> Error + end, ok, Files). recursive_delete1(Path) -> case is_dir_no_handle(Path) and not(is_symlink_no_handle(Path)) of @@ -258,6 +248,7 @@ recursive_delete1(Path) -> ok -> case prim_file:del_dir(Path) of ok -> ok; + {error, ebusy} -> ok; %% Can't delete a mount point {error, Err} -> {error, {Path, Err}} end; {error, _Err} = Error -> @@ -314,10 +305,8 @@ recursive_copy(Src, Dest) -> lock_file(Path) -> case is_file(Path) of true -> {error, eexist}; - false -> with_handle( - fun () -> {ok, Lock} = prim_file:open(Path, [write]), - ok = prim_file:close(Lock) - end) + false -> {ok, Lock} = prim_file:open(Path, [write]), + ok = prim_file:close(Lock) end. -spec filename_as_a_directory(file:filename()) -> file:filename(). diff --git a/deps/rabbit/src/rabbit_global_counters.erl b/deps/rabbit/src/rabbit_global_counters.erl index 6dfca8f2d1ba..b5cdc5b627e1 100644 --- a/deps/rabbit/src/rabbit_global_counters.erl +++ b/deps/rabbit/src/rabbit_global_counters.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_global_counters). @@ -93,7 +93,6 @@ -define(MESSAGES_GET_EMPTY, 6). -define(MESSAGES_REDELIVERED, 7). -define(MESSAGES_ACKNOWLEDGED, 8). -%% Note: ?NUM_PROTOCOL_QUEUE_TYPE_COUNTERS needs to be up-to-date. See include/rabbit_global_counters.hrl -define(PROTOCOL_QUEUE_TYPE_COUNTERS, [ { @@ -131,13 +130,15 @@ ]). boot_step() -> - %% Protocol counters - init([{protocol, amqp091}]), - - %% Protocol & Queue Type counters - init([{protocol, amqp091}, {queue_type, rabbit_classic_queue}]), - init([{protocol, amqp091}, {queue_type, rabbit_quorum_queue}]), - init([{protocol, amqp091}, {queue_type, rabbit_stream_queue}]), + [begin + %% Protocol counters + init([{protocol, Proto}]), + + %% Protocol & Queue Type counters + init([{protocol, Proto}, {queue_type, rabbit_classic_queue}]), + init([{protocol, Proto}, {queue_type, rabbit_quorum_queue}]), + init([{protocol, Proto}, {queue_type, rabbit_stream_queue}]) + end || Proto <- [amqp091, amqp10]], %% Dead Letter counters %% diff --git a/deps/rabbit/src/rabbit_guid.erl b/deps/rabbit/src/rabbit_guid.erl index e7e8aed6633f..f637064fb8bf 100644 --- a/deps/rabbit/src/rabbit_guid.erl +++ b/deps/rabbit/src/rabbit_guid.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_guid). 
diff --git a/deps/rabbit/src/rabbit_health_check.erl b/deps/rabbit/src/rabbit_health_check.erl index 9039a5df0e22..32223e1a43f5 100644 --- a/deps/rabbit/src/rabbit_health_check.erl +++ b/deps/rabbit/src/rabbit_health_check.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_health_check). diff --git a/deps/rabbit/src/rabbit_khepri.erl b/deps/rabbit/src/rabbit_khepri.erl new file mode 100644 index 000000000000..98428f45a099 --- /dev/null +++ b/deps/rabbit/src/rabbit_khepri.erl @@ -0,0 +1,1796 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +%% @doc Khepri database uses wrapper. +%% +%% This module has three purposes: +%% +%%
+%% 1. It provides a wrapper API on top of the regular Khepri API. The main
+%%    goal of this wrapper is to make sure the correct store identifier is
+%%    being used.
+%% 2. It is responsible for managing the Khepri database and clustering.
+%% 3. It provides functions to help with the transition from Mnesia to
+%%    Khepri.
    +%% +%% == Khepri API wrapper == +%% +%% Most Khepri regular functions are wrapped by this module, but not all of +%% them. The reason is that the missing functions were not used so far. Feel +%% free to add another wrapper when the need arises. +%% +%% See
    Khepri's documentation +%% to learn how to use its API. +%% +%% +%% == Transition from Mnesia to Khepri == +%% +%% Until Mnesia code is removed, RabbitMQ should support both databases and +%% allow to migrate data from Mnesia to Khepri at runtime. The `khepri_db' +%% feature flag, its associated callback functions and the +%% `khepri_mnesia_migration' application take care of the one-time migration. +%% +%% To make database reads and writes work before, during and after the +%% migration, one can use the following functions: +%%
+%% • {@link is_enabled/0}, {@link is_enabled/1}
+%% • {@link get_feature_state/0}, {@link get_feature_state/1}
+%% • {@link handle_fallback/1}
+%%
+%% {@link is_enabled/0} and {@link is_enabled/1} query the state of the
+%% `khepri_db' feature flag and return `true' if Khepri is the active
+%% database or `false' if Mnesia is the active one. Furthermore, they block
+%% during the migration.
+%%
+%% {@link get_feature_state/0} and {@link get_feature_state/1} query the same
+%% feature flag state. However, they do not block during the migration and
+%% return `enabled' if Khepri is active, `disabled' if Mnesia is active, or
+%% `state_changing' if RabbitMQ is between these two states.
+%%
+%% Finally, {@link handle_fallback/1} is a helper that takes two anonymous
+%% functions: one for Mnesia and one for Khepri. If Khepri is already enabled,
+%% its associated anonymous function is executed. Otherwise, the Mnesia one is
+%% executed. If the migration runs concurrently, whether it started before or
+%% during the execution of the Mnesia-specific anonymous function, {@link
+%% handle_fallback/1} will watch for "no exists" table exceptions from Mnesia
+%% and will retry the Mnesia function or run the Khepri function accordingly.
+%% The Mnesia function must be idempotent because it can be executed multiple
+%% times.
+%%
+%% Which function to use then?
+%%
+%% If you want to read from or write to one or more Mnesia tables or the
+%% Khepri store, you should use {@link handle_fallback/1}:
+%%
    +%% rabbit_khepri:handle_fallback(
    +%%   #{mnesia => fun() -> do_something_with_mnesia_tables() end,
+%%     khepri => fun() -> do_something_with_khepri_store() end}).
    +%% 
    +%% +%% However, if you call into Mnesia but that doesn't involve reading or +%% writing to tables (e.g. querying the cluster status), you need to use +%% {@link is_enabled/0} or {@link get_feature_state/0}, depending on whether +%% you want to block or not. Most of the time, you want the call to block to +%% not have to deal with the intermediate state. For example: +%%
    +%% case rabbit_khepri:is_enabled() of
    +%%     true  -> do_something_with_khepri();
    +%%     false -> do_something_with_mnesia()
    +%% end.
    +%% 
    + +-module(rabbit_khepri). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("stdlib/include/assert.hrl"). + +-include_lib("khepri/include/khepri.hrl"). +-include_lib("rabbit_common/include/logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-export([setup/0, + setup/1, + init/0, + can_join_cluster/1, + add_member/2, + remove_member/1, + members/0, + locally_known_members/0, + nodes/0, + locally_known_nodes/0, + get_ra_cluster_name/0, + get_store_id/0, + transfer_leadership/1, + + is_empty/0, + create/2, + adv_create/2, + update/2, + cas/3, + fold/3, fold/4, + foreach/2, + filter/2, + + get/1, + get/2, + get_many/1, + adv_get/1, + adv_get_many/1, + match/1, + match/2, + exists/1, + list/1, + list_child_nodes/1, + count_children/1, + + put/2, put/3, + adv_put/2, + clear_payload/1, + delete/1, delete/2, + delete_or_fail/1, + adv_delete_many/1, + + transaction/1, + transaction/2, + transaction/3, + + clear_store/0, + + dir/0, + info/0, + + handle_async_ret/1, + + status/0]). +%% Used during migration to join the standalone Khepri nodes and form the +%% equivalent cluster +-export([khepri_db_migration_enable/1, + khepri_db_migration_post_enable/1, + is_enabled/0, is_enabled/1, + get_feature_state/0, get_feature_state/1, + handle_fallback/1]). +-export([do_join/1]). +%% To add the current node to an existing cluster +-export([leave_cluster/1]). +-export([check_cluster_consistency/0, + check_cluster_consistency/2, + node_info/0]). +-export([reset/0, + force_reset/0]). +-export([cluster_status_from_khepri/0, + cli_cluster_status/0]). + +%% Path functions +-export([if_has_data/1, + if_has_data_wildcard/0]). + +-export([force_shrink_member_to_current_member/0]). + +%% Helpers for working with the Khepri API / types. +-export([collect_payloads/1, + collect_payloads/2]). + +-ifdef(TEST). +-export([force_metadata_store/1, + clear_forced_metadata_store/0]). +-endif. + +-type timeout_error() :: khepri:error(timeout). +%% Commands like 'put'/'delete' etc. might time out in Khepri. It might take +%% the leader longer to apply the command and reply to the caller than the +%% configured timeout. This error is easy to reproduce - a cluster which is +%% only running a minority of nodes will consistently return `{error, timeout}` +%% for commands until the cluster majority can be re-established. Commands +%% returning `{error, timeout}` are a likely (but not certain) indicator that +%% the node which submitted the command is running in a minority. + +-export_type([timeout_error/0]). + +-compile({no_auto_import, [get/1, get/2, nodes/0]}). + +-define(RA_SYSTEM, coordination). +-define(RA_CLUSTER_NAME, rabbitmq_metadata). +-define(RA_FRIENDLY_NAME, "RabbitMQ metadata store"). +-define(STORE_ID, ?RA_CLUSTER_NAME). +-define(MIGRATION_ID, <<"rabbitmq_metadata">>). + +%% By default we should try to reply from the cluster member that makes a +%% request to change the store. Projections are immediately consistent on the +%% node that issues the reply effect and eventually consistent everywhere else. +%% There isn't a performance penalty for replying from the local node and if +%% the local node isn't a part of the cluster, the reply will come from the +%% leader instead. +-define(DEFAULT_COMMAND_OPTIONS, #{reply_from => local}). + +%% Mnesia tables to migrate and cleanup. +%% +%% This table order is important. For instance, user permissions depend on +%% both vhosts and users to exist in the metadata store. 
+%% +%% Channel and connection tracking are core features with difference: tables +%% cannot be predeclared as they include the node name + +-rabbit_mnesia_tables_to_khepri_db( + [ + {rabbit_vhost, rabbit_db_vhost_m2k_converter}, + {rabbit_user, rabbit_db_user_m2k_converter}, + {rabbit_user_permission, rabbit_db_user_m2k_converter}, + {rabbit_topic_permission, rabbit_db_user_m2k_converter}, + {rabbit_runtime_parameters, rabbit_db_rtparams_m2k_converter}, + {rabbit_queue, rabbit_db_queue_m2k_converter}, + {rabbit_exchange, rabbit_db_exchange_m2k_converter}, + {rabbit_exchange_serial, rabbit_db_exchange_m2k_converter}, + {rabbit_route, rabbit_db_binding_m2k_converter}, + {rabbit_node_maintenance_states, rabbit_db_maintenance_m2k_converter}, + {mirrored_sup_childspec, rabbit_db_msup_m2k_converter}, + + rabbit_durable_queue, + rabbit_durable_exchange, + rabbit_durable_route, + rabbit_semi_durable_route, + rabbit_reverse_route, + rabbit_index_route + ]). + +%% ------------------------------------------------------------------- +%% API wrapping Khepri. +%% ------------------------------------------------------------------- + +-spec setup() -> ok | no_return(). +%% @private + +setup() -> + setup(rabbit_prelaunch:get_context()). + +-spec setup(map()) -> ok | no_return(). +%% @private + +setup(_) -> + ?LOG_DEBUG("Starting Khepri-based " ?RA_FRIENDLY_NAME), + ok = ensure_ra_system_started(), + Timeout = application:get_env(rabbit, khepri_default_timeout, 30000), + ok = application:set_env( + [{khepri, [{default_timeout, Timeout}, + {default_store_id, ?STORE_ID}]}], + [{persistent, true}]), + RaServerConfig = #{cluster_name => ?RA_CLUSTER_NAME, + friendly_name => ?RA_FRIENDLY_NAME}, + case khepri:start(?RA_SYSTEM, RaServerConfig) of + {ok, ?STORE_ID} -> + wait_for_leader(), + wait_for_register_projections(), + ?LOG_DEBUG( + "Khepri-based " ?RA_FRIENDLY_NAME " ready", + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ok; + {error, _} = Error -> + exit(Error) + end. + +wait_for_leader() -> + wait_for_leader(retry_timeout(), retry_limit()). + +retry_timeout() -> + case application:get_env(rabbit, khepri_leader_wait_retry_timeout) of + {ok, T} -> T; + undefined -> 30000 + end. + +retry_limit() -> + case application:get_env(rabbit, khepri_leader_wait_retry_limit) of + {ok, T} -> T; + undefined -> 10 + end. + +wait_for_leader(_Timeout, 0) -> + exit(timeout_waiting_for_leader); +wait_for_leader(Timeout, Retries) -> + rabbit_log:info("Waiting for Khepri leader for ~tp ms, ~tp retries left", + [Timeout, Retries - 1]), + Options = #{timeout => Timeout, + favor => low_latency}, + case khepri:exists(?STORE_ID, [], Options) of + Exists when is_boolean(Exists) -> + rabbit_log:info("Khepri leader elected"), + ok; + {error, timeout} -> %% Khepri >= 0.14.0 + wait_for_leader(Timeout, Retries -1); + {error, {timeout, _ServerId}} -> %% Khepri < 0.14.0 + wait_for_leader(Timeout, Retries -1); + {error, Reason} -> + throw(Reason) + end. + +wait_for_register_projections() -> + wait_for_register_projections(retry_timeout(), retry_limit()). + +wait_for_register_projections(_Timeout, 0) -> + exit(timeout_waiting_for_khepri_projections); +wait_for_register_projections(Timeout, Retries) -> + rabbit_log:info("Waiting for Khepri projections for ~tp ms, ~tp retries left", + [Timeout, Retries - 1]), + try + register_projections() + catch + throw : timeout -> + wait_for_register_projections(Timeout, Retries -1) + end. + +%% @private + +-spec init() -> Ret when + Ret :: ok | timeout_error(). 
+ +init() -> + case members() of + [] -> + timer:sleep(1000), + init(); + Members -> + ?LOG_NOTICE( + "Found the following metadata store members: ~p", [Members], + #{domain => ?RMQLOG_DOMAIN_DB}), + %% Delete transient queues on init. + %% Note that we also do this in the + %% `rabbit_amqqueue:on_node_down/1' callback. We must try this + %% deletion during init because the cluster may have been in a + %% minority when this node went down. We wait for a majority while + %% booting (via `rabbit_khepri:setup/0') though so this deletion is + %% likely to succeed. + rabbit_amqqueue:delete_transient_queues_on_node(node()) + end. + +%% @private + +can_join_cluster(DiscoveryNode) when is_atom(DiscoveryNode) -> + ThisNode = node(), + try + ClusterNodes0 = erpc:call( + DiscoveryNode, + rabbit_khepri, locally_known_nodes, []), + ClusterNodes1 = ClusterNodes0 -- [ThisNode], + {ok, ClusterNodes1} + catch + _:Reason -> + {error, Reason} + end. + +%% @private + +add_member(JoiningNode, JoinedNode) + when JoiningNode =:= node() andalso is_atom(JoinedNode) -> + Ret = do_join(JoinedNode), + post_add_member(JoiningNode, JoinedNode, Ret); +add_member(JoiningNode, JoinedNode) when is_atom(JoinedNode) -> + Ret = rabbit_misc:rpc_call( + JoiningNode, rabbit_khepri, do_join, [JoinedNode]), + post_add_member(JoiningNode, JoinedNode, Ret); +add_member(JoiningNode, [_ | _] = Cluster) -> + JoinedNode = pick_node_in_cluster(Cluster), + ?LOG_INFO( + "Khepri clustering: Attempt to add node ~p to cluster ~0p " + "through node ~p", + [JoiningNode, Cluster, JoinedNode], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + %% Recurse with a single node taken in the `Cluster' list. + add_member(JoiningNode, JoinedNode). + +pick_node_in_cluster([_ | _] = Cluster) when is_list(Cluster) -> + ThisNode = node(), + case lists:member(ThisNode, Cluster) of + true -> ThisNode; + false -> hd(Cluster) + end. + +do_join(RemoteNode) when RemoteNode =/= node() -> + ThisNode = node(), + + ?LOG_DEBUG( + "Khepri clustering: Trying to add this node (~p) to cluster \"~s\" " + "through node ~p", + [ThisNode, ?RA_CLUSTER_NAME, RemoteNode], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + + %% Ensure the local Khepri store is running before we can reset it. It + %% could be stopped if RabbitMQ is not running for instance. + ok = setup(), + khepri:info(?RA_CLUSTER_NAME), + + %% Ensure the remote node is reachable before we add it. + case net_adm:ping(RemoteNode) of + pong -> + %% We verify the cluster membership before adding `ThisNode' to + %% `RemoteNode''s cluster. We do it mostly to keep the same + %% behavior as what we do with Mnesia. Otherwise, the interest is + %% limited given the check and the actual join are not atomic. + + ?LOG_DEBUG( + "Adding this node (~p) to Khepri cluster \"~s\" through " + "node ~p", + [ThisNode, ?RA_CLUSTER_NAME, RemoteNode], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + + %% If the remote node to add is running RabbitMQ, we need to put it + %% in maintenance mode at least. We remember that state to revive + %% the node only if it was fully running before this code. + IsRunning = rabbit:is_running(ThisNode), + AlreadyBeingDrained = + rabbit_maintenance:is_being_drained_consistent_read(ThisNode), + NeedToRevive = IsRunning andalso not AlreadyBeingDrained, + maybe_drain_node(IsRunning), + + %% Joining a cluster includes a reset of the local Khepri store. + Ret = khepri_cluster:join(?RA_CLUSTER_NAME, RemoteNode), + + %% Revive the remote node if it was running and not under + %% maintenance before we changed the cluster membership. 
+ maybe_revive_node(NeedToRevive), + + Ret; + pang -> + {error, {node_unreachable, RemoteNode}} + end. + +maybe_drain_node(true) -> + ok = rabbit_maintenance:drain(); +maybe_drain_node(false) -> + ok. + +maybe_revive_node(true) -> + ok = rabbit_maintenance:revive(); +maybe_revive_node(false) -> + ok. + +post_add_member(JoiningNode, JoinedNode, ok) -> + ?LOG_INFO( + "Khepri clustering: Node ~p successfully added to cluster \"~s\" " + "through node ~p", + [JoiningNode, ?RA_CLUSTER_NAME, JoinedNode], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ok; +post_add_member(JoiningNode, JoinedNode, Error) -> + ?LOG_INFO( + "Khepri clustering: Failed to add node ~p to cluster \"~s\" " + "through ~p: ~p", + [JoiningNode, ?RA_CLUSTER_NAME, JoinedNode, Error], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + Error. + +%% @private + +leave_cluster(Node) -> + retry_khepri_op(fun() -> remove_member(Node) end, 60). + +%% @private + +remove_member(NodeToRemove) when NodeToRemove =/= node() -> + ?LOG_DEBUG( + "Trying to remove node ~s from Khepri cluster \"~s\" on node ~s", + [NodeToRemove, ?RA_CLUSTER_NAME, node()], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + + %% Check if the node is part of the cluster. We query the local Ra server + %% only, in case the cluster can't elect a leader right now. + CurrentNodes = locally_known_nodes(), + case lists:member(NodeToRemove, CurrentNodes) of + true -> + %% Ensure the remote node is reachable before we remove it. + case net_adm:ping(NodeToRemove) of + pong -> + remove_reachable_member(NodeToRemove); + pang -> + remove_down_member(NodeToRemove) + end; + false -> + ?LOG_INFO( + "Asked to remove node ~s from Khepri cluster \"~s\" but not " + "member of it: ~p", + [NodeToRemove, ?RA_CLUSTER_NAME, lists:sort(CurrentNodes)], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + rabbit_mnesia:e(not_a_cluster_node) + end. + +remove_reachable_member(NodeToRemove) -> + ?LOG_DEBUG( + "Removing remote node ~s from Khepri cluster \"~s\"", + [NodeToRemove, ?RA_CLUSTER_NAME], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + + %% We need the Khepri store to run on the node to remove, to be + %% able to reset it. + ok = rabbit_misc:rpc_call( + NodeToRemove, ?MODULE, setup, []), + + Ret = rabbit_misc:rpc_call( + NodeToRemove, khepri_cluster, reset, [?RA_CLUSTER_NAME]), + case Ret of + ok -> + ?LOG_DEBUG( + "Node ~s removed from Khepri cluster \"~s\"", + [NodeToRemove, ?RA_CLUSTER_NAME], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ok; + Error -> + ?LOG_ERROR( + "Failed to remove remote node ~s from Khepri " + "cluster \"~s\": ~p", + [NodeToRemove, ?RA_CLUSTER_NAME, Error], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + Error + end. + +remove_down_member(NodeToRemove) -> + ServerRef = khepri_cluster:node_to_member(?STORE_ID, node()), + ServerId = khepri_cluster:node_to_member(?STORE_ID, NodeToRemove), + Timeout = khepri_app:get_default_timeout(), + Ret = ra:remove_member(ServerRef, ServerId, Timeout), + case Ret of + {ok, _, _} -> + ?LOG_DEBUG( + "Node ~s removed from Khepri cluster \"~s\"", + [NodeToRemove, ?RA_CLUSTER_NAME], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ok; + {error, Reason} = Error -> + ?LOG_ERROR( + "Failed to remove remote down node ~s from Khepri " + "cluster \"~s\": ~p", + [NodeToRemove, ?RA_CLUSTER_NAME, Reason], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + Error; + {timeout, _LeaderId} -> + ?LOG_ERROR( + "Failed to remove remote down node ~s from Khepri " + "cluster \"~s\" due to timeout", + [NodeToRemove, ?RA_CLUSTER_NAME], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + {error, timeout} + end. 
+ +%% @private + +reset() -> + %% Rabbit should be stopped, but Khepri needs to be running. Restart it. + ok = setup(), + ok = khepri_cluster:reset(?RA_CLUSTER_NAME), + ok = khepri:stop(?RA_CLUSTER_NAME). + +%% @private + +force_reset() -> + DataDir = maps:get(data_dir, ra_system:fetch(coordination)), + ok = rabbit_file:recursive_delete(filelib:wildcard(DataDir ++ "/*")). + +%% @private + +force_shrink_member_to_current_member() -> + ok = ra_server_proc:force_shrink_members_to_current_member( + {?RA_CLUSTER_NAME, node()}). + +ensure_ra_system_started() -> + {ok, _} = application:ensure_all_started(khepri), + ok = rabbit_ra_systems:ensure_ra_system_started(?RA_SYSTEM). + +-spec members() -> Members when + Members :: [ra:server_id()]. +%% @doc Returns the list of Ra server identifiers that are part of the +%% cluster. +%% +%% The membership is as it is known to the Ra leader in the cluster. +%% +%% The returned list is empty if there was an error. + +members() -> + case khepri_cluster:members(?RA_CLUSTER_NAME) of + {ok, Members} -> Members; + {error, _Reason} -> [] + end. + +-spec locally_known_members() -> Members when + Members :: [ra:server_id()]. +%% @doc Returns the list of Ra server identifiers that are part of the +%% cluster. +%% +%% The membership is as it is known to the local Ra server and may be +%% inconsistent compared to the "official" membership as seen by the Ra +%% leader. +%% +%% The returned list is empty if there was an error. + +locally_known_members() -> + case khepri_cluster:locally_known_members(?RA_CLUSTER_NAME) of + {ok, Members} -> Members; + {error, _Reason} -> [] + end. + +-spec nodes() -> Nodes when + Nodes :: [node()]. +%% @doc Returns the list of Erlang nodes that are part of the cluster. +%% +%% The membership is as it is known to the Ra leader in the cluster. +%% +%% The returned list is empty if there was an error. + +nodes() -> + case khepri_cluster:nodes(?RA_CLUSTER_NAME) of + {ok, Nodes} -> Nodes; + {error, _Reason} -> [] + end. + +-spec locally_known_nodes() -> Nodes when + Nodes :: [node()]. +%% @doc Returns the list of Erlang node that are part of the cluster. +%% +%% The membership is as it is known to the local Ra server and may be +%% inconsistent compared to the "official" membership as seen by the Ra +%% leader. +%% +%% The returned list is empty if there was an error. + +locally_known_nodes() -> + case khepri_cluster:locally_known_nodes(?RA_CLUSTER_NAME) of + {ok, Nodes} -> Nodes; + {error, _Reason} -> [] + end. + +-spec get_ra_cluster_name() -> RaClusterName when + RaClusterName :: ra:cluster_name(). +%% @doc Returns the Ra cluster name. + +get_ra_cluster_name() -> + ?RA_CLUSTER_NAME. + +-spec get_store_id() -> StoreId when + StoreId :: khepri:store_id(). +%% @doc Returns the Khepri store identifier. + +get_store_id() -> + ?STORE_ID. + +-spec dir() -> Dir when + Dir :: file:filename_all(). +%% @doc Returns the Khepri store directory. +%% +%% This corresponds to the underlying Ra system's directory. + +dir() -> + filename:join(rabbit_mnesia:dir(), atom_to_list(?STORE_ID)). + +-spec transfer_leadership([node()]) -> + {ok, in_progress | undefined | node()} | {error, any()}. 
+%% @private + +transfer_leadership([]) -> + rabbit_log:warning("Skipping leadership transfer of metadata store: no candidate " + "(online, not under maintenance) nodes to transfer to!"); +transfer_leadership(TransferCandidates) -> + case get_feature_state() of + enabled -> + transfer_leadership0(TransferCandidates); + _ -> + rabbit_log:info("Skipping leadership transfer of metadata store: Khepri is not enabled") + end. + +-spec transfer_leadership0([node()]) -> + {ok, in_progress | undefined | node()} | {error, any()}. +transfer_leadership0([]) -> + rabbit_log:warning("Khepri clustering: failed to transfer leadership, no more candidates available", []), + {error, not_migrated}; +transfer_leadership0([Destination | TransferCandidates]) -> + rabbit_log:info("Khepri clustering: transferring leadership to node ~p", [Destination]), + case ra_leaderboard:lookup_leader(?STORE_ID) of + {Name, Node} = Id when Node == node() -> + Timeout = khepri_app:get_default_timeout(), + case ra:transfer_leadership(Id, {Name, Destination}) of + ok -> + case ra:members(Id, Timeout) of + {_, _, {_, NewNode}} -> + rabbit_log:info("Khepri clustering: successfully transferred leadership to node ~p", [Destination]), + {ok, NewNode}; + {timeout, _} -> + rabbit_log:warning("Khepri clustering: maybe failed to transfer leadership to node ~p, members query has timed out", [Destination]), + {error, not_migrated} + end; + already_leader -> + rabbit_log:info("Khepri clustering: successfully transferred leadership to node ~p, already the leader", [Destination]), + {ok, Destination}; + {error, Reason} -> + rabbit_log:warning("Khepri clustering: failed to transfer leadership to node ~p with the following error ~p", [Destination, Reason]), + transfer_leadership0(TransferCandidates); + {timeout, _} -> + rabbit_log:warning("Khepri clustering: failed to transfer leadership to node ~p with a timeout", [Destination]), + transfer_leadership0(TransferCandidates) + end; + {_, Node} -> + rabbit_log:info("Khepri clustering: skipping leadership transfer, leader is already in node ~p", [Node]), + {ok, Node}; + undefined -> + rabbit_log:info("Khepri clustering: skipping leadership transfer, leader not elected", []), + {ok, undefined} + end. + +%% @private + +status() -> + Nodes = rabbit_nodes:all_running(), + [try + Metrics = get_ra_key_metrics(N), + #{state := RaftState, + membership := Membership, + commit_index := Commit, + term := Term, + last_index := Last, + last_applied := LastApplied, + last_written_index := LastWritten, + snapshot_index := SnapIdx, + machine_version := MacVer} = Metrics, + [{<<"Node Name">>, N}, + {<<"Raft State">>, RaftState}, + {<<"Membership">>, Membership}, + {<<"Last Log Index">>, Last}, + {<<"Last Written">>, LastWritten}, + {<<"Last Applied">>, LastApplied}, + {<<"Commit Index">>, Commit}, + {<<"Snapshot Index">>, SnapIdx}, + {<<"Term">>, Term}, + {<<"Machine Version">>, MacVer} + ] + catch + _:Error -> + [{<<"Node Name">>, N}, + {<<"Raft State">>, Error}, + {<<"Membership">>, <<>>}, + {<<"Last Log Index">>, <<>>}, + {<<"Last Written">>, <<>>}, + {<<"Last Applied">>, <<>>}, + {<<"Commit Index">>, <<>>}, + {<<"Snapshot Index">>, <<>>}, + {<<"Term">>, <<>>}, + {<<"Machine Version">>, <<>>} + ] + end || N <- Nodes]. 
+ +%% @private + +get_ra_key_metrics(Node) -> + ServerId = {?RA_CLUSTER_NAME, Node}, + Metrics0 = ra:key_metrics(ServerId), + MacVer = try + erpc:call(Node, khepri_machine, version, []) + catch + _:{exception, undef, [{khepri_machine, version, _, _} | _]} -> + 0 + end, + Metrics1 = Metrics0#{machine_version => MacVer}, + Metrics1. + +%% @private + +cli_cluster_status() -> + case rabbit:is_running() of + true -> + Nodes = locally_known_nodes(), + [{nodes, [{disc, Nodes}]}, + {running_nodes, [N || N <- Nodes, rabbit_nodes:is_running(N)]}, + {cluster_name, rabbit_nodes:cluster_name()}]; + false -> + [] + end. + +%% @private + +check_cluster_consistency() -> + %% We want to find 0 or 1 consistent nodes. + ReachableNodes = rabbit_nodes:list_reachable(), + case lists:foldl( + fun (Node, {error, _}) -> check_cluster_consistency(Node, true); + (_Node, {ok, Status}) -> {ok, Status} + end, {error, not_found}, nodes_excl_me(ReachableNodes)) + of + {ok, {RemoteAllNodes, _Running}} -> + case ordsets:is_subset(ordsets:from_list(ReachableNodes), + ordsets:from_list(RemoteAllNodes)) of + true -> + ok; + false -> + %% We delete the schema here since we think we are + %% clustered with nodes that are no longer in the + %% cluster and there is no other way to remove + %% them from our schema. On the other hand, we are + %% sure that there is another online node that we + %% can use to sync the tables with. There is a + %% race here: if between this check and the + %% `init_db' invocation the cluster gets + %% disbanded, we're left with a node with no + %% mnesia data that will try to connect to offline + %% nodes. + %% TODO delete schema in khepri ??? + ok + end; + {error, not_found} -> + ok; + {error, _} = E -> + E + end. + +nodes_excl_me(Nodes) -> Nodes -- [node()]. + +%% @private + +check_cluster_consistency(Node, CheckNodesConsistency) -> + case (catch remote_node_info(Node)) of + {badrpc, _Reason} -> + {error, not_found}; + {'EXIT', {badarg, _Reason}} -> + {error, not_found}; + {_OTP, _Rabbit, {error, _Reason}} -> + {error, not_found}; + {_OTP, _Rabbit, {ok, Status}} when CheckNodesConsistency -> + case rabbit_db_cluster:check_compatibility(Node) of + ok -> + case check_nodes_consistency(Node, Status) of + ok -> {ok, Status}; + Error -> Error + end; + Error -> + Error + end; + {_OTP, _Rabbit, {ok, Status}} -> + case rabbit_db_cluster:check_compatibility(Node) of + ok -> {ok, Status}; + Error -> Error + end + end. + +remote_node_info(Node) -> + rpc:call(Node, ?MODULE, node_info, []). + +check_nodes_consistency(Node, {RemoteAllNodes, _RemoteRunningNodes}) -> + case me_in_nodes(RemoteAllNodes) of + true -> + ok; + false -> + {error, {inconsistent_cluster, + format_inconsistent_cluster_message(node(), Node)}} + end. + +format_inconsistent_cluster_message(Thinker, Dissident) -> + rabbit_misc:format("Khepri: node ~tp thinks it's clustered " + "with node ~tp, but ~tp disagrees", + [Thinker, Dissident, Dissident]). + +me_in_nodes(Nodes) -> lists:member(node(), Nodes). + +%% @private + +node_info() -> + {rabbit_misc:otp_release(), + rabbit_misc:version(), + cluster_status_from_khepri()}. + +%% @private + +cluster_status_from_khepri() -> + try + _ = get_ra_key_metrics(node()), + All = locally_known_nodes(), + Running = lists:filter( + fun(N) -> + rabbit_nodes:is_running(N) + end, All), + {ok, {All, Running}} + catch + _:_ -> + {error, khepri_not_running} + end. + +%% ------------------------------------------------------------------- +%% "Proxy" functions to Khepri API. 
+%% ------------------------------------------------------------------- + +%% They just add the store ID to every calls. +%% +%% The only exceptions are get() and match() which both call khepri:get() +%% behind the scene with different options. +%% +%% They are some additional functions too, because they are useful in +%% RabbitMQ. They might be moved to Khepri in the future. + +is_empty() -> khepri:is_empty(?STORE_ID). + +create(Path, Data) -> + khepri:create(?STORE_ID, Path, Data, ?DEFAULT_COMMAND_OPTIONS). +adv_create(Path, Data) -> adv_create(Path, Data, #{}). +adv_create(Path, Data, Options0) -> + Options = maps:merge(?DEFAULT_COMMAND_OPTIONS, Options0), + khepri_adv:create(?STORE_ID, Path, Data, Options). +update(Path, Data) -> + khepri:update(?STORE_ID, Path, Data, ?DEFAULT_COMMAND_OPTIONS). +cas(Path, Pattern, Data) -> + khepri:compare_and_swap( + ?STORE_ID, Path, Pattern, Data, ?DEFAULT_COMMAND_OPTIONS). + +fold(Path, Pred, Acc) -> + khepri:fold(?STORE_ID, Path, Pred, Acc, #{favor => low_latency}). + +fold(Path, Pred, Acc, Options) -> + Options1 = Options#{favor => low_latency}, + khepri:fold(?STORE_ID, Path, Pred, Acc, Options1). + +foreach(Path, Pred) -> + khepri:foreach(?STORE_ID, Path, Pred, #{favor => low_latency}). + +filter(Path, Pred) -> + khepri:filter(?STORE_ID, Path, Pred, #{favor => low_latency}). + +get(Path) -> + khepri:get(?STORE_ID, Path, #{favor => low_latency}). + +get(Path, Options) -> + Options1 = Options#{favor => low_latency}, + khepri:get(?STORE_ID, Path, Options1). + +get_many(PathPattern) -> + khepri:get_many(?STORE_ID, PathPattern, #{favor => low_latency}). + +adv_get(Path) -> + khepri_adv:get(?STORE_ID, Path, #{favor => low_latency}). + +adv_get_many(PathPattern) -> + khepri_adv:get_many(?STORE_ID, PathPattern, #{favor => low_latency}). + +match(Path) -> + match(Path, #{}). + +match(Path, Options) -> + Options1 = Options#{favor => low_latency}, + khepri:get_many(?STORE_ID, Path, Options1). + +exists(Path) -> khepri:exists(?STORE_ID, Path, #{favor => low_latency}). + +list(Path) -> + khepri:get_many( + ?STORE_ID, Path ++ [?KHEPRI_WILDCARD_STAR], #{favor => low_latency}). + +list_child_nodes(Path) -> + Options = #{props_to_return => [child_names], + favor => low_latency}, + case khepri_adv:get_many(?STORE_ID, Path, Options) of + {ok, Result} -> + case maps:values(Result) of + [#{child_names := ChildNames}] -> + {ok, ChildNames}; + [] -> + [] + end; + Error -> + Error + end. + +count_children(Path) -> + Options = #{props_to_return => [child_list_length], + favor => low_latency}, + case khepri_adv:get_many(?STORE_ID, Path, Options) of + {ok, Map} -> + lists:sum([L || #{child_list_length := L} <- maps:values(Map)]); + _ -> + 0 + end. + +clear_payload(Path) -> + khepri:clear_payload(?STORE_ID, Path, ?DEFAULT_COMMAND_OPTIONS). + +delete(Path) -> + khepri:delete_many(?STORE_ID, Path, ?DEFAULT_COMMAND_OPTIONS). + +delete(Path, Options0) -> + Options = maps:merge(?DEFAULT_COMMAND_OPTIONS, Options0), + khepri:delete_many(?STORE_ID, Path, Options). + +delete_or_fail(Path) -> + case khepri_adv:delete(?STORE_ID, Path, ?DEFAULT_COMMAND_OPTIONS) of + {ok, Result} -> + case maps:size(Result) of + 0 -> {error, {node_not_found, #{}}}; + _ -> ok + end; + Error -> + Error + end. + +adv_delete_many(Path) -> + khepri_adv:delete_many(?STORE_ID, Path, ?DEFAULT_COMMAND_OPTIONS). + +put(PathPattern, Data) -> + khepri:put( + ?STORE_ID, PathPattern, Data, ?DEFAULT_COMMAND_OPTIONS). 
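%% For illustration only, a minimal sketch of the wrapper pattern described
%% above, as seen from hypothetical caller code (the path below is made up
%% and is not one of the paths RabbitMQ actually stores data under): callers
%% pass a Khepri path and this module fills in the store ID and the default
%% command options.
%%
%%   ok = rabbit_khepri:put([my_app, config, <<"key">>], my_value),
%%   {ok, my_value} = rabbit_khepri:get([my_app, config, <<"key">>]),
%%   ok = rabbit_khepri:delete([my_app, config, <<"key">>]).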
+ +put(PathPattern, Data, Options0) -> + Options = maps:merge(?DEFAULT_COMMAND_OPTIONS, Options0), + khepri:put( + ?STORE_ID, PathPattern, Data, Options). + +adv_put(PathPattern, Data) -> + khepri_adv:put( + ?STORE_ID, PathPattern, Data, ?DEFAULT_COMMAND_OPTIONS). + +transaction(Fun) -> + transaction(Fun, auto, #{}). + +transaction(Fun, ReadWrite) -> + transaction(Fun, ReadWrite, #{}). + +transaction(Fun, ReadWrite, Options0) -> + %% If the transaction is read-only, use the same default options we use + %% for most queries. + DefaultQueryOptions = case ReadWrite of + ro -> + #{favor => low_latency}; + _ -> + #{} + end, + Options1 = maps:merge(DefaultQueryOptions, Options0), + Options = maps:merge(?DEFAULT_COMMAND_OPTIONS, Options1), + case khepri:transaction(?STORE_ID, Fun, ReadWrite, Options) of + ok -> ok; + {ok, Result} -> Result; + {error, Reason} -> throw({error, Reason}) + end. + +clear_store() -> + khepri:delete_many(?STORE_ID, "*", ?DEFAULT_COMMAND_OPTIONS). + +info() -> + ok = setup(), + khepri:info(?STORE_ID). + +handle_async_ret(RaEvent) -> + khepri:handle_async_ret(?STORE_ID, RaEvent). + +%% ------------------------------------------------------------------- +%% collect_payloads(). +%% ------------------------------------------------------------------- + +-spec collect_payloads(Props) -> Ret when + Props :: khepri:node_props(), + Ret :: [Payload], + Payload :: term(). + +%% @doc Collects all payloads from a node props map. +%% +%% This is the same as calling `collect_payloads(Props, [])'. +%% +%% @private + +collect_payloads(Props) when is_map(Props) -> + collect_payloads(Props, []). + +-spec collect_payloads(Props, Acc0) -> Ret when + Props :: khepri:node_props(), + Acc0 :: [Payload], + Ret :: [Payload], + Payload :: term(). + +%% @doc Collects all payloads from a node props map into the accumulator list. +%% +%% This is meant to be used with the `khepri_adv' API to easily collect the +%% payloads from the return value of `khepri_adv:delete_many/4' for example. +%% +%% @returns all payloads in the node props map collected into a list, with +%% `Acc0' as the tail. +%% +%% @private + +collect_payloads(Props, Acc0) when is_map(Props) andalso is_list(Acc0) -> + maps:fold( + fun (_Path, #{data := Payload}, Acc) -> + [Payload | Acc]; + (_Path, _NoPayload, Acc) -> + Acc + end, Acc0, Props). + +%% ------------------------------------------------------------------- +%% if_has_data_wildcard(). +%% ------------------------------------------------------------------- + +-spec if_has_data_wildcard() -> Condition when + Condition :: khepri_condition:condition(). + +if_has_data_wildcard() -> + if_has_data([?KHEPRI_WILDCARD_STAR_STAR]). + +%% ------------------------------------------------------------------- +%% if_has_data(). +%% ------------------------------------------------------------------- + +-spec if_has_data(Conditions) -> Condition when + Conditions :: [Condition], + Condition :: khepri_condition:condition(). + +if_has_data(Conditions) -> + #if_all{conditions = Conditions ++ [#if_has_data{has_data = true}]}. 
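%% For illustration only, a minimal sketch of how collect_payloads/1 can be
%% paired with adv_delete_many/1 as described above; `some_path_pattern()' is
%% a placeholder for a real Khepri path pattern, not a function defined in
%% this module:
%%
%%   case rabbit_khepri:adv_delete_many(some_path_pattern()) of
%%       {ok, NodePropsMap} ->
%%           DeletedPayloads = rabbit_khepri:collect_payloads(NodePropsMap),
%%           {ok, DeletedPayloads};
%%       {error, _} = Error ->
%%           Error
%%   end.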
+ +register_projections() -> + RegisterFuns = [fun register_rabbit_exchange_projection/0, + fun register_rabbit_queue_projection/0, + fun register_rabbit_vhost_projection/0, + fun register_rabbit_users_projection/0, + fun register_rabbit_runtime_parameters_projection/0, + fun register_rabbit_user_permissions_projection/0, + fun register_rabbit_bindings_projection/0, + fun register_rabbit_index_route_projection/0, + fun register_rabbit_topic_graph_projection/0], + [case RegisterFun() of + ok -> + ok; + %% Before Khepri v0.13.0, `khepri:register_projection/1,2,3` would + %% return `{error, exists}` for projections which already exist. + {error, exists} -> + ok; + %% In v0.13.0+, Khepri returns a `?khepri_error(..)` instead. + {error, {khepri, projection_already_exists, _Info}} -> + ok; + {error, Error} -> + throw(Error) + end || RegisterFun <- RegisterFuns], + ok. + +register_rabbit_exchange_projection() -> + Name = rabbit_khepri_exchange, + PathPattern = [rabbit_db_exchange, + exchanges, + _VHost = ?KHEPRI_WILDCARD_STAR, + _Name = ?KHEPRI_WILDCARD_STAR], + KeyPos = #exchange.name, + register_simple_projection(Name, PathPattern, KeyPos). + +register_rabbit_queue_projection() -> + Name = rabbit_khepri_queue, + PathPattern = [rabbit_db_queue, + queues, + _VHost = ?KHEPRI_WILDCARD_STAR, + _Name = ?KHEPRI_WILDCARD_STAR], + KeyPos = 2, %% #amqqueue.name + register_simple_projection(Name, PathPattern, KeyPos). + +register_rabbit_vhost_projection() -> + Name = rabbit_khepri_vhost, + PathPattern = [rabbit_db_vhost, _VHost = ?KHEPRI_WILDCARD_STAR], + KeyPos = 2, %% #vhost.virtual_host + register_simple_projection(Name, PathPattern, KeyPos). + +register_rabbit_users_projection() -> + Name = rabbit_khepri_users, + PathPattern = [rabbit_db_user, + users, + _UserName = ?KHEPRI_WILDCARD_STAR], + KeyPos = 2, %% #internal_user.username + register_simple_projection(Name, PathPattern, KeyPos). + +register_rabbit_runtime_parameters_projection() -> + Name = rabbit_khepri_runtime_parameters, + PathPattern = [rabbit_db_rtparams, + ?KHEPRI_WILDCARD_STAR_STAR], + KeyPos = #runtime_parameters.key, + register_simple_projection(Name, PathPattern, KeyPos). + +register_rabbit_user_permissions_projection() -> + Name = rabbit_khepri_user_permissions, + PathPattern = [rabbit_db_user, + users, + _UserName = ?KHEPRI_WILDCARD_STAR, + user_permissions, + _VHost = ?KHEPRI_WILDCARD_STAR], + KeyPos = #user_permission.user_vhost, + register_simple_projection(Name, PathPattern, KeyPos). + +register_simple_projection(Name, PathPattern, KeyPos) -> + Options = #{keypos => KeyPos}, + Projection = khepri_projection:new(Name, copy, Options), + khepri:register_projection(?RA_CLUSTER_NAME, PathPattern, Projection). + +register_rabbit_bindings_projection() -> + MapFun = fun(_Path, Binding) -> + #route{binding = Binding} + end, + ProjectionFun = projection_fun_for_sets(MapFun), + Options = #{keypos => #route.binding}, + Projection = khepri_projection:new( + rabbit_khepri_bindings, ProjectionFun, Options), + PathPattern = [rabbit_db_binding, + routes, + _VHost = ?KHEPRI_WILDCARD_STAR, + _ExchangeName = ?KHEPRI_WILDCARD_STAR, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = ?KHEPRI_WILDCARD_STAR], + khepri:register_projection(?RA_CLUSTER_NAME, PathPattern, Projection). 
+ +register_rabbit_index_route_projection() -> + MapFun = fun(Path, _) -> + [rabbit_db_binding, routes, VHost, ExchangeName, Kind, + DstName, RoutingKey] = Path, + Exchange = rabbit_misc:r(VHost, exchange, ExchangeName), + Destination = rabbit_misc:r(VHost, Kind, DstName), + SourceKey = {Exchange, RoutingKey}, + #index_route{source_key = SourceKey, + destination = Destination} + end, + ProjectionFun = projection_fun_for_sets(MapFun), + Options = #{type => bag, keypos => #index_route.source_key}, + Projection = khepri_projection:new( + rabbit_khepri_index_route, ProjectionFun, Options), + DirectOrFanout = #if_data_matches{pattern = #{type => '$1'}, + conditions = [{'andalso', + {'=/=', '$1', headers}, + {'=/=', '$1', topic}}]}, + PathPattern = [rabbit_db_binding, + routes, + _VHost = ?KHEPRI_WILDCARD_STAR, + _Exchange = DirectOrFanout, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = ?KHEPRI_WILDCARD_STAR], + khepri:register_projection(?RA_CLUSTER_NAME, PathPattern, Projection). + +%% Routing information is stored in the Khepri store as a `set'. +%% In order to turn these bindings into records in an ETS `bag', we use a +%% `khepri_projection:extended_projection_fun()' to determine the changes +%% `khepri_projection' should apply to the ETS table using set algebra. +projection_fun_for_sets(MapFun) -> + fun + (Table, Path, #{data := OldPayload}, #{data := NewPayload}) -> + Deletions = sets:subtract(OldPayload, NewPayload), + Creations = sets:subtract(NewPayload, OldPayload), + sets:fold( + fun(Element, _Acc) -> + ets:delete_object(Table, MapFun(Path, Element)) + end, [], Deletions), + ets:insert(Table, [MapFun(Path, Element) || + Element <- sets:to_list(Creations)]); + (Table, Path, _OldProps, #{data := NewPayload}) -> + ets:insert(Table, [MapFun(Path, Element) || + Element <- sets:to_list(NewPayload)]); + + (Table, Path, #{data := OldPayload}, _NewProps) -> + sets:fold( + fun(Element, _Acc) -> + ets:delete_object(Table, MapFun(Path, Element)) + end, [], OldPayload); + (_Table, _Path, _OldProps, _NewProps) -> + ok + end. + +register_rabbit_topic_graph_projection() -> + Name = rabbit_khepri_topic_trie, + %% This projection calls some external functions which are disallowed by + %% Horus because they interact with global or random state. We explicitly + %% allow them here for performance reasons. + ShouldProcessFun = + fun (rabbit_db_topic_exchange, split_topic_key_binary, 1, _From) -> + %% This function uses `persistent_term' to store a lazily compiled + %% binary pattern. + false; + (erlang, make_ref, 0, _From) -> + %% Randomness is discouraged in Ra effects since the effects are + %% executed separately by each cluster member. We'll use a random + %% value for trie node IDs but these IDs will live as long as the + %% projection table and do not need to be stable or reproducible + %% across restarts or across Erlang nodes. 
+ false; + (ets, _F, _A, _From) -> + false; + (M, F, A, From) -> + khepri_tx_adv:should_process_function(M, F, A, From) + end, + Options = #{keypos => #topic_trie_edge.trie_edge, + standalone_fun_options => + #{should_process_function => ShouldProcessFun}}, + ProjectionFun = + fun(Table, Path, OldProps, NewProps) -> + [rabbit_db_binding, routes, + VHost, ExchangeName, _Kind, _DstName, RoutingKey] = Path, + Exchange = rabbit_misc:r(VHost, exchange, ExchangeName), + Words = rabbit_db_topic_exchange:split_topic_key_binary(RoutingKey), + case {OldProps, NewProps} of + {#{data := OldBindings}, #{data := NewBindings}} -> + ToInsert = sets:subtract(NewBindings, OldBindings), + ToDelete = sets:subtract(OldBindings, NewBindings), + follow_down_update( + Table, Exchange, Words, + fun(ExistingBindings) -> + sets:union( + sets:subtract(ExistingBindings, ToDelete), + ToInsert) + end); + {_, #{data := NewBindings}} -> + follow_down_update( + Table, Exchange, Words, + fun(ExistingBindings) -> + sets:union(ExistingBindings, NewBindings) + end); + {#{data := OldBindings}, _} -> + follow_down_update( + Table, Exchange, Words, + fun(ExistingBindings) -> + sets:subtract(ExistingBindings, OldBindings) + end); + {_, _} -> + ok + end + end, + Projection = khepri_projection:new(Name, ProjectionFun, Options), + PathPattern = [rabbit_db_binding, + routes, + _VHost = ?KHEPRI_WILDCARD_STAR, + _Exchange = #if_data_matches{pattern = #{type => topic}}, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = ?KHEPRI_WILDCARD_STAR], + khepri:register_projection(?RA_CLUSTER_NAME, PathPattern, Projection). + +-spec follow_down_update(Table, Exchange, Words, UpdateFn) -> Ret when + Table :: ets:tid(), + Exchange :: rabbit_types:exchange_name(), + Words :: [binary()], + BindingsSet :: sets:set(rabbit_types:binding()), + UpdateFn :: fun((BindingsSet) -> BindingsSet), + Ret :: ok. + +follow_down_update(Table, Exchange, Words, UpdateFn) -> + follow_down_update(Table, Exchange, root, Words, UpdateFn), + ok. + +-spec follow_down_update(Table, Exchange, NodeId, Words, UpdateFn) -> Ret when + Table :: ets:tid(), + Exchange :: rabbit_types:exchange_name(), + NodeId :: root | rabbit_guid:guid(), + Words :: [binary()], + BindingsSet :: sets:set(rabbit_types:binding()), + UpdateFn :: fun((BindingsSet) -> BindingsSet), + Ret :: keep | delete. + +follow_down_update(Table, Exchange, FromNodeId, [To | Rest], UpdateFn) -> + TrieEdge = #trie_edge{exchange_name = Exchange, + node_id = FromNodeId, + word = To}, + ToNodeId = case ets:lookup(Table, TrieEdge) of + [#topic_trie_edge{node_id = ExistingId}] -> + ExistingId; + [] -> + %% The Khepri topic graph table uses references for node + %% IDs instead of `rabbit_guid:gen/0' used by mnesia. + %% This is possible because the topic graph table is + %% never persisted to disk. References take up slightly + %% less memory and are very cheap to produce compared to + %% `rabbit_guid' (which requires the `rabbit_guid' + %% genserver to be online). + NewNodeId = make_ref(), + NewEdge = #topic_trie_edge{trie_edge = TrieEdge, + node_id = NewNodeId}, + %% Create the intermediary node. 
+ ets:insert(Table, NewEdge), + NewNodeId + end, + case follow_down_update(Table, Exchange, ToNodeId, Rest, UpdateFn) of + delete -> + OutEdgePattern = #topic_trie_edge{trie_edge = + TrieEdge#trie_edge{word = '_'}, + node_id = '_'}, + case ets:match(Table, OutEdgePattern, 1) of + '$end_of_table' -> + ets:delete(Table, TrieEdge), + delete; + {_Match, _Continuation} -> + keep + end; + keep -> + keep + end; +follow_down_update(Table, Exchange, LeafNodeId, [], UpdateFn) -> + TrieEdge = #trie_edge{exchange_name = Exchange, + node_id = LeafNodeId, + word = bindings}, + Bindings = case ets:lookup(Table, TrieEdge) of + [#topic_trie_edge{node_id = + {bindings, ExistingBindings}}] -> + ExistingBindings; + [] -> + sets:new([{version, 2}]) + end, + NewBindings = UpdateFn(Bindings), + case sets:is_empty(NewBindings) of + true -> + %% If the bindings have been deleted, delete the trie edge and + %% any edges that no longer lead to any bindings or other edges. + ets:delete(Table, TrieEdge), + delete; + false -> + ToNodeId = {bindings, NewBindings}, + Edge = #topic_trie_edge{trie_edge = TrieEdge, node_id = ToNodeId}, + ets:insert(Table, Edge), + keep + end. + +retry_khepri_op(Fun, 0) -> + Fun(); +retry_khepri_op(Fun, N) -> + case Fun() of + {error, {no_more_servers_to_try, Reasons}} = Err -> + case lists:member({error,cluster_change_not_permitted}, Reasons) of + true -> + timer:sleep(1000), + retry_khepri_op(Fun, N - 1); + false -> + Err + end; + {no_more_servers_to_try, Reasons} = Err -> + case lists:member({error,cluster_change_not_permitted}, Reasons) of + true -> + timer:sleep(1000), + retry_khepri_op(Fun, N - 1); + false -> + Err + end; + {error, cluster_change_not_permitted} -> + timer:sleep(1000), + retry_khepri_op(Fun, N - 1); + Any -> + Any + end. + +%% ------------------------------------------------------------------- +%% Mnesia->Khepri migration code. +%% ------------------------------------------------------------------- + +-spec is_enabled() -> IsEnabled when + IsEnabled :: boolean(). +%% @doc Returns true if Khepri is enabled, false otherwise. +%% +%% This function will block while the feature flag is being enabled and Mnesia +%% tables are migrated. + +is_enabled() -> + is_enabled__internal(blocking). + +-spec is_enabled(Node) -> IsEnabled when + Node :: node(), + IsEnabled :: boolean(). +%% @doc Returns true if Khepri is enabled on node `Node', false otherwise. +%% +%% This function will block while the feature flag is being enabled and Mnesia +%% tables are migrated. + +is_enabled(Node) -> + try + erpc:call(Node, ?MODULE, ?FUNCTION_NAME, []) + catch + error:{exception, undef, [{?MODULE, ?FUNCTION_NAME, _, _} | _]} -> + false + end. + +-spec get_feature_state() -> State when + State :: enabled | state_changing | disabled. +%% @doc Returns the current state of the Khepri use. +%% +%% This function will not block while the feature flag is being enabled and +%% Mnesia tables are migrated. It is your responsibility to handle the +%% intermediate state. + +get_feature_state() -> + Ret = is_enabled__internal(non_blocking), + case Ret of + true -> enabled; + false -> disabled; + state_changing -> Ret + end. + +-spec get_feature_state(Node) -> State when + Node :: node(), + State :: enabled | state_changing | disabled. +%% @doc Returns the current state of the Khepri use on node `Node'. +%% +%% This function will not block while the feature flag is being enabled and +%% Mnesia tables are migrated. It is your responsibility to handle the +%% intermediate state. 
+ +get_feature_state(Node) -> + try + erpc:call(Node, ?MODULE, ?FUNCTION_NAME, []) + catch + error:{exception, undef, [{?MODULE, ?FUNCTION_NAME, _, _} | _]} -> + disabled + end. + +%% @private + +khepri_db_migration_enable(#{feature_name := FeatureName}) -> + case sync_cluster_membership_from_mnesia(FeatureName) of + ok -> migrate_mnesia_tables(FeatureName); + Error -> Error + end. + +%% @private + +khepri_db_migration_post_enable( + #{feature_name := FeatureName, enabled := true}) -> + ?LOG_DEBUG( + "Feature flag `~s`: cleaning up after finished migration", + [FeatureName], + #{domain => ?RMQLOG_DOMAIN_DB}), + _ = mnesia_to_khepri:cleanup_after_table_copy(?STORE_ID, ?MIGRATION_ID), + + rabbit_mnesia:stop_mnesia(), + + %% We delete all Mnesia-related files in the data directory. This is in + %% case this node joins a Mnesia-based cluster: it will be reset and switch + %% back from Khepri to Mnesia. If there were Mnesia files left, Mnesia + %% would restart with stale/incorrect data. + MsgStoreDir = filename:dirname(rabbit_vhost:msg_store_dir_base()), + DataDir = rabbit:data_dir(), + MnesiaAndMsgStoreFiles = rabbit_mnesia:mnesia_and_msg_store_files(), + MnesiaFiles0 = MnesiaAndMsgStoreFiles -- [filename:basename(MsgStoreDir)], + MnesiaFiles = [filename:join(DataDir, File) || File <- MnesiaFiles0], + NodeMonitorFiles = [rabbit_node_monitor:cluster_status_filename(), + rabbit_node_monitor:running_nodes_filename()], + _ = rabbit_file:recursive_delete(MnesiaFiles ++ NodeMonitorFiles), + + ok; +khepri_db_migration_post_enable( + #{feature_name := FeatureName, enabled := false}) -> + ?LOG_DEBUG( + "Feature flag `~s`: cleaning up after aborted migration", + [FeatureName], + #{domain => ?RMQLOG_DOMAIN_DB}), + _ = mnesia_to_khepri:rollback_table_copy(?STORE_ID, ?MIGRATION_ID), + ok. + +-spec sync_cluster_membership_from_mnesia(FeatureName) -> Ret when + FeatureName :: rabbit_feature_flags:feature_name(), + Ret :: ok | {error, Reason}, + Reason :: any(). +%% @doc Initializes the Khepri cluster based on the Mnesia cluster. +%% +%% It uses the `khepri_mnesia_migration' application to synchronize membership +%% between both cluster. +%% +%% This function is called as part of the `enable' callback of the `khepri_db' +%% feature flag. + +sync_cluster_membership_from_mnesia(FeatureName) -> + %Lock = {{FeatureName, ?FUNCTION_NAME}, self()}, + %global:set_lock(Lock), + try + %% We use a global lock because `rabbit_khepri:setup()' on one node + %% can't run concurrently with the membership sync on another node: + %% the reset which is part of a join might conflict with the start in + %% `rabbit_khepri:setup()'. + sync_cluster_membership_from_mnesia_locked(FeatureName) + after + %global:del_lock(Lock) + ok + end. + +sync_cluster_membership_from_mnesia_locked(FeatureName) -> + rabbit_mnesia:ensure_mnesia_running(), + + try + ?LOG_INFO( + "Feature flag `~s`: syncing cluster membership", + [FeatureName], + #{domain => ?RMQLOG_DOMAIN_DB}), + Ret = mnesia_to_khepri:sync_cluster_membership(?STORE_ID), + ?LOG_INFO( + "Feature flag `~s`: cluster membership synchronized; " + "members are: ~1p", + [FeatureName, lists:sort(nodes())], + #{domain => ?RMQLOG_DOMAIN_DB}), + Ret + catch + error:{khepri_mnesia_migration_ex, _, _} = Error -> + ?LOG_ERROR( + "Feature flag `~s`: failed to sync membership: ~p", + [FeatureName, Error], + #{domain => ?RMQLOG_DOMAIN_DB}), + {error, Error} + end. 
+ +migrate_mnesia_tables(FeatureName) -> + LoadedPlugins = load_disabled_plugins(), + Migrations = discover_mnesia_tables_to_migrate(), + Ret = do_migrate_mnesia_tables(FeatureName, Migrations), + unload_disabled_plugins(LoadedPlugins), + Ret. + +load_disabled_plugins() -> + #{plugins_path := PluginsPath} = rabbit_prelaunch:get_context(), + %% We need to call the application master in a short-lived process, just in + %% case it can't answer. This can happen if `rabbit` is stopped + %% concurrently. In this case, the application master is busy trying to + %% stop `rabbit`. However, `rabbit` is waiting for any feature flag + %% operations to finish before it stops. + %% + %% By using this short-lived process and killing it after some time, we + %% prevent a deadlock with the application master. + Parent = self(), + Loader = spawn_link( + fun() -> + Plugins = [P#plugin.name + || P <- rabbit_plugins:list(PluginsPath)], + Plugins1 = lists:map( + fun(Plugin) -> + case application:load(Plugin) of + ok -> {Plugin, true}; + _ -> {Plugin, false} + end + end, Plugins), + Parent ! {plugins_loading, Plugins1}, + erlang:unlink(Parent) + end), + receive + {plugins_loading, Plugins} -> + Plugins + after 60_000 -> + erlang:unlink(Loader), + throw( + {failed_to_discover_mnesia_tables_to_migrate, + plugins_loading_timeout}) + end. + +unload_disabled_plugins(Plugins) -> + %% See `load_disabled_plugins/0' for the reason why we use a short-lived + %% process here. + Parent = self(), + Unloader = spawn_link( + fun() -> + lists:foreach( + fun + ({Plugin, true}) -> _ = application:unload(Plugin); + ({_Plugin, false}) -> ok + end, Plugins), + Parent ! plugins_unloading + end), + receive + plugins_unloading -> + ok + after 30_000 -> + erlang:unlink(Unloader), + throw( + {failed_to_discover_mnesia_tables_to_migrate, + plugins_unloading_timeout}) + end. + +discover_mnesia_tables_to_migrate() -> + Apps = rabbit_misc:rabbitmq_related_apps(), + AttrsPerApp = rabbit_misc:module_attributes_from_apps( + rabbit_mnesia_tables_to_khepri_db, Apps), + discover_mnesia_tables_to_migrate1(AttrsPerApp, #{}). + +discover_mnesia_tables_to_migrate1( + [{App, _Module, Migrations} | Rest], + MigrationsPerApp) + when is_list(Migrations) -> + Migrations0 = maps:get(App, MigrationsPerApp, []), + Migrations1 = Migrations0 ++ Migrations, + MigrationsPerApp1 = MigrationsPerApp#{App => Migrations1}, + discover_mnesia_tables_to_migrate1(Rest, MigrationsPerApp1); +discover_mnesia_tables_to_migrate1([], MigrationsPerApp) -> + %% We list the applications involved and make sure `rabbit' is handled + %% first. + Apps = lists:sort( + fun + (rabbit, _) -> true; + (_, rabbit) -> false; + (A, B) -> A =< B + end, + maps:keys(MigrationsPerApp)), + lists:foldl( + fun(App, Acc) -> + Acc ++ maps:get(App, MigrationsPerApp) + end, [], Apps). 
+ +do_migrate_mnesia_tables(FeatureName, Migrations) -> + Tables = lists:map( + fun + ({Table, _Mod}) when is_atom(Table) -> Table; + (Table) when is_atom(Table) -> Table + end, + Migrations), + ?LOG_NOTICE( + "Feature flags: `~ts`: starting migration of ~b tables from Mnesia " + "to Khepri; expect decrease in performance and increase in memory " + "footprint", + [FeatureName, length(Migrations)], + #{domain => ?RMQLOG_DOMAIN_DB}), + rabbit_table:wait(Tables, _Retry = true), + Ret = mnesia_to_khepri:copy_tables( + ?STORE_ID, ?MIGRATION_ID, Tables, + {rabbit_db_m2k_converter, Migrations}), + case Ret of + ok -> + ?LOG_NOTICE( + "Feature flags: `~ts`: migration from Mnesia to Khepri " + "finished", + [FeatureName], + #{domain => ?RMQLOG_DOMAIN_DB}), + ok; + {error, _} = Error -> + ?LOG_ERROR( + "Feature flags: `~ts`: failed to migrate Mnesia tables to " + "Khepri:~n ~p", + [FeatureName, Error], + #{domain => ?RMQLOG_DOMAIN_DB}), + {error, {migration_failure, Error}} + end. + +-spec handle_fallback(Funs) -> Ret when + Funs :: #{mnesia := Fun, khepri := Fun | Ret}, + Fun :: fun(() -> Ret), + Ret :: any(). +%% @doc Runs the function corresponding to the used database engine. +%% +%% If the `khepri_db' feature flag is already enabled, it executes the `Fun' +%% corresponding to Khepri directly and returns its value. +%% +%% Otherwise, it tries `Fun' corresponding to Mnesia first. It relies on the +%% "no table" exception from Mnesia to check the state of the feature flag +%% again and possibly switch th Khepri's `Fun'. +%% +%% Mnesia's `Fun' may be executed several times. Therefore, it must be +%% idempotent. +%% +%% Because this relies on the "no exists" table exception, the Mnesia function +%% must read from and/or write to Mnesia tables for this to work. If your +%% function does not access Mnesia tables, please use {@link is_enabled/0} +%% instead. +%% +%% @returns the return value of `Fun'. + +handle_fallback(#{mnesia := MnesiaFun, khepri := KhepriFunOrRet}) + when is_function(MnesiaFun, 0) -> + case get_feature_state() of + enabled when is_function(KhepriFunOrRet, 0) -> + KhepriFunOrRet(); + enabled -> + KhepriFunOrRet; + _ -> + mnesia_to_khepri:handle_fallback( + ?STORE_ID, ?MIGRATION_ID, MnesiaFun, KhepriFunOrRet) + end. + +-ifdef(TEST). +-define(FORCED_MDS_KEY, {?MODULE, forced_metadata_store}). + +force_metadata_store(Backend) -> + persistent_term:put(?FORCED_MDS_KEY, Backend). + +get_forced_metadata_store() -> + persistent_term:get(?FORCED_MDS_KEY, undefined). + +clear_forced_metadata_store() -> + _ = persistent_term:erase(?FORCED_MDS_KEY), + ok. + +is_enabled__internal(Blocking) -> + case get_forced_metadata_store() of + khepri -> + ?assert( + rabbit_feature_flags:is_enabled(khepri_db, non_blocking)), + true; + mnesia -> + ?assertNot( + rabbit_feature_flags:is_enabled(khepri_db, non_blocking)), + false; + undefined -> + rabbit_feature_flags:is_enabled(khepri_db, Blocking) + end. +-else. +is_enabled__internal(Blocking) -> + rabbit_feature_flags:is_enabled(khepri_db, Blocking). +-endif. diff --git a/deps/rabbit/src/rabbit_limiter.erl b/deps/rabbit/src/rabbit_limiter.erl index eb4df6268554..f19d20ca4d78 100644 --- a/deps/rabbit/src/rabbit_limiter.erl +++ b/deps/rabbit/src/rabbit_limiter.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% The purpose of the limiter is to stem the flow of messages from @@ -62,12 +62,11 @@ %% that's what the limit_prefetch/3, unlimit_prefetch/1, %% get_prefetch_limit/1 API functions are about. They also tell the %% limiter queue state (via the queue) about consumer credit -%% changes and message acknowledgement - that's what credit/5 and +%% changes and message acknowledgement - that's what credit/4 and %% ack_from_queue/3 are for. %% -%% 2. Queues also tell the limiter queue state about the queue -%% becoming empty (via drained/1) and consumers leaving (via -%% forget_consumer/2). +%% 2. Queues also tell the limiter queue state about consumers leaving +%% (via forget_consumer/2). %% %% 3. Queues register with the limiter - this happens as part of %% activate/1. @@ -120,8 +119,8 @@ get_prefetch_limit/1, ack/2, pid/1]). %% queue API -export([client/1, activate/1, can_send/3, resume/1, deactivate/1, - is_suspended/1, is_consumer_blocked/2, credit/5, ack_from_queue/3, - drained/1, forget_consumer/2]). + is_suspended/1, is_consumer_blocked/2, credit/4, ack_from_queue/3, + forget_consumer/2]). %% callbacks -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2, prioritise_call/4]). @@ -136,7 +135,7 @@ -type qstate() :: #qstate{pid :: pid() | none, state :: 'dormant' | 'active' | 'suspended'}. --type credit_mode() :: 'manual' | 'drain' | 'auto'. +-type credit_mode() :: auto | manual. %%---------------------------------------------------------------------------- @@ -259,18 +258,11 @@ is_consumer_blocked(#qstate{credits = Credits}, CTag) -> {value, #credit{}} -> true end. --spec credit - (qstate(), rabbit_types:ctag(), non_neg_integer(), credit_mode(), - boolean()) -> - {boolean(), qstate()}. - -credit(Limiter = #qstate{credits = Credits}, CTag, Crd, Mode, IsEmpty) -> - {Res, Cr} = - case IsEmpty andalso Mode =:= drain of - true -> {true, #credit{credit = 0, mode = manual}}; - false -> {false, #credit{credit = Crd, mode = Mode}} - end, - {Res, Limiter#qstate{credits = enter_credit(CTag, Cr, Credits)}}. +-spec credit(qstate(), rabbit_types:ctag(), non_neg_integer(), credit_mode()) -> + qstate(). +credit(Limiter = #qstate{credits = Credits}, CTag, Crd, Mode) -> + Cr = #credit{credit = Crd, mode = Mode}, + Limiter#qstate{credits = enter_credit(CTag, Cr, Credits)}. -spec ack_from_queue(qstate(), rabbit_types:ctag(), non_neg_integer()) -> {boolean(), qstate()}. @@ -286,20 +278,6 @@ ack_from_queue(Limiter = #qstate{credits = Credits}, CTag, Credit) -> end, {Unblocked, Limiter#qstate{credits = Credits1}}. --spec drained(qstate()) -> - {[{rabbit_types:ctag(), non_neg_integer()}], qstate()}. - -drained(Limiter = #qstate{credits = Credits}) -> - Drain = fun(C) -> C#credit{credit = 0, mode = manual} end, - {CTagCredits, Credits2} = - rabbit_misc:gb_trees_fold( - fun (CTag, C = #credit{credit = Crd, mode = drain}, {Acc, Creds0}) -> - {[{CTag, Crd} | Acc], update_credit(CTag, Drain(C), Creds0)}; - (_CTag, #credit{credit = _Crd, mode = _Mode}, {Acc, Creds0}) -> - {Acc, Creds0} - end, {[], Credits}, Credits), - {CTagCredits, Limiter#qstate{credits = Credits2}}. - -spec forget_consumer(qstate(), rabbit_types:ctag()) -> qstate(). 
forget_consumer(Limiter = #qstate{credits = Credits}, CTag) -> @@ -309,13 +287,6 @@ forget_consumer(Limiter = #qstate{credits = Credits}, CTag) -> %% Queue-local code %%---------------------------------------------------------------------------- -%% We want to do all the AMQP 1.0-ish link level credit calculations -%% in the queue (to do them elsewhere introduces a ton of -%% races). However, it's a big chunk of code that is conceptually very -%% linked to the limiter concept. So we get the queue to hold a bit of -%% state for us (#qstate.credits), and maintain a fiction that the -%% limiter is making the decisions... - decrement_credit(CTag, Credits) -> case gb_trees:lookup(CTag, Credits) of {value, C = #credit{credit = Credit}} -> @@ -325,16 +296,10 @@ decrement_credit(CTag, Credits) -> end. enter_credit(CTag, C, Credits) -> - gb_trees:enter(CTag, ensure_credit_invariant(C), Credits). + gb_trees:enter(CTag, C, Credits). update_credit(CTag, C, Credits) -> - gb_trees:update(CTag, ensure_credit_invariant(C), Credits). - -ensure_credit_invariant(C = #credit{credit = 0, mode = drain}) -> - %% Using up all credit implies no need to send a 'drained' event - C#credit{mode = manual}; -ensure_credit_invariant(C) -> - C. + gb_trees:update(CTag, C, Credits). %%---------------------------------------------------------------------------- %% gen_server callbacks diff --git a/deps/rabbit/src/rabbit_log_channel.erl b/deps/rabbit/src/rabbit_log_channel.erl index efeac31e8939..a8d600e61054 100644 --- a/deps/rabbit/src/rabbit_log_channel.erl +++ b/deps/rabbit/src/rabbit_log_channel.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc Compatibility module for the old Lager-based logging API. diff --git a/deps/rabbit/src/rabbit_log_connection.erl b/deps/rabbit/src/rabbit_log_connection.erl index 0dd8fe6e81f2..a2e266718a7d 100644 --- a/deps/rabbit/src/rabbit_log_connection.erl +++ b/deps/rabbit/src/rabbit_log_connection.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc Compatibility module for the old Lager-based logging API. diff --git a/deps/rabbit/src/rabbit_log_mirroring.erl b/deps/rabbit/src/rabbit_log_mirroring.erl index b7e6d7365073..2553a236939e 100644 --- a/deps/rabbit/src/rabbit_log_mirroring.erl +++ b/deps/rabbit/src/rabbit_log_mirroring.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc Compatibility module for the old Lager-based logging API. 
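Stepping back to the rabbit_limiter changes above: credit/5 and the `drain' mode are gone, so granting consumer credit is now a plain state update. A rough usage sketch under the new API, not taken from the patch, with `LimiterPid' and `CTag' as placeholders:

example_grant_credit(LimiterPid, CTag) ->
    Limiter0 = rabbit_limiter:client(LimiterPid),
    %% credit/4 records the granted credit for the consumer; `auto' is
    %% understood here as credit being replenished when the queue sees
    %% acknowledgements (ack_from_queue/3), `manual' as the client
    %% topping it up explicitly.
    Limiter1 = rabbit_limiter:credit(Limiter0, CTag, 10, auto),
    %% ack_from_queue/3 still reports whether the consumer became
    %% unblocked, together with the updated limiter state.
    {_Unblocked, Limiter2} = rabbit_limiter:ack_from_queue(Limiter1, CTag, 1),
    rabbit_limiter:is_consumer_blocked(Limiter2, CTag).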
diff --git a/deps/rabbit/src/rabbit_log_prelaunch.erl b/deps/rabbit/src/rabbit_log_prelaunch.erl index 93600087f3fe..287c371144d6 100644 --- a/deps/rabbit/src/rabbit_log_prelaunch.erl +++ b/deps/rabbit/src/rabbit_log_prelaunch.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc Compatibility module for the old Lager-based logging API. diff --git a/deps/rabbit/src/rabbit_log_queue.erl b/deps/rabbit/src/rabbit_log_queue.erl index 08632c015c92..dcab5c591abd 100644 --- a/deps/rabbit/src/rabbit_log_queue.erl +++ b/deps/rabbit/src/rabbit_log_queue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc Compatibility module for the old Lager-based logging API. diff --git a/deps/rabbit/src/rabbit_log_tail.erl b/deps/rabbit/src/rabbit_log_tail.erl index fd724acbade5..b8ac4a45b96f 100644 --- a/deps/rabbit/src/rabbit_log_tail.erl +++ b/deps/rabbit/src/rabbit_log_tail.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_log_tail). diff --git a/deps/rabbit/src/rabbit_logger_exchange_h.erl b/deps/rabbit/src/rabbit_logger_exchange_h.erl index 607a3a7295a7..781e4ce6203a 100644 --- a/deps/rabbit/src/rabbit_logger_exchange_h.erl +++ b/deps/rabbit/src/rabbit_logger_exchange_h.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_logger_exchange_h). @@ -12,7 +12,6 @@ -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit_common/include/logging.hrl"). - %% logger callbacks -export([log/2, adding_handler/1, removing_handler/1, changing_config/3, filter_config/1]). 
@@ -54,12 +53,15 @@ do_log(LogEvent, #{config := #{exchange := Exchange}} = Config) -> PBasic = log_event_to_amqp_msg(LogEvent, Config), Body = try_format_body(LogEvent, Config), Content = rabbit_basic:build_content(PBasic, Body), - Anns = #{exchange => Exchange#resource.name, - routing_keys => [RoutingKey]}, - Msg = mc:init(mc_amqpl, Content, Anns), - case rabbit_queue_type:publish_at_most_once(Exchange, Msg) of - ok -> ok; - {error, not_found} -> ok + case mc_amqpl:message(Exchange, RoutingKey, Content) of + {ok, Msg} -> + case rabbit_queue_type:publish_at_most_once(Exchange, Msg) of + ok -> ok; + {error, not_found} -> ok + end; + {error, _Reason} -> + %% it would be good to log this error but can we? + ok end. removing_handler(Config) -> @@ -124,24 +126,38 @@ start_setup_proc(#{config := InternalConfig} = Config) -> {ok, DefaultVHost} = application:get_env(rabbit, default_vhost), Exchange = rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME), InternalConfig1 = InternalConfig#{exchange => Exchange}, - - Pid = spawn(fun() -> setup_proc(Config#{config => InternalConfig1}) end), + Pid = spawn(fun() -> + wait_for_initial_pass(60), + setup_proc(Config#{config => InternalConfig1}) + end), InternalConfig2 = InternalConfig1#{setup_proc => Pid}, Config#{config => InternalConfig2}. +%% Declaring an exchange requires the metadata store to be ready +%% which happens on a boot step after the second phase of the prelaunch. +%% This function waits for the store initialisation. +wait_for_initial_pass(0) -> + ok; +wait_for_initial_pass(N) -> + case rabbit_db:is_init_finished() of + false -> + timer:sleep(1000), + wait_for_initial_pass(N - 1); + true -> + ok + end. + setup_proc( - #{config := #{exchange := #resource{name = Name, - virtual_host = VHost}}} = Config) -> + #{config := #{exchange := Exchange}} = Config) -> case declare_exchange(Config) of ok -> ?LOG_INFO( - "Logging to exchange '~ts' in vhost '~ts' ready", [Name, VHost], + "Logging to ~ts ready", [rabbit_misc:rs(Exchange)], #{domain => ?RMQLOG_DOMAIN_GLOBAL}); error -> ?LOG_DEBUG( - "Logging to exchange '~ts' in vhost '~ts' not ready, " - "trying again in ~b second(s)", - [Name, VHost, ?DECL_EXCHANGE_INTERVAL_SECS], + "Logging to ~ts not ready, trying again in ~b second(s)", + [rabbit_misc:rs(Exchange), ?DECL_EXCHANGE_INTERVAL_SECS], #{domain => ?RMQLOG_DOMAIN_GLOBAL}), receive stop -> ok @@ -150,36 +166,45 @@ setup_proc( end end. -declare_exchange( - #{config := #{exchange := #resource{name = Name, - virtual_host = VHost} = Exchange}}) -> - try - %% Durable. - #exchange{} = rabbit_exchange:declare( - Exchange, topic, true, false, true, [], - ?INTERNAL_USER), - ?LOG_DEBUG( - "Declared exchange '~ts' in vhost '~ts'", - [Name, VHost], - #{domain => ?RMQLOG_DOMAIN_GLOBAL}), - ok +declare_exchange(#{config := #{exchange := Exchange}}) -> + try rabbit_exchange:declare( + Exchange, topic, true, false, true, [], ?INTERNAL_USER) of + {ok, #exchange{}} -> + ?LOG_DEBUG( + "Declared ~ts", + [rabbit_misc:rs(Exchange)], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ok; + {error, timeout} -> + ?LOG_DEBUG( + "Could not declare ~ts because the operation timed out", + [rabbit_misc:rs(Exchange)], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + error catch Class:Reason -> ?LOG_DEBUG( - "Could not declare exchange '~ts' in vhost '~ts', " - "reason: ~0p:~0p", - [Name, VHost, Class, Reason], + "Could not declare ~ts, reason: ~0p:~0p", + [rabbit_misc:rs(Exchange), Class, Reason], #{domain => ?RMQLOG_DOMAIN_GLOBAL}), error end. 
unconfigure_exchange( - #{config := #{exchange := #resource{name = Name, - virtual_host = VHost} = Exchange, + #{config := #{exchange := Exchange, setup_proc := Pid}}) -> Pid ! stop, - _ = rabbit_exchange:delete(Exchange, false, ?INTERNAL_USER), + case rabbit_exchange:ensure_deleted(Exchange, false, ?INTERNAL_USER) of + ok -> + ok; + {error, timeout} -> + ?LOG_ERROR( + "Could not delete ~ts due to a timeout", + [rabbit_misc:rs(Exchange)], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ok + end, ?LOG_INFO( - "Logging to exchange '~ts' in vhost '~ts' disabled", - [Name, VHost], + "Logging to ~ts disabled", + [rabbit_misc:rs(Exchange)], #{domain => ?RMQLOG_DOMAIN_GLOBAL}). diff --git a/deps/rabbit/src/rabbit_looking_glass.erl b/deps/rabbit/src/rabbit_looking_glass.erl deleted file mode 100644 index 6a88642f4ff3..000000000000 --- a/deps/rabbit/src/rabbit_looking_glass.erl +++ /dev/null @@ -1,102 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_looking_glass). - --ignore_xref([ - {lg, trace, 4}, - {lg, stop, 0}, - {lg_callgrind, profile_many, 3} -]). --ignore_xref([{maps, from_list, 1}]). - --export([boot/0]). --export([trace/1, trace_qq/0, profile/0, profile/1]). --export([connections/0]). - -boot() -> - case os:getenv("RABBITMQ_TRACER") of - false -> - ok; - On when On =:= "1" orelse On =:= "true" -> - rabbit_log:info("Loading Looking Glass profiler for interactive use"), - case application:ensure_all_started(looking_glass) of - {ok, _} -> ok; - {error, Error} -> - rabbit_log:error("Failed to start Looking Glass, reason: ~tp", [Error]) - end; - Value -> - Input = parse_value(Value), - rabbit_log:info( - "Enabling Looking Glass profiler, input value: ~tp", - [Input] - ), - {ok, _} = application:ensure_all_started(looking_glass), - lg:trace( - Input, - lg_file_tracer, - "traces.lz4", - maps:from_list([ - {mode, profile}, - {process_dump, true}, - {running, true}, - {send, true}] - ) - ) - end. - -trace(Input) -> - lg:trace(Input, - lg_file_tracer, - "traces.lz4", - maps:from_list([ - {mode, profile}, - {process_dump, true}, - {running, true}, - {send, true}] - )). - -trace_qq() -> - dbg:stop(), - lg:trace([ra_server, - ra_server_proc, - rabbit_fifo, - queue, - rabbit_fifo_index - ], - lg_file_tracer, - "traces.lz4", - maps:from_list([ - {mode, profile} - % {process_dump, true}, - % {running, true}, - % {send, true} - ] - )), - timer:sleep(10000), - _ = lg:stop(), - profile(). - -profile() -> - profile("callgrind.out"). - -profile(Filename) -> - lg_callgrind:profile_many("traces.lz4.*", Filename, #{running => true}). - -%% -%% Implementation -%% - -parse_value(Value) -> - [begin - [Mod, Fun] = string:tokens(C, ":"), - {callback, list_to_atom(Mod), list_to_atom(Fun)} - end || C <- string:tokens(Value, ",")]. - -connections() -> - Pids = [Pid || {{conns_sup, _}, Pid} <- ets:tab2list(ranch_server)], - ['_', {scope, Pids}]. diff --git a/deps/rabbit/src/rabbit_maintenance.erl b/deps/rabbit/src/rabbit_maintenance.erl index 342b1763c018..5e22a8217bbf 100644 --- a/deps/rabbit/src/rabbit_maintenance.erl +++ b/deps/rabbit/src/rabbit_maintenance.erl @@ -2,13 +2,20 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. 
or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_maintenance). -include_lib("rabbit_common/include/rabbit.hrl"). +%% FIXME: Ra consistent queries are currently fragile in the sense that the +%% query function may run on a remote node and the function reference or MFA +%% may not be valid on that node. That's why consistent queries in this module +%% are in fact local queries when Khepri is enabled. +%% +%% See `rabbit_db_maintenance:get_consistent_in_khepri/1'. + -export([ is_enabled/0, drain/0, @@ -27,8 +34,7 @@ primary_replica_transfer_candidate_nodes/0, random_primary_replica_transfer_candidate_node/2, transfer_leadership_of_quorum_queues/1, - transfer_leadership_of_classic_mirrored_queues/1, - boot/0 + table_definitions/0 ]). -define(DEFAULT_STATUS, regular). @@ -44,13 +50,8 @@ %% Boot %% --rabbit_boot_step({rabbit_maintenance_mode_state, - [{description, "initializes maintenance mode state"}, - {mfa, {?MODULE, boot, []}}, - {requires, networking}]}). - -boot() -> - rabbit_db_maintenance:setup_schema(). +table_definitions() -> + rabbit_db_maintenance:table_definitions(). %% %% API @@ -85,6 +86,8 @@ drain() -> _Pid -> transfer_leadership_of_stream_coordinator(TransferCandidates) end, + transfer_leadership_of_metadata_store(TransferCandidates), + %% allow plugins to react rabbit_event:notify(maintenance_draining, #{ reason => <<"node is being put into maintenance">> @@ -209,43 +212,17 @@ transfer_leadership_of_quorum_queues(_TransferCandidates) -> end || Q <- Queues], rabbit_log:info("Leadership transfer for quorum queues hosted on this node has been initiated"). --spec transfer_leadership_of_classic_mirrored_queues([node()]) -> ok. -%% This function is no longer used by maintanence mode. We retain it in case -%% classic mirrored queue leadership transfer would be reconsidered. -%% -%% With a lot of CMQs in a cluster, the transfer procedure can take prohibitively long -%% for a pre-upgrade task. -transfer_leadership_of_classic_mirrored_queues([]) -> - rabbit_log:warning("Skipping leadership transfer of classic mirrored queues: no candidate " - "(online, not under maintenance) nodes to transfer to!"); -transfer_leadership_of_classic_mirrored_queues(TransferCandidates) -> - Queues = rabbit_amqqueue:list_local_mirrored_classic_queues(), - ReadableCandidates = readable_candidate_list(TransferCandidates), - rabbit_log:info("Will transfer leadership of ~b classic mirrored queues hosted on this node to these peer nodes: ~ts", - [length(Queues), ReadableCandidates]), - [begin - Name = amqqueue:get_name(Q), - ExistingReplicaNodes = [node(Pid) || Pid <- amqqueue:get_sync_slave_pids(Q)], - rabbit_log:debug("Local ~ts has replicas on nodes ~ts", - [rabbit_misc:rs(Name), readable_candidate_list(ExistingReplicaNodes)]), - case random_primary_replica_transfer_candidate_node(TransferCandidates, ExistingReplicaNodes) of - {ok, Pick} -> - rabbit_log:debug("Will transfer leadership of local ~ts. 
Planned target node: ~ts", - [rabbit_misc:rs(Name), Pick]), - case rabbit_mirror_queue_misc:migrate_leadership_to_existing_replica(Q, Pick) of - {migrated, NewPrimary} -> - rabbit_log:debug("Successfully transferred leadership of queue ~ts to node ~ts", - [rabbit_misc:rs(Name), NewPrimary]); - Other -> - rabbit_log:warning("Could not transfer leadership of queue ~ts: ~tp", - [rabbit_misc:rs(Name), Other]) - end; - undefined -> - rabbit_log:warning("Could not transfer leadership of queue ~ts: no suitable candidates?", - [Name]) - end - end || Q <- Queues], - rabbit_log:info("Leadership transfer for local classic mirrored queues is complete"). +transfer_leadership_of_metadata_store(TransferCandidates) -> + rabbit_log:info("Will transfer leadership of metadata store with current leader on this node", + []), + case rabbit_khepri:transfer_leadership(TransferCandidates) of + {ok, Node} when Node == node(); Node == undefined -> + rabbit_log:info("Skipping leadership transfer of metadata store: current leader is not on this node"); + {ok, Node} -> + rabbit_log:info("Leadership transfer for metadata store on this node has been done. The new leader is ~p", [Node]); + Error -> + rabbit_log:warning("Skipping leadership transfer of metadata store: ~p", [Error]) + end. -spec transfer_leadership_of_stream_coordinator([node()]) -> ok. transfer_leadership_of_stream_coordinator([]) -> @@ -314,24 +291,21 @@ random_nth(Nodes) -> revive_local_quorum_queue_replicas() -> Queues = rabbit_amqqueue:list_local_followers(), - [begin - Name = amqqueue:get_name(Q), - rabbit_log:debug("Will trigger a leader election for local quorum queue ~ts", - [rabbit_misc:rs(Name)]), - %% start local QQ replica (Ra server) of this queue - {Prefix, _Node} = amqqueue:get_pid(Q), - RaServer = {Prefix, node()}, - rabbit_log:debug("Will start Ra server ~tp", [RaServer]), - case rabbit_quorum_queue:restart_server(RaServer) of - ok -> - rabbit_log:debug("Successfully restarted Ra server ~tp", [RaServer]); - {error, {already_started, _Pid}} -> - rabbit_log:debug("Ra server ~tp is already running", [RaServer]); - {error, nodedown} -> - rabbit_log:error("Failed to restart Ra server ~tp: target node was reported as down") - end - end || Q <- Queues], - rabbit_log:info("Restart of local quorum queue replicas is complete"). + %% NB: this function ignores the first argument so we can just pass the + %% empty binary as the vhost name. + {Recovered, Failed} = rabbit_quorum_queue:recover(<<>>, Queues), + rabbit_log:debug("Successfully revived ~b quorum queue replicas", + [length(Recovered)]), + case length(Failed) of + 0 -> + ok; + NumFailed -> + rabbit_log:error("Failed to revive ~b quorum queue replicas", + [NumFailed]) + end, + + rabbit_log:info("Restart of local quorum queue replicas is complete"), + ok. %% %% Implementation @@ -349,6 +323,3 @@ ok_or_first_error(ok, Acc) -> Acc; ok_or_first_error({error, _} = Err, _Acc) -> Err. - -readable_candidate_list(Nodes) -> - string:join(lists:map(fun rabbit_data_coercion:to_list/1, Nodes), ", "). diff --git a/deps/rabbit/src/rabbit_memory_monitor.erl b/deps/rabbit/src/rabbit_memory_monitor.erl deleted file mode 100644 index 4e71c35a1c5d..000000000000 --- a/deps/rabbit/src/rabbit_memory_monitor.erl +++ /dev/null @@ -1,262 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. 
All rights reserved. -%% - - -%% This module handles the node-wide memory statistics. -%% It receives statistics from all queues, counts the desired -%% queue length (in seconds), and sends this information back to -%% queues. - --module(rabbit_memory_monitor). - --behaviour(gen_server2). - --export([start_link/0, register/2, deregister/1, - report_ram_duration/2, stop/0, conserve_resources/3, memory_use/1]). - --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). - --record(process, {pid, reported, sent, callback, monitor}). - --record(state, {timer, %% 'internal_update' timer - queue_durations, %% ets #process - queue_duration_sum, %% sum of all queue_durations - queue_duration_count, %% number of elements in sum - desired_duration, %% the desired queue duration - disk_alarm %% disable paging, disk alarm has fired - }). - --define(SERVER, ?MODULE). --define(TABLE_NAME, ?MODULE). - -%% If all queues are pushed to disk (duration 0), then the sum of -%% their reported lengths will be 0. If memory then becomes available, -%% unless we manually intervene, the sum will remain 0, and the queues -%% will never get a non-zero duration. Thus when the mem use is < -%% SUM_INC_THRESHOLD, increase the sum artificially by SUM_INC_AMOUNT. --define(SUM_INC_THRESHOLD, 0.95). --define(SUM_INC_AMOUNT, 1.0). - --define(EPSILON, 0.000001). %% less than this and we clamp to 0 - -%%---------------------------------------------------------------------------- -%% Public API -%%---------------------------------------------------------------------------- - --spec start_link() -> rabbit_types:ok_pid_or_error(). - -start_link() -> - gen_server2:start_link({local, ?SERVER}, ?MODULE, [], []). - --spec register(pid(), {atom(),atom(),[any()]}) -> 'ok'. - -register(Pid, MFA = {_M, _F, _A}) -> - gen_server2:call(?SERVER, {register, Pid, MFA}, infinity). - --spec deregister(pid()) -> 'ok'. - -deregister(Pid) -> - gen_server2:cast(?SERVER, {deregister, Pid}). - --spec report_ram_duration - (pid(), float() | 'infinity') -> number() | 'infinity'. - -report_ram_duration(Pid, QueueDuration) -> - gen_server2:call(?SERVER, - {report_ram_duration, Pid, QueueDuration}, infinity). - --spec stop() -> 'ok'. - -stop() -> - gen_server2:cast(?SERVER, stop). - --spec conserve_resources(pid(), - rabbit_alarm:resource_alarm_source(), - rabbit_alarm:resource_alert()) -> ok. -%% Paging should be enabled/disabled only in response to disk resource alarms -%% for the current node. -conserve_resources(Pid, disk, {_, Conserve, Node}) when node(Pid) =:= Node -> - gen_server2:cast(Pid, {disk_alarm, Conserve}); -conserve_resources(_Pid, _Source, _Conserve) -> - ok. - -memory_use(Type) -> - vm_memory_monitor:get_memory_use(Type). - -%%---------------------------------------------------------------------------- -%% Gen_server callbacks -%%---------------------------------------------------------------------------- - -init([]) -> - {ok, Interval} = application:get_env(rabbit, memory_monitor_interval), - {ok, TRef} = timer:send_interval(Interval, update), - - Ets = ets:new(?TABLE_NAME, [set, private, {keypos, #process.pid}]), - Alarms = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), - {ok, internal_update( - #state { timer = TRef, - queue_durations = Ets, - queue_duration_sum = 0.0, - queue_duration_count = 0, - desired_duration = infinity, - disk_alarm = lists:member(disk, Alarms)})}. 
- -handle_call({report_ram_duration, Pid, QueueDuration}, From, - State = #state { queue_duration_sum = Sum, - queue_duration_count = Count, - queue_durations = Durations, - desired_duration = SendDuration }) -> - - [Proc = #process { reported = PrevQueueDuration }] = - ets:lookup(Durations, Pid), - - gen_server2:reply(From, SendDuration), - - {Sum1, Count1} = - case {PrevQueueDuration, QueueDuration} of - {infinity, infinity} -> {Sum, Count}; - {infinity, _} -> {Sum + QueueDuration, Count + 1}; - {_, infinity} -> {Sum - PrevQueueDuration, Count - 1}; - {_, _} -> {Sum - PrevQueueDuration + QueueDuration, - Count} - end, - true = ets:insert(Durations, Proc #process { reported = QueueDuration, - sent = SendDuration }), - {noreply, State #state { queue_duration_sum = zero_clamp(Sum1), - queue_duration_count = Count1 }}; - -handle_call({register, Pid, MFA}, _From, - State = #state { queue_durations = Durations }) -> - MRef = erlang:monitor(process, Pid), - true = ets:insert(Durations, #process { pid = Pid, reported = infinity, - sent = infinity, callback = MFA, - monitor = MRef }), - {reply, ok, State}; - -handle_call(_Request, _From, State) -> - {noreply, State}. - -handle_cast({disk_alarm, Alarm}, State = #state{disk_alarm = Alarm}) -> - {noreply, State}; - -handle_cast({disk_alarm, Alarm}, State) -> - {noreply, internal_update(State#state{disk_alarm = Alarm})}; - -handle_cast({deregister, Pid}, State) -> - {noreply, internal_deregister(Pid, true, State)}; - -handle_cast(stop, State) -> - {stop, normal, State}; - -handle_cast(_Request, State) -> - {noreply, State}. - -handle_info(update, State) -> - {noreply, internal_update(State)}; - -handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) -> - {noreply, internal_deregister(Pid, false, State)}; - -handle_info(_Info, State) -> - {noreply, State}. - -terminate(_Reason, #state { timer = TRef }) -> - _ = timer:cancel(TRef), - ok. - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - - -%%---------------------------------------------------------------------------- -%% Internal functions -%%---------------------------------------------------------------------------- - -zero_clamp(Sum) when Sum < ?EPSILON -> 0.0; -zero_clamp(Sum) -> Sum. - -internal_deregister(Pid, Demonitor, - State = #state { queue_duration_sum = Sum, - queue_duration_count = Count, - queue_durations = Durations }) -> - case ets:lookup(Durations, Pid) of - [] -> State; - [#process { reported = PrevQueueDuration, monitor = MRef }] -> - true = case Demonitor of - true -> erlang:demonitor(MRef); - false -> true - end, - {Sum1, Count1} = - case PrevQueueDuration of - infinity -> {Sum, Count}; - _ -> {zero_clamp(Sum - PrevQueueDuration), - Count - 1} - end, - true = ets:delete(Durations, Pid), - State #state { queue_duration_sum = Sum1, - queue_duration_count = Count1 } - end. - -internal_update(State = #state{queue_durations = Durations, - desired_duration = DesiredDurationAvg, - disk_alarm = DiskAlarm}) -> - DesiredDurationAvg1 = desired_duration_average(State), - ShouldInform = should_inform_predicate(DiskAlarm), - case ShouldInform(DesiredDurationAvg, DesiredDurationAvg1) of - true -> inform_queues(ShouldInform, DesiredDurationAvg1, Durations); - false -> ok - end, - State#state{desired_duration = DesiredDurationAvg1}. 
- -desired_duration_average(#state{disk_alarm = true}) -> - infinity; -desired_duration_average(#state{disk_alarm = false, - queue_duration_sum = Sum, - queue_duration_count = Count}) -> - {ok, LimitThreshold} = - application:get_env(rabbit, vm_memory_high_watermark_paging_ratio), - MemoryRatio = memory_use(ratio), - if MemoryRatio =:= infinity -> - 0.0; - MemoryRatio < LimitThreshold orelse Count == 0 -> - infinity; - MemoryRatio < ?SUM_INC_THRESHOLD -> - ((Sum + ?SUM_INC_AMOUNT) / Count) / MemoryRatio; - true -> - (Sum / Count) / MemoryRatio - end. - -inform_queues(ShouldInform, DesiredDurationAvg, Durations) -> - true = - ets:foldl( - fun (Proc = #process{reported = QueueDuration, - sent = PrevSendDuration, - callback = {M, F, A}}, true) -> - case ShouldInform(PrevSendDuration, DesiredDurationAvg) - andalso ShouldInform(QueueDuration, DesiredDurationAvg) of - true -> ok = erlang:apply( - M, F, A ++ [DesiredDurationAvg]), - ets:insert( - Durations, - Proc#process{sent = DesiredDurationAvg}); - false -> true - end - end, true, Durations). - -%% In normal use, we only inform queues immediately if the desired -%% duration has decreased, we want to ensure timely paging. -should_inform_predicate(false) -> fun greater_than/2; -%% When the disk alarm has gone off though, we want to inform queues -%% immediately if the desired duration has *increased* - we want to -%% ensure timely stopping paging. -should_inform_predicate(true) -> fun (D1, D2) -> greater_than(D2, D1) end. - -greater_than(infinity, infinity) -> false; -greater_than(infinity, _D2) -> true; -greater_than(_D1, infinity) -> false; -greater_than(D1, D2) -> D1 > D2. diff --git a/deps/rabbit/src/rabbit_message_interceptor.erl b/deps/rabbit/src/rabbit_message_interceptor.erl index 4d4d87cd9928..436284e5454a 100644 --- a/deps/rabbit/src/rabbit_message_interceptor.erl +++ b/deps/rabbit/src/rabbit_message_interceptor.erl @@ -2,13 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% This module exists since 3.12 replacing plugins rabbitmq-message-timestamp %% and rabbitmq-routing-node-stamp. Instead of using these plugins, RabbitMQ core can %% now be configured to add such headers. This enables non-AMQP 0.9.1 protocols (that %% do not use rabbit_channel) to also add AMQP 0.9.1 headers to incoming messages. -module(rabbit_message_interceptor). +-include("mc.hrl"). -export([intercept/1]). @@ -17,7 +18,7 @@ -spec intercept(mc:state()) -> mc:state(). intercept(Msg) -> - Interceptors = persistent_term:get({rabbit, incoming_message_interceptors}, []), + Interceptors = persistent_term:get(incoming_message_interceptors, []), lists:foldl(fun({InterceptorName, Overwrite}, M) -> intercept(M, InterceptorName, Overwrite) end, Msg, Interceptors). @@ -26,9 +27,9 @@ intercept(Msg, set_header_routing_node, Overwrite) -> Node = atom_to_binary(node()), set_annotation(Msg, ?HEADER_ROUTING_NODE, Node, Overwrite); intercept(Msg0, set_header_timestamp, Overwrite) -> - Millis = os:system_time(millisecond), - Msg = set_annotation(Msg0, ?HEADER_TIMESTAMP, Millis, Overwrite), - set_timestamp(Msg, Millis, Overwrite). 
+ Ts = mc:get_annotation(?ANN_RECEIVED_AT_TIMESTAMP, Msg0), + Msg = set_annotation(Msg0, ?HEADER_TIMESTAMP, Ts, Overwrite), + set_timestamp(Msg, Ts, Overwrite). -spec set_annotation(mc:state(), mc:ann_key(), mc:ann_value(), boolean()) -> mc:state(). set_annotation(Msg, Key, Value, Overwrite) -> @@ -45,5 +46,5 @@ set_timestamp(Msg, Timestamp, Overwrite) -> {Ts, false} when is_integer(Ts) -> Msg; _ -> - mc:set_annotation(timestamp, Timestamp, Msg) + mc:set_annotation(?ANN_TIMESTAMP, Timestamp, Msg) end. diff --git a/deps/rabbit/src/rabbit_metrics.erl b/deps/rabbit/src/rabbit_metrics.erl index fad6c159ab51..82235830a978 100644 --- a/deps/rabbit/src/rabbit_metrics.erl +++ b/deps/rabbit/src/rabbit_metrics.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_metrics). diff --git a/deps/rabbit/src/rabbit_mirror_queue_coordinator.erl b/deps/rabbit/src/rabbit_mirror_queue_coordinator.erl deleted file mode 100644 index e6b2b90b3678..000000000000 --- a/deps/rabbit/src/rabbit_mirror_queue_coordinator.erl +++ /dev/null @@ -1,468 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_mirror_queue_coordinator). - --export([start_link/4, get_gm/1, ensure_monitoring/2]). - --export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3, handle_pre_hibernate/1]). - --export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]). - --behaviour(gen_server2). --behaviour(gm). - --include_lib("rabbit_common/include/rabbit.hrl"). --include("amqqueue.hrl"). --include("gm_specs.hrl"). - --record(state, { q, - gm, - monitors, - death_fun, - depth_fun - }). - -%%---------------------------------------------------------------------------- -%% -%% Mirror Queues -%% -%% A queue with mirrors consists of the following: -%% -%% #amqqueue{ pid, slave_pids } -%% | | -%% +----------+ +-------+--------------+-----------...etc... -%% | | | -%% V V V -%% amqqueue_process---+ mirror-----+ mirror-----+ ...etc... -%% | BQ = master----+ | | BQ = vq | | BQ = vq | -%% | | BQ = vq | | +-+-------+ +-+-------+ -%% | +-+-------+ | | | -%% +-++-----|---------+ | | (some details elided) -%% || | | | -%% || coordinator-+ | | -%% || +-+---------+ | | -%% || | | | -%% || gm-+ -- -- -- -- gm-+- -- -- -- gm-+- -- --...etc... -%% || +--+ +--+ +--+ -%% || -%% consumers -%% -%% The master is merely an implementation of bq, and thus is invoked -%% through the normal bq interface by the amqqueue_process. The mirrors -%% meanwhile are processes in their own right (as is the -%% coordinator). The coordinator and all mirrors belong to the same gm -%% group. Every member of a gm group receives messages sent to the gm -%% group. 
Because the master is the bq of amqqueue_process, it doesn't -%% have sole control over its mailbox, and as a result, the master -%% itself cannot be passed messages directly (well, it could by via -%% the amqqueue:run_backing_queue callback but that would induce -%% additional unnecessary loading on the master queue process), yet it -%% needs to react to gm events, such as the death of mirrors. Thus the -%% master creates the coordinator, and it is the coordinator that is -%% the gm callback module and event handler for the master. -%% -%% Consumers are only attached to the master. Thus the master is -%% responsible for informing all mirrors when messages are fetched from -%% the bq, when they're acked, and when they're requeued. -%% -%% The basic goal is to ensure that all mirrors performs actions on -%% their bqs in the same order as the master. Thus the master -%% intercepts all events going to its bq, and suitably broadcasts -%% these events on the gm. The mirrors thus receive two streams of -%% events: one stream is via the gm, and one stream is from channels -%% directly. Whilst the stream via gm is guaranteed to be consistently -%% seen by all mirrors , the same is not true of the stream via -%% channels. For example, in the event of an unexpected death of a -%% channel during a publish, only some of the mirrors may receive that -%% publish. As a result of this problem, the messages broadcast over -%% the gm contain published content, and thus mirrors can operate -%% successfully on messages that they only receive via the gm. -%% -%% The key purpose of also sending messages directly from the channels -%% to the mirrors is that without this, in the event of the death of -%% the master, messages could be lost until a suitable mirror is -%% promoted. However, that is not the only reason. A mirror cannot send -%% confirms for a message until it has seen it from the -%% channel. Otherwise, it might send a confirm to a channel for a -%% message that it might *never* receive from that channel. This can -%% happen because new mirrors join the gm ring (and thus receive -%% messages from the master) before inserting themselves in the -%% queue's mnesia record (which is what channels look at for routing). -%% As it turns out, channels will simply ignore such bogus confirms, -%% but relying on that would introduce a dangerously tight coupling. -%% -%% Hence the mirrors have to wait until they've seen both the publish -%% via gm, and the publish via the channel before they issue the -%% confirm. Either form of publish can arrive first, and a mirror can -%% be upgraded to the master at any point during this -%% process. Confirms continue to be issued correctly, however. -%% -%% Because the mirror is a full process, it impersonates parts of the -%% amqqueue API. However, it does not need to implement all parts: for -%% example, no ack or consumer-related message can arrive directly at -%% a mirror from a channel: it is only publishes that pass both -%% directly to the mirrors and go via gm. -%% -%% Slaves can be added dynamically. When this occurs, there is no -%% attempt made to sync the current contents of the master with the -%% new mirror, thus the mirror will start empty, regardless of the state -%% of the master. 
Thus the mirror needs to be able to detect and ignore -%% operations which are for messages it has not received: because of -%% the strict FIFO nature of queues in general, this is -%% straightforward - all new publishes that the new mirror receives via -%% gm should be processed as normal, but fetches which are for -%% messages the mirror has never seen should be ignored. Similarly, -%% acks for messages the mirror never fetched should be -%% ignored. Similarly, we don't republish rejected messages that we -%% haven't seen. Eventually, as the master is consumed from, the -%% messages at the head of the queue which were there before the slave -%% joined will disappear, and the mirror will become fully synced with -%% the state of the master. -%% -%% The detection of the sync-status is based on the depth of the BQs, -%% where the depth is defined as the sum of the length of the BQ (as -%% per BQ:len) and the messages pending an acknowledgement. When the -%% depth of the mirror is equal to the master's, then the mirror is -%% synchronised. We only store the difference between the two for -%% simplicity. Comparing the length is not enough since we need to -%% take into account rejected messages which will make it back into -%% the master queue but can't go back in the mirror, since we don't -%% want "holes" in the mirror queue. Note that the depth, and the -%% length likewise, must always be shorter on the mirror - we assert -%% that in various places. In case mirrors are joined to an empty queue -%% which only goes on to receive publishes, they start by asking the -%% master to broadcast its depth. This is enough for mirrors to always -%% be able to work out when their head does not differ from the master -%% (and is much simpler and cheaper than getting the master to hang on -%% to the guid of the msg at the head of its queue). When a mirror is -%% promoted to a master, it unilaterally broadcasts its depth, in -%% order to solve the problem of depth requests from new mirrors being -%% unanswered by a dead master. -%% -%% Obviously, due to the async nature of communication across gm, the -%% mirrors can fall behind. This does not matter from a sync pov: if -%% they fall behind and the master dies then a) no publishes are lost -%% because all publishes go to all mirrors anyway; b) the worst that -%% happens is that acks get lost and so messages come back to -%% life. This is no worse than normal given you never get confirmation -%% that an ack has been received (not quite true with QoS-prefetch, -%% but close enough for jazz). -%% -%% Because acktags are issued by the bq independently, and because -%% there is no requirement for the master and all mirrors to use the -%% same bq, all references to msgs going over gm is by msg_id. Thus -%% upon acking, the master must convert the acktags back to msg_ids -%% (which happens to be what bq:ack returns), then sends the msg_ids -%% over gm, the mirrors must convert the msg_ids to acktags (a mapping -%% the mirrors themselves must maintain). -%% -%% When the master dies, a mirror gets promoted. This will be the -%% eldest mirror, and thus the hope is that that mirror is most likely -%% to be sync'd with the master. The design of gm is that the -%% notification of the death of the master will only appear once all -%% messages in-flight from the master have been fully delivered to all -%% members of the gm group. 
Thus at this point, the mirror that gets -%% promoted cannot broadcast different events in a different order -%% than the master for the same msgs: there is no possibility for the -%% same msg to be processed by the old master and the new master - if -%% it was processed by the old master then it will have been processed -%% by the mirror before the mirror was promoted, and vice versa. -%% -%% Upon promotion, all msgs pending acks are requeued as normal, the -%% mirror constructs state suitable for use in the master module, and -%% then dynamically changes into an amqqueue_process with the master -%% as the bq, and the slave's bq as the master's bq. Thus the very -%% same process that was the mirror is now a full amqqueue_process. -%% -%% It is important that we avoid memory leaks due to the death of -%% senders (i.e. channels) and partial publications. A sender -%% publishing a message may fail mid way through the publish and thus -%% only some of the mirrors will receive the message. We need the -%% mirrors to be able to detect this and tidy up as necessary to avoid -%% leaks. If we just had the master monitoring all senders then we -%% would have the possibility that a sender appears and only sends the -%% message to a few of the mirrors before dying. Those mirrors would -%% then hold on to the message, assuming they'll receive some -%% instruction eventually from the master. Thus we have both mirrors -%% and the master monitor all senders they become aware of. But there -%% is a race: if the mirror receives a DOWN of a sender, how does it -%% know whether or not the master is going to send it instructions -%% regarding those messages? -%% -%% Whilst the master monitors senders, it can't access its mailbox -%% directly, so it delegates monitoring to the coordinator. When the -%% coordinator receives a DOWN message from a sender, it informs the -%% master via a callback. This allows the master to do any tidying -%% necessary, but more importantly allows the master to broadcast a -%% sender_death message to all the mirrors , saying the sender has -%% died. Once the mirrors receive the sender_death message, they know -%% that they're not going to receive any more instructions from the gm -%% regarding that sender. However, it is possible that the coordinator -%% receives the DOWN and communicates that to the master before the -%% master has finished receiving and processing publishes from the -%% sender. This turns out not to be a problem: the sender has actually -%% died, and so will not need to receive confirms or other feedback, -%% and should further messages be "received" from the sender, the -%% master will ask the coordinator to set up a new monitor, and -%% will continue to process the messages normally. Slaves may thus -%% receive publishes via gm from previously declared "dead" senders, -%% but again, this is fine: should the mirror have just thrown out the -%% message it had received directly from the sender (due to receiving -%% a sender_death message via gm), it will be able to cope with the -%% publication purely from the master via gm. -%% -%% When a mirror receives a DOWN message for a sender, if it has not -%% received the sender_death message from the master via gm already, -%% then it will wait 20 seconds before broadcasting a request for -%% confirmation from the master that the sender really has died. -%% Should a sender have only sent a publish to mirrors , this allows -%% mirrors to inform the master of the previous existence of the -%% sender. 
The master will thus monitor the sender, receive the DOWN, -%% and subsequently broadcast the sender_death message, allowing the -%% mirrors to tidy up. This process can repeat for the same sender: -%% consider one mirror receives the publication, then the DOWN, then -%% asks for confirmation of death, then the master broadcasts the -%% sender_death message. Only then does another mirror receive the -%% publication and thus set up its monitoring. Eventually that slave -%% too will receive the DOWN, ask for confirmation and the master will -%% monitor the sender again, receive another DOWN, and send out -%% another sender_death message. Given the 20 second delay before -%% requesting death confirmation, this is highly unlikely, but it is a -%% possibility. -%% -%% When the 20 second timer expires, the mirror first checks to see -%% whether it still needs confirmation of the death before requesting -%% it. This prevents unnecessary traffic on gm as it allows one -%% broadcast of the sender_death message to satisfy many mirrors. -%% -%% If we consider the promotion of a mirror at this point, we have two -%% possibilities: that of the mirror that has received the DOWN and is -%% thus waiting for confirmation from the master that the sender -%% really is down; and that of the mirror that has not received the -%% DOWN. In the first case, in the act of promotion to master, the new -%% master will monitor again the dead sender, and after it has -%% finished promoting itself, it should find another DOWN waiting, -%% which it will then broadcast. This will allow mirrors to tidy up as -%% normal. In the second case, we have the possibility that -%% confirmation-of-sender-death request has been broadcast, but that -%% it was broadcast before the master failed, and that the mirror being -%% promoted does not know anything about that sender, and so will not -%% monitor it on promotion. Thus a mirror that broadcasts such a -%% request, at the point of broadcasting it, recurses, setting another -%% 20 second timer. As before, on expiry of the timer, the mirrors -%% checks to see whether it still has not received a sender_death -%% message for the dead sender, and if not, broadcasts a death -%% confirmation request. Thus this ensures that even when a master -%% dies and the new mirror has no knowledge of the dead sender, it will -%% eventually receive a death confirmation request, shall monitor the -%% dead sender, receive the DOWN and broadcast the sender_death -%% message. -%% -%% The preceding commentary deals with the possibility of mirrors -%% receiving publications from senders which the master does not, and -%% the need to prevent memory leaks in such scenarios. The inverse is -%% also possible: a partial publication may cause only the master to -%% receive a publication. It will then publish the message via gm. The -%% mirrors will receive it via gm, will publish it to their BQ and will -%% set up monitoring on the sender. They will then receive the DOWN -%% message and the master will eventually publish the corresponding -%% sender_death message. The mirror will then be able to tidy up its -%% state as normal. -%% -%% Recovery of mirrored queues is straightforward: as nodes die, the -%% remaining nodes record this, and eventually a situation is reached -%% in which only one node is alive, which is the master. 
This is the -%% only node which, upon recovery, will resurrect a mirrored queue: -%% nodes which die and then rejoin as a mirror will start off empty as -%% if they have no mirrored content at all. This is not surprising: to -%% achieve anything more sophisticated would require the master and -%% recovering mirror to be able to check to see whether they agree on -%% the last seen state of the queue: checking depth alone is not -%% sufficient in this case. -%% -%% For more documentation see the comments in bug 23554. -%% -%%---------------------------------------------------------------------------- - --spec start_link - (amqqueue:amqqueue(), pid() | 'undefined', - rabbit_mirror_queue_master:death_fun(), - rabbit_mirror_queue_master:depth_fun()) -> - rabbit_types:ok_pid_or_error(). - -start_link(Queue, GM, DeathFun, DepthFun) -> - gen_server2:start_link(?MODULE, [Queue, GM, DeathFun, DepthFun], []). - --spec get_gm(pid()) -> pid(). - -get_gm(CPid) -> - gen_server2:call(CPid, get_gm, infinity). - --spec ensure_monitoring(pid(), [pid()]) -> 'ok'. - -ensure_monitoring(CPid, Pids) -> - gen_server2:cast(CPid, {ensure_monitoring, Pids}). - -%% --------------------------------------------------------------------------- -%% gen_server -%% --------------------------------------------------------------------------- - -init([Q, GM, DeathFun, DepthFun]) when ?is_amqqueue(Q) -> - QueueName = amqqueue:get_name(Q), - ?store_proc_name(QueueName), - GM1 = case GM of - undefined -> - {ok, GM2} = gm:start_link( - QueueName, ?MODULE, [self()], - fun rabbit_mnesia:execute_mnesia_transaction/1), - receive {joined, GM2, _Members} -> - ok - end, - GM2; - _ -> - true = link(GM), - GM - end, - {ok, #state { q = Q, - gm = GM1, - monitors = pmon:new(), - death_fun = DeathFun, - depth_fun = DepthFun }, - hibernate, - {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. - -handle_call(get_gm, _From, State = #state { gm = GM }) -> - reply(GM, State). - -handle_cast({gm_deaths, DeadGMPids}, State = #state{q = Q}) when ?amqqueue_pid_runs_on_local_node(Q) -> - QueueName = amqqueue:get_name(Q), - MPid = amqqueue:get_pid(Q), - case rabbit_mirror_queue_misc:remove_from_queue( - QueueName, MPid, DeadGMPids) of - {ok, MPid, DeadPids, ExtraNodes} -> - rabbit_mirror_queue_misc:report_deaths(MPid, true, QueueName, - DeadPids), - rabbit_mirror_queue_misc:add_mirrors(QueueName, ExtraNodes, async), - noreply(State); - {ok, _MPid0, DeadPids, _ExtraNodes} -> - %% see rabbitmq-server#914; - %% Different mirror is now master, stop current coordinator normally. - %% Initiating queue is now mirror and the least we could do is report - %% deaths which we 'think' we saw. - %% NOTE: Reported deaths here, could be inconsistent. - rabbit_mirror_queue_misc:report_deaths(MPid, false, QueueName, - DeadPids), - {stop, shutdown, State}; - {error, not_found} -> - {stop, normal, State}; - {error, {not_synced, _}} -> - rabbit_log:error("Mirror queue ~tp in unexpected state." 
- " Promoted to master but already a master.", - [QueueName]), - error(unexpected_mirrored_state) - end; - -handle_cast(request_depth, State = #state{depth_fun = DepthFun, q = QArg}) when ?is_amqqueue(QArg) -> - QName = amqqueue:get_name(QArg), - MPid = amqqueue:get_pid(QArg), - case rabbit_amqqueue:lookup(QName) of - {ok, QFound} when ?amqqueue_pid_equals(QFound, MPid) -> - ok = DepthFun(), - noreply(State); - _ -> - {stop, shutdown, State} - end; - -handle_cast({ensure_monitoring, Pids}, State = #state { monitors = Mons }) -> - noreply(State #state { monitors = pmon:monitor_all(Pids, Mons) }); - -handle_cast({delete_and_terminate, {shutdown, ring_shutdown}}, State) -> - {stop, normal, State}; -handle_cast({delete_and_terminate, Reason}, State) -> - {stop, Reason, State}. - -handle_info({'DOWN', _MonitorRef, process, Pid, _Reason}, - State = #state { monitors = Mons, - death_fun = DeathFun }) -> - noreply(case pmon:is_monitored(Pid, Mons) of - false -> State; - true -> ok = DeathFun(Pid), - State #state { monitors = pmon:erase(Pid, Mons) } - end); - -handle_info(Msg, State) -> - {stop, {unexpected_info, Msg}, State}. - -terminate(_Reason, #state{}) -> - ok. - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -handle_pre_hibernate(State = #state { gm = GM }) -> - %% Since GM notifications of deaths are lazy we might not get a - %% timely notification of mirror death if policy changes when - %% everything is idle. So cause some activity just before we - %% sleep. This won't cause us to go into perpetual motion as the - %% heartbeat does not wake up coordinator or mirrors. - gm:broadcast(GM, hibernate_heartbeat), - {hibernate, State}. - -%% --------------------------------------------------------------------------- -%% GM -%% --------------------------------------------------------------------------- - --spec joined(args(), members()) -> callback_result(). - -joined([CPid], Members) -> - CPid ! {joined, self(), Members}, - ok. - --spec members_changed(args(), members(),members()) -> callback_result(). - -members_changed([_CPid], _Births, []) -> - ok; -members_changed([CPid], _Births, Deaths) -> - ok = gen_server2:cast(CPid, {gm_deaths, Deaths}). - --spec handle_msg(args(), pid(), any()) -> callback_result(). - -handle_msg([CPid], _From, request_depth = Msg) -> - ok = gen_server2:cast(CPid, Msg); -handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) -> - ok = gen_server2:cast(CPid, Msg); -handle_msg([_CPid], _From, {delete_and_terminate, _Reason}) -> - %% We tell GM to stop, but we don't instruct the coordinator to - %% stop yet. The GM will first make sure all pending messages were - %% actually delivered. Then it calls handle_terminate/2 below so the - %% coordinator is stopped. - %% - %% If we stop the coordinator right now, remote mirrors could see the - %% coordinator DOWN before delete_and_terminate was delivered to all - %% GMs. One of those GM would be promoted as the master, and this GM - %% would hang forever, waiting for other GMs to stop. - {stop, {shutdown, ring_shutdown}}; -handle_msg([_CPid], _From, _Msg) -> - ok. - --spec handle_terminate(args(), term()) -> any(). - -handle_terminate([CPid], Reason) -> - ok = gen_server2:cast(CPid, {delete_and_terminate, Reason}), - ok. - -%% --------------------------------------------------------------------------- -%% Others -%% --------------------------------------------------------------------------- - -noreply(State) -> - {noreply, State, hibernate}. - -reply(Reply, State) -> - {reply, Reply, State, hibernate}. 
diff --git a/deps/rabbit/src/rabbit_mirror_queue_master.erl b/deps/rabbit/src/rabbit_mirror_queue_master.erl deleted file mode 100644 index 7cd10e15ca56..000000000000 --- a/deps/rabbit/src/rabbit_mirror_queue_master.erl +++ /dev/null @@ -1,598 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_mirror_queue_master). - --export([init/3, terminate/2, delete_and_terminate/2, - purge/1, purge_acks/1, publish/6, publish_delivered/5, - batch_publish/4, batch_publish_delivered/4, - discard/4, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3, - len/1, is_empty/1, depth/1, drain_confirmed/1, - dropwhile/2, fetchwhile/4, set_ram_duration_target/2, ram_duration/1, - needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1, - msg_rates/1, info/2, invoke/3, is_duplicate/2, set_queue_mode/2, - set_queue_version/2, - zip_msgs_and_acks/4]). - --export([start/2, stop/1, delete_crashed/1]). - --export([promote_backing_queue_state/8, sender_death_fun/0, depth_fun/0]). - --export([init_with_existing_bq/3, stop_mirroring/1, sync_mirrors/3]). - --behaviour(rabbit_backing_queue). - --include("amqqueue.hrl"). - --record(state, { name, - gm, - coordinator, - backing_queue, - backing_queue_state, - seen_status, - confirmed, - known_senders, - wait_timeout - }). - --export_type([death_fun/0, depth_fun/0, stats_fun/0]). - --type death_fun() :: fun ((pid()) -> 'ok'). --type depth_fun() :: fun (() -> 'ok'). --type stats_fun() :: fun ((any()) -> 'ok'). --type master_state() :: #state { name :: rabbit_amqqueue:name(), - gm :: pid(), - coordinator :: pid(), - backing_queue :: atom(), - backing_queue_state :: any(), - seen_status :: map(), - confirmed :: [rabbit_guid:guid()], - known_senders :: sets:set() - }. - -%% For general documentation of HA design, see -%% rabbit_mirror_queue_coordinator - -%% --------------------------------------------------------------------------- -%% Backing queue -%% --------------------------------------------------------------------------- - --spec start(_, _) -> no_return(). -start(_Vhost, _DurableQueues) -> - %% This will never get called as this module will never be - %% installed as the default BQ implementation. - exit({not_valid_for_generic_backing_queue, ?MODULE}). - --spec stop(_) -> no_return(). -stop(_Vhost) -> - %% Same as start/1. - exit({not_valid_for_generic_backing_queue, ?MODULE}). - --spec delete_crashed(_) -> no_return(). -delete_crashed(_QName) -> - exit({not_valid_for_generic_backing_queue, ?MODULE}). - -init(Q, Recover, AsyncCallback) -> - {ok, BQ} = application:get_env(backing_queue_module), - BQS = BQ:init(Q, Recover, AsyncCallback), - State = #state{gm = GM} = init_with_existing_bq(Q, BQ, BQS), - ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}), - State. - --spec init_with_existing_bq(amqqueue:amqqueue(), atom(), any()) -> - master_state(). 
- -init_with_existing_bq(Q0, BQ, BQS) when ?is_amqqueue(Q0) -> - QName = amqqueue:get_name(Q0), - case rabbit_mirror_queue_coordinator:start_link( - Q0, undefined, sender_death_fun(), depth_fun()) of - {ok, CPid} -> - GM = rabbit_mirror_queue_coordinator:get_gm(CPid), - Self = self(), - Fun = fun () -> - [Q1] = mnesia:read({rabbit_queue, QName}), - true = amqqueue:is_amqqueue(Q1), - GMPids0 = amqqueue:get_gm_pids(Q1), - GMPids1 = [{GM, Self} | GMPids0], - Q2 = amqqueue:set_gm_pids(Q1, GMPids1), - Q3 = amqqueue:set_state(Q2, live), - %% amqqueue migration: - %% The amqqueue was read from this transaction, no - %% need to handle migration. - ok = rabbit_amqqueue:store_queue(Q3) - end, - ok = rabbit_mnesia:execute_mnesia_transaction(Fun), - {_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q0), - %% We need synchronous add here (i.e. do not return until the - %% mirror is running) so that when queue declaration is finished - %% all mirrors are up; we don't want to end up with unsynced mirrors - %% just by declaring a new queue. But add can't be synchronous all - %% the time as it can be called by mirrors and that's - %% deadlock-prone. - rabbit_mirror_queue_misc:add_mirrors(QName, SNodes, sync), - #state{name = QName, - gm = GM, - coordinator = CPid, - backing_queue = BQ, - backing_queue_state = BQS, - seen_status = #{}, - confirmed = [], - known_senders = sets:new([{version, 2}]), - wait_timeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000)}; - {error, Reason} -> - %% The GM can shutdown before the coordinator has started up - %% (lost membership or missing group), thus the start_link of - %% the coordinator returns {error, shutdown} as rabbit_amqqueue_process - % is trapping exists - throw({coordinator_not_started, Reason}) - end. - --spec stop_mirroring(master_state()) -> {atom(), any()}. - -stop_mirroring(State = #state { coordinator = CPid, - backing_queue = BQ, - backing_queue_state = BQS }) -> - unlink(CPid), - stop_all_slaves(shutdown, State), - {BQ, BQS}. - --spec sync_mirrors(stats_fun(), stats_fun(), master_state()) -> - {'ok', master_state()} | {stop, any(), master_state()}. - -sync_mirrors(HandleInfo, EmitStats, - State = #state { name = QName, - gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - Log = fun (Fmt, Params) -> - rabbit_mirror_queue_misc:log_info( - QName, "Synchronising: " ++ Fmt ++ "", Params) - end, - Log("~tp messages to synchronise", [BQ:len(BQS)]), - {ok, Q} = rabbit_amqqueue:lookup(QName), - SPids = amqqueue:get_slave_pids(Q), - SyncBatchSize = rabbit_mirror_queue_misc:sync_batch_size(Q), - SyncThroughput = rabbit_mirror_queue_misc:default_max_sync_throughput(), - log_mirror_sync_config(Log, SyncBatchSize, SyncThroughput), - Ref = make_ref(), - Syncer = rabbit_mirror_queue_sync:master_prepare(Ref, QName, Log, SPids), - gm:broadcast(GM, {sync_start, Ref, Syncer, SPids}), - S = fun(BQSN) -> State#state{backing_queue_state = BQSN} end, - case rabbit_mirror_queue_sync:master_go( - Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, SyncThroughput, BQ, BQS) of - {cancelled, BQS1} -> Log(" synchronisation cancelled ", []), - {ok, S(BQS1)}; - {shutdown, R, BQS1} -> {stop, R, S(BQS1)}; - {sync_died, R, BQS1} -> Log("~tp", [R]), - {ok, S(BQS1)}; - {already_synced, BQS1} -> {ok, S(BQS1)}; - {ok, BQS1} -> Log("complete", []), - {ok, S(BQS1)} - end. 
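The S closure in sync_mirrors/3 above exists so that each branch of the case on the sync result can return a state with an updated backing-queue state without repeating the record-update syntax. A minimal stand-alone sketch of the same pattern, assuming a hypothetical #st{} record and the result shapes shown in the case above:

    -module(sync_result_sketch).
    -export([apply_sync_result/2]).
    -record(st, {bq_state}).

    %% Map a sync result onto an updated state using a single update closure.
    apply_sync_result(Result, St = #st{}) ->
        S = fun(NewBQS) -> St#st{bq_state = NewBQS} end,
        case Result of
            {ok, BQS1}             -> {ok, S(BQS1)};
            {already_synced, BQS1} -> {ok, S(BQS1)};
            {cancelled, BQS1}      -> {ok, S(BQS1)};
            {sync_died, _R, BQS1}  -> {ok, S(BQS1)};
            {shutdown, R, BQS1}    -> {stop, R, S(BQS1)}
        end.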
- -log_mirror_sync_config(Log, SyncBatchSize, 0) -> - Log("batch size: ~tp", [SyncBatchSize]); -log_mirror_sync_config(Log, SyncBatchSize, SyncThroughput) -> - Log("max batch size: ~tp; max sync throughput: ~tp bytes/s", [SyncBatchSize, SyncThroughput]). - -terminate({shutdown, dropped} = Reason, - State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - %% Backing queue termination - this node has been explicitly - %% dropped. Normally, non-durable queues would be tidied up on - %% startup, but there's a possibility that we will be added back - %% in without this node being restarted. Thus we must do the full - %% blown delete_and_terminate now, but only locally: we do not - %% broadcast delete_and_terminate. - State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)}; - -terminate(Reason, - State = #state { name = QName, - backing_queue = BQ, - backing_queue_state = BQS }) -> - %% Backing queue termination. The queue is going down but - %% shouldn't be deleted. Most likely safe shutdown of this - %% node. - {ok, Q} = rabbit_amqqueue:lookup(QName), - SSPids = amqqueue:get_sync_slave_pids(Q), - case SSPids =:= [] andalso - rabbit_policy:get(<<"ha-promote-on-shutdown">>, Q) =/= <<"always">> of - true -> %% Remove the whole queue to avoid data loss - rabbit_mirror_queue_misc:log_warning( - QName, "Stopping all nodes on master shutdown since no " - "synchronised mirror (replica) is available", []), - stop_all_slaves(Reason, State); - false -> %% Just let some other mirror take over. - ok - end, - State #state { backing_queue_state = BQ:terminate(Reason, BQS) }. - -delete_and_terminate(Reason, State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - stop_all_slaves(Reason, State), - State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)}. - -stop_all_slaves(Reason, #state{name = QName, gm = GM, wait_timeout = WT}) -> - {ok, Q} = rabbit_amqqueue:lookup(QName), - SPids = amqqueue:get_slave_pids(Q), - rabbit_mirror_queue_misc:stop_all_slaves(Reason, SPids, QName, GM, WT). - -purge(State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {drop, 0, BQ:len(BQS), false}), - {Count, BQS1} = BQ:purge(BQS), - {Count, State #state { backing_queue_state = BQS1 }}. - --spec purge_acks(_) -> no_return(). -purge_acks(_State) -> exit({not_implemented, {?MODULE, purge_acks}}). - -publish(Msg, MsgProps, IsDelivered, ChPid, Flow, - State = #state { gm = GM, - seen_status = SS, - backing_queue = BQ, - backing_queue_state = BQS }) -> - MsgId = mc:get_annotation(id, Msg), - {_, Size} = mc:size(Msg), - - false = maps:is_key(MsgId, SS), %% ASSERTION - ok = gm:broadcast(GM, {publish, ChPid, Flow, MsgProps, Msg}, - Size), - BQS1 = BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS), - ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }). 
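publish/6 above, like the other write-path operations in this removed module, follows one shape: broadcast the operation to the mirrors over GM first, then apply it to the real backing-queue module held in the state. A generic sketch of that broadcast-then-delegate shape (the module, record and replicated_op/3 name are hypothetical; gm:broadcast/2 is the call already used throughout the code above):

    -module(replicate_sketch).
    -export([replicated_op/3]).
    -record(s, {gm, bq, bqs}).

    %% Broadcast the operation to the group, then apply it locally and
    %% store the updated backing-queue state.
    replicated_op(Op, Args, State = #s{gm = GM, bq = BQ, bqs = BQS}) ->
        ok = gm:broadcast(GM, {Op, Args}),
        BQS1 = erlang:apply(BQ, Op, Args ++ [BQS]),
        State#s{bqs = BQS1}.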
- -batch_publish(Publishes, ChPid, Flow, - State = #state { gm = GM, - seen_status = SS, - backing_queue = BQ, - backing_queue_state = BQS }) -> - {Publishes1, false, MsgSizes} = - lists:foldl(fun ({Msg, - MsgProps, _IsDelivered}, {Pubs, false, Sizes}) -> - MsgId = mc:get_annotation(id, Msg), - {_, Size} = mc:size(Msg), - {[{Msg, MsgProps, true} | Pubs], %% [0] - false = maps:is_key(MsgId, SS), %% ASSERTION - Sizes + Size} - end, {[], false, 0}, Publishes), - Publishes2 = lists:reverse(Publishes1), - ok = gm:broadcast(GM, {batch_publish, ChPid, Flow, Publishes2}, - MsgSizes), - BQS1 = BQ:batch_publish(Publishes2, ChPid, Flow, BQS), - ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }). -%% [0] When the mirror process handles the publish command, it sets the -%% IsDelivered flag to true, so to avoid iterating over the messages -%% again at the mirror, we do it here. - -publish_delivered(Msg, MsgProps, - ChPid, Flow, State = #state { gm = GM, - seen_status = SS, - backing_queue = BQ, - backing_queue_state = BQS }) -> - MsgId = mc:get_annotation(id, Msg), - {_, Size} = mc:size(Msg), - false = maps:is_key(MsgId, SS), %% ASSERTION - ok = gm:broadcast(GM, {publish_delivered, ChPid, Flow, MsgProps, Msg}, - Size), - {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS), - State1 = State #state { backing_queue_state = BQS1 }, - {AckTag, ensure_monitoring(ChPid, State1)}. - -batch_publish_delivered(Publishes, ChPid, Flow, - State = #state { gm = GM, - seen_status = SS, - backing_queue = BQ, - backing_queue_state = BQS }) -> - {false, MsgSizes} = - lists:foldl(fun ({Msg, _MsgProps}, - {false, Sizes}) -> - MsgId = mc:get_annotation(id, Msg), - {_, Size} = mc:size(Msg), - {false = maps:is_key(MsgId, SS), %% ASSERTION - Sizes + Size} - end, {false, 0}, Publishes), - ok = gm:broadcast(GM, {batch_publish_delivered, ChPid, Flow, Publishes}, - MsgSizes), - {AckTags, BQS1} = BQ:batch_publish_delivered(Publishes, ChPid, Flow, BQS), - State1 = State #state { backing_queue_state = BQS1 }, - {AckTags, ensure_monitoring(ChPid, State1)}. - -discard(MsgId, ChPid, Flow, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - seen_status = SS }) -> - false = maps:is_key(MsgId, SS), %% ASSERTION - ok = gm:broadcast(GM, {discard, ChPid, Flow, MsgId}), - ensure_monitoring(ChPid, - State #state { backing_queue_state = - BQ:discard(MsgId, ChPid, Flow, BQS) }). - -dropwhile(Pred, State = #state{backing_queue = BQ, - backing_queue_state = BQS }) -> - Len = BQ:len(BQS), - {Next, BQS1} = BQ:dropwhile(Pred, BQS), - {Next, drop(Len, false, State #state { backing_queue_state = BQS1 })}. - -fetchwhile(Pred, Fun, Acc, State = #state{backing_queue = BQ, - backing_queue_state = BQS }) -> - Len = BQ:len(BQS), - {Next, Acc1, BQS1} = BQ:fetchwhile(Pred, Fun, Acc, BQS), - {Next, Acc1, drop(Len, true, State #state { backing_queue_state = BQS1 })}. - -drain_confirmed(State = #state { backing_queue = BQ, - backing_queue_state = BQS, - seen_status = SS, - confirmed = Confirmed }) -> - {MsgIds, BQS1} = BQ:drain_confirmed(BQS), - {MsgIds1, SS1} = - lists:foldl( - fun (MsgId, {MsgIdsN, SSN}) -> - %% We will never see 'discarded' here - case maps:find(MsgId, SSN) of - error -> - {[MsgId | MsgIdsN], SSN}; - {ok, published} -> - %% It was published when we were a mirror, - %% and we were promoted before we saw the - %% publish from the channel. We still - %% haven't seen the channel publish, and - %% consequently we need to filter out the - %% confirm here. 
We will issue the confirm - %% when we see the publish from the channel. - {MsgIdsN, maps:put(MsgId, confirmed, SSN)}; - {ok, confirmed} -> - %% Well, confirms are racy by definition. - {[MsgId | MsgIdsN], SSN} - end - end, {[], SS}, MsgIds), - {Confirmed ++ MsgIds1, State #state { backing_queue_state = BQS1, - seen_status = SS1, - confirmed = [] }}. - -fetch(AckRequired, State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - {Result, BQS1} = BQ:fetch(AckRequired, BQS), - State1 = State #state { backing_queue_state = BQS1 }, - {Result, case Result of - empty -> State1; - {_MsgId, _IsDelivered, _AckTag} -> drop_one(AckRequired, State1) - end}. - -drop(AckRequired, State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - {Result, BQS1} = BQ:drop(AckRequired, BQS), - State1 = State #state { backing_queue_state = BQS1 }, - {Result, case Result of - empty -> State1; - {_MsgId, _AckTag} -> drop_one(AckRequired, State1) - end}. - -ack(AckTags, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - {MsgIds, BQS1} = BQ:ack(AckTags, BQS), - case MsgIds of - [] -> ok; - _ -> ok = gm:broadcast(GM, {ack, MsgIds}) - end, - {MsgIds, State #state { backing_queue_state = BQS1 }}. - -requeue(AckTags, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - {MsgIds, BQS1} = BQ:requeue(AckTags, BQS), - ok = gm:broadcast(GM, {requeue, MsgIds}), - {MsgIds, State #state { backing_queue_state = BQS1 }}. - -ackfold(MsgFun, Acc, State = #state { backing_queue = BQ, - backing_queue_state = BQS }, AckTags) -> - {Acc1, BQS1} = BQ:ackfold(MsgFun, Acc, BQS, AckTags), - {Acc1, State #state { backing_queue_state = BQS1 }}. - -fold(Fun, Acc, State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - {Result, BQS1} = BQ:fold(Fun, Acc, BQS), - {Result, State #state { backing_queue_state = BQS1 }}. - -len(#state { backing_queue = BQ, backing_queue_state = BQS }) -> - BQ:len(BQS). - -is_empty(#state { backing_queue = BQ, backing_queue_state = BQS }) -> - BQ:is_empty(BQS). - -depth(#state { backing_queue = BQ, backing_queue_state = BQS }) -> - BQ:depth(BQS). - -set_ram_duration_target(Target, State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - State #state { backing_queue_state = - BQ:set_ram_duration_target(Target, BQS) }. - -ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> - {Result, BQS1} = BQ:ram_duration(BQS), - {Result, State #state { backing_queue_state = BQS1 }}. - -needs_timeout(#state { backing_queue = BQ, backing_queue_state = BQS }) -> - BQ:needs_timeout(BQS). - -timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> - State #state { backing_queue_state = BQ:timeout(BQS) }. - -handle_pre_hibernate(State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - State #state { backing_queue_state = BQ:handle_pre_hibernate(BQS) }. - -resume(State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - State #state { backing_queue_state = BQ:resume(BQS) }. - -msg_rates(#state { backing_queue = BQ, backing_queue_state = BQS }) -> - BQ:msg_rates(BQS). - -info(backing_queue_status, - State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> - BQ:info(backing_queue_status, BQS) ++ - [ {mirror_seen, maps:size(State #state.seen_status)}, - {mirror_senders, sets:size(State #state.known_senders)} ]; -info(Item, #state { backing_queue = BQ, backing_queue_state = BQS }) -> - BQ:info(Item, BQS). 
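The seen_status handling in drain_confirmed/1 above is a fold over the drained message ids: ids unknown to the map are confirmed immediately, ids recorded as published while this node was still a mirror are held back (and re-marked confirmed so the later channel publish triggers the confirm), and ids already marked confirmed pass through. A reduced sketch of just that filter, with hypothetical module, function and variable names:

    -module(confirm_filter_sketch).
    -export([filter_confirms/2]).

    %% Split drained message ids into those we may confirm now and those that
    %% must wait for the publish from the channel, updating the seen map.
    filter_confirms(MsgIds, Seen0) ->
        lists:foldl(
          fun (MsgId, {Confirmable, Seen}) ->
                  case maps:find(MsgId, Seen) of
                      error           -> {[MsgId | Confirmable], Seen};
                      {ok, published} -> {Confirmable, maps:put(MsgId, confirmed, Seen)};
                      {ok, confirmed} -> {[MsgId | Confirmable], Seen}
                  end
          end, {[], Seen0}, MsgIds).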
- -invoke(?MODULE, Fun, State) -> - Fun(?MODULE, State); -invoke(Mod, Fun, State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }. - -is_duplicate(Message, - State = #state { seen_status = SS, - backing_queue = BQ, - backing_queue_state = BQS, - confirmed = Confirmed }) -> - MsgId = mc:get_annotation(id, Message), - %% Here, we need to deal with the possibility that we're about to - %% receive a message that we've already seen when we were a mirror - %% (we received it via gm). Thus if we do receive such message now - %% via the channel, there may be a confirm waiting to issue for - %% it. - - %% We will never see {published, ChPid, MsgSeqNo} here. - case maps:find(MsgId, SS) of - error -> - %% We permit the underlying BQ to have a peek at it, but - %% only if we ourselves are not filtering out the msg. - {Result, BQS1} = BQ:is_duplicate(Message, BQS), - {Result, State #state { backing_queue_state = BQS1 }}; - {ok, published} -> - %% It already got published when we were a mirror and no - %% confirmation is waiting. amqqueue_process will have, in - %% its msg_id_to_channel mapping, the entry for dealing - %% with the confirm when that comes back in (it's added - %% immediately after calling is_duplicate). The msg is - %% invalid. We will not see this again, nor will we be - %% further involved in confirming this message, so erase. - {{true, drop}, State #state { seen_status = maps:remove(MsgId, SS) }}; - {ok, Disposition} - when Disposition =:= confirmed - %% It got published when we were a mirror via gm, and - %% confirmed some time after that (maybe even after - %% promotion), but before we received the publish from the - %% channel, so couldn't previously know what the - %% msg_seq_no was (and thus confirm as a mirror). So we - %% need to confirm now. As above, amqqueue_process will - %% have the entry for the msg_id_to_channel mapping added - %% immediately after calling is_duplicate/2. - orelse Disposition =:= discarded -> - %% Message was discarded while we were a mirror. Confirm now. - %% As above, amqqueue_process will have the entry for the - %% msg_id_to_channel mapping. - {{true, drop}, State #state { seen_status = maps:remove(MsgId, SS), - confirmed = [MsgId | Confirmed] }} - end. - -set_queue_mode(Mode, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {set_queue_mode, Mode}), - BQS1 = BQ:set_queue_mode(Mode, BQS), - State #state { backing_queue_state = BQS1 }. - -set_queue_version(Version, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {set_queue_version, Version}), - BQS1 = BQ:set_queue_version(Version, BQS), - State #state { backing_queue_state = BQS1 }. - -zip_msgs_and_acks(Msgs, AckTags, Accumulator, - #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - BQ:zip_msgs_and_acks(Msgs, AckTags, Accumulator, BQS). - -%% --------------------------------------------------------------------------- -%% Other exported functions -%% --------------------------------------------------------------------------- - --spec promote_backing_queue_state - (rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()], - map(), [pid()]) -> - master_state(). 
- -promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) -> - {MsgIds, BQS1} = BQ:requeue(AckTags, BQS), - ok = gm:broadcast(GM, {requeue, MsgIds}), - Len = BQ:len(BQS1), - Depth = BQ:depth(BQS1), - true = Len == Depth, %% ASSERTION: everything must have been requeued - ok = gm:broadcast(GM, {depth, Depth}), - WaitTimeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000), - #state { name = QName, - gm = GM, - coordinator = CPid, - backing_queue = BQ, - backing_queue_state = BQS1, - seen_status = Seen, - confirmed = [], - known_senders = sets:from_list(KS), - wait_timeout = WaitTimeout }. - --spec sender_death_fun() -> death_fun(). - -sender_death_fun() -> - Self = self(), - fun (DeadPid) -> - rabbit_amqqueue:run_backing_queue( - Self, ?MODULE, - fun (?MODULE, State = #state { gm = GM, known_senders = KS }) -> - ok = gm:broadcast(GM, {sender_death, DeadPid}), - KS1 = sets:del_element(DeadPid, KS), - State #state { known_senders = KS1 } - end) - end. - --spec depth_fun() -> depth_fun(). - -depth_fun() -> - Self = self(), - fun () -> - rabbit_amqqueue:run_backing_queue( - Self, ?MODULE, - fun (?MODULE, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}), - State - end) - end. - -%% --------------------------------------------------------------------------- -%% Helpers -%% --------------------------------------------------------------------------- - -drop_one(AckRequired, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckRequired}), - State. - -drop(PrevLen, AckRequired, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - Len = BQ:len(BQS), - case PrevLen - Len of - 0 -> State; - Dropped -> ok = gm:broadcast(GM, {drop, Len, Dropped, AckRequired}), - State - end. - -ensure_monitoring(ChPid, State = #state { coordinator = CPid, - known_senders = KS }) -> - case sets:is_element(ChPid, KS) of - true -> State; - false -> ok = rabbit_mirror_queue_coordinator:ensure_monitoring( - CPid, [ChPid]), - State #state { known_senders = sets:add_element(ChPid, KS) } - end. diff --git a/deps/rabbit/src/rabbit_mirror_queue_misc.erl b/deps/rabbit/src/rabbit_mirror_queue_misc.erl index feabab238a7d..14f9f3d884ef 100644 --- a/deps/rabbit/src/rabbit_mirror_queue_misc.erl +++ b/deps/rabbit/src/rabbit_mirror_queue_misc.erl @@ -2,45 +2,23 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mirror_queue_misc). --behaviour(rabbit_policy_validator). --behaviour(rabbit_policy_merge_strategy). -include_lib("stdlib/include/assert.hrl"). -include("amqqueue.hrl"). --export([remove_from_queue/3, on_vhost_up/1, add_mirrors/3, - report_deaths/4, store_updated_slaves/1, - initial_queue_node/2, suggested_queue_nodes/1, actual_queue_nodes/1, - is_mirrored/1, is_mirrored_ha_nodes/1, - update_mirrors/2, update_mirrors/1, validate_policy/1, - merge_policy_value/3, - maybe_auto_sync/1, maybe_drop_master_after_sync/1, - sync_batch_size/1, default_max_sync_throughput/0, - log_info/3, log_warning/3]). --export([stop_all_slaves/5]). - --export([sync_queue/1, cancel_sync_queue/1, queue_length/1]). 
- --export([get_replicas/1, transfer_leadership/2, migrate_leadership_to_existing_replica/2]). - %% Deprecated feature callback. -export([are_cmqs_used/1]). -%% for testing only --export([module/1]). - -include_lib("rabbit_common/include/rabbit.hrl"). --define(HA_NODES_MODULE, rabbit_mirror_queue_mode_nodes). - -rabbit_deprecated_feature( {classic_queue_mirroring, - #{deprecation_phase => permitted_by_default, + #{deprecation_phase => removed, messages => #{when_permitted => "Classic mirrored queues are deprecated.\n" @@ -67,712 +45,45 @@ "To continue using classic mirrored queues when they are not " "permitted by default, set the following parameter in your " "configuration:\n" - " \"deprecated_features.permit.classic_queue_mirroring = true\"" + " \"deprecated_features.permit.classic_queue_mirroring = true\"", + + when_removed => + "Classic mirrored queues have been removed.\n" }, doc_url => "https://blog.rabbitmq.com/posts/2021/08/4.0-deprecation-announcements/#removal-of-classic-queue-mirroring", callbacks => #{is_feature_used => {?MODULE, are_cmqs_used}} }}). --rabbit_boot_step( - {?MODULE, - [{description, "HA policy validation"}, - {mfa, {rabbit_registry, register, - [policy_validator, <<"ha-mode">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [policy_validator, <<"ha-params">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [policy_validator, <<"ha-sync-mode">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [policy_validator, <<"ha-sync-batch-size">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [policy_validator, <<"ha-promote-on-shutdown">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [policy_validator, <<"ha-promote-on-failure">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [operator_policy_validator, <<"ha-mode">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [operator_policy_validator, <<"ha-params">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [operator_policy_validator, <<"ha-sync-mode">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [policy_merge_strategy, <<"ha-mode">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [policy_merge_strategy, <<"ha-params">>, ?MODULE]}}, - {mfa, {rabbit_registry, register, - [policy_merge_strategy, <<"ha-sync-mode">>, ?MODULE]}}, - {requires, rabbit_registry}, - {enables, recovery}]}). - - %%---------------------------------------------------------------------------- -%% Returns {ok, NewMPid, DeadPids, ExtraNodes} - --spec remove_from_queue - (rabbit_amqqueue:name(), pid(), [pid()]) -> - {'ok', pid(), [pid()], [node()]} | {'error', 'not_found'} | - {'error', {'not_synced', [pid()]}}. - -remove_from_queue(QueueName, Self, DeadGMPids) -> - rabbit_mnesia:execute_mnesia_transaction( - fun () -> - %% Someone else could have deleted the queue before we - %% get here. Or, gm group could've altered. 
see rabbitmq-server#914 - case mnesia:read({rabbit_queue, QueueName}) of - [] -> {error, not_found}; - [Q0] when ?is_amqqueue(Q0) -> - QPid = amqqueue:get_pid(Q0), - SPids = amqqueue:get_slave_pids(Q0), - SyncSPids = amqqueue:get_sync_slave_pids(Q0), - GMPids = amqqueue:get_gm_pids(Q0), - {DeadGM, AliveGM} = lists:partition( - fun ({GM, _}) -> - lists:member(GM, DeadGMPids) - end, GMPids), - DeadPids = [Pid || {_GM, Pid} <- DeadGM], - AlivePids = [Pid || {_GM, Pid} <- AliveGM], - Alive = [Pid || Pid <- [QPid | SPids], - lists:member(Pid, AlivePids)], - {QPid1, SPids1} = case Alive of - [] -> - %% GM altered, & if all pids are - %% perceived as dead, rather do - %% do nothing here, & trust the - %% promoted mirror to have updated - %% mnesia during the alteration. - {QPid, SPids}; - _ -> promote_slave(Alive) - end, - DoNotPromote = SyncSPids =:= [] andalso - rabbit_policy:get(<<"ha-promote-on-failure">>, Q0) =:= <<"when-synced">>, - case {{QPid, SPids}, {QPid1, SPids1}} of - {Same, Same} -> - {ok, QPid1, DeadPids, []}; - _ when QPid1 =/= QPid andalso QPid1 =:= Self andalso DoNotPromote =:= true -> - %% We have been promoted to master - %% but there are no synchronised mirrors - %% hence this node is not synchronised either - %% Bailing out. - {error, {not_synced, SPids1}}; - _ when QPid =:= QPid1 orelse QPid1 =:= Self -> - %% Either master hasn't changed, so - %% we're ok to update mnesia; or we have - %% become the master. If gm altered, - %% we have no choice but to proceed. - Q1 = amqqueue:set_pid(Q0, QPid1), - Q2 = amqqueue:set_slave_pids(Q1, SPids1), - Q3 = amqqueue:set_gm_pids(Q2, AliveGM), - _ = store_updated_slaves(Q3), - %% If we add and remove nodes at the - %% same time we might tell the old - %% master we need to sync and then - %% shut it down. So let's check if - %% the new master needs to sync. - _ = maybe_auto_sync(Q3), - {ok, QPid1, DeadPids, slaves_to_start_on_failure(Q3, DeadGMPids)}; - _ -> - %% Master has changed, and we're not it. - %% [1]. - Q1 = amqqueue:set_slave_pids(Q0, Alive), - Q2 = amqqueue:set_gm_pids(Q1, AliveGM), - _ = store_updated_slaves(Q2), - {ok, QPid1, DeadPids, []} - end - end - end). -%% [1] We still update mnesia here in case the mirror that is supposed -%% to become master dies before it does do so, in which case the dead -%% old master might otherwise never get removed, which in turn might -%% prevent promotion of another mirror (e.g. us). -%% -%% Note however that we do not update the master pid. Otherwise we can -%% have the situation where a mirror updates the mnesia record for a -%% queue, promoting another mirror before that mirror realises it has -%% become the new master, which is bad because it could then mean the -%% mirror (now master) receives messages it's not ready for (for -%% example, new consumers). -%% -%% We set slave_pids to Alive rather than SPids1 since otherwise we'd -%% be removing the pid of the candidate master, which in turn would -%% prevent it from promoting itself. -%% -%% We maintain gm_pids as our source of truth, i.e. it contains the -%% most up-to-date information about which GMs and associated -%% {M,S}Pids are alive. And all pids in slave_pids always have a -%% corresponding entry in gm_pids. By contrast, due to the -%% aforementioned restriction on updating the master pid, that pid may -%% not be present in gm_pids, but only if said master has died. - -%% Sometimes a mirror dying means we need to start more on other -%% nodes - "exactly" mode can cause this to happen. 
-slaves_to_start_on_failure(Q, DeadGMPids) -> - %% In case Mnesia has not caught up yet, filter out nodes we know - %% to be dead.. - ClusterNodes = rabbit_nodes:list_running() -- - [node(P) || P <- DeadGMPids], - {_, OldNodes, _} = actual_queue_nodes(Q), - {_, NewNodes} = suggested_queue_nodes(Q, ClusterNodes), - NewNodes -- OldNodes. - -on_vhost_up(VHost) -> - QNames = - rabbit_mnesia:execute_mnesia_transaction( - fun () -> - mnesia:foldl( - fun - (Q, QNames0) when not ?amqqueue_vhost_equals(Q, VHost) -> - QNames0; - (Q, QNames0) when ?amqqueue_is_classic(Q) -> - QName = amqqueue:get_name(Q), - Pid = amqqueue:get_pid(Q), - SPids = amqqueue:get_slave_pids(Q), - %% We don't want to pass in the whole - %% cluster - we don't want a situation - %% where starting one node causes us to - %% decide to start a mirror on another - PossibleNodes0 = [node(P) || P <- [Pid | SPids]], - PossibleNodes = - case lists:member(node(), PossibleNodes0) of - true -> PossibleNodes0; - false -> [node() | PossibleNodes0] - end, - {_MNode, SNodes} = suggested_queue_nodes( - Q, PossibleNodes), - case lists:member(node(), SNodes) of - true -> [QName | QNames0]; - false -> QNames0 - end; - (_, QNames0) -> - QNames0 - end, [], rabbit_queue) - end), - _ = [add_mirror(QName, node(), async) || QName <- QNames], - ok. - -drop_mirrors(QName, Nodes) -> - _ = [drop_mirror(QName, Node) || Node <- Nodes], - ok. - -drop_mirror(QName, MirrorNode) -> - case rabbit_amqqueue:lookup(QName) of - {ok, Q} when ?is_amqqueue(Q) -> - Name = amqqueue:get_name(Q), - PrimaryPid = amqqueue:get_pid(Q), - MirrorPids = amqqueue:get_slave_pids(Q), - AllReplicaPids = [PrimaryPid | MirrorPids], - case [Pid || Pid <- AllReplicaPids, node(Pid) =:= MirrorNode] of - [] -> - {error, {queue_not_mirrored_on_node, MirrorNode}}; - [PrimaryPid] when MirrorPids =:= [] -> - {error, cannot_drop_only_mirror}; - [Pid] -> - log_info(Name, "Dropping queue mirror on node ~tp", - [MirrorNode]), - exit(Pid, {shutdown, dropped}), - {ok, dropped} - end; - {error, not_found} = E -> - E - end. - --spec add_mirrors(rabbit_amqqueue:name(), [node()], 'sync' | 'async') -> - 'ok'. - -add_mirrors(QName, Nodes, SyncMode) -> - _ = [add_mirror(QName, Node, SyncMode) || Node <- Nodes], - ok. - -add_mirror(QName, MirrorNode, SyncMode) -> - case rabbit_amqqueue:lookup(QName) of - {ok, Q} -> - rabbit_misc:with_exit_handler( - rabbit_misc:const(ok), - fun () -> - #resource{virtual_host = VHost} = amqqueue:get_name(Q), - case rabbit_vhost_sup_sup:get_vhost_sup(VHost, MirrorNode) of - {ok, _} -> - try - MirrorPid = rabbit_amqqueue_sup_sup:start_queue_process(MirrorNode, Q, slave), - log_info(QName, "Adding mirror on node ~tp: ~tp", [MirrorNode, MirrorPid]), - rabbit_mirror_queue_slave:go(MirrorPid, SyncMode) - of - _ -> ok - catch - error:QError -> - log_warning(QName, - "Unable to start queue mirror on node '~tp'. " - "Target queue supervisor is not running: ~tp", - [MirrorNode, QError]) - end; - {error, Error} -> - log_warning(QName, - "Unable to start queue mirror on node '~tp'. " - "Target virtual host is not running: ~tp", - [MirrorNode, Error]), - ok - end - end); - {error, not_found} = E -> - E - end. - -report_deaths(_MirrorPid, _IsMaster, _QueueName, []) -> - ok; -report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) -> - log_info(QueueName, "~ts replica of queue ~ts detected replica ~ts to be down", - [case IsMaster of - true -> "Primary"; - false -> "Secondary" - end, - rabbit_misc:pid_to_string(MirrorPid), - [[$ , rabbit_misc:pid_to_string(P)] || P <- DeadPids]]). 
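drop_mirror/2 above picks the replica to stop by filtering the full pid list ([PrimaryPid | MirrorPids]) down to the pid hosted on the requested node, and then distinguishes "no replica on that node" from "that is the only replica left" before exiting the pid. A compact sketch of that selection step (module and function names are hypothetical; the match against the bound PrimaryPid is the same trick used above):

    -module(replica_select_sketch).
    -export([replica_on_node/3]).

    %% Select the single replica pid hosted on Node, or explain why we cannot.
    replica_on_node(PrimaryPid, MirrorPids, Node) ->
        case [P || P <- [PrimaryPid | MirrorPids], node(P) =:= Node] of
            []                                  -> {error, not_mirrored_on_node};
            [PrimaryPid] when MirrorPids =:= [] -> {error, cannot_drop_only_replica};
            [Pid]                               -> {ok, Pid}
        end.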
- --spec log_info(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'. - -log_info (QName, Fmt, Args) -> - rabbit_log_mirroring:info("Mirrored ~ts: " ++ Fmt, - [rabbit_misc:rs(QName) | Args]). - --spec log_warning(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'. - -log_warning(QName, Fmt, Args) -> - rabbit_log_mirroring:warning("Mirrored ~ts: " ++ Fmt, - [rabbit_misc:rs(QName) | Args]). - --spec store_updated_slaves(amqqueue:amqqueue()) -> - amqqueue:amqqueue(). - -store_updated_slaves(Q0) when ?is_amqqueue(Q0) -> - SPids = amqqueue:get_slave_pids(Q0), - SSPids = amqqueue:get_sync_slave_pids(Q0), - RS0 = amqqueue:get_recoverable_slaves(Q0), - %% TODO now that we clear sync_slave_pids in rabbit_durable_queue, - %% do we still need this filtering? - SSPids1 = [SSPid || SSPid <- SSPids, lists:member(SSPid, SPids)], - Q1 = amqqueue:set_sync_slave_pids(Q0, SSPids1), - RS1 = update_recoverable(SPids, RS0), - Q2 = amqqueue:set_recoverable_slaves(Q1, RS1), - Q3 = amqqueue:set_state(Q2, live), - %% amqqueue migration: - %% The amqqueue was read from this transaction, no need to handle - %% migration. - ok = rabbit_amqqueue:store_queue(Q3), - %% Wake it up so that we emit a stats event - rabbit_amqqueue:notify_policy_changed(Q3), - Q3. - -%% Recoverable nodes are those which we could promote if the whole -%% cluster were to suddenly stop and we then lose the master; i.e. all -%% nodes with running mirrors , and all stopped nodes which had running -%% mirrors when they were up. -%% -%% Therefore we aim here to add new nodes with mirrors , and remove -%% running nodes without mirrors , We also try to keep the order -%% constant, and similar to the live SPids field (i.e. oldest -%% first). That's not necessarily optimal if nodes spend a long time -%% down, but we don't have a good way to predict what the optimal is -%% in that case anyway, and we assume nodes will not just be down for -%% a long time without being removed. -update_recoverable(SPids, RS) -> - SNodes = [node(SPid) || SPid <- SPids], - RunningNodes = rabbit_nodes:list_running(), - AddNodes = SNodes -- RS, - DelNodes = RunningNodes -- SNodes, %% i.e. running with no slave - (RS -- DelNodes) ++ AddNodes. - -stop_all_slaves(Reason, SPids, QName, GM, WaitTimeout) -> - PidsMRefs = [{Pid, erlang:monitor(process, Pid)} || Pid <- [GM | SPids]], - ok = gm:broadcast(GM, {delete_and_terminate, Reason}), - %% It's possible that we could be partitioned from some mirrors - %% between the lookup and the broadcast, in which case we could - %% monitor them but they would not have received the GM - %% message. So only wait for mirrors which are still - %% not-partitioned. - PendingSlavePids = lists:foldl(fun({Pid, MRef}, Acc) -> - case rabbit_mnesia:on_running_node(Pid) of - true -> - receive - {'DOWN', MRef, process, _Pid, _Info} -> - Acc - after WaitTimeout -> - rabbit_mirror_queue_misc:log_warning( - QName, "Missing 'DOWN' message from ~tp in" - " node ~tp", [Pid, node(Pid)]), - [Pid | Acc] - end; - false -> - Acc - end - end, [], PidsMRefs), - %% Normally when we remove a mirror another mirror or master will - %% notice and update Mnesia. But we just removed them all, and - %% have stopped listening ourselves. So manually clean up. - rabbit_mnesia:execute_mnesia_transaction(fun () -> - [Q0] = mnesia:read({rabbit_queue, QName}), - Q1 = amqqueue:set_gm_pids(Q0, []), - Q2 = amqqueue:set_slave_pids(Q1, []), - %% Restarted mirrors on running nodes can - %% ensure old incarnations are stopped using - %% the pending mirror pids. 
- Q3 = amqqueue:set_slave_pids_pending_shutdown(Q2, PendingSlavePids), - rabbit_mirror_queue_misc:store_updated_slaves(Q3) - end), - ok = gm:forget_group(QName). - -%%---------------------------------------------------------------------------- - -promote_slave([SPid | SPids]) -> - %% The mirror pids are maintained in descending order of age, so - %% the one to promote is the oldest. - {SPid, SPids}. - --spec initial_queue_node(amqqueue:amqqueue(), node()) -> node(). - -initial_queue_node(Q, DefNode) -> - {MNode, _SNodes} = suggested_queue_nodes(Q, DefNode, rabbit_nodes:list_running()), - MNode. - --spec suggested_queue_nodes(amqqueue:amqqueue()) -> - {node(), [node()]}. - -suggested_queue_nodes(Q) -> suggested_queue_nodes(Q, rabbit_nodes:list_running()). -suggested_queue_nodes(Q, All) -> suggested_queue_nodes(Q, node(), All). - -%% The third argument exists so we can pull a call to -%% rabbit_nodes:list_running() out of a loop or transaction -%% or both. -suggested_queue_nodes(Q, DefNode, All) when ?is_amqqueue(Q) -> - Owner = amqqueue:get_exclusive_owner(Q), - {MNode0, SNodes, SSNodes} = actual_queue_nodes(Q), - MNode = case MNode0 of - none -> DefNode; - _ -> MNode0 - end, - case Owner of - none -> Params = policy(<<"ha-params">>, Q), - case module(Q) of - {ok, M} -> M:suggested_queue_nodes( - Params, MNode, SNodes, SSNodes, All); - _ -> {MNode, []} - end; - _ -> {MNode, []} - end. - -policy(Policy, Q) -> - case rabbit_policy:get(Policy, Q) of - undefined -> none; - P -> P - end. - -module(Q) when ?is_amqqueue(Q) -> - case rabbit_policy:get(<<"ha-mode">>, Q) of - undefined -> not_mirrored; - Mode -> module(Mode) - end; - -module(Mode) when is_binary(Mode) -> - case rabbit_registry:binary_to_type(Mode) of - {error, not_found} -> not_mirrored; - T -> case rabbit_registry:lookup_module(ha_mode, T) of - {ok, Module} -> {ok, Module}; - _ -> not_mirrored - end - end. - -validate_mode(Mode) -> - case module(Mode) of - {ok, _Module} -> - ok; - not_mirrored -> - {error, "~tp is not a valid ha-mode value", [Mode]} - end. - --spec is_mirrored(amqqueue:amqqueue()) -> boolean(). - -is_mirrored(Q) -> - MatchedByPolicy = case module(Q) of - {ok, _} -> true; - _ -> false - end, - MatchedByPolicy andalso (not rabbit_amqqueue:is_exclusive(Q)). - -is_mirrored_ha_nodes(Q) -> - MatchedByPolicy = case module(Q) of - {ok, ?HA_NODES_MODULE} -> true; - _ -> false - end, - MatchedByPolicy andalso (not rabbit_amqqueue:is_exclusive(Q)). - -actual_queue_nodes(Q) when ?is_amqqueue(Q) -> - PrimaryPid = amqqueue:get_pid(Q), - MirrorPids = amqqueue:get_slave_pids(Q), - InSyncMirrorPids = amqqueue:get_sync_slave_pids(Q), - CollectNodes = fun (L) -> [node(Pid) || Pid <- L] end, - NodeHostingPrimary = case PrimaryPid of - none -> none; - _ -> node(PrimaryPid) - end, - {NodeHostingPrimary, CollectNodes(MirrorPids), CollectNodes(InSyncMirrorPids)}. - --spec maybe_auto_sync(amqqueue:amqqueue()) -> 'ok' | pid(). - -maybe_auto_sync(Q) when ?is_amqqueue(Q) -> - QPid = amqqueue:get_pid(Q), - case policy(<<"ha-sync-mode">>, Q) of - <<"automatic">> -> - spawn(fun() -> rabbit_amqqueue:sync_mirrors(QPid) end); - _ -> - ok - end. - -sync_queue(Q0) -> - F = fun - (Q) when ?amqqueue_is_classic(Q) -> - QPid = amqqueue:get_pid(Q), - rabbit_amqqueue:sync_mirrors(QPid); - (Q) when ?amqqueue_is_quorum(Q) -> - {error, quorum_queue_not_supported} - end, - rabbit_amqqueue:with(Q0, F). 
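stop_all_slaves/5 above monitors the GM process and every mirror, broadcasts delete_and_terminate, and then waits for each specific 'DOWN' with a per-pid timeout so a partitioned or stuck replica cannot block shutdown indefinitely; pids that never confirm are collected as pending. A stripped-down sketch of just that monitor/await loop (hypothetical module; the on_running_node partition check from the original is omitted):

    -module(await_down_sketch).
    -export([await_down/2]).

    %% Monitor each pid, wait for its 'DOWN' up to Timeout, and return the
    %% pids that never confirmed termination.
    await_down(Pids, Timeout) ->
        PidsMRefs = [{Pid, erlang:monitor(process, Pid)} || Pid <- Pids],
        lists:foldl(
          fun ({Pid, MRef}, StillPending) ->
                  receive
                      {'DOWN', MRef, process, _, _} -> StillPending
                  after Timeout ->
                      [Pid | StillPending]
                  end
          end, [], PidsMRefs).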
- -cancel_sync_queue(Q0) -> - F = fun - (Q) when ?amqqueue_is_classic(Q) -> - QPid = amqqueue:get_pid(Q), - rabbit_amqqueue:cancel_sync_mirrors(QPid); - (Q) when ?amqqueue_is_quorum(Q) -> - {error, quorum_queue_not_supported} - end, - rabbit_amqqueue:with(Q0, F). - -sync_batch_size(Q) when ?is_amqqueue(Q) -> - case policy(<<"ha-sync-batch-size">>, Q) of - none -> %% we need this case because none > 1 == true - default_batch_size(); - BatchSize when BatchSize > 1 -> - BatchSize; +are_cmqs_used(_) -> + case rabbit_khepri:get_feature_state() of + enabled -> + false; _ -> - default_batch_size() - end. - --define(DEFAULT_BATCH_SIZE, 4096). - -default_batch_size() -> - rabbit_misc:get_env(rabbit, mirroring_sync_batch_size, - ?DEFAULT_BATCH_SIZE). - --define(DEFAULT_MAX_SYNC_THROUGHPUT, 0). - -default_max_sync_throughput() -> - case application:get_env(rabbit, mirroring_sync_max_throughput) of - {ok, Value} -> - case rabbit_resource_monitor_misc:parse_information_unit(Value) of - {ok, ParsedThroughput} -> - ParsedThroughput; - {error, parse_error} -> - rabbit_log:warning( - "The configured value for the mirroring_sync_max_throughput is " - "not a valid value: ~tp. Disabled sync throughput control. ", - [Value]), - ?DEFAULT_MAX_SYNC_THROUGHPUT - end; - undefined -> - ?DEFAULT_MAX_SYNC_THROUGHPUT - end. - --spec update_mirrors - (amqqueue:amqqueue(), amqqueue:amqqueue()) -> 'ok'. - -update_mirrors(OldQ, NewQ) when ?amqqueue_pids_are_equal(OldQ, NewQ) -> - % Note: we do want to ensure both queues have same pid - QPid = amqqueue:get_pid(OldQ), - QPid = amqqueue:get_pid(NewQ), - case {is_mirrored(OldQ), is_mirrored(NewQ)} of - {false, false} -> ok; - _ -> rabbit_amqqueue:update_mirroring(QPid) - end. - --spec update_mirrors - (amqqueue:amqqueue()) -> 'ok'. - -update_mirrors(Q) when ?is_amqqueue(Q) -> - QName = amqqueue:get_name(Q), - {PreTransferPrimaryNode, PreTransferMirrorNodes, __PreTransferInSyncMirrorNodes} = actual_queue_nodes(Q), - {NewlySelectedPrimaryNode, NewlySelectedMirrorNodes} = suggested_queue_nodes(Q), - PreTransferNodesWithReplicas = [PreTransferPrimaryNode | PreTransferMirrorNodes], - NewlySelectedNodesWithReplicas = [NewlySelectedPrimaryNode | NewlySelectedMirrorNodes], - %% When a mirror dies, remove_from_queue/2 might have to add new - %% mirrors (in "exactly" mode). It will check the queue record to see which - %% mirrors there currently are. If drop_mirror/2 is invoked first - %% then when we end up in remove_from_queue/2 it will not see the - %% mirrors that add_mirror/2 will add, and also want to add them - %% (even though we are not responding to the death of a - %% mirror). Breakage ensues. - add_mirrors(QName, NewlySelectedNodesWithReplicas -- PreTransferNodesWithReplicas, async), - drop_mirrors(QName, PreTransferNodesWithReplicas -- NewlySelectedNodesWithReplicas), - %% This is for the case where no extra nodes were added but we changed to - %% a policy requiring auto-sync. - _ = maybe_auto_sync(Q), - ok. - -queue_length(Q) -> - [{messages, M}] = rabbit_amqqueue:info(Q, [messages]), - M. - -get_replicas(Q) -> - {PrimaryNode, MirrorNodes} = suggested_queue_nodes(Q), - [PrimaryNode] ++ MirrorNodes. - --spec transfer_leadership(amqqueue:amqqueue(), node()) -> {migrated, node()} | {not_migrated, atom()}. -%% Moves the primary replica (leader) of a classic mirrored queue to another node. -%% Target node can be any node in the cluster, and does not have to host a replica -%% of this queue. 
-transfer_leadership(Q, Destination) -> - QName = amqqueue:get_name(Q), - {PreTransferPrimaryNode, PreTransferMirrorNodes, _PreTransferInSyncMirrorNodes} = actual_queue_nodes(Q), - PreTransferNodesWithReplicas = [PreTransferPrimaryNode | PreTransferMirrorNodes], - - NodesToAddMirrorsOn = [Destination] -- PreTransferNodesWithReplicas, - %% This will wait for the transfer/eager sync to finish before we begin dropping - %% mirrors on the next step. In this case we cannot add mirrors asynchronously - %% as that will race with the dropping step. - add_mirrors(QName, NodesToAddMirrorsOn, sync), - - NodesToDropMirrorsOn = PreTransferNodesWithReplicas -- [Destination], - drop_mirrors(QName, NodesToDropMirrorsOn), - - case wait_for_new_master(QName, Destination) of - not_migrated -> - {not_migrated, undefined}; - {{not_migrated, Destination} = Result, _Q1} -> - Result; - {Result, NewQ} -> - update_mirrors(NewQ), - Result - end. - - --spec migrate_leadership_to_existing_replica(amqqueue:amqqueue(), atom()) -> {migrated, node()} | {not_migrated, atom()}. -%% Moves the primary replica (leader) of a classic mirrored queue to another node -%% which already hosts a replica of this queue. In this case we can stop -%% fewer replicas and reduce the load the operation has on the cluster. -%% Note that there is no guarantee that the queue will actually end up on the -%% destination node. The actual destination node is returned. -migrate_leadership_to_existing_replica(Q, Destination) -> - QName = amqqueue:get_name(Q), - {PreTransferPrimaryNode, PreTransferMirrorNodes, _PreTransferInSyncMirrorNodes} = actual_queue_nodes(Q), - PreTransferNodesWithReplicas = [PreTransferPrimaryNode | PreTransferMirrorNodes], - - NodesToAddMirrorsOn = [Destination] -- PreTransferNodesWithReplicas, - %% This will wait for the transfer/eager sync to finish before we begin dropping - %% mirrors on the next step. In this case we cannot add mirrors asynchronously - %% as that will race with the dropping step. - add_mirrors(QName, NodesToAddMirrorsOn, sync), - - NodesToDropMirrorsOn = [PreTransferPrimaryNode], - drop_mirrors(QName, NodesToDropMirrorsOn), - - case wait_for_different_master(QName, PreTransferPrimaryNode) of - not_migrated -> - {not_migrated, undefined}; - {{not_migrated, Destination} = Result, _Q1} -> - Result; - {Result, NewQ} -> - update_mirrors(NewQ), - Result - end. - --spec wait_for_new_master(rabbit_amqqueue:name(), atom()) -> {{migrated, node()}, amqqueue:amqqueue()} | {{not_migrated, node()}, amqqueue:amqqueue()} | not_migrated. -wait_for_new_master(QName, Destination) -> - wait_for_new_master(QName, Destination, 100). - -wait_for_new_master(QName, _, 0) -> - case rabbit_amqqueue:lookup(QName) of - {error, not_found} -> not_migrated; - {ok, Q} -> {{not_migrated, undefined}, Q} - end; -wait_for_new_master(QName, Destination, N) -> - case rabbit_amqqueue:lookup(QName) of - {error, not_found} -> - not_migrated; - {ok, Q} -> - case amqqueue:get_pid(Q) of - none -> - timer:sleep(100), - wait_for_new_master(QName, Destination, N - 1); - Pid -> - case node(Pid) of - Destination -> - {{migrated, Destination}, Q}; - _ -> - timer:sleep(100), - wait_for_new_master(QName, Destination, N - 1) - end + %% If we are using Mnesia, we want to check manually if the table + %% exists first. Otherwise it can conflict with the way + %% `rabbit_khepri:handle_fallback/1` works. Indeed, this function + %% and `rabbit_khepri:handle_fallback/1` rely on the `no_exists` + %% exception. 
+ AllTables = mnesia:system_info(tables), + RuntimeParamsReady = lists:member( + rabbit_runtime_parameters, AllTables), + case RuntimeParamsReady of + true -> + %% We also wait for the table because it could exist but + %% may be unavailable. For instance, Mnesia needs another + %% replica on another node before it considers it to be + %% available. + rabbit_table:wait( + [rabbit_runtime_parameters], _Retry = true), + are_cmqs_used1(); + false -> + false end end. --spec wait_for_different_master(rabbit_amqqueue:name(), atom()) -> {{migrated, node()}, amqqueue:amqqueue()} | {{not_migrated, node()}, amqqueue:amqqueue()} | not_migrated. -wait_for_different_master(QName, Source) -> - wait_for_different_master(QName, Source, 100). - -wait_for_different_master(QName, _, 0) -> - case rabbit_amqqueue:lookup(QName) of - {error, not_found} -> not_migrated; - {ok, Q} -> {{not_migrated, undefined}, Q} - end; -wait_for_different_master(QName, Source, N) -> - case rabbit_amqqueue:lookup(QName) of - {error, not_found} -> - not_migrated; - {ok, Q} -> - case amqqueue:get_pid(Q) of - none -> - timer:sleep(100), - wait_for_different_master(QName, Source, N - 1); - Pid -> - case node(Pid) of - Source -> - timer:sleep(100), - wait_for_different_master(QName, Source, N - 1); - Destination -> - {{migrated, Destination}, Q} - end - end - end. - - -%% The arrival of a newly synced mirror may cause the master to die if -%% the policy does not want the master but it has been kept alive -%% because there were no synced mirrors. -%% -%% We don't just call update_mirrors/2 here since that could decide to -%% start a mirror for some other reason, and since we are the mirror ATM -%% that allows complicated deadlocks. - --spec maybe_drop_master_after_sync(amqqueue:amqqueue()) -> 'ok'. - -maybe_drop_master_after_sync(Q) when ?is_amqqueue(Q) -> - QName = amqqueue:get_name(Q), - MPid = amqqueue:get_pid(Q), - {DesiredMNode, DesiredSNodes} = suggested_queue_nodes(Q), - case node(MPid) of - DesiredMNode -> ok; - OldMNode -> false = lists:member(OldMNode, DesiredSNodes), %% [0] - _ = drop_mirror(QName, OldMNode), - ok - end, - ok. -%% [0] ASSERTION - if the policy wants the master to change, it has -%% not just shuffled it into the mirrors. All our modes ensure this -%% does not happen, but we should guard against a misbehaving plugin. - -%%---------------------------------------------------------------------------- - -are_cmqs_permitted() -> - FeatureName = classic_queue_mirroring, - rabbit_deprecated_features:is_permitted(FeatureName). - -are_cmqs_used(_) -> +are_cmqs_used1() -> try LocalPolicies = rabbit_policy:list(), LocalOpPolicies = rabbit_policy:list_op(), @@ -792,122 +103,7 @@ has_ha_policies(Policies) -> does_policy_configure_cmq(KeyList) end, Policies). -does_policy_configure_cmq(KeyList) -> +does_policy_configure_cmq(Map) when is_map(Map) -> + is_map_key(<<"ha-mode">>, Map); +does_policy_configure_cmq(KeyList) when is_list(KeyList) -> lists:keymember(<<"ha-mode">>, 1, KeyList). 
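The reworked does_policy_configure_cmq/1 above now accepts both the newer map-based policy definitions and the legacy key lists. A hypothetical helper, assumed to sit next to those clauses purely as a usage illustration (the policy keys other than ha-mode are arbitrary examples):

    %% Illustrative only: exercises both clauses of does_policy_configure_cmq/1.
    cmq_policy_examples() ->
        true  = does_policy_configure_cmq(#{<<"ha-mode">> => <<"all">>}),
        false = does_policy_configure_cmq(#{<<"max-length">> => 1000}),
        true  = does_policy_configure_cmq([{<<"ha-mode">>, <<"exactly">>},
                                           {<<"ha-params">>, 2}]),
        false = does_policy_configure_cmq([{<<"queue-mode">>, <<"lazy">>}]),
        ok.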
- -validate_policy(KeyList) -> - Mode = proplists:get_value(<<"ha-mode">>, KeyList, none), - Params = proplists:get_value(<<"ha-params">>, KeyList, none), - SyncMode = proplists:get_value(<<"ha-sync-mode">>, KeyList, none), - SyncBatchSize = proplists:get_value( - <<"ha-sync-batch-size">>, KeyList, none), - PromoteOnShutdown = proplists:get_value( - <<"ha-promote-on-shutdown">>, KeyList, none), - PromoteOnFailure = proplists:get_value( - <<"ha-promote-on-failure">>, KeyList, none), - case {are_cmqs_permitted(), Mode, Params, SyncMode, SyncBatchSize, PromoteOnShutdown, PromoteOnFailure} of - {_, none, none, none, none, none, none} -> - ok; - {false, _, _, _, _, _, _} -> - %% If the policy configures classic mirrored queues and this - %% feature is disabled, we consider this policy not valid and deny - %% it. - FeatureName = classic_queue_mirroring, - Warning = rabbit_deprecated_features:get_warning(FeatureName), - {error, "~ts", [Warning]}; - {_, none, _, _, _, _, _} -> - {error, "ha-mode must be specified to specify ha-params, " - "ha-sync-mode or ha-promote-on-shutdown", []}; - _ -> - validate_policies( - [{Mode, fun validate_mode/1}, - {Params, ha_params_validator(Mode)}, - {SyncMode, fun validate_sync_mode/1}, - {SyncBatchSize, fun validate_sync_batch_size/1}, - {PromoteOnShutdown, fun validate_pos/1}, - {PromoteOnFailure, fun validate_pof/1}]) - end. - -ha_params_validator(Mode) -> - fun(Val) -> - {ok, M} = module(Mode), - M:validate_policy(Val) - end. - -validate_policies([]) -> - ok; -validate_policies([{Val, Validator} | Rest]) -> - case Validator(Val) of - ok -> validate_policies(Rest); - E -> E - end. - -validate_sync_mode(SyncMode) -> - case SyncMode of - <<"automatic">> -> ok; - <<"manual">> -> ok; - none -> ok; - Mode -> {error, "ha-sync-mode must be \"manual\" " - "or \"automatic\", got ~tp", [Mode]} - end. - -validate_sync_batch_size(none) -> - ok; -validate_sync_batch_size(N) when is_integer(N) andalso N > 0 -> - ok; -validate_sync_batch_size(N) -> - {error, "ha-sync-batch-size takes an integer greater than 0, " - "~tp given", [N]}. - -validate_pos(PromoteOnShutdown) -> - case PromoteOnShutdown of - <<"always">> -> ok; - <<"when-synced">> -> ok; - none -> ok; - Mode -> {error, "ha-promote-on-shutdown must be " - "\"always\" or \"when-synced\", got ~tp", [Mode]} - end. - -validate_pof(PromoteOnShutdown) -> - case PromoteOnShutdown of - <<"always">> -> ok; - <<"when-synced">> -> ok; - none -> ok; - Mode -> {error, "ha-promote-on-failure must be " - "\"always\" or \"when-synced\", got ~tp", [Mode]} - end. - -merge_policy_value(<<"ha-mode">>, Val, Val) -> - Val; -merge_policy_value(<<"ha-mode">>, <<"all">> = Val, _OpVal) -> - Val; -merge_policy_value(<<"ha-mode">>, _Val, <<"all">> = OpVal) -> - OpVal; -merge_policy_value(<<"ha-mode">>, <<"exactly">> = Val, _OpVal) -> - Val; -merge_policy_value(<<"ha-mode">>, _Val, <<"exactly">> = OpVal) -> - OpVal; -merge_policy_value(<<"ha-sync-mode">>, _Val, OpVal) -> - OpVal; -%% Both values are integers, both are ha-mode 'exactly' -merge_policy_value(<<"ha-params">>, Val, OpVal) when is_integer(Val) - andalso - is_integer(OpVal)-> - if Val > OpVal -> - Val; - true -> - OpVal - end; -%% The integer values is of ha-mode 'exactly', the other is a list and of -%% ha-mode 'nodes'. 'exactly' takes precedence -merge_policy_value(<<"ha-params">>, Val, _OpVal) when is_integer(Val) -> - Val; -merge_policy_value(<<"ha-params">>, _Val, OpVal) when is_integer(OpVal) -> - OpVal; -%% Both values are lists, of ha-mode 'nodes', max length takes precedence. 
-merge_policy_value(<<"ha-params">>, Val, OpVal) -> - if length(Val) > length(OpVal) -> - Val; - true -> - OpVal - end. diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode.erl b/deps/rabbit/src/rabbit_mirror_queue_mode.erl deleted file mode 100644 index 5b62c58adccd..000000000000 --- a/deps/rabbit/src/rabbit_mirror_queue_mode.erl +++ /dev/null @@ -1,42 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_mirror_queue_mode). - --behaviour(rabbit_registry_class). - --export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]). - --type master() :: node(). --type slave() :: node(). --type params() :: any(). - --callback description() -> [proplists:property()]. - -%% Called whenever we think we might need to change nodes for a -%% mirrored queue. Note that this is called from a variety of -%% contexts, both inside and outside Mnesia transactions. Ideally it -%% will be pure-functional. -%% -%% Takes: parameters set in the policy, -%% current master, -%% current mirrors, -%% current synchronised mirrors, -%% all nodes to consider -%% -%% Returns: tuple of new master, new mirrors -%% --callback suggested_queue_nodes( - params(), master(), [slave()], [slave()], [node()]) -> - {master(), [slave()]}. - -%% Are the parameters valid for this mode? --callback validate_policy(params()) -> - rabbit_policy_validator:validate_results(). - -added_to_rabbit_registry(_Type, _ModuleName) -> ok. -removed_from_rabbit_registry(_Type) -> ok. diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode_all.erl b/deps/rabbit/src/rabbit_mirror_queue_mode_all.erl deleted file mode 100644 index 190a6edd6a46..000000000000 --- a/deps/rabbit/src/rabbit_mirror_queue_mode_all.erl +++ /dev/null @@ -1,32 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_mirror_queue_mode_all). - --include_lib("rabbit_common/include/rabbit.hrl"). - --behaviour(rabbit_mirror_queue_mode). - --export([description/0, suggested_queue_nodes/5, validate_policy/1]). - --rabbit_boot_step({?MODULE, - [{description, "mirror mode all"}, - {mfa, {rabbit_registry, register, - [ha_mode, <<"all">>, ?MODULE]}}, - {requires, rabbit_registry}, - {enables, kernel_ready}]}). - -description() -> - [{description, <<"Mirror queue to all nodes">>}]. - -suggested_queue_nodes(_Params, MNode, _SNodes, _SSNodes, Poss) -> - {MNode, Poss -- [MNode]}. - -validate_policy(none) -> - ok; -validate_policy(_Params) -> - {error, "ha-mode=\"all\" does not take parameters", []}. diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl b/deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl deleted file mode 100644 index af183274b2a2..000000000000 --- a/deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl +++ /dev/null @@ -1,45 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. 
-%% - --module(rabbit_mirror_queue_mode_exactly). - --include_lib("rabbit_common/include/rabbit.hrl"). - --behaviour(rabbit_mirror_queue_mode). - --export([description/0, suggested_queue_nodes/5, validate_policy/1]). - --rabbit_boot_step({?MODULE, - [{description, "mirror mode exactly"}, - {mfa, {rabbit_registry, register, - [ha_mode, <<"exactly">>, ?MODULE]}}, - {requires, rabbit_registry}, - {enables, kernel_ready}]}). - -description() -> - [{description, <<"Mirror queue to a specified number of nodes">>}]. - -%% When we need to add nodes, we randomise our candidate list as a -%% crude form of load-balancing. TODO it would also be nice to -%% randomise the list of ones to remove when we have too many - we -%% would have to take account of synchronisation though. -suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) -> - SCount = Count - 1, - {MNode, case SCount > length(SNodes) of - true -> Cand = shuffle((Poss -- [MNode]) -- SNodes), - SNodes ++ lists:sublist(Cand, SCount - length(SNodes)); - false -> lists:sublist(SNodes, SCount) - end}. - -shuffle(L) -> - {_, L1} = lists:unzip(lists:keysort(1, [{rand:uniform(), N} || N <- L])), - L1. - -validate_policy(N) when is_integer(N) andalso N > 0 -> - ok; -validate_policy(Params) -> - {error, "ha-mode=\"exactly\" takes an integer, ~tp given", [Params]}. diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl b/deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl deleted file mode 100644 index 042f928e03f7..000000000000 --- a/deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl +++ /dev/null @@ -1,69 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_mirror_queue_mode_nodes). - --include_lib("rabbit_common/include/rabbit.hrl"). - --behaviour(rabbit_mirror_queue_mode). - --export([description/0, suggested_queue_nodes/5, validate_policy/1]). - --rabbit_boot_step({?MODULE, - [{description, "mirror mode nodes"}, - {mfa, {rabbit_registry, register, - [ha_mode, <<"nodes">>, ?MODULE]}}, - {requires, rabbit_registry}, - {enables, kernel_ready}]}). - -description() -> - [{description, <<"Mirror queue to specified nodes">>}]. - -suggested_queue_nodes(PolicyNodes0, CurrentMaster, _SNodes, SSNodes, NodesRunningRabbitMQ) -> - PolicyNodes1 = [list_to_atom(binary_to_list(Node)) || Node <- PolicyNodes0], - %% If the current master is not in the nodes specified, then what we want - %% to do depends on whether there are any synchronised mirrors. If there - %% are then we can just kill the current master - the admin has asked for - %% a migration and we should give it to them. If there are not however - %% then we must keep the master around so as not to lose messages. - - PolicyNodes = case SSNodes of - [] -> lists:usort([CurrentMaster | PolicyNodes1]); - _ -> PolicyNodes1 - end, - Unavailable = PolicyNodes -- NodesRunningRabbitMQ, - AvailablePolicyNodes = PolicyNodes -- Unavailable, - case AvailablePolicyNodes of - [] -> %% We have never heard of anything? Not much we can do but - %% keep the master alive. - {CurrentMaster, []}; - _ -> case lists:member(CurrentMaster, AvailablePolicyNodes) of - true -> {CurrentMaster, - AvailablePolicyNodes -- [CurrentMaster]}; - false -> %% Make sure the new master is synced! In order to - %% get here SSNodes must not be empty. 
- SyncPolicyNodes = [Node || - Node <- AvailablePolicyNodes, - lists:member(Node, SSNodes)], - NewMaster = case SyncPolicyNodes of - [Node | _] -> Node; - [] -> erlang:hd(SSNodes) - end, - {NewMaster, AvailablePolicyNodes -- [NewMaster]} - end - end. - -validate_policy([]) -> - {error, "ha-mode=\"nodes\" list must be non-empty", []}; -validate_policy(Nodes) when is_list(Nodes) -> - case [I || I <- Nodes, not is_binary(I)] of - [] -> ok; - Invalid -> {error, "ha-mode=\"nodes\" takes a list of strings, " - "~tp was not a string", [Invalid]} - end; -validate_policy(Params) -> - {error, "ha-mode=\"nodes\" takes a list, ~tp given", [Params]}. diff --git a/deps/rabbit/src/rabbit_mirror_queue_slave.erl b/deps/rabbit/src/rabbit_mirror_queue_slave.erl deleted file mode 100644 index 12c0233354c3..000000000000 --- a/deps/rabbit/src/rabbit_mirror_queue_slave.erl +++ /dev/null @@ -1,1102 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_mirror_queue_slave). - -%% For general documentation of HA design, see -%% rabbit_mirror_queue_coordinator -%% -%% We receive messages from GM and from publishers, and the gm -%% messages can arrive either before or after the 'actual' message. -%% All instructions from the GM group must be processed in the order -%% in which they're received. - --export([set_maximum_since_use/2, info/1, go/2]). - --export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3, handle_pre_hibernate/1, format_message_queue/2]). - --export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2, - prioritise_cast/3, prioritise_info/3]). - --behaviour(gen_server2). --behaviour(gm). - --include_lib("rabbit_common/include/rabbit.hrl"). - --include("amqqueue.hrl"). --include("gm_specs.hrl"). - -%%---------------------------------------------------------------------------- - --define(INFO_KEYS, - [pid, - name, - master_pid, - is_synchronised - ]). - --define(SYNC_INTERVAL, 25). %% milliseconds --define(RAM_DURATION_UPDATE_INTERVAL, 5000). --define(DEATH_TIMEOUT, 20000). %% 20 seconds - --record(state, { q, - gm, - backing_queue, - backing_queue_state, - sync_timer_ref, - rate_timer_ref, - - sender_queues, %% :: Pid -> {Q Msg, Set MsgId, ChState} - msg_id_ack, %% :: MsgId -> AckTag - - msg_id_status, - known_senders, - - %% Master depth - local depth - depth_delta - }). - -%%---------------------------------------------------------------------------- - -set_maximum_since_use(QPid, Age) -> - gen_server2:cast(QPid, {set_maximum_since_use, Age}). - - -prioritise_cast(Msg, _Len, _State) -> - case Msg of - {run_backing_queue, _Mod, _Fun} -> 6; - _ -> 0 - end. - -prioritise_info(Msg, _Len, _State) -> - case Msg of - sync_timeout -> 6; - _ -> 0 - end. - -info(QPid) -> gen_server2:call(QPid, info, infinity). - -init(Q) when ?is_amqqueue(Q) -> - QName = amqqueue:get_name(Q), - ?store_proc_name(QName), - {ok, {not_started, Q}, hibernate, - {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, - ?DESIRED_HIBERNATE}, ?MODULE}. - -go(SPid, sync) -> gen_server2:call(SPid, go, infinity); -go(SPid, async) -> gen_server2:cast(SPid, go). - -handle_go(Q0) when ?is_amqqueue(Q0) -> - QName = amqqueue:get_name(Q0), - %% We join the GM group before we add ourselves to the amqqueue - %% record. As a result: - %% 1. 
We can receive msgs from GM that correspond to messages we will - %% never receive from publishers. - %% 2. When we receive a message from publishers, we must receive a - %% message from the GM group for it. - %% 3. However, that instruction from the GM group can arrive either - %% before or after the actual message. We need to be able to - %% distinguish between GM instructions arriving early, and case (1) - %% above. - %% - process_flag(trap_exit, true), %% amqqueue_process traps exits too. - {ok, GM} = gm:start_link(QName, ?MODULE, [self()], - fun rabbit_mnesia:execute_mnesia_transaction/1), - MRef = erlang:monitor(process, GM), - %% We ignore the DOWN message because we are also linked and - %% trapping exits, we just want to not get stuck and we will exit - %% later. - receive - {joined, GM} -> erlang:demonitor(MRef, [flush]), - ok; - {'DOWN', MRef, _, _, _} -> ok - end, - Self = self(), - Node = node(), - case rabbit_mnesia:execute_mnesia_transaction( - fun() -> init_it(Self, GM, Node, QName) end) of - {new, QPid, GMPids} -> - ok = file_handle_cache:register_callback( - rabbit_amqqueue, set_maximum_since_use, [Self]), - ok = rabbit_memory_monitor:register( - Self, {rabbit_amqqueue, set_ram_duration_target, [Self]}), - {ok, BQ} = application:get_env(backing_queue_module), - Q1 = amqqueue:set_pid(Q0, QPid), - _ = BQ:delete_crashed(Q1), %% For crash recovery - BQS = bq_init(BQ, Q1, new), - State = #state { q = Q1, - gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - rate_timer_ref = undefined, - sync_timer_ref = undefined, - - sender_queues = #{}, - msg_id_ack = #{}, - - msg_id_status = #{}, - known_senders = pmon:new(delegate), - - depth_delta = undefined - }, - ok = gm:broadcast(GM, request_depth), - ok = gm:validate_members(GM, [GM | [G || {G, _} <- GMPids]]), - _ = rabbit_mirror_queue_misc:maybe_auto_sync(Q1), - {ok, State}; - {stale, StalePid} -> - rabbit_mirror_queue_misc:log_warning( - QName, "Detected stale classic mirrored queue leader: ~tp", [StalePid]), - gm:leave(GM), - {error, {stale_master_pid, StalePid}}; - duplicate_live_master -> - gm:leave(GM), - {error, {duplicate_live_master, Node}}; - existing -> - gm:leave(GM), - {error, normal}; - master_in_recovery -> - gm:leave(GM), - %% The queue record vanished - we must have a master starting - %% concurrently with us. In that case we can safely decide to do - %% nothing here, and the master will start us in - %% master:init_with_existing_bq/3 - {error, normal} - end. - -init_it(Self, GM, Node, QName) -> - case mnesia:read({rabbit_queue, QName}) of - [Q] when ?is_amqqueue(Q) -> - QPid = amqqueue:get_pid(Q), - SPids = amqqueue:get_slave_pids(Q), - GMPids = amqqueue:get_gm_pids(Q), - PSPids = amqqueue:get_slave_pids_pending_shutdown(Q), - case [Pid || Pid <- [QPid | SPids], node(Pid) =:= Node] of - [] -> _ = stop_pending_slaves(QName, PSPids), - _ = add_slave(Q, Self, GM), - {new, QPid, GMPids}; - [QPid] -> case rabbit_mnesia:is_process_alive(QPid) of - true -> duplicate_live_master; - false -> {stale, QPid} - end; - [SPid] -> case rabbit_mnesia:is_process_alive(SPid) of - true -> existing; - false -> GMPids1 = [T || T = {_, S} <- GMPids, S =/= SPid], - SPids1 = SPids -- [SPid], - Q1 = amqqueue:set_slave_pids(Q, SPids1), - Q2 = amqqueue:set_gm_pids(Q1, GMPids1), - _ = add_slave(Q2, Self, GM), - {new, QPid, GMPids1} - end - end; - [] -> - master_in_recovery - end. - -%% Pending mirrors have been asked to stop by the master, but despite the node -%% being up these did not answer on the expected timeout. 
Stop local mirrors now. -stop_pending_slaves(QName, Pids) -> - [begin - rabbit_mirror_queue_misc:log_warning( - QName, "Detected a non-responsive classic queue mirror, stopping it: ~tp", [Pid]), - case erlang:process_info(Pid, dictionary) of - undefined -> ok; - {dictionary, Dict} -> - Vhost = QName#resource.virtual_host, - {ok, AmqQSup} = rabbit_amqqueue_sup_sup:find_for_vhost(Vhost), - case proplists:get_value('$ancestors', Dict) of - [Sup, AmqQSup | _] -> - exit(Sup, kill), - exit(Pid, kill); - _ -> - ok - end - end - end || Pid <- Pids, node(Pid) =:= node(), - true =:= erlang:is_process_alive(Pid)]. - -%% Add to the end, so they are in descending order of age, see -%% rabbit_mirror_queue_misc:promote_slave/1 -add_slave(Q0, New, GM) when ?is_amqqueue(Q0) -> - SPids = amqqueue:get_slave_pids(Q0), - GMPids = amqqueue:get_gm_pids(Q0), - SPids1 = SPids ++ [New], - GMPids1 = [{GM, New} | GMPids], - Q1 = amqqueue:set_slave_pids(Q0, SPids1), - Q2 = amqqueue:set_gm_pids(Q1, GMPids1), - rabbit_mirror_queue_misc:store_updated_slaves(Q2). - -handle_call(go, _From, {not_started, Q} = NotStarted) -> - case handle_go(Q) of - {ok, State} -> {reply, ok, State}; - {error, Error} -> {stop, Error, NotStarted} - end; - -handle_call({gm_deaths, DeadGMPids}, From, - State = #state{ gm = GM, q = Q, - backing_queue = BQ, - backing_queue_state = BQS}) when ?is_amqqueue(Q) -> - QName = amqqueue:get_name(Q), - MPid = amqqueue:get_pid(Q), - Self = self(), - case rabbit_mirror_queue_misc:remove_from_queue(QName, Self, DeadGMPids) of - {error, not_found} -> - gen_server2:reply(From, ok), - {stop, normal, State}; - {error, {not_synced, _SPids}} -> - BQ:delete_and_terminate({error, not_synced}, BQS), - {stop, normal, State#state{backing_queue_state = undefined}}; - {ok, Pid, DeadPids, ExtraNodes} -> - rabbit_mirror_queue_misc:report_deaths(Self, false, QName, - DeadPids), - case Pid of - MPid -> - %% master hasn't changed - gen_server2:reply(From, ok), - rabbit_mirror_queue_misc:add_mirrors( - QName, ExtraNodes, async), - noreply(State); - Self -> - %% we've become master - QueueState = promote_me(From, State), - rabbit_mirror_queue_misc:add_mirrors( - QName, ExtraNodes, async), - {become, rabbit_amqqueue_process, QueueState, hibernate}; - _ -> - %% master has changed to not us - gen_server2:reply(From, ok), - %% see rabbitmq-server#914; - %% It's not always guaranteed that we won't have ExtraNodes. - %% If gm alters, master can change to not us with extra nodes, - %% in which case we attempt to add mirrors on those nodes. - case ExtraNodes of - [] -> void; - _ -> rabbit_mirror_queue_misc:add_mirrors( - QName, ExtraNodes, async) - end, - %% Since GM is by nature lazy we need to make sure - %% there is some traffic when a master dies, to - %% make sure all mirrors get informed of the - %% death. That is all process_death does, create - %% some traffic. - ok = gm:broadcast(GM, process_death), - Q1 = amqqueue:set_pid(Q, Pid), - State1 = State#state{q = Q1}, - noreply(State1) - end - end; - -handle_call(info, _From, State) -> - reply(infos(?INFO_KEYS, State), State). 
- -handle_cast(go, {not_started, Q} = NotStarted) -> - case handle_go(Q) of - {ok, State} -> {noreply, State}; - {error, Error} -> {stop, Error, NotStarted} - end; - -handle_cast({run_backing_queue, Mod, Fun}, State) -> - noreply(run_backing_queue(Mod, Fun, State)); - -handle_cast({gm, Instruction}, State = #state{q = Q0}) when ?is_amqqueue(Q0) -> - QName = amqqueue:get_name(Q0), - case rabbit_amqqueue:lookup(QName) of - {ok, Q1} when ?is_amqqueue(Q1) -> - SPids = amqqueue:get_slave_pids(Q1), - case lists:member(self(), SPids) of - true -> - handle_process_result(process_instruction(Instruction, State)); - false -> - %% Potentially a duplicated mirror caused by a partial partition, - %% will stop as a new mirror could start unaware of our presence - {stop, shutdown, State} - end; - {error, not_found} -> - %% Would not expect this to happen after fixing #953 - {stop, shutdown, State} - end; - -handle_cast({deliver, Delivery = #delivery{sender = Sender, flow = Flow}, true}, - State) -> - %% Asynchronous, non-"mandatory", deliver mode. - %% We are acking messages to the channel process that sent us - %% the message delivery. See - %% rabbit_amqqueue_process:handle_ch_down for more info. - %% If message is rejected by the master, the publish will be nacked - %% even if mirrors confirm it. No need to check for length here. - maybe_flow_ack(Sender, Flow), - noreply(maybe_enqueue_message(Delivery, State)); - -handle_cast({sync_start, Ref, Syncer}, - State = #state { depth_delta = DD, - backing_queue = BQ, - backing_queue_state = BQS }) -> - State1 = #state{rate_timer_ref = TRef} = ensure_rate_timer(State), - S = fun({MA, TRefN, BQSN}) -> - State1#state{depth_delta = undefined, - msg_id_ack = maps:from_list(MA), - rate_timer_ref = TRefN, - backing_queue_state = BQSN} - end, - case rabbit_mirror_queue_sync:slave( - DD, Ref, TRef, Syncer, BQ, BQS, - fun (BQN, BQSN) -> - BQSN1 = update_ram_duration(BQN, BQSN), - TRefN = rabbit_misc:send_after(?RAM_DURATION_UPDATE_INTERVAL, - self(), update_ram_duration), - {TRefN, BQSN1} - end) of - denied -> noreply(State1); - {ok, Res} -> noreply(set_delta(0, S(Res))); - {failed, Res} -> noreply(S(Res)); - {stop, Reason, Res} -> {stop, Reason, S(Res)} - end; - -handle_cast({set_maximum_since_use, Age}, State) -> - ok = file_handle_cache:set_maximum_since_use(Age), - noreply(State); - -handle_cast({set_ram_duration_target, Duration}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - BQS1 = BQ:set_ram_duration_target(Duration, BQS), - noreply(State #state { backing_queue_state = BQS1 }); - -handle_cast(policy_changed, State) -> - %% During partial partitions, we might end up receiving messages expected by a master - %% Ignore them - noreply(State). 
- -handle_info(update_ram_duration, State = #state{backing_queue = BQ, - backing_queue_state = BQS}) -> - BQS1 = update_ram_duration(BQ, BQS), - %% Don't call noreply/1, we don't want to set timers - {State1, Timeout} = next_state(State #state { - rate_timer_ref = undefined, - backing_queue_state = BQS1 }), - {noreply, State1, Timeout}; - -handle_info(sync_timeout, State) -> - noreply(backing_queue_timeout( - State #state { sync_timer_ref = undefined })); - -handle_info(timeout, State) -> - noreply(backing_queue_timeout(State)); - -handle_info({'DOWN', _MonitorRef, process, ChPid, _Reason}, State) -> - local_sender_death(ChPid, State), - noreply(maybe_forget_sender(ChPid, down_from_ch, State)); - -handle_info({'EXIT', _Pid, Reason}, State) -> - {stop, Reason, State}; - -handle_info({bump_credit, Msg}, State) -> - credit_flow:handle_bump_msg(Msg), - noreply(State); - -%% In the event of a short partition during sync we can detect the -%% master's 'death', drop out of sync, and then receive sync messages -%% which were still in flight. Ignore them. -handle_info({sync_msg, _Ref, _Msg, _Props, _Unacked}, State) -> - noreply(State); - -handle_info({sync_complete, _Ref}, State) -> - noreply(State); - -handle_info(Msg, State) -> - {stop, {unexpected_info, Msg}, State}. - -terminate(_Reason, {not_started, _Q}) -> - ok; -terminate(_Reason, #state { backing_queue_state = undefined }) -> - %% We've received a delete_and_terminate from gm, thus nothing to - %% do here. - ok; -terminate({shutdown, dropped} = R, State = #state{backing_queue = BQ, - backing_queue_state = BQS}) -> - %% See rabbit_mirror_queue_master:terminate/2 - terminate_common(State), - BQ:delete_and_terminate(R, BQS); -terminate(shutdown, State) -> - terminate_shutdown(shutdown, State); -terminate({shutdown, _} = R, State) -> - terminate_shutdown(R, State); -terminate(Reason, State = #state{backing_queue = BQ, - backing_queue_state = BQS}) -> - terminate_common(State), - BQ:delete_and_terminate(Reason, BQS). - -%% If the Reason is shutdown, or {shutdown, _}, it is not the queue -%% being deleted: it's just the node going down. Even though we're a -%% mirror, we have no idea whether or not we'll be the only copy coming -%% back up. Thus we must assume we will be, and preserve anything we -%% have on disk. -terminate_shutdown(Reason, State = #state{backing_queue = BQ, - backing_queue_state = BQS}) -> - terminate_common(State), - BQ:terminate(Reason, BQS). - -terminate_common(State) -> - ok = rabbit_memory_monitor:deregister(self()), - _ = stop_rate_timer(stop_sync_timer(State)), - ok. - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -handle_pre_hibernate({not_started, _Q} = State) -> - {hibernate, State}; - -handle_pre_hibernate(State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - {RamDuration, BQS1} = BQ:ram_duration(BQS), - DesiredDuration = - rabbit_memory_monitor:report_ram_duration(self(), RamDuration), - BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), - BQS3 = BQ:handle_pre_hibernate(BQS2), - {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS3 })}. - -format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ). - -%% --------------------------------------------------------------------------- -%% GM -%% --------------------------------------------------------------------------- - --spec joined(args(), members()) -> callback_result(). - -joined([SPid], _Members) -> SPid ! {joined, self()}, ok. 
- --spec members_changed(args(), members(),members()) -> callback_result(). - -members_changed([_SPid], _Births, []) -> - ok; -members_changed([ SPid], _Births, Deaths) -> - case rabbit_misc:with_exit_handler( - rabbit_misc:const(ok), - fun() -> - gen_server2:call(SPid, {gm_deaths, Deaths}, infinity) - end) of - ok -> ok; - {promote, CPid} -> {become, rabbit_mirror_queue_coordinator, [CPid]} - end. - --spec handle_msg(args(), pid(), any()) -> callback_result(). - -handle_msg([_SPid], _From, hibernate_heartbeat) -> - %% See rabbit_mirror_queue_coordinator:handle_pre_hibernate/1 - ok; -handle_msg([_SPid], _From, request_depth) -> - %% This is only of value to the master - ok; -handle_msg([_SPid], _From, {ensure_monitoring, _Pid}) -> - %% This is only of value to the master - ok; -handle_msg([_SPid], _From, process_death) -> - %% We must not take any notice of the master death here since it - %% comes without ordering guarantees - there could still be - %% messages from the master we have yet to receive. When we get - %% members_changed, then there will be no more messages. - ok; -handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) -> - ok = gen_server2:cast(CPid, {gm, Msg}), - {stop, {shutdown, ring_shutdown}}; -handle_msg([SPid], _From, {sync_start, Ref, Syncer, SPids}) -> - case lists:member(SPid, SPids) of - true -> gen_server2:cast(SPid, {sync_start, Ref, Syncer}); - false -> ok - end; -handle_msg([SPid], _From, Msg) -> - ok = gen_server2:cast(SPid, {gm, Msg}). - --spec handle_terminate(args(), term()) -> any(). - -handle_terminate([_SPid], _Reason) -> - ok. - -%% --------------------------------------------------------------------------- -%% Others -%% --------------------------------------------------------------------------- - -infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items]. - -i(pid, _State) -> - self(); -i(name, #state{q = Q}) when ?is_amqqueue(Q) -> - amqqueue:get_name(Q); -i(master_pid, #state{q = Q}) when ?is_amqqueue(Q) -> - amqqueue:get_pid(Q); -i(is_synchronised, #state{depth_delta = DD}) -> - DD =:= 0; -i(_, _) -> - ''. - -bq_init(BQ, Q, Recover) -> - Self = self(), - BQ:init(Q, Recover, - fun (Mod, Fun) -> - rabbit_amqqueue:run_backing_queue(Self, Mod, Fun) - end). - -run_backing_queue(rabbit_mirror_queue_master, Fun, State) -> - %% Yes, this might look a little crazy, but see comments in - %% confirm_sender_death/1 - Fun(?MODULE, State); -run_backing_queue(Mod, Fun, State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }. - -%% This feature was used by `rabbit_amqqueue_process` and -%% `rabbit_mirror_queue_slave` up-to and including RabbitMQ 3.7.x. It is -%% unused in 3.8.x and thus deprecated. We keep it to support in-place -%% upgrades to 3.8.x (i.e. mixed-version clusters), but it is a no-op -%% starting with that version. -send_mandatory(#delivery{mandatory = false}) -> - ok; -send_mandatory(#delivery{mandatory = true, - sender = SenderPid, - msg_seq_no = MsgSeqNo}) -> - gen_server2:cast(SenderPid, {mandatory_received, MsgSeqNo}). 
- -send_or_record_confirm(_, #delivery{ confirm = false }, MS, _State) -> - MS; -send_or_record_confirm(Status, #delivery { sender = ChPid, - confirm = true, - msg_seq_no = MsgSeqNo, - message = Msg - }, - MS, #state{q = Q}) -> - MsgId = mc:get_annotation(id, Msg), - IsPersistent = mc:is_persistent(Msg), - case IsPersistent of - true when ?amqqueue_is_durable(Q) andalso - Status == published -> - maps:put(MsgId, {published, ChPid, MsgSeqNo}, MS); - _ -> - ok = rabbit_classic_queue:confirm_to_sender(ChPid, - amqqueue:get_name(Q), - [MsgSeqNo]), - MS - end. - -confirm_messages(MsgIds, State = #state{q = Q, msg_id_status = MS}) -> - QName = amqqueue:get_name(Q), - {CMs, MS1} = - lists:foldl( - fun (MsgId, {CMsN, MSN} = Acc) -> - %% We will never see 'discarded' here - case maps:find(MsgId, MSN) of - error -> - %% If it needed confirming, it'll have - %% already been done. - Acc; - {ok, published} -> - %% Still not seen it from the channel, just - %% record that it's been confirmed. - {CMsN, maps:put(MsgId, confirmed, MSN)}; - {ok, {published, ChPid, MsgSeqNo}} -> - %% Seen from both GM and Channel. Can now - %% confirm. - {rabbit_misc:gb_trees_cons(ChPid, MsgSeqNo, CMsN), - maps:remove(MsgId, MSN)}; - {ok, confirmed} -> - %% It's already been confirmed. This is - %% probably it's been both sync'd to disk - %% and then delivered and ack'd before we've - %% seen the publish from the - %% channel. Nothing to do here. - Acc - end - end, {gb_trees:empty(), MS}, MsgIds), - Fun = fun (Pid, MsgSeqNos) -> - rabbit_classic_queue:confirm_to_sender(Pid, QName, MsgSeqNos) - end, - rabbit_misc:gb_trees_foreach(Fun, CMs), - State #state { msg_id_status = MS1 }. - -handle_process_result({ok, State}) -> noreply(State); -handle_process_result({stop, State}) -> {stop, normal, State}. - --spec promote_me({pid(), term()}, #state{}) -> no_return(). - -promote_me(From, #state { q = Q0, - gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - rate_timer_ref = RateTRef, - sender_queues = SQ, - msg_id_ack = MA, - msg_id_status = MS, - known_senders = KS}) when ?is_amqqueue(Q0) -> - QName = amqqueue:get_name(Q0), - rabbit_mirror_queue_misc:log_info(QName, "Promoting mirror ~ts to leader", - [rabbit_misc:pid_to_string(self())]), - Q1 = amqqueue:set_pid(Q0, self()), - DeathFun = rabbit_mirror_queue_master:sender_death_fun(), - DepthFun = rabbit_mirror_queue_master:depth_fun(), - {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q1, GM, DeathFun, DepthFun), - true = unlink(GM), - gen_server2:reply(From, {promote, CPid}), - - %% Everything that we're monitoring, we need to ensure our new - %% coordinator is monitoring. - MPids = pmon:monitored(KS), - ok = rabbit_mirror_queue_coordinator:ensure_monitoring(CPid, MPids), - - %% We find all the messages that we've received from channels but - %% not from gm, and pass them to the - %% queue_process:init_with_backing_queue_state to be enqueued. - %% - %% We also have to requeue messages which are pending acks: the - %% consumers from the master queue have been lost and so these - %% messages need requeuing. They might also be pending - %% confirmation, and indeed they might also be pending arrival of - %% the publication from the channel itself, if we received both - %% the publication and the fetch via gm first! Requeuing doesn't - %% affect confirmations: if the message was previously pending a - %% confirmation then it still will be, under the same msg_id. 
So - %% as a master, we need to be prepared to filter out the - %% publication of said messages from the channel (is_duplicate - %% (thus such requeued messages must remain in the msg_id_status - %% (MS) which becomes seen_status (SS) in the master)). - %% - %% Then there are messages we already have in the queue, which are - %% not currently pending acknowledgement: - %% 1. Messages we've only received via gm: - %% Filter out subsequent publication from channel through - %% validate_message. Might have to issue confirms then or - %% later, thus queue_process state will have to know that - %% there's a pending confirm. - %% 2. Messages received via both gm and channel: - %% Queue will have to deal with issuing confirms if necessary. - %% - %% MS contains the following three entry types: - %% - %% a) published: - %% published via gm only; pending arrival of publication from - %% channel, maybe pending confirm. - %% - %% b) {published, ChPid, MsgSeqNo}: - %% published via gm and channel; pending confirm. - %% - %% c) confirmed: - %% published via gm only, and confirmed; pending publication - %% from channel. - %% - %% d) discarded: - %% seen via gm only as discarded. Pending publication from - %% channel - %% - %% The forms a, c and d only, need to go to the master state - %% seen_status (SS). - %% - %% The form b only, needs to go through to the queue_process - %% state to form the msg_id_to_channel mapping (MTC). - %% - %% No messages that are enqueued from SQ at this point will have - %% entries in MS. - %% - %% Messages that are extracted from MA may have entries in MS, and - %% those messages are then requeued. However, as discussed above, - %% this does not affect MS, nor which bits go through to SS in - %% Master, or MTC in queue_process. - - St = [published, confirmed, discarded], - SS = maps:filter(fun (_MsgId, Status) -> lists:member(Status, St) end, MS), - AckTags = [AckTag || {_MsgId, AckTag} <- maps:to_list(MA)], - - MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - QName, CPid, BQ, BQS, GM, AckTags, SS, MPids), - - MTC = maps:fold(fun (MsgId, {published, ChPid, MsgSeqNo}, MTC0) -> - maps:put(MsgId, {ChPid, MsgSeqNo}, MTC0); - (_Msgid, _Status, MTC0) -> - MTC0 - end, #{}, MS), - Deliveries = [promote_delivery(Delivery) || - {_ChPid, {PubQ, _PendCh, _ChState}} <- maps:to_list(SQ), - Delivery <- queue:to_list(PubQ)], - AwaitGmDown = [ChPid || {ChPid, {_, _, down_from_ch}} <- maps:to_list(SQ)], - KS1 = lists:foldl(fun (ChPid0, KS0) -> - pmon:demonitor(ChPid0, KS0) - end, KS, AwaitGmDown), - rabbit_misc:store_proc_name(rabbit_amqqueue_process, QName), - rabbit_amqqueue_process:init_with_backing_queue_state( - Q1, rabbit_mirror_queue_master, MasterState, RateTRef, Deliveries, KS1, - MTC). - -%% We reset mandatory to false here because we will have sent the -%% mandatory_received already as soon as we got the message. We also -%% need to send an ack for these messages since the channel is waiting -%% for one for the via-GM case and we will not now receive one. -promote_delivery(Delivery = #delivery{sender = Sender, flow = Flow}) -> - maybe_flow_ack(Sender, Flow), - Delivery#delivery{mandatory = false}. - -noreply(State) -> - {NewState, Timeout} = next_state(State), - {noreply, ensure_rate_timer(NewState), Timeout}. - -reply(Reply, State) -> - {NewState, Timeout} = next_state(State), - {reply, Reply, ensure_rate_timer(NewState), Timeout}. 
- -next_state(State = #state{backing_queue = BQ, backing_queue_state = BQS}) -> - {MsgIds, BQS1} = BQ:drain_confirmed(BQS), - State1 = confirm_messages(MsgIds, - State #state { backing_queue_state = BQS1 }), - case BQ:needs_timeout(BQS1) of - false -> {stop_sync_timer(State1), hibernate }; - idle -> {stop_sync_timer(State1), ?SYNC_INTERVAL}; - timed -> {ensure_sync_timer(State1), 0 } - end. - -backing_queue_timeout(State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - State#state{backing_queue_state = BQ:timeout(BQS)}. - -ensure_sync_timer(State) -> - rabbit_misc:ensure_timer(State, #state.sync_timer_ref, - ?SYNC_INTERVAL, sync_timeout). - -stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #state.sync_timer_ref). - -ensure_rate_timer(State) -> - rabbit_misc:ensure_timer(State, #state.rate_timer_ref, - ?RAM_DURATION_UPDATE_INTERVAL, - update_ram_duration). - -stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #state.rate_timer_ref). - -ensure_monitoring(ChPid, State = #state { known_senders = KS }) -> - State #state { known_senders = pmon:monitor(ChPid, KS) }. - -local_sender_death(ChPid, #state { known_senders = KS }) -> - %% The channel will be monitored iff we have received a delivery - %% from it but not heard about its death from the master. So if it - %% is monitored we need to point the death out to the master (see - %% essay). - ok = case pmon:is_monitored(ChPid, KS) of - false -> ok; - true -> confirm_sender_death(ChPid) - end. - -confirm_sender_death(Pid) -> - %% We have to deal with the possibility that we'll be promoted to - %% master before this thing gets run. Consequently we set the - %% module to rabbit_mirror_queue_master so that if we do become a - %% rabbit_amqqueue_process before then, sane things will happen. - Fun = - fun (?MODULE, State = #state { known_senders = KS, - gm = GM }) -> - %% We're running still as a mirror - %% - %% See comment in local_sender_death/2; we might have - %% received a sender_death in the meanwhile so check - %% again. - ok = case pmon:is_monitored(Pid, KS) of - false -> ok; - true -> gm:broadcast(GM, {ensure_monitoring, [Pid]}), - confirm_sender_death(Pid) - end, - State; - (rabbit_mirror_queue_master, State) -> - %% We've become a master. State is now opaque to - %% us. When we became master, if Pid was still known - %% to us then we'd have set up monitoring of it then, - %% so this is now a noop. - State - end, - %% Note that we do not remove our knowledge of this ChPid until we - %% get the sender_death from GM as well as a DOWN notification. - {ok, _TRef} = timer:apply_after( - ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue, - [self(), rabbit_mirror_queue_master, Fun]), - ok. - -forget_sender(_, running) -> false; -forget_sender(down_from_gm, down_from_gm) -> false; %% [1] -forget_sender(down_from_ch, down_from_ch) -> false; -forget_sender(Down1, Down2) when Down1 =/= Down2 -> true. - -%% [1] If another mirror goes through confirm_sender_death/1 before we -%% do we can get two GM sender_death messages in a row for the same -%% channel - don't treat that as anything special. - -%% Record and process lifetime events from channels. Forget all about a channel -%% only when down notifications are received from both the channel and from gm. 
-maybe_forget_sender(ChPid, ChState, State = #state { sender_queues = SQ, - msg_id_status = MS, - known_senders = KS }) -> - case maps:find(ChPid, SQ) of - error -> - State; - {ok, {MQ, PendCh, ChStateRecord}} -> - case forget_sender(ChState, ChStateRecord) of - true -> - credit_flow:peer_down(ChPid), - State #state { sender_queues = maps:remove(ChPid, SQ), - msg_id_status = lists:foldl( - fun maps:remove/2, - MS, sets:to_list(PendCh)), - known_senders = pmon:demonitor(ChPid, KS) }; - false -> - SQ1 = maps:put(ChPid, {MQ, PendCh, ChState}, SQ), - State #state { sender_queues = SQ1 } - end - end. - -maybe_enqueue_message( - Delivery = #delivery { message = Msg, - sender = ChPid }, - State = #state { sender_queues = SQ, msg_id_status = MS }) -> - MsgId = mc:get_annotation(id, Msg), - send_mandatory(Delivery), %% must do this before confirms - State1 = ensure_monitoring(ChPid, State), - %% We will never see {published, ChPid, MsgSeqNo} here. - case maps:find(MsgId, MS) of - error -> - {MQ, PendingCh, ChState} = get_sender_queue(ChPid, SQ), - MQ1 = queue:in(Delivery, MQ), - SQ1 = maps:put(ChPid, {MQ1, PendingCh, ChState}, SQ), - State1 #state { sender_queues = SQ1 }; - {ok, Status} -> - MS1 = send_or_record_confirm( - Status, Delivery, maps:remove(MsgId, MS), State1), - SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ), - State1 #state { msg_id_status = MS1, - sender_queues = SQ1 } - end. - -get_sender_queue(ChPid, SQ) -> - case maps:find(ChPid, SQ) of - error -> {queue:new(), sets:new([{version, 2}]), running}; - {ok, Val} -> Val - end. - -remove_from_pending_ch(MsgId, ChPid, SQ) -> - case maps:find(ChPid, SQ) of - error -> - SQ; - {ok, {MQ, PendingCh, ChState}} -> - maps:put(ChPid, {MQ, sets:del_element(MsgId, PendingCh), ChState}, - SQ) - end. - -publish_or_discard(Status, ChPid, MsgId, - State = #state { sender_queues = SQ, msg_id_status = MS }) -> - %% We really are going to do the publish/discard right now, even - %% though we may not have seen it directly from the channel. But - %% we cannot issue confirms until the latter has happened. So we - %% need to keep track of the MsgId and its confirmation status in - %% the meantime. - State1 = ensure_monitoring(ChPid, State), - {MQ, PendingCh, ChState} = get_sender_queue(ChPid, SQ), - {MQ1, PendingCh1, MS1} = - case queue:out(MQ) of - {empty, _MQ2} -> - {MQ, sets:add_element(MsgId, PendingCh), - maps:put(MsgId, Status, MS)}; - {{value, Delivery = #delivery { - message = Msg }}, MQ2} -> - case mc:get_annotation(id, Msg) of - MsgId -> - {MQ2, PendingCh, - %% We received the msg from the channel first. Thus - %% we need to deal with confirms here. - send_or_record_confirm(Status, Delivery, MS, State1)}; - _ -> - %% The instruction was sent to us before we were - %% within the slave_pids within the #amqqueue{} - %% record. We'll never receive the message directly - %% from the channel. And the channel will not be - %% expecting any confirms from us. - {MQ, PendingCh, MS} - end - end, - SQ1 = maps:put(ChPid, {MQ1, PendingCh1, ChState}, SQ), - State1 #state { sender_queues = SQ1, msg_id_status = MS1 }. 
- - -process_instruction({publish, ChPid, Flow, MsgProps, Msg}, State) -> - MsgId = mc:get_annotation(id, Msg), - maybe_flow_ack(ChPid, Flow), - State1 = #state { backing_queue = BQ, backing_queue_state = BQS } = - publish_or_discard(published, ChPid, MsgId, State), - BQS1 = BQ:publish(Msg, MsgProps, true, ChPid, Flow, BQS), - {ok, State1 #state { backing_queue_state = BQS1 }}; -process_instruction({batch_publish, ChPid, Flow, Publishes}, State) -> - maybe_flow_ack(ChPid, Flow), - State1 = #state { backing_queue = BQ, backing_queue_state = BQS } = - lists:foldl(fun ({Msg, _MsgProps, _IsDelivered}, St) -> - MsgId = mc:get_annotation(id, Msg), - publish_or_discard(published, ChPid, MsgId, St) - end, State, Publishes), - BQS1 = BQ:batch_publish(Publishes, ChPid, Flow, BQS), - {ok, State1 #state { backing_queue_state = BQS1 }}; -process_instruction({publish_delivered, ChPid, Flow, MsgProps, Msg}, State) -> - MsgId = mc:get_annotation(id, Msg), - maybe_flow_ack(ChPid, Flow), - State1 = #state { backing_queue = BQ, backing_queue_state = BQS } = - publish_or_discard(published, ChPid, MsgId, State), - true = BQ:is_empty(BQS), - {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS), - {ok, maybe_store_ack(true, MsgId, AckTag, - State1 #state { backing_queue_state = BQS1 })}; -process_instruction({batch_publish_delivered, ChPid, Flow, Publishes}, State) -> - maybe_flow_ack(ChPid, Flow), - {MsgIds, - State1 = #state { backing_queue = BQ, backing_queue_state = BQS }} = - lists:foldl(fun ({Msg, _MsgProps}, - {MsgIds, St}) -> - MsgId = mc:get_annotation(id, Msg), - {[MsgId | MsgIds], - publish_or_discard(published, ChPid, MsgId, St)} - end, {[], State}, Publishes), - true = BQ:is_empty(BQS), - {AckTags, BQS1} = BQ:batch_publish_delivered(Publishes, ChPid, Flow, BQS), - MsgIdsAndAcks = lists:zip(lists:reverse(MsgIds), AckTags), - State2 = lists:foldl( - fun ({MsgId, AckTag}, St) -> - maybe_store_ack(true, MsgId, AckTag, St) - end, State1 #state { backing_queue_state = BQS1 }, - MsgIdsAndAcks), - {ok, State2}; -process_instruction({discard, ChPid, Flow, MsgId}, State) -> - maybe_flow_ack(ChPid, Flow), - State1 = #state { backing_queue = BQ, backing_queue_state = BQS } = - publish_or_discard(discarded, ChPid, MsgId, State), - BQS1 = BQ:discard(MsgId, ChPid, Flow, BQS), - {ok, State1 #state { backing_queue_state = BQS1 }}; -process_instruction({drop, Length, Dropped, AckRequired}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - QLen = BQ:len(BQS), - ToDrop = case QLen - Length of - N when N > 0 -> N; - _ -> 0 - end, - State1 = lists:foldl( - fun (const, StateN = #state{backing_queue_state = BQSN}) -> - {{MsgId, AckTag}, BQSN1} = BQ:drop(AckRequired, BQSN), - maybe_store_ack( - AckRequired, MsgId, AckTag, - StateN #state { backing_queue_state = BQSN1 }) - end, State, lists:duplicate(ToDrop, const)), - {ok, case AckRequired of - true -> State1; - false -> update_delta(ToDrop - Dropped, State1) - end}; -process_instruction({ack, MsgIds}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS, - msg_id_ack = MA }) -> - {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA), - {MsgIds1, BQS1} = BQ:ack(AckTags, BQS), - [] = MsgIds1 -- MsgIds, %% ASSERTION - {ok, update_delta(length(MsgIds1) - length(MsgIds), - State #state { msg_id_ack = MA1, - backing_queue_state = BQS1 })}; -process_instruction({requeue, MsgIds}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS, - msg_id_ack = MA }) -> - {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA), - 
{_MsgIds, BQS1} = BQ:requeue(AckTags, BQS), - {ok, State #state { msg_id_ack = MA1, - backing_queue_state = BQS1 }}; -process_instruction({sender_death, ChPid}, - State = #state { known_senders = KS }) -> - %% The channel will be monitored iff we have received a message - %% from it. In this case we just want to avoid doing work if we - %% never got any messages. - {ok, case pmon:is_monitored(ChPid, KS) of - false -> State; - true -> maybe_forget_sender(ChPid, down_from_gm, State) - end}; -process_instruction({depth, Depth}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - {ok, set_delta(Depth - BQ:depth(BQS), State)}; - -process_instruction({delete_and_terminate, Reason}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - BQ:delete_and_terminate(Reason, BQS), - {stop, State #state { backing_queue_state = undefined }}; -process_instruction({set_queue_mode, Mode}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - BQS1 = BQ:set_queue_mode(Mode, BQS), - {ok, State #state { backing_queue_state = BQS1 }}; -process_instruction({set_queue_version, Version}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> - BQS1 = BQ:set_queue_version(Version, BQS), - {ok, State #state { backing_queue_state = BQS1 }}. - -maybe_flow_ack(Sender, flow) -> credit_flow:ack(Sender); -maybe_flow_ack(_Sender, noflow) -> ok. - -msg_ids_to_acktags(MsgIds, MA) -> - {AckTags, MA1} = - lists:foldl( - fun (MsgId, {Acc, MAN}) -> - case maps:find(MsgId, MA) of - error -> {Acc, MAN}; - {ok, AckTag} -> {[AckTag | Acc], maps:remove(MsgId, MAN)} - end - end, {[], MA}, MsgIds), - {lists:reverse(AckTags), MA1}. - -maybe_store_ack(false, _MsgId, _AckTag, State) -> - State; -maybe_store_ack(true, MsgId, AckTag, State = #state { msg_id_ack = MA }) -> - State #state { msg_id_ack = maps:put(MsgId, AckTag, MA) }. - -set_delta(0, State = #state { depth_delta = undefined }) -> - ok = record_synchronised(State#state.q), - State #state { depth_delta = 0 }; -set_delta(NewDelta, State = #state { depth_delta = undefined }) -> - true = NewDelta > 0, %% assertion - State #state { depth_delta = NewDelta }; -set_delta(NewDelta, State = #state { depth_delta = Delta }) -> - update_delta(NewDelta - Delta, State). - -update_delta(_DeltaChange, State = #state { depth_delta = undefined }) -> - State; -update_delta( DeltaChange, State = #state { depth_delta = 0 }) -> - 0 = DeltaChange, %% assertion: we cannot become unsync'ed - State; -update_delta( DeltaChange, State = #state { depth_delta = Delta }) -> - true = DeltaChange =< 0, %% assertion: we cannot become 'less' sync'ed - set_delta(Delta + DeltaChange, State #state { depth_delta = undefined }). - -update_ram_duration(BQ, BQS) -> - {RamDuration, BQS1} = BQ:ram_duration(BQS), - DesiredDuration = - rabbit_memory_monitor:report_ram_duration(self(), RamDuration), - BQ:set_ram_duration_target(DesiredDuration, BQS1). - -record_synchronised(Q0) when ?is_amqqueue(Q0) -> - QName = amqqueue:get_name(Q0), - Self = self(), - F = fun () -> - case mnesia:read({rabbit_queue, QName}) of - [] -> - ok; - [Q1] when ?is_amqqueue(Q1) -> - SSPids = amqqueue:get_sync_slave_pids(Q1), - SSPids1 = [Self | SSPids], - Q2 = amqqueue:set_sync_slave_pids(Q1, SSPids1), - _ = rabbit_mirror_queue_misc:store_updated_slaves(Q2), - {ok, Q2} - end - end, - case rabbit_mnesia:execute_mnesia_transaction(F) of - ok -> ok; - {ok, Q2} -> rabbit_mirror_queue_misc:maybe_drop_master_after_sync(Q2) - end. 
diff --git a/deps/rabbit/src/rabbit_mirror_queue_sync.erl b/deps/rabbit/src/rabbit_mirror_queue_sync.erl deleted file mode 100644 index 292b77558e47..000000000000 --- a/deps/rabbit/src/rabbit_mirror_queue_sync.erl +++ /dev/null @@ -1,469 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_mirror_queue_sync). - --include_lib("rabbit_common/include/rabbit.hrl"). - --export([master_prepare/4, master_go/9, slave/7, conserve_resources/3]). - -%% Export for UTs --export([maybe_master_batch_send/2, get_time_diff/3, append_to_acc/4]). - --define(SYNC_PROGRESS_INTERVAL, 1000000). - --define(SYNC_THROUGHPUT_EVAL_INTERVAL_MILLIS, 50). - -%% There are three processes around, the master, the syncer and the -%% slave(s). The syncer is an intermediary, linked to the master in -%% order to make sure we do not mess with the master's credit flow or -%% set of monitors. -%% -%% Interactions -%% ------------ -%% -%% '*' indicates repeating messages. All are standard Erlang messages -%% except sync_start which is sent over GM to flush out any other -%% messages that we might have sent that way already. (credit) is the -%% usual credit_flow bump message every so often. -%% -%% Master Syncer Slave(s) -%% sync_mirrors -> || || -%% || -- (spawns) --> || || -%% || --------- sync_start (over GM) -------> || -%% || || <--- sync_ready ---- || -%% || || (or) || -%% || || <--- sync_deny ----- || -%% || <--- ready ---- || || -%% || <--- next* ---- || || } -%% || ---- msg* ----> || || } loop -%% || || ---- sync_msgs* ---> || } -%% || || <--- (credit)* ----- || } -%% || <--- next ---- || || -%% || ---- done ----> || || -%% || || -- sync_complete --> || -%% || (Dies) || - --type log_fun() :: fun ((string(), [any()]) -> 'ok'). --type bq() :: atom(). --type bqs() :: any(). --type ack() :: any(). --type slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(), - bqs()}. - -%% --------------------------------------------------------------------------- -%% Master - --spec master_prepare(reference(), rabbit_amqqueue:name(), - log_fun(), [pid()]) -> pid(). - -master_prepare(Ref, QName, Log, SPids) -> - MPid = self(), - spawn_link(fun () -> - ?store_proc_name(QName), - syncer(Ref, Log, MPid, SPids) - end). - --spec master_go(pid(), reference(), log_fun(), - rabbit_mirror_queue_master:stats_fun(), - rabbit_mirror_queue_master:stats_fun(), - non_neg_integer(), - non_neg_integer(), - bq(), bqs()) -> - {'already_synced', bqs()} | {'ok', bqs()} | - {'cancelled', bqs()} | - {'shutdown', any(), bqs()} | - {'sync_died', any(), bqs()}. - -master_go(Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, SyncThroughput, BQ, BQS) -> - Args = {Syncer, Ref, Log, HandleInfo, EmitStats, rabbit_misc:get_parent()}, - receive - {'EXIT', Syncer, normal} -> {already_synced, BQS}; - {'EXIT', Syncer, Reason} -> {sync_died, Reason, BQS}; - {ready, Syncer} -> EmitStats({syncing, 0}), - master_batch_go0(Args, SyncBatchSize, SyncThroughput, - BQ, BQS) - end. 
- -master_batch_go0(Args, BatchSize, SyncThroughput, BQ, BQS) -> - FoldFun = - fun (Msg, MsgProps, Unacked, Acc) -> - Acc1 = append_to_acc(Msg, MsgProps, Unacked, Acc), - case maybe_master_batch_send(Acc1, BatchSize) of - true -> master_batch_send(Args, Acc1); - false -> {cont, Acc1} - end - end, - FoldAcc = {[], 0, {0, erlang:monotonic_time(), SyncThroughput}, {0, BQ:depth(BQS)}, erlang:monotonic_time()}, - bq_fold(FoldFun, FoldAcc, Args, BQ, BQS). - -master_batch_send({Syncer, Ref, Log, HandleInfo, EmitStats, Parent}, - {Batch, I, {TotalBytes, LastCheck, SyncThroughput}, {Curr, Len}, Last}) -> - T = maybe_emit_stats(Last, I, EmitStats, Log), - HandleInfo({syncing, I}), - handle_set_maximum_since_use(), - SyncMsg = {msgs, Ref, lists:reverse(Batch)}, - NewAcc = {[], I + length(Batch), {TotalBytes, LastCheck, SyncThroughput}, {Curr, Len}, T}, - master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent). - -%% Either send messages when we reach the last one in the queue or -%% whenever we have accumulated BatchSize messages. -maybe_master_batch_send({_, _, _, {Len, Len}, _}, _BatchSize) -> - true; -maybe_master_batch_send({_, _, _, {Curr, _Len}, _}, BatchSize) - when Curr rem BatchSize =:= 0 -> - true; -maybe_master_batch_send({_, _, {TotalBytes, _, SyncThroughput}, {_Curr, _Len}, _}, _BatchSize) - when TotalBytes > SyncThroughput -> - true; -maybe_master_batch_send(_Acc, _BatchSize) -> - false. - -bq_fold(FoldFun, FoldAcc, Args, BQ, BQS) -> - case BQ:fold(FoldFun, FoldAcc, BQS) of - {{shutdown, Reason}, BQS1} -> {shutdown, Reason, BQS1}; - {{sync_died, Reason}, BQS1} -> {sync_died, Reason, BQS1}; - {_, BQS1} -> master_done(Args, BQS1) - end. - -append_to_acc(Msg, MsgProps, Unacked, {Batch, I, {_, _, 0}, {Curr, Len}, T}) -> - {[{Msg, MsgProps, Unacked} | Batch], I, {0, 0, 0}, {Curr + 1, Len}, T}; -append_to_acc(Msg, MsgProps, Unacked, {Batch, I, {TotalBytes, LastCheck, SyncThroughput}, {Curr, Len}, T}) -> - {_, MsgSize} = mc:size(Msg), - {[{Msg, MsgProps, Unacked} | Batch], I, {TotalBytes + MsgSize, LastCheck, SyncThroughput}, {Curr + 1, Len}, T}. - -master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent) -> - receive - {'$gen_call', From, - cancel_sync_mirrors} -> stop_syncer(Syncer, {cancel, Ref}), - gen_server2:reply(From, ok), - {stop, cancelled}; - {next, Ref} -> Syncer ! SyncMsg, - {Msgs, I , {TotalBytes, LastCheck, SyncThroughput}, {Curr, Len}, T} = NewAcc, - {NewTotalBytes, NewLastCheck} = maybe_throttle_sync_throughput(TotalBytes, LastCheck, SyncThroughput), - {cont, {Msgs, I, {NewTotalBytes, NewLastCheck, SyncThroughput}, {Curr, Len}, T}}; - {'EXIT', Parent, Reason} -> {stop, {shutdown, Reason}}; - {'EXIT', Syncer, Reason} -> {stop, {sync_died, Reason}} - end. - -maybe_throttle_sync_throughput(_ , _, 0) -> - {0, erlang:monotonic_time()}; -maybe_throttle_sync_throughput(TotalBytes, LastCheck, SyncThroughput) -> - Interval = erlang:convert_time_unit(erlang:monotonic_time() - LastCheck, native, milli_seconds), - case Interval > ?SYNC_THROUGHPUT_EVAL_INTERVAL_MILLIS of - true -> maybe_pause_sync(TotalBytes, Interval, SyncThroughput), - {0, erlang:monotonic_time()}; %% reset TotalBytes counter and LastCheck.; - false -> {TotalBytes, LastCheck} - end. - -maybe_pause_sync(TotalBytes, Interval, SyncThroughput) -> - Delta = get_time_diff(TotalBytes, Interval, SyncThroughput), - pause_queue_sync(Delta). - -pause_queue_sync(0) -> - rabbit_log_mirroring:debug("Sync throughput is ok."); -pause_queue_sync(Delta) -> - rabbit_log_mirroring:debug("Sync throughput exceeds threshold. 
Pause queue sync for ~tp ms", [Delta]), - timer:sleep(Delta). - -%% Sync throughput computation: -%% - Total bytes have been sent since last check: TotalBytes -%% - Used/Elapsed time since last check: Interval (in milliseconds) -%% - Effective/Used throughput in bytes/s: TotalBytes/Interval * 1000. -%% - When UsedThroughput > SyncThroughput -> we need to slow down to compensate over-used rate. -%% The amount of time to pause queue sync is the different between time needed to broadcast TotalBytes at max throughput -%% and the elapsed time (Interval). -get_time_diff(TotalBytes, Interval, SyncThroughput) -> - rabbit_log_mirroring:debug("Total ~tp bytes has been sent over last ~tp ms. Effective sync througput: ~tp", [TotalBytes, Interval, round(TotalBytes * 1000 / Interval)]), - max(round(TotalBytes/SyncThroughput * 1000 - Interval), 0). - -master_done({Syncer, Ref, _Log, _HandleInfo, _EmitStats, Parent}, BQS) -> - receive - {'$gen_call', From, - cancel_sync_mirrors} -> - stop_syncer(Syncer, {cancel, Ref}), - gen_server2:reply(From, ok), - {cancelled, BQS}; - {cancelled, Ref} -> - {cancelled, BQS}; - {next, Ref} -> - stop_syncer(Syncer, {done, Ref}), - {ok, BQS}; - {'EXIT', Parent, Reason} -> - {shutdown, Reason, BQS}; - {'EXIT', Syncer, Reason} -> - {sync_died, Reason, BQS} - end. - -stop_syncer(Syncer, Msg) -> - unlink(Syncer), - Syncer ! Msg, - receive {'EXIT', Syncer, _} -> ok - after 0 -> ok - end. - -maybe_emit_stats(Last, I, EmitStats, Log) -> - Interval = erlang:convert_time_unit( - erlang:monotonic_time() - Last, native, micro_seconds), - case Interval > ?SYNC_PROGRESS_INTERVAL of - true -> EmitStats({syncing, I}), - Log("~tp messages", [I]), - erlang:monotonic_time(); - false -> Last - end. - -handle_set_maximum_since_use() -> - receive - {'$gen_cast', {set_maximum_since_use, Age}} -> - ok = file_handle_cache:set_maximum_since_use(Age) - after 0 -> - ok - end. - -%% Master -%% --------------------------------------------------------------------------- -%% Syncer - -syncer(Ref, Log, MPid, SPids) -> - [erlang:monitor(process, SPid) || SPid <- SPids], - %% We wait for a reply from the mirrors so that we know they are in - %% a receive block and will thus receive messages we send to them - %% *without* those messages ending up in their gen_server2 pqueue. - case await_slaves(Ref, SPids) of - [] -> Log("all mirrors already synced", []); - SPids1 -> MPid ! {ready, self()}, - Log("mirrors ~tp to sync", [[node(SPid) || SPid <- SPids1]]), - syncer_check_resources(Ref, MPid, SPids1) - end. - -await_slaves(Ref, SPids) -> - [SPid || SPid <- SPids, - rabbit_mnesia:on_running_node(SPid) andalso %% [0] - receive - {sync_ready, Ref, SPid} -> true; - {sync_deny, Ref, SPid} -> false; - {'DOWN', _, process, SPid, _} -> false - end]. -%% [0] This check is in case there's been a partition which has then -%% healed in between the master retrieving the mirror pids from Mnesia -%% and sending 'sync_start' over GM. If so there might be mirrors on the -%% other side of the partition which we can monitor (since they have -%% rejoined the distributed system with us) but which did not get the -%% 'sync_start' and so will not reply. We need to act as though they are -%% down. - -syncer_check_resources(Ref, MPid, SPids) -> - _ = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), - %% Before we ask the master node to send the first batch of messages - %% over here, we check if one node is already short on memory. 
If - %% that's the case, we wait for the alarm to be cleared before - %% starting the syncer loop. - AlarmedNodes = lists:any( - fun - ({{resource_limit, memory, _}, _}) -> true; - ({_, _}) -> false - end, rabbit_alarm:get_alarms()), - if - not AlarmedNodes -> - MPid ! {next, Ref}, - syncer_loop(Ref, MPid, SPids); - true -> - case wait_for_resources(Ref, SPids) of - cancel -> MPid ! {cancelled, Ref}; - SPids1 -> MPid ! {next, Ref}, - syncer_loop(Ref, MPid, SPids1) - end - end. - -syncer_loop(Ref, MPid, SPids) -> - receive - {conserve_resources, memory, true} -> - case wait_for_resources(Ref, SPids) of - cancel -> MPid ! {cancelled, Ref}; - SPids1 -> syncer_loop(Ref, MPid, SPids1) - end; - {conserve_resources, _, _} -> - %% Ignore other alerts. - syncer_loop(Ref, MPid, SPids); - {msgs, Ref, Msgs} -> - SPids1 = wait_for_credit(SPids), - case SPids1 of - [] -> - % Die silently because there are no mirrors left. - ok; - _ -> - _ = broadcast(SPids1, {sync_msgs, Ref, Msgs}), - MPid ! {next, Ref}, - syncer_loop(Ref, MPid, SPids1) - end; - {cancel, Ref} -> - %% We don't tell the mirrors we will die - so when we do - %% they interpret that as a failure, which is what we - %% want. - ok; - {done, Ref} -> - [SPid ! {sync_complete, Ref} || SPid <- SPids] - end. - -broadcast(SPids, Msg) -> - [begin - credit_flow:send(SPid), - SPid ! Msg - end || SPid <- SPids]. - --spec conserve_resources(pid(), - rabbit_alarm:resource_alarm_source(), - rabbit_alarm:resource_alert()) -> ok. -conserve_resources(Pid, Source, {_, Conserve, _}) -> - Pid ! {conserve_resources, Source, Conserve}, - ok. - -wait_for_credit(SPids) -> - case credit_flow:blocked() of - true -> receive - {bump_credit, Msg} -> - credit_flow:handle_bump_msg(Msg), - wait_for_credit(SPids); - {'DOWN', _, process, SPid, _} -> - credit_flow:peer_down(SPid), - wait_for_credit(lists:delete(SPid, SPids)) - end; - false -> SPids - end. - -wait_for_resources(Ref, SPids) -> - erlang:garbage_collect(), - receive - {conserve_resources, memory, false} -> - SPids; - {conserve_resources, _, _} -> - %% Ignore other alerts. - wait_for_resources(Ref, SPids); - {cancel, Ref} -> - %% We don't tell the mirrors we will die - so when we do - %% they interpret that as a failure, which is what we - %% want. - cancel; - {'DOWN', _, process, SPid, _} -> - credit_flow:peer_down(SPid), - SPids1 = wait_for_credit(lists:delete(SPid, SPids)), - wait_for_resources(Ref, SPids1) - end. - -%% Syncer -%% --------------------------------------------------------------------------- -%% Slave - --spec slave(non_neg_integer(), reference(), timer:tref(), pid(), - bq(), bqs(), fun((bq(), bqs()) -> {timer:tref(), bqs()})) -> - 'denied' | - {'ok' | 'failed', slave_sync_state()} | - {'stop', any(), slave_sync_state()}. - -slave(0, Ref, _TRef, Syncer, _BQ, _BQS, _UpdateRamDuration) -> - Syncer ! {sync_deny, Ref, self()}, - denied; - -slave(_DD, Ref, TRef, Syncer, BQ, BQS, UpdateRamDuration) -> - MRef = erlang:monitor(process, Syncer), - Syncer ! {sync_ready, Ref, self()}, - {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)), - slave_sync_loop({Ref, MRef, Syncer, BQ, UpdateRamDuration, - rabbit_misc:get_parent()}, {[], TRef, BQS1}). - -slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent}, - State = {MA, TRef, BQS}) -> - receive - {'DOWN', MRef, process, Syncer, _Reason} -> - %% If the master dies half way we are not in the usual - %% half-synced state (with messages nearer the tail of the - %% queue); instead we have ones nearer the head. 
If we then - %% sync with a newly promoted master, or even just receive - %% messages from it, we have a hole in the middle. So the - %% only thing to do here is purge. - {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)), - credit_flow:peer_down(Syncer), - {failed, {[], TRef, BQS1}}; - {bump_credit, Msg} -> - credit_flow:handle_bump_msg(Msg), - slave_sync_loop(Args, State); - {sync_complete, Ref} -> - erlang:demonitor(MRef, [flush]), - credit_flow:peer_down(Syncer), - {ok, State}; - {'$gen_cast', {set_maximum_since_use, Age}} -> - ok = file_handle_cache:set_maximum_since_use(Age), - slave_sync_loop(Args, State); - {'$gen_cast', {set_ram_duration_target, Duration}} -> - BQS1 = BQ:set_ram_duration_target(Duration, BQS), - slave_sync_loop(Args, {MA, TRef, BQS1}); - {'$gen_cast', {run_backing_queue, Mod, Fun}} -> - BQS1 = BQ:invoke(Mod, Fun, BQS), - slave_sync_loop(Args, {MA, TRef, BQS1}); - update_ram_duration -> - {TRef1, BQS1} = UpdateRamDuration(BQ, BQS), - slave_sync_loop(Args, {MA, TRef1, BQS1}); - {sync_msgs, Ref, Batch} -> - credit_flow:ack(Syncer), - {MA1, BQS1} = process_batch(Batch, MA, BQ, BQS), - slave_sync_loop(Args, {MA1, TRef, BQS1}); - {'EXIT', Parent, Reason} -> - {stop, Reason, State}; - %% If the master throws an exception - {'$gen_cast', {gm, {delete_and_terminate, Reason}}} -> - BQ:delete_and_terminate(Reason, BQS), - {stop, Reason, {[], TRef, undefined}} - end. - -%% We are partitioning messages by the Unacked element in the tuple. -%% when unacked = true, then it's a publish_delivered message, -%% otherwise it's a publish message. -%% -%% Note that we can't first partition the batch and then publish each -%% part, since that would result in re-ordering messages, which we -%% don't want to do. -process_batch([], MA, _BQ, BQS) -> - {MA, BQS}; -process_batch(Batch, MA, BQ, BQS) -> - {_Msg, _MsgProps, Unacked} = hd(Batch), - process_batch(Batch, Unacked, [], MA, BQ, BQS). - -process_batch([{Msg, Props, true = Unacked} | Rest], true = Unacked, - Acc, MA, BQ, BQS) -> - %% publish_delivered messages don't need the IsDelivered flag, - %% therefore we just add {Msg, Props} to the accumulator. - process_batch(Rest, Unacked, [{Msg, props(Props)} | Acc], - MA, BQ, BQS); -process_batch([{Msg, Props, false = Unacked} | Rest], false = Unacked, - Acc, MA, BQ, BQS) -> - %% publish messages needs the IsDelivered flag which is set to true - %% here. - process_batch(Rest, Unacked, [{Msg, props(Props), true} | Acc], - MA, BQ, BQS); -process_batch(Batch, Unacked, Acc, MA, BQ, BQS) -> - {MA1, BQS1} = publish_batch(Unacked, lists:reverse(Acc), MA, BQ, BQS), - process_batch(Batch, MA1, BQ, BQS1). - -%% Unacked msgs are published via batch_publish. -publish_batch(false, Batch, MA, BQ, BQS) -> - batch_publish(Batch, MA, BQ, BQS); -%% Acked msgs are published via batch_publish_delivered. -publish_batch(true, Batch, MA, BQ, BQS) -> - batch_publish_delivered(Batch, MA, BQ, BQS). - - -batch_publish(Batch, MA, BQ, BQS) -> - BQS1 = BQ:batch_publish(Batch, none, noflow, BQS), - {MA, BQS1}. - -batch_publish_delivered(Batch, MA, BQ, BQS) -> - {AckTags, BQS1} = BQ:batch_publish_delivered(Batch, none, noflow, BQS), - MA1 = BQ:zip_msgs_and_acks(Batch, AckTags, MA, BQS1), - {MA1, BQS1}. - -props(Props) -> - Props#message_properties{needs_confirming = false}. diff --git a/deps/rabbit/src/rabbit_mnesia.erl b/deps/rabbit/src/rabbit_mnesia.erl index 1f37c3aa6215..0aa4ae5360b5 100644 --- a/deps/rabbit/src/rabbit_mnesia.erl +++ b/deps/rabbit/src/rabbit_mnesia.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mnesia). --include_lib("kernel/include/logger.hrl"). - -include_lib("rabbit_common/include/logging.hrl"). -export([%% Main interface @@ -17,12 +15,13 @@ join_cluster/2, reset/0, force_reset/0, - update_cluster_nodes/1, change_cluster_node_type/1, forget_cluster_node/2, force_load_next_boot/0, %% Various queries to get the status of the db + %% %% FIXME: Comment below not true anymore. + %% status/0, is_running/0, is_clustered/0, @@ -35,12 +34,15 @@ dir/0, cluster_status_from_mnesia/0, - %% Operations on the db and utils, mainly used in `rabbit_mnesia_rename' and `rabbit' + %% Operations on the db and utils, mainly used in `rabbit' and Mnesia-era modules + %% (some of which may now be gone) init_db_unchecked/2, copy_db/1, check_mnesia_consistency/1, check_cluster_consistency/0, ensure_mnesia_dir/0, + ensure_mnesia_running/0, + ensure_node_type_is_permitted/1, %% Hooks used in `rabbit_node_monitor' on_node_up/1, @@ -49,7 +51,12 @@ %% Helpers for diagnostics commands schema_info/1, - reset_gracefully/0 + start_mnesia/1, + stop_mnesia/0, + + reset_gracefully/0, + + e/1 ]). %% Mnesia queries @@ -68,6 +75,11 @@ %% Used internally in `rabbit_db_cluster'. -export([members/0]). +%% Used internally in `rabbit_khepri'. +-export([mnesia_and_msg_store_files/0]). + +-export([check_reset_gracefully/0]). + -deprecated({on_running_node, 1, "Use rabbit_process:on_running_node/1 instead"}). -deprecated({is_process_alive, 1, @@ -100,13 +112,13 @@ init() -> ensure_mnesia_running(), ensure_mnesia_dir(), + %% Peer discovery may have been a no-op if it decided that all other nodes + %% should join this one. Therefore, we need to look at if this node is + %% still virgin and finish our init of Mnesia accordingly. In particular, + %% this second part creates all our Mnesia tables. case is_virgin_node() of - true -> - rabbit_log:info("Node database directory at ~ts is empty. " - "Assuming we need to join an existing cluster or initialise from scratch...", - [dir()]), - rabbit_peer_discovery:maybe_create_cluster( - fun create_cluster_callback/2); + true -> + init_db_and_upgrade([node()], disc, true, _Retry = true); false -> NodeType = node_type(), case is_node_type_permitted(NodeType) of @@ -127,23 +139,6 @@ init() -> ok = rabbit_node_monitor:global_sync(), ok. -create_cluster_callback(none, NodeType) -> - DiscNodes = [node()], - NodeType1 = case is_node_type_permitted(NodeType) of - false -> disc; - true -> NodeType - end, - init_db_and_upgrade(DiscNodes, NodeType1, true, _Retry = true), - ok; -create_cluster_callback(RemoteNode, NodeType) -> - {ok, {_, DiscNodes, _}} = discover_cluster0(RemoteNode), - NodeType1 = case is_node_type_permitted(NodeType) of - false -> disc; - true -> NodeType - end, - init_db_and_upgrade(DiscNodes, NodeType1, true, _Retry = true), - ok. - %% Make the node join a cluster. The node will be reset automatically %% before we actually cluster it. The nodes provided will be used to %% find out about the nodes in the cluster. @@ -163,7 +158,6 @@ create_cluster_callback(RemoteNode, NodeType) -> -> {ok, [node()]} | {ok, already_member} | {error, {inconsistent_cluster, string()}}. 
can_join_cluster(DiscoveryNode) -> - ensure_mnesia_not_running(), ensure_mnesia_dir(), case is_only_clustered_disc_node() of true -> e(clustering_only_disc_node); @@ -211,6 +205,7 @@ join_cluster(ClusterNodes, NodeType) when is_list(ClusterNodes) -> join_cluster(DiscoveryNode, NodeType) when is_atom(DiscoveryNode) -> %% Code to remain compatible with `change_cluster_node_type/1' and older %% CLI. + ensure_mnesia_not_running(), case can_join_cluster(DiscoveryNode) of {ok, ClusterNodes} when is_list(ClusterNodes) -> ok = reset_gracefully(), @@ -230,7 +225,6 @@ join_cluster(DiscoveryNode, NodeType) when is_atom(DiscoveryNode) -> reset() -> ensure_mnesia_not_running(), - rabbit_log:info("Resetting Rabbit", []), reset_gracefully(). -spec force_reset() -> 'ok'. @@ -247,14 +241,17 @@ reset_gracefully() -> %% Force=true here so that reset still works when clustered with a %% node which is down. init_db_with_mnesia(AllNodes, node_type(), false, false, _Retry = false), - case is_only_clustered_disc_node() of - true -> e(resetting_only_disc_node); - false -> ok - end, + check_reset_gracefully(), leave_cluster(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), wipe(). +check_reset_gracefully() -> + case is_only_clustered_disc_node() of + true -> e(resetting_only_disc_node); + false -> ok + end. + wipe() -> %% We need to make sure that we don't end up in a distributed %% Erlang system with nodes while not being in an Mnesia cluster @@ -285,27 +282,6 @@ change_cluster_node_type(Type) -> ok = reset(), ok = join_cluster(Node, Type). --spec update_cluster_nodes(node()) -> 'ok'. - -update_cluster_nodes(DiscoveryNode) -> - ensure_mnesia_not_running(), - ensure_mnesia_dir(), - Status = {AllNodes, _, _} = discover_cluster([DiscoveryNode]), - case rabbit_nodes:me_in_nodes(AllNodes) of - true -> - %% As in `check_consistency/0', we can safely delete the - %% schema here, since it'll be replicated from the other - %% nodes - _ = mnesia:delete_schema([node()]), - rabbit_node_monitor:write_cluster_status(Status), - rabbit_log:info("Updating cluster nodes from ~tp", - [DiscoveryNode]), - init_db_with_mnesia(AllNodes, node_type(), true, true, _Retry = false); - false -> - e(inconsistent_cluster) - end, - ok. - %% We proceed like this: try to remove the node locally. If the node %% is offline, we remove the node if: %% * This node is a disc node @@ -317,9 +293,6 @@ update_cluster_nodes(DiscoveryNode) -> -spec forget_cluster_node(node(), boolean()) -> 'ok'. forget_cluster_node(Node, RemoveWhenOffline) -> - forget_cluster_node(Node, RemoveWhenOffline, true). - -forget_cluster_node(Node, RemoveWhenOffline, EmitNodeDeletedEvent) -> case lists:member(Node, cluster_nodes(all)) of true -> ok; false -> e(not_a_cluster_node) @@ -331,9 +304,6 @@ forget_cluster_node(Node, RemoveWhenOffline, EmitNodeDeletedEvent) -> {false, true} -> rabbit_log:info( "Removing node ~tp from cluster", [Node]), case remove_node_if_mnesia_running(Node) of - ok when EmitNodeDeletedEvent -> - rabbit_event:notify(node_deleted, [{node, Node}]), - ok; ok -> ok; {error, _} = Err -> throw(Err) end @@ -357,7 +327,7 @@ remove_node_offline_node(Node) -> %% We skip the 'node_deleted' event because the %% application is stopped and thus, rabbit_event is not %% enabled. - forget_cluster_node(Node, false, false), + forget_cluster_node(Node, false), force_load_next_boot() after stop_mnesia() @@ -484,7 +454,7 @@ cluster_status(WhichNodes) -> end. 
members() -> - case rabbit_mnesia:is_running() andalso rabbit_table:is_present() of + case is_running() andalso rabbit_table:is_present() of true -> %% If Mnesia is running locally and some tables exist, we can know %% the database was initialized and we can query the list of @@ -841,12 +811,8 @@ execute_mnesia_transaction(TxFun) -> Res = mnesia:sync_transaction(TxFun), DiskLogAfter = mnesia_dumper:get_log_writes(), case DiskLogAfter == DiskLogBefore of - true -> file_handle_cache_stats:update( - mnesia_ram_tx), - Res; - false -> file_handle_cache_stats:update( - mnesia_disk_tx), - {sync, Res} + true -> Res; + false -> {sync, Res} end; true -> mnesia:sync_transaction(TxFun) end @@ -893,6 +859,9 @@ discover_cluster0(Node) -> %% We only care about disc nodes since ram nodes are supposed to catch %% up only create_schema() -> + %% Assert we are not supposed to use Khepri. + false = rabbit_khepri:is_enabled(), + stop_mnesia(), rabbit_log:debug("Will bootstrap a schema database..."), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema), @@ -917,8 +886,8 @@ remove_node_if_mnesia_running(Node) -> %% change being propagated to all nodes case mnesia:del_table_copy(schema, Node) of {atomic, ok} -> - rabbit_amqqueue:forget_all_durable(Node), rabbit_node_monitor:notify_left_cluster(Node), + rabbit_amqqueue:forget_all_durable(Node), ok; {aborted, Reason} -> {error, {failed_to_remove_node, Node, Reason}} @@ -1046,11 +1015,14 @@ with_running_or_clean_mnesia(Fun) -> %% exception of certain files and directories, which can be there very early %% on node boot. is_virgin_node() -> + mnesia_and_msg_store_files() =:= []. + +mnesia_and_msg_store_files() -> case rabbit_file:list_dir(dir()) of {error, enoent} -> - true; + []; {ok, []} -> - true; + []; {ok, List0} -> IgnoredFiles0 = [rabbit_node_monitor:cluster_status_filename(), @@ -1058,14 +1030,16 @@ is_virgin_node() -> rabbit_node_monitor:coordination_filename(), rabbit_node_monitor:stream_filename(), rabbit_node_monitor:default_quorum_filename(), + rabbit_node_monitor:classic_filename(), rabbit_node_monitor:quorum_filename(), - rabbit_feature_flags:enabled_feature_flags_list_file()], + rabbit_feature_flags:enabled_feature_flags_list_file(), + rabbit_khepri:dir()], IgnoredFiles = [filename:basename(File) || File <- IgnoredFiles0], rabbit_log:debug("Files and directories found in node's data directory: ~ts, of them to be ignored: ~ts", [string:join(lists:usort(List0), ", "), string:join(lists:usort(IgnoredFiles), ", ")]), List = List0 -- IgnoredFiles, rabbit_log:debug("Files and directories found in node's data directory sans ignored ones: ~ts", [string:join(lists:usort(List), ", ")]), - List =:= [] + List end. is_only_clustered_disc_node() -> @@ -1081,16 +1055,14 @@ e(Tag) -> throw({error, {Tag, error_description(Tag)}}). error_description(clustering_only_disc_node) -> "You cannot cluster a node if it is the only disc node in its existing " - " cluster. If new nodes joined while this node was offline, use " - "'update_cluster_nodes' to add them manually."; + " cluster."; error_description(resetting_only_disc_node) -> "You cannot reset a node when it is the only disc node in a cluster. " "Please convert another node of the cluster to a disc node first."; error_description(not_clustered) -> "Non-clustered nodes can only be disc nodes."; error_description(no_online_cluster_nodes) -> - "Could not find any online cluster nodes. 
If the cluster has changed, " - "you can use the 'update_cluster_nodes' command."; + "Could not find any online cluster nodes."; error_description(inconsistent_cluster) -> "The nodes provided do not have this node as part of the cluster."; error_description(not_a_cluster_node) -> @@ -1109,6 +1081,6 @@ error_description(no_running_cluster_nodes) -> "You cannot leave a cluster if no online nodes are present.". format_inconsistent_cluster_message(Thinker, Dissident) -> - rabbit_misc:format("Node ~tp thinks it's clustered " + rabbit_misc:format("Mnesia: node ~tp thinks it's clustered " "with node ~tp, but ~tp disagrees", [Thinker, Dissident, Dissident]). diff --git a/deps/rabbit/src/rabbit_mnesia_rename.erl b/deps/rabbit/src/rabbit_mnesia_rename.erl deleted file mode 100644 index da9e5453b695..000000000000 --- a/deps/rabbit/src/rabbit_mnesia_rename.erl +++ /dev/null @@ -1,301 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_mnesia_rename). --include_lib("rabbit_common/include/rabbit.hrl"). - --export([rename/2]). --export([maybe_finish/0, maybe_finish/1]). - --define(CONVERT_TABLES, [schema, rabbit_durable_queue]). - -%% Supports renaming the nodes in the Mnesia database. In order to do -%% this, we take a backup of the database, traverse the backup -%% changing node names and pids as we go, then restore it. -%% -%% That's enough for a standalone node, for clusters the story is more -%% complex. We can take pairs of nodes From and To, but backing up and -%% restoring the database changes schema cookies, so if we just do -%% this on all nodes the cluster will refuse to re-form with -%% "Incompatible schema cookies.". Therefore we do something similar -%% to what we do for upgrades - the first node in the cluster to -%% restart becomes the authority, and other nodes wipe their own -%% Mnesia state and rejoin. They also need to tell Mnesia the old node -%% is not coming back. -%% -%% If we are renaming nodes one at a time then the running cluster -%% might not be aware that a rename has taken place, so after we wipe -%% and rejoin we then update any tables (in practice just -%% rabbit_durable_queue) which should be aware that we have changed. - -%%---------------------------------------------------------------------------- - --spec rename(node(), [{node(), node()}]) -> 'ok'. - -rename(Node, NodeMapList) -> - try - %% Check everything is correct and figure out what we are - %% changing from and to. - {FromNode, ToNode, NodeMap} = prepare(Node, NodeMapList), - - %% We backup and restore Mnesia even if other nodes are - %% running at the time, and defer the final decision about - %% whether to use our mutated copy or rejoin the cluster until - %% we restart. That means we might be mutating our copy of the - %% database while the cluster is running. *Do not* contact the - %% cluster while this is happening, we are likely to get - %% confused. - application:set_env(kernel, dist_auto_connect, never), - - %% Take a copy we can restore from if we abandon the - %% rename. We don't restore from the "backup" since restoring - %% that changes schema cookies and might stop us rejoining the - %% cluster. 
- ok = rabbit_mnesia:copy_db(mnesia_copy_dir()), - - %% And make the actual changes - become(FromNode), - take_backup(before_backup_name()), - _ = convert_backup(NodeMap, before_backup_name(), after_backup_name()), - ok = rabbit_file:write_term_file(rename_config_name(), - [{FromNode, ToNode}]), - _ = convert_config_files(NodeMap), - become(ToNode), - restore_backup(after_backup_name()), - ok - after - stop_mnesia() - end. - -prepare(Node, NodeMapList) -> - %% If we have a previous rename and haven't started since, give up. - case rabbit_file:is_dir(dir()) of - true -> exit({rename_in_progress, - "Restart node under old name to roll back"}); - false -> ok = rabbit_file:ensure_dir(mnesia_copy_dir()) - end, - - %% Check we don't have two nodes mapped to the same node - {FromNodes, ToNodes} = lists:unzip(NodeMapList), - case length(FromNodes) - length(lists:usort(ToNodes)) of - 0 -> ok; - _ -> exit({duplicate_node, ToNodes}) - end, - - %% Figure out which node we are before and after the change - FromNode = case [From || {From, To} <- NodeMapList, - To =:= Node] of - [N] -> N; - [] -> Node - end, - NodeMap = dict:from_list(NodeMapList), - ToNode = case dict:find(FromNode, NodeMap) of - {ok, N2} -> N2; - error -> FromNode - end, - - %% Check that we are in the cluster, all old nodes are in the - %% cluster, and no new nodes are. - Nodes = rabbit_nodes:list_members(), - case {FromNodes -- Nodes, ToNodes -- (ToNodes -- Nodes), - lists:member(Node, Nodes ++ ToNodes)} of - {[], [], true} -> ok; - {[], [], false} -> exit({i_am_not_involved, Node}); - {F, [], _} -> exit({nodes_not_in_cluster, F}); - {_, T, _} -> exit({nodes_already_in_cluster, T}) - end, - {FromNode, ToNode, NodeMap}. - -take_backup(Backup) -> - start_mnesia(), - %% We backup only local tables: in particular, this excludes the - %% connection tracking tables which have no local replica. - LocalTables = mnesia:system_info(local_tables), - {ok, Name, _Nodes} = mnesia:activate_checkpoint([ - {max, LocalTables} - ]), - ok = mnesia:backup_checkpoint(Name, Backup), - stop_mnesia(). - -restore_backup(Backup) -> - ok = mnesia:install_fallback(Backup, [{scope, local}]), - start_mnesia(), - stop_mnesia(), - rabbit_mnesia:force_load_next_boot(). - --spec maybe_finish() -> ok. - -maybe_finish() -> - AllNodes = rabbit_nodes:list_members(), - maybe_finish(AllNodes). - --spec maybe_finish([node()]) -> 'ok'. - -maybe_finish(AllNodes) -> - case rabbit_file:read_term_file(rename_config_name()) of - {ok, [{FromNode, ToNode}]} -> finish(FromNode, ToNode, AllNodes); - _ -> ok - end. - -finish(FromNode, ToNode, AllNodes) -> - case node() of - ToNode -> - case rabbit_nodes:filter_running(AllNodes) of - [] -> finish_primary(FromNode, ToNode); - _ -> finish_secondary(FromNode, ToNode, AllNodes) - end; - FromNode -> - rabbit_log:info( - "Abandoning rename from ~ts to ~ts since we are still ~ts", - [FromNode, ToNode, FromNode]), - _ = [{ok, _} = file:copy(backup_of_conf(F), F) || F <- config_files()], - ok = rabbit_file:recursive_delete([rabbit_mnesia:dir()]), - ok = rabbit_file:recursive_copy( - mnesia_copy_dir(), rabbit_mnesia:dir()), - delete_rename_files(); - _ -> - %% Boot will almost certainly fail but we might as - %% well just log this - rabbit_log:info( - "Rename attempted from ~ts to ~ts but we are ~ts - ignoring.", - [FromNode, ToNode, node()]) - end. - -finish_primary(FromNode, ToNode) -> - rabbit_log:info("Restarting as primary after rename from ~ts to ~ts", - [FromNode, ToNode]), - delete_rename_files(), - ok. 
- -finish_secondary(FromNode, ToNode, AllNodes) -> - rabbit_log:info("Restarting as secondary after rename from ~ts to ~ts", - [FromNode, ToNode]), - %% Renaming a node was partially handled by `rabbit_upgrade', the old - %% upgrade mechanism used before we introduced feature flags. The - %% following lines until `init_db_unchecked()' included were part of - %% `rabbit_upgrade:secondary_upgrade(AllNodes)'. - NodeType = node_type_legacy(), - rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), - cannot_delete_schema), - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = rabbit_mnesia:init_db_unchecked(AllNodes, NodeType), - rename_in_running_mnesia(FromNode, ToNode), - delete_rename_files(), - ok. - -node_type_legacy() -> - %% This is pretty ugly but we can't start Mnesia and ask it (will - %% hang), we can't look at the config file (may not include us - %% even if we're a disc node). We also can't use - %% rabbit_mnesia:node_type/0 because that will give false - %% positives on Rabbit up to 2.5.1. - case filelib:is_regular(filename:join(rabbit_mnesia:dir(), "rabbit_durable_exchange.DCD")) of - true -> disc; - false -> ram - end. - -dir() -> rabbit_mnesia:dir() ++ "-rename". -before_backup_name() -> dir() ++ "/backup-before". -after_backup_name() -> dir() ++ "/backup-after". -rename_config_name() -> dir() ++ "/pending.config". -mnesia_copy_dir() -> dir() ++ "/mnesia-copy". - -delete_rename_files() -> ok = rabbit_file:recursive_delete([dir()]). - -start_mnesia() -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - rabbit_table:force_load(), - rabbit_table:wait_for_replicated(_Retry = false). -stop_mnesia() -> stopped = mnesia:stop(). - -convert_backup(NodeMap, FromBackup, ToBackup) -> - mnesia:traverse_backup( - FromBackup, ToBackup, - fun - (Row, Acc) -> - case lists:member(element(1, Row), ?CONVERT_TABLES) of - true -> {[update_term(NodeMap, Row)], Acc}; - false -> {[Row], Acc} - end - end, switched). - -config_files() -> - [rabbit_node_monitor:running_nodes_filename(), - rabbit_node_monitor:cluster_status_filename()]. - -backup_of_conf(Path) -> - filename:join([dir(), filename:basename(Path)]). - -convert_config_files(NodeMap) -> - [convert_config_file(NodeMap, Path) || Path <- config_files()]. - -convert_config_file(NodeMap, Path) -> - {ok, Term} = rabbit_file:read_term_file(Path), - {ok, _} = file:copy(Path, backup_of_conf(Path)), - ok = rabbit_file:write_term_file(Path, update_term(NodeMap, Term)). - -lookup_node(OldNode, NodeMap) -> - case dict:find(OldNode, NodeMap) of - {ok, NewNode} -> NewNode; - error -> OldNode - end. - -mini_map(FromNode, ToNode) -> dict:from_list([{FromNode, ToNode}]). - -update_term(NodeMap, L) when is_list(L) -> - [update_term(NodeMap, I) || I <- L]; -update_term(NodeMap, T) when is_tuple(T) -> - list_to_tuple(update_term(NodeMap, tuple_to_list(T))); -update_term(NodeMap, Node) when is_atom(Node) -> - lookup_node(Node, NodeMap); -update_term(NodeMap, Pid) when is_pid(Pid) -> - rabbit_misc:pid_change_node(Pid, lookup_node(node(Pid), NodeMap)); -update_term(_NodeMap, Term) -> - Term. 
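For illustration only (not part of the patch): the removed rename support above works by traversing a Mnesia backup and rewriting node atoms (and pids) via update_term/2. A reduced, standalone sketch of that traversal, using dict as the removed code did and omitting the pid clause; the module name and sample node names are hypothetical.

-module(rename_term_sketch).
-export([demo/0]).

lookup_node(Old, NodeMap) ->
    case dict:find(Old, NodeMap) of
        {ok, New} -> New;
        error     -> Old
    end.

%% Walk lists and tuples, mapping any atom found through the node map.
update_term(NodeMap, L) when is_list(L) ->
    [update_term(NodeMap, I) || I <- L];
update_term(NodeMap, T) when is_tuple(T) ->
    list_to_tuple(update_term(NodeMap, tuple_to_list(T)));
update_term(NodeMap, Node) when is_atom(Node) ->
    lookup_node(Node, NodeMap);
update_term(_NodeMap, Term) ->
    Term.

demo() ->
    NodeMap = dict:from_list([{'rabbit@old-host', 'rabbit@new-host'}]),
    %% A made-up record-like tuple mentioning the old node name.
    update_term(NodeMap, {amqqueue, <<"q1">>, 'rabbit@old-host', []}).
    %% => {amqqueue, <<"q1">>, 'rabbit@new-host', []}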
- -rename_in_running_mnesia(FromNode, ToNode) -> - All = rabbit_nodes:list_members(), - Running = rabbit_mnesia:cluster_nodes(running), - case {lists:member(FromNode, Running), lists:member(ToNode, All)} of - {false, true} -> ok; - {true, _} -> exit({old_node_running, FromNode}); - {_, false} -> exit({new_node_not_in_cluster, ToNode}) - end, - {atomic, ok} = mnesia:del_table_copy(schema, FromNode), - Map = mini_map(FromNode, ToNode), - {atomic, _} = transform_table(rabbit_durable_queue, Map), - ok. - -transform_table(Table, Map) -> - mnesia:sync_transaction( - fun () -> - _ = mnesia:lock({table, Table}, write), - transform_table(Table, Map, mnesia:first(Table)) - end). - -transform_table(_Table, _Map, '$end_of_table') -> - ok; -transform_table(Table, Map, Key) -> - [Term] = mnesia:read(Table, Key, write), - ok = mnesia:write(Table, update_term(Map, Term), write), - transform_table(Table, Map, mnesia:next(Table, Key)). - -become(BecomeNode) -> - error_logger:tty(false), - case net_adm:ping(BecomeNode) of - pong -> exit({node_running, BecomeNode}); - pang -> ok = net_kernel:stop(), - io:format(" * Impersonating node: ~ts...", [BecomeNode]), - {ok, _} = start_distribution(BecomeNode), - io:format(" done~n", []), - Dir = mnesia:system_info(directory), - io:format(" * Mnesia directory : ~ts~n", [Dir]) - end. - -start_distribution(Name) -> - rabbit_nodes:ensure_epmd(), - NameType = rabbit_nodes_common:name_type(Name), - net_kernel:start([Name, NameType]). diff --git a/deps/rabbit/src/rabbit_msg_file.erl b/deps/rabbit/src/rabbit_msg_file.erl deleted file mode 100644 index c8223a2d5e37..000000000000 --- a/deps/rabbit/src/rabbit_msg_file.erl +++ /dev/null @@ -1,81 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_msg_file). - --export([scan/4]). - -%%---------------------------------------------------------------------------- - --include_lib("rabbit_common/include/rabbit_msg_store.hrl"). - --define(INTEGER_SIZE_BYTES, 8). --define(INTEGER_SIZE_BITS, (8 * ?INTEGER_SIZE_BYTES)). --define(WRITE_OK_SIZE_BITS, 8). --define(WRITE_OK_MARKER, 255). --define(FILE_PACKING_ADJUSTMENT, (1 + ?INTEGER_SIZE_BYTES)). --define(MSG_ID_SIZE_BYTES, 16). --define(MSG_ID_SIZE_BITS, (8 * ?MSG_ID_SIZE_BYTES)). --define(SCAN_BLOCK_SIZE, 4194304). %% 4MB - -%%---------------------------------------------------------------------------- - --type io_device() :: any(). --type position() :: non_neg_integer(). --type msg_size() :: non_neg_integer(). --type file_size() :: non_neg_integer(). --type message_accumulator(A) :: - fun (({rabbit_types:msg_id(), msg_size(), position(), binary()}, A) -> - A). - -%%---------------------------------------------------------------------------- - --spec scan(io_device(), file_size(), message_accumulator(A), A) -> - {'ok', A, position()}. - -scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, 0, Fun, Acc). 
- -scan(_FileHdl, FileSize, _Data, FileSize, ScanOffset, _Fun, Acc) -> - {ok, Acc, ScanOffset}; -scan(FileHdl, FileSize, Data, ReadOffset, ScanOffset, Fun, Acc) -> - Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]), - case file_handle_cache:read(FileHdl, Read) of - {ok, Data1} -> - {Data2, Acc1, ScanOffset1} = - scanner(<>, ScanOffset, Fun, Acc), - ReadOffset1 = ReadOffset + size(Data1), - scan(FileHdl, FileSize, Data2, ReadOffset1, ScanOffset1, Fun, Acc1); - _KO -> - {ok, Acc, ScanOffset} - end. - -scanner(<<>>, Offset, _Fun, Acc) -> - {<<>>, Acc, Offset}; -scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) -> - {<<>>, Acc, Offset}; %% Nothing to do other than stop. -scanner(<>, Offset, Fun, Acc) -> - TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, - case WriteMarker of - ?WRITE_OK_MARKER -> - %% Here we take option 5 from - %% https://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in - %% which we read the MsgId as a number, and then convert it - %% back to a binary in order to work around bugs in - %% Erlang's GC. - <> = - <>, - <> = - <>, - scanner(Rest, Offset + TotalSize, Fun, - Fun({MsgId, TotalSize, Offset, Msg}, Acc)); - _ -> - scanner(Rest, Offset + TotalSize, Fun, Acc) - end; -scanner(Data, Offset, _Fun, Acc) -> - {Data, Acc, Offset}. diff --git a/deps/rabbit/src/rabbit_msg_record.erl b/deps/rabbit/src/rabbit_msg_record.erl deleted file mode 100644 index 568973034178..000000000000 --- a/deps/rabbit/src/rabbit_msg_record.erl +++ /dev/null @@ -1,527 +0,0 @@ --module(rabbit_msg_record). - --export([ - init/1, - to_iodata/1, - from_amqp091/2, - to_amqp091/1, - add_message_annotations/2, - message_annotation/2, - message_annotation/3, - from_091/2, - to_091/2 - ]). - --include_lib("rabbit_common/include/rabbit_framing.hrl"). --include_lib("amqp10_common/include/amqp10_framing.hrl"). - --type maybe(T) :: T | undefined. --type amqp10_data() :: #'v1_0.data'{} | - [#'v1_0.amqp_sequence'{} | #'v1_0.data'{}] | - #'v1_0.amqp_value'{}. --record(msg, - { - % header :: maybe(#'v1_0.header'{}), - % delivery_annotations :: maybe(#'v1_0.delivery_annotations'{}), - message_annotations :: maybe(#'v1_0.message_annotations'{}) | iodata(), - properties :: maybe(#'v1_0.properties'{}) | iodata(), - application_properties :: maybe(#'v1_0.application_properties'{}) | iodata(), - data :: maybe(amqp10_data()) | iodata() - % footer :: maybe(#'v1_0.footer'{}) - }). - -%% holds static or rarely changing fields --record(cfg, {}). --record(?MODULE, {cfg :: #cfg{}, - msg :: #msg{}}). - --opaque state() :: #?MODULE{}. - --export_type([ - state/0 - ]). - --define(AMQP10_TYPE, <<"amqp-1.0">>). --define(AMQP10_PROPERTIES_HEADER, <<"x-amqp-1.0-properties">>). --define(AMQP10_APP_PROPERTIES_HEADER, <<"x-amqp-1.0-app-properties">>). --define(AMQP10_MESSAGE_ANNOTATIONS_HEADER, <<"x-amqp-1.0-message-annotations">>). - -%% this module acts as a wrapper / converter for the internal binary storage format -%% (AMQP 1.0) and any format it needs to be converted to / from. -%% Efficiency is key. No unnecessary allocations or work should be done until it -%% is absolutely needed - -%% init from an AMQP 1.0 encoded binary --spec init(binary()) -> state(). 
-init(Bin) when is_binary(Bin) -> - %% TODO: delay parsing until needed - {MA, P, AP, D0} = decode(amqp10_framing:decode_bin(Bin), - {undefined, undefined, undefined, undefined}), - - D1 = case D0 of - Sections when is_list(D0) -> - lists:reverse(Sections); - _ -> - D0 - end, - - #?MODULE{cfg = #cfg{}, - msg = #msg{properties = P, - application_properties = AP, - message_annotations = MA, - data = D1}}. - -decode([], Acc) -> - Acc; -decode([#'v1_0.message_annotations'{} = MA | Rem], {_, P, AP, D}) -> - decode(Rem, {MA, P, AP, D}); -decode([#'v1_0.properties'{} = P | Rem], {MA, _, AP, D}) -> - decode(Rem, {MA, P, AP, D}); -decode([#'v1_0.application_properties'{} = AP | Rem], {MA, P, _, D}) -> - decode(Rem, {MA, P, AP, D}); -decode([#'v1_0.amqp_value'{} = D | Rem], {MA, P, AP, _}) -> - decode(Rem, {MA, P, AP, D}); -decode([#'v1_0.data'{} = D | Rem], {MA, P, AP, undefined}) -> - decode(Rem, {MA, P, AP, D}); -decode([#'v1_0.data'{} = D | Rem], {MA, P, AP, B}) when is_list(B) -> - decode(Rem, {MA, P, AP, [D | B]}); -decode([#'v1_0.data'{} = D | Rem], {MA, P, AP, B}) -> - decode(Rem, {MA, P, AP, [D, B]}); -decode([#'v1_0.amqp_sequence'{} = D | Rem], {MA, P, AP, undefined}) -> - decode(Rem, {MA, P, AP, [D]}); -decode([#'v1_0.amqp_sequence'{} = D | Rem], {MA, P, AP, B}) when is_list(B) -> - decode(Rem, {MA, P, AP, [D | B]}). - - -amqp10_properties_empty(#'v1_0.properties'{message_id = undefined, - user_id = undefined, - to = undefined, - reply_to = undefined, - correlation_id = undefined, - content_type = undefined, - content_encoding = undefined, - creation_time = undefined}) -> - true; -amqp10_properties_empty(_) -> - false. - -%% to realise the final binary data representation --spec to_iodata(state()) -> iodata(). -to_iodata(#?MODULE{msg = #msg{properties = P, - application_properties = AP, - message_annotations = MA, - data = Data}}) -> - [ - case MA of - #'v1_0.message_annotations'{content = []} -> - <<>>; - #'v1_0.message_annotations'{} -> - amqp10_framing:encode_bin(MA); - MsgAnnotBin -> - MsgAnnotBin - end, - case P of - #'v1_0.properties'{} -> - case amqp10_properties_empty(P) of - true -> <<>>; - false -> - amqp10_framing:encode_bin(P) - end; - PropsBin -> - PropsBin - end, - case AP of - #'v1_0.application_properties'{content = []} -> - <<>>; - #'v1_0.application_properties'{} -> - amqp10_framing:encode_bin(AP); - AppPropsBin -> - AppPropsBin - end, - case Data of - DataBin when is_binary(Data) orelse is_list(Data) -> - DataBin; - _ -> - amqp10_framing:encode_bin(Data) - end - ]. - -%% TODO: refine type spec here --spec add_message_annotations(#{binary() => {atom(), term()}}, state()) -> - state(). -add_message_annotations(Anns, - #?MODULE{msg = - #msg{message_annotations = undefined} = Msg} = State) -> - add_message_annotations(Anns, - State#?MODULE{msg = Msg#msg{message_annotations = - #'v1_0.message_annotations'{content = []}}}); -add_message_annotations(Anns, - #?MODULE{msg = - #msg{message_annotations = - #'v1_0.message_annotations'{content = C}} = Msg} = State) -> - Content = maps:fold( - fun (K, {T, V}, Acc) -> - map_add(symbol, K, T, V, Acc) - end, - C, - Anns), - - State#?MODULE{msg = - Msg#msg{message_annotations = - #'v1_0.message_annotations'{content = Content}}}; -add_message_annotations(Anns, - #?MODULE{msg = - #msg{message_annotations = MABin} = Msg} = State0) -> - [MA] = amqp10_framing:decode_bin(iolist_to_binary(MABin)), - State1 = State0#?MODULE{msg = - Msg#msg{message_annotations = MA}}, - add_message_annotations(Anns, State1). 
- -%% TODO: refine --type amqp10_term() :: {atom(), term()}. - --spec message_annotation(binary(), state()) -> undefined | amqp10_term(). -message_annotation(Key, State) -> - message_annotation(Key, State, undefined). - --spec message_annotation(binary(), state(), undefined | amqp10_term()) -> - undefined | amqp10_term(). -message_annotation(_Key, #?MODULE{msg = #msg{message_annotations = undefined}}, - Default) -> - Default; -message_annotation(Key, - #?MODULE{msg = - #msg{message_annotations = - #'v1_0.message_annotations'{content = Content}}}, - Default) - when is_binary(Key) -> - case lists:search(fun ({{symbol, K}, _}) -> K == Key end, Content) of - {value, {_K, V}} -> - V; - false -> - Default - end. - - -%% take a binary AMQP 1.0 input function, -%% parses it and returns the current parse state -%% this is the input function from storage and from, e.g. socket input --spec from_amqp091(#'P_basic'{}, iodata()) -> state(). -from_amqp091(#'P_basic'{type = T} = PB, Data) -> - MA = from_amqp091_to_amqp10_message_annotations(PB), - P = from_amqp091_to_amqp10_properties(PB), - AP = from_amqp091_to_amqp10_app_properties(PB), - - D = case T of - ?AMQP10_TYPE -> - %% the body is already AMQP 1.0 binary content, so leaving it as-is - Data; - _ -> - #'v1_0.data'{content = Data} - end, - - #?MODULE{cfg = #cfg{}, - msg = #msg{properties = P, - application_properties = AP, - message_annotations = MA, - data = D}}. - -from_amqp091_to_amqp10_properties(#'P_basic'{headers = Headers} = P) when is_list(Headers) -> - case proplists:lookup(?AMQP10_PROPERTIES_HEADER, Headers) of - none -> - convert_amqp091_to_amqp10_properties(P); - {_, _, PropsBin} -> - PropsBin - end; -from_amqp091_to_amqp10_properties(P) -> - convert_amqp091_to_amqp10_properties(P). - -convert_amqp091_to_amqp10_properties(#'P_basic'{message_id = MsgId, - user_id = UserId, - reply_to = ReplyTo, - correlation_id = CorrId, - content_type = ContentType, - content_encoding = ContentEncoding, - timestamp = Timestamp - }) -> - ConvertedTs = case Timestamp of - undefined -> - undefined; - _ -> - Timestamp * 1000 - end, - #'v1_0.properties'{message_id = wrap(utf8, MsgId), - user_id = wrap(binary, UserId), - to = undefined, - reply_to = wrap(utf8, ReplyTo), - correlation_id = wrap(utf8, CorrId), - content_type = wrap(symbol, ContentType), - content_encoding = wrap(symbol, ContentEncoding), - creation_time = wrap(timestamp, ConvertedTs)}. - -from_amqp091_to_amqp10_app_properties(#'P_basic'{headers = Headers} = P) - when is_list(Headers) -> - case proplists:lookup(?AMQP10_APP_PROPERTIES_HEADER, Headers) of - none -> - convert_amqp091_to_amqp10_app_properties(P); - {_, _, AppPropsBin} -> - AppPropsBin - end; -from_amqp091_to_amqp10_app_properties(P) -> - convert_amqp091_to_amqp10_app_properties(P). - -convert_amqp091_to_amqp10_app_properties(#'P_basic'{headers = Headers, - type = Type, - app_id = AppId}) -> - APC0 = [{wrap(utf8, K), from_091(T, V)} || {K, T, V} - <- case Headers of - undefined -> []; - _ -> Headers - end, not unsupported_header_value_type(T), - not filtered_header(K)], - - APC1 = case Type of - ?AMQP10_TYPE -> - %% no need to modify the application properties for the type - %% this info will be restored on decoding if necessary - APC0; - _ -> - map_add(utf8, <<"x-basic-type">>, utf8, Type, APC0) - end, - - %% properties that do not map directly to AMQP 1.0 properties are stored - %% in application properties - APC2 = map_add(utf8, <<"x-basic-app-id">>, utf8, AppId, APC1), - #'v1_0.application_properties'{content = APC2}. 
- -from_amqp091_to_amqp10_message_annotations(#'P_basic'{headers = Headers} = P) when is_list(Headers) -> - case proplists:lookup(?AMQP10_MESSAGE_ANNOTATIONS_HEADER, Headers) of - none -> - convert_amqp091_to_amqp10_message_annotations(P); - {_, _, MessageAnnotationsBin} -> - MessageAnnotationsBin - end; -from_amqp091_to_amqp10_message_annotations(P) -> - convert_amqp091_to_amqp10_message_annotations(P). - -convert_amqp091_to_amqp10_message_annotations(#'P_basic'{priority = Priority, - delivery_mode = DelMode, - expiration = Expiration}) -> - MAC = map_add(symbol, <<"x-basic-priority">>, ubyte, Priority, - map_add(symbol, <<"x-basic-delivery-mode">>, ubyte, DelMode, - map_add(symbol, <<"x-basic-expiration">>, utf8, Expiration, []))), - - #'v1_0.message_annotations'{content = MAC}. - -map_add(_T, _Key, _Type, undefined, Acc) -> - Acc; -map_add(KeyType, Key, Type, Value, Acc) -> - [{wrap(KeyType, Key), wrap(Type, Value)} | Acc]. - --spec to_amqp091(state()) -> {#'P_basic'{}, iodata()}. -to_amqp091(#?MODULE{msg = #msg{properties = P, - application_properties = APR, - message_annotations = MAR, - data = Data}}) -> - - %% anything else than a single data section is expected to be AMQP 1.0 binary content - %% enforcing this convention - {Payload, IsAmqp10} = case Data of - undefined -> - %% not an expected value, - %% but handling it with an empty binary anyway - {<<>>, false}; - #'v1_0.data'{content = C} -> - {C, false}; - Sections when is_list(Data)-> - B = [amqp10_framing:encode_bin(S) || S <- Sections], - {iolist_to_binary(B), - true}; - V -> - {iolist_to_binary(amqp10_framing:encode_bin(V)), true} - end, - - #'v1_0.properties'{message_id = MsgId, - user_id = UserId, - reply_to = ReplyTo0, - correlation_id = CorrId, - content_type = ContentType, - content_encoding = ContentEncoding, - creation_time = Timestamp} = case P of - undefined -> - #'v1_0.properties'{}; - _ -> - P - end, - - AP0 = case APR of - #'v1_0.application_properties'{content = AC} -> AC; - _ -> [] - end, - MA0 = case MAR of - #'v1_0.message_annotations'{content = MC} -> MC; - _ -> [] - end, - - {Type, AP1} = case {amqp10_map_get(utf8(<<"x-basic-type">>), AP0), IsAmqp10} of - {{undefined, M}, true} -> - {?AMQP10_TYPE, M}; - {{T, M}, _} -> - {T, M} - end, - {AppId, AP} = amqp10_map_get(utf8(<<"x-basic-app-id">>), AP1), - - {Priority, MA1} = amqp10_map_get(symbol(<<"x-basic-priority">>), MA0), - {DelMode, MA2} = amqp10_map_get(symbol(<<"x-basic-delivery-mode">>), MA1), - {Expiration, _MA} = amqp10_map_get(symbol(<<"x-basic-expiration">>), MA2), - - Headers0 = [to_091(unwrap(K), V) || {K, V} <- AP], - {Headers1, MsgId091} = message_id(MsgId, <<"x-message-id-type">>, Headers0), - {Headers, CorrId091} = message_id(CorrId, <<"x-correlation-id-type">>, Headers1), - - BP = #'P_basic'{message_id = MsgId091, - delivery_mode = DelMode, - expiration = Expiration, - user_id = unwrap(UserId), - headers = case Headers of - [] -> undefined; - _ -> Headers - end, - reply_to = unwrap(ReplyTo0), - type = Type, - app_id = AppId, - priority = Priority, - correlation_id = CorrId091, - content_type = unwrap(ContentType), - content_encoding = unwrap(ContentEncoding), - timestamp = case unwrap(Timestamp) of - undefined -> - undefined; - Ts -> - Ts div 1000 - end - }, - {BP, Payload}. - -%%% Internal - -amqp10_map_get(K, AP0) -> - case lists:keytake(K, 1, AP0) of - false -> - {undefined, AP0}; - {value, {_, V}, AP} -> - {unwrap(V), AP} - end. - -wrap(_Type, undefined) -> - undefined; -wrap(Type, Val) -> - {Type, Val}. 
- -unwrap(undefined) -> - undefined; -unwrap({_Type, V}) -> - V. - -% symbol_for(#'v1_0.properties'{}) -> -% {symbol, <<"amqp:properties:list">>}; - -% number_for(#'v1_0.properties'{}) -> -% {ulong, 115}; -% encode(Frame = #'v1_0.properties'{}) -> -% amqp10_framing:encode_described(list, 115, Frame); - -% encode_described(list, CodeNumber, Frame) -> -% {described, {ulong, CodeNumber}, -% {list, lists:map(fun encode/1, tl(tuple_to_list(Frame)))}}; - -% -spec generate(amqp10_type()) -> iolist(). -% generate({described, Descriptor, Value}) -> -% DescBin = generate(Descriptor), -% ValueBin = generate(Value), -% [ ?DESCRIBED_BIN, DescBin, ValueBin ]. - -to_091(Key, {utf8, V}) when is_binary(V) -> {Key, longstr, V}; -to_091(Key, {long, V}) -> {Key, long, V}; -to_091(Key, {byte, V}) -> {Key, byte, V}; -to_091(Key, {ubyte, V}) -> {Key, unsignedbyte, V}; -to_091(Key, {short, V}) -> {Key, short, V}; -to_091(Key, {ushort, V}) -> {Key, unsignedshort, V}; -to_091(Key, {uint, V}) -> {Key, unsignedint, V}; -to_091(Key, {int, V}) -> {Key, signedint, V}; -to_091(Key, {double, V}) -> {Key, double, V}; -to_091(Key, {float, V}) -> {Key, float, V}; -%% NB: header values can never be shortstr! -to_091(Key, {timestamp, V}) -> {Key, timestamp, V div 1000}; -to_091(Key, {binary, V}) -> {Key, binary, V}; -to_091(Key, {boolean, V}) -> {Key, bool, V}; -to_091(Key, true) -> {Key, bool, true}; -to_091(Key, false) -> {Key, bool, false}; -%% TODO -to_091(Key, undefined) -> {Key, void, undefined}; -to_091(Key, null) -> {Key, void, undefined}. - -from_091(longstr, V) when is_binary(V) -> {utf8, V}; -from_091(long, V) -> {long, V}; -from_091(unsignedbyte, V) -> {ubyte, V}; -from_091(short, V) -> {short, V}; -from_091(unsignedshort, V) -> {ushort, V}; -from_091(unsignedint, V) -> {uint, V}; -from_091(signedint, V) -> {int, V}; -from_091(double, V) -> {double, V}; -from_091(float, V) -> {float, V}; -from_091(bool, V) -> {boolean, V}; -from_091(binary, V) -> {binary, V}; -from_091(timestamp, V) -> {timestamp, V * 1000}; -from_091(byte, V) -> {byte, V}; -from_091(void, _V) -> null. - -utf8(T) -> {utf8, T}. -symbol(T) -> {symbol, T}. - -message_id({uuid, UUID}, HKey, H0) -> - H = [{HKey, longstr, <<"uuid">>} | H0], - {H, rabbit_data_coercion:to_binary(rabbit_guid:to_string(UUID))}; -message_id({ulong, N}, HKey, H0) -> - H = [{HKey, longstr, <<"ulong">>} | H0], - {H, erlang:integer_to_binary(N)}; -message_id({binary, B}, HKey, H0) -> - E = base64:encode(B), - case byte_size(E) > 256 of - true -> - K = binary:replace(HKey, <<"-type">>, <<>>), - {[{K, longstr, B} | H0], undefined}; - false -> - H = [{HKey, longstr, <<"binary">>} | H0], - {H, E} - end; -message_id({utf8, S}, HKey, H0) -> - case byte_size(S) > 256 of - true -> - K = binary:replace(HKey, <<"-type">>, <<>>), - {[{K, longstr, S} | H0], undefined}; - false -> - {H0, S} - end; -message_id(MsgId, _, H) -> - {H, unwrap(MsgId)}. - -unsupported_header_value_type(array) -> - true; -unsupported_header_value_type(table) -> - true; -unsupported_header_value_type(_) -> - false. - -filtered_header(?AMQP10_PROPERTIES_HEADER) -> - true; -filtered_header(?AMQP10_APP_PROPERTIES_HEADER) -> - true; -filtered_header(?AMQP10_MESSAGE_ANNOTATIONS_HEADER) -> - true; -filtered_header(_) -> - false. - --ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). --endif. 
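For illustration only (not part of the patch): the removed rabbit_msg_record module above converted between the AMQP 0-9-1 #'P_basic'{} representation and internal AMQP 1.0 sections, mapping header value types with from_091/2 and back with to_091/2. A minimal sketch of that round trip, reduced to two of the clauses shown above; the module name and the sample header list are hypothetical.

-module(header_mapping_sketch).
-export([demo/0]).

%% Two of the type-mapping clauses from the removed module.
from_091(longstr, V) when is_binary(V) -> {utf8, V};
from_091(signedint, V)                 -> {int, V}.

to_091(Key, {utf8, V}) -> {Key, longstr, V};
to_091(Key, {int, V})  -> {Key, signedint, V}.

demo() ->
    %% Hypothetical 0-9-1 headers as {Key, Type, Value} tuples.
    Headers = [{<<"x-basic-app-id">>, longstr, <<"my-app">>},
               {<<"retries">>, signedint, 3}],
    %% Wrap keys as AMQP 1.0 utf8 and convert the values, as the removed
    %% convert_amqp091_to_amqp10_app_properties/1 did.
    AP = [{{utf8, K}, from_091(T, V)} || {K, T, V} <- Headers],
    %% And back to 0-9-1 header tuples, as to_amqp091/1 did.
    [to_091(K, V) || {{utf8, K}, V} <- AP].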
diff --git a/deps/rabbit/src/rabbit_msg_store.erl b/deps/rabbit/src/rabbit_msg_store.erl index a94e7173f81f..c5b02f6eb9c4 100644 --- a/deps/rabbit/src/rabbit_msg_store.erl +++ b/deps/rabbit/src/rabbit_msg_store.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_msg_store). @@ -24,23 +24,21 @@ %%---------------------------------------------------------------------------- --include_lib("rabbit_common/include/rabbit_msg_store.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). -%% We flush to disk when the write buffer gets above the max size, -%% or at an interval to make sure we don't keep the data in memory -%% too long. Confirms are sent after the data is flushed to disk. --define(HANDLE_CACHE_BUFFER_SIZE, 1048576). %% 1MB. --define(SYNC_INTERVAL, 200). %% Milliseconds. +-type(msg() :: any()). + +-record(msg_location, {msg_id, ref_count, file, offset, total_size}). + +%% We flush to disk at an interval to make sure we don't keep +%% the data in memory too long. Confirms are sent after the +%% data is flushed to disk. +-define(SYNC_INTERVAL, 200). %% Milliseconds. -define(CLEAN_FILENAME, "clean.dot"). -define(FILE_SUMMARY_FILENAME, "file_summary.ets"). --define(BINARY_MODE, [raw, binary]). --define(READ_MODE, [read]). --define(WRITE_MODE, [write]). - -define(FILE_EXTENSION, ".rdq"). --define(FILE_EXTENSION_TMP, ".rdt"). %% We keep track of flying messages for writes and removes. The idea is that %% when a remove comes in before we could process the write, we skip the @@ -73,11 +71,8 @@ { %% store directory dir :: file:filename(), - %% the module for index ops, - %% rabbit_msg_store_ets_index by default - index_module, - %% where are messages? - index_state, + %% index table + index_ets, %% current file name as number current_file, %% current file handle since the last fsync? @@ -119,8 +114,7 @@ { server, client_ref, reader, - index_state, - index_module, + index_ets, dir, file_handles_ets, cur_file_cache_ets, @@ -133,8 +127,7 @@ -record(gc_state, { dir, - index_module, - index_state, + index_ets, file_summary_ets, file_handles_ets, msg_store @@ -151,8 +144,7 @@ -export_type([gc_state/0, file_num/0]). -type gc_state() :: #gc_state { dir :: file:filename(), - index_module :: atom(), - index_state :: any(), + index_ets :: ets:tid(), file_summary_ets :: ets:tid(), file_handles_ets :: ets:tid(), msg_store :: server() @@ -165,8 +157,7 @@ server :: server(), client_ref :: client_ref(), reader :: undefined | {non_neg_integer(), file:fd()}, - index_state :: any(), - index_module :: atom(), + index_ets :: any(), %% Stored as binary() as opposed to file:filename() to save memory. dir :: binary(), file_handles_ets :: ets:tid(), @@ -410,7 +401,7 @@ successfully_recovered_state(Server) -> -spec client_init(server(), client_ref(), maybe_msg_id_fun()) -> client_msstate(). 
client_init(Server, Ref, MsgOnDiskFun) when is_pid(Server); is_atom(Server) -> - {IState, IModule, Dir, FileHandlesEts, CurFileCacheEts, FlyingEts} = + {IndexEts, Dir, FileHandlesEts, CurFileCacheEts, FlyingEts} = gen_server2:call( Server, {new_client_state, Ref, self(), MsgOnDiskFun}, infinity), @@ -419,8 +410,7 @@ client_init(Server, Ref, MsgOnDiskFun) when is_pid(Server); is_atom(Server) -> #client_msstate { server = Server, client_ref = Ref, reader = undefined, - index_state = IState, - index_module = IModule, + index_ets = IndexEts, dir = rabbit_file:filename_to_binary(Dir), file_handles_ets = FileHandlesEts, cur_file_cache_ets = CurFileCacheEts, @@ -471,14 +461,13 @@ write(MsgRef, MsgId, Msg, CState) -> client_write(MsgRef, MsgId, Msg, noflow, CS -spec read(rabbit_types:msg_id(), client_msstate()) -> {rabbit_types:ok(msg()) | 'not_found', client_msstate()}. -read(MsgId, - CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts }) -> - file_handle_cache_stats:update(msg_store_read), +read(MsgId, CState = #client_msstate { index_ets = IndexEts, + cur_file_cache_ets = CurFileCacheEts }) -> %% Check the cur file cache case ets:lookup(CurFileCacheEts, MsgId) of [] -> %% @todo It's probably a bug if we don't get a positive ref count. - case index_lookup_positive_ref_count(MsgId, CState) of + case index_lookup_positive_ref_count(IndexEts, MsgId) of not_found -> {not_found, CState}; MsgLocation -> client_read3(MsgLocation, CState) end; @@ -489,13 +478,7 @@ read(MsgId, -spec read_many([rabbit_types:msg_id()], client_msstate()) -> {#{rabbit_types:msg_id() => msg()}, client_msstate()}. -%% We disable read_many when the index module is not ETS for the time being. -%% We can introduce the new index module callback as a breaking change in 4.0. -read_many(_, CState = #client_msstate{ index_module = IndexMod }) - when IndexMod =/= rabbit_msg_store_ets_index -> - {#{}, CState}; read_many(MsgIds, CState) -> - file_handle_cache_stats:inc(msg_store_read, length(MsgIds)), %% We receive MsgIds in rouhgly the younger->older order so %% we can look for messages in the cache directly. read_many_cache(MsgIds, CState, #{}). @@ -512,8 +495,8 @@ read_many_cache([], CState, Acc) -> {Acc, CState}. %% We will read from disk one file at a time in no particular order. -read_many_disk([MsgId|Tail], CState, Acc) -> - case index_lookup_positive_ref_count(MsgId, CState) of +read_many_disk([MsgId|Tail], CState = #client_msstate{ index_ets = IndexEts }, Acc) -> + case index_lookup_positive_ref_count(IndexEts, MsgId) of %% We ignore this message if it's not found and will try %% to read it individually later instead. We can't call %% the message store and expect good performance here. @@ -525,6 +508,7 @@ read_many_disk([], CState, Acc) -> {Acc, CState}. read_many_file2(MsgIds0, CState = #client_msstate{ dir = Dir, + index_ets = IndexEts, file_handles_ets = FileHandlesEts, reader = Reader0, client_ref = Ref }, Acc0, File) -> @@ -534,7 +518,7 @@ read_many_file2(MsgIds0, CState = #client_msstate{ dir = Dir, %% It's possible that we get no results here if compaction %% was in progress. That's OK: we will try again with those %% MsgIds to get them from the new file. 
- MsgLocations0 = index_select_from_file(MsgIds0, File, CState), + MsgLocations0 = index_select_from_file(IndexEts, MsgIds0, File), case MsgLocations0 of [] -> read_many_file3(MsgIds0, CState, Acc0, File); @@ -563,10 +547,6 @@ read_many_file2(MsgIds0, CState = #client_msstate{ dir = Dir, read_many_file3(MsgIds, CState#client_msstate{ reader = Reader }, Acc, File) end. -index_select_from_file(MsgIds, File, #client_msstate { index_module = Index, - index_state = State }) -> - Index:select_from_file(MsgIds, File, State). - consolidate_reads([#msg_location{offset=NextOffset, total_size=NextSize}|Locs], [{Offset, Size}|Acc]) when Offset + Size =:= NextOffset -> consolidate_reads(Locs, [{Offset, Size + NextSize}|Acc]); @@ -610,7 +590,6 @@ client_write(MsgRef, MsgId, Msg, Flow, CState = #client_msstate { flying_ets = FlyingEts, cur_file_cache_ets = CurFileCacheEts, client_ref = CRef }) -> - file_handle_cache_stats:update(msg_store_write), %% We are guaranteed that the insert will succeed. %% This is true even for queue crashes because CRef will change. true = ets:insert_new(FlyingEts, {{CRef, MsgRef}, ?FLYING_WRITE}), @@ -628,14 +607,15 @@ client_write(MsgRef, MsgId, Msg, Flow, %% And the file only gets deleted after all data was copied, index %% was updated and file handles got closed. client_read3(#msg_location { msg_id = MsgId, file = File }, - CState = #client_msstate { file_handles_ets = FileHandlesEts, + CState = #client_msstate { index_ets = IndexEts, + file_handles_ets = FileHandlesEts, client_ref = Ref }) -> %% We immediately mark the handle open so that we don't get the %% file truncated while we are reading from it. The file may still %% be truncated past that point but that's OK because we do a second %% index lookup to ensure that we get the updated message location. mark_handle_open(FileHandlesEts, File, Ref), - case index_lookup(MsgId, CState) of + case index_lookup(IndexEts, MsgId) of #msg_location { file = File, ref_count = RefCount } = MsgLocation when RefCount > 0 -> {Msg, CState1} = read_from_disk(MsgLocation, CState), mark_handle_closed(FileHandlesEts, File, Ref), @@ -720,9 +700,6 @@ init([VHost, Type, BaseDir, ClientRefs, StartupFunState]) -> Dir = filename:join(BaseDir, atom_to_list(Type)), Name = filename:join(filename:basename(BaseDir), atom_to_list(Type)), - {ok, IndexModule} = application:get_env(rabbit, msg_store_index_module), - rabbit_log:info("Message store ~tp: using ~tp to provide index", [Name, IndexModule]), - AttemptFileSummaryRecovery = case ClientRefs of %% Transient. @@ -738,8 +715,8 @@ init([VHost, Type, BaseDir, ClientRefs, StartupFunState]) -> %% we start recovering messages from the files on disk. 
{FileSummaryRecovered, FileSummaryEts} = recover_file_summary(AttemptFileSummaryRecovery, Dir), - {CleanShutdown, IndexState, ClientRefs1} = - recover_index_and_client_refs(IndexModule, FileSummaryRecovered, + {CleanShutdown, IndexEts, ClientRefs1} = + recover_index_and_client_refs(FileSummaryRecovered, ClientRefs, Dir, Name), Clients = maps:from_list( [{CRef, {undefined, undefined}} || @@ -766,8 +743,7 @@ init([VHost, Type, BaseDir, ClientRefs, StartupFunState]) -> {ok, GCPid} = rabbit_msg_store_gc:start_link( #gc_state { dir = Dir, - index_module = IndexModule, - index_state = IndexState, + index_ets = IndexEts, file_summary_ets = FileSummaryEts, file_handles_ets = FileHandlesEts, msg_store = self() @@ -777,8 +753,7 @@ init([VHost, Type, BaseDir, ClientRefs, StartupFunState]) -> ?CREDIT_DISC_BOUND), State = #msstate { dir = Dir, - index_module = IndexModule, - index_state = IndexState, + index_ets = IndexEts, current_file = 0, current_file_handle = undefined, current_file_offset = 0, @@ -825,7 +800,6 @@ prioritise_call(Msg, _From, _Len, _State) -> prioritise_cast(Msg, _Len, _State) -> case Msg of {compacted_file, _File} -> 8; - {set_maximum_since_use, _Age} -> 8; {client_dying, _Pid} -> 7; _ -> 0 end. @@ -841,15 +815,14 @@ handle_call(successfully_recovered_state, _From, State) -> handle_call({new_client_state, CRef, CPid, MsgOnDiskFun}, _From, State = #msstate { dir = Dir, - index_state = IndexState, - index_module = IndexModule, + index_ets = IndexEts, file_handles_ets = FileHandlesEts, cur_file_cache_ets = CurFileCacheEts, flying_ets = FlyingEts, clients = Clients }) -> Clients1 = maps:put(CRef, {CPid, MsgOnDiskFun}, Clients), erlang:monitor(process, CPid), - reply({IndexState, IndexModule, Dir, FileHandlesEts, + reply({IndexEts, Dir, FileHandlesEts, CurFileCacheEts, FlyingEts}, State #msstate { clients = Clients1 }); @@ -877,7 +850,8 @@ handle_cast({client_delete, CRef}, noreply(clear_client(CRef, State1)); handle_cast({write, CRef, MsgRef, MsgId, Flow}, - State = #msstate { cur_file_cache_ets = CurFileCacheEts, + State = #msstate { index_ets = IndexEts, + cur_file_cache_ets = CurFileCacheEts, clients = Clients, credit_disc_bound = CreditDiscBound }) -> case Flow of @@ -905,9 +879,9 @@ handle_cast({write, CRef, MsgRef, MsgId, Flow}, %% or the non-current files. If the message *is* in the %% current file then the cache entry will be removed by %% the normal logic for that in write_message/4 and - %% maybe_roll_to_new_file/2. - case index_lookup(MsgId, State) of - [#msg_location { file = File }] + %% flush_or_roll_to_new_file/2. + case index_lookup(IndexEts, MsgId) of + #msg_location { file = File } when File == State #msstate.current_file -> ok; _ -> @@ -942,7 +916,13 @@ handle_info(sync, State) -> handle_info(timeout, State) -> noreply(internal_sync(State)); -handle_info({timeout, TimerRef, {maybe_gc, Candidates}}, State = #msstate{ gc_check_timer = TimerRef }) -> +handle_info({timeout, TimerRef, {maybe_gc, Candidates0}}, + State = #msstate{ gc_candidates = NewCandidates, + gc_check_timer = TimerRef }) -> + %% We do not want to consider candidates for GC that had + %% a message removed since we sent that maybe_gc message. + %% In that case we simply defer the GC to the next maybe_gc. 
+ Candidates = maps:without(maps:keys(NewCandidates), Candidates0), noreply(maybe_gc(Candidates, State)); %% @todo When a CQ crashes the message store does not remove @@ -962,8 +942,7 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) -> handle_info({'EXIT', _Pid, Reason}, State) -> {stop, Reason, State}. -terminate(Reason, State = #msstate { index_state = IndexState, - index_module = IndexModule, +terminate(Reason, State = #msstate { index_ets = IndexEts, current_file_handle = CurHdl, gc_pid = GCPid, file_handles_ets = FileHandlesEts, @@ -998,9 +977,8 @@ terminate(Reason, State = #msstate { index_state = IndexState, end, [true = ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts, CurFileCacheEts, FlyingEts]], - IndexModule:terminate(IndexState), - case store_recovery_terms([{client_refs, maps:keys(Clients)}, - {index_module, IndexModule}], Dir) of + index_terminate(IndexEts, Dir), + case store_recovery_terms([{client_refs, maps:keys(Clients)}], Dir) of ok -> rabbit_log:info("Message store for directory '~ts' is stopped", [Dir]), ok; @@ -1009,8 +987,7 @@ terminate(Reason, State = #msstate { index_state = IndexState, " for directory ~tp~nError: ~tp", [Dir, RTErr]) end, - State3 #msstate { index_state = undefined, - current_file_handle = undefined, + State3 #msstate { current_file_handle = undefined, current_file_offset = 0 }. code_change(_OldVsn, State, _Extra) -> @@ -1111,14 +1088,15 @@ write_action({false, not_found}, _MsgId, State) -> write_action({Mask, #msg_location { ref_count = 0, file = File, total_size = TotalSize }}, MsgId, State = #msstate { current_file = CurrentFile, + index_ets = IndexEts, file_summary_ets = FileSummaryEts }) -> case {Mask, ets:lookup(FileSummaryEts, File)} of %% Never increase the ref_count for a file that is about to get deleted. {_, [#file_summary{valid_total_size = 0}]} when File =/= CurrentFile -> - ok = index_delete(MsgId, State), + ok = index_delete(IndexEts, MsgId), {write, State}; {false, [#file_summary { locked = true }]} -> - ok = index_delete(MsgId, State), + ok = index_delete(IndexEts, MsgId), {write, State}; {false_if_increment, [#file_summary { locked = true }]} -> %% The msg for MsgId is older than the client death @@ -1127,13 +1105,13 @@ write_action({Mask, #msg_location { ref_count = 0, file = File, %% ignore this write. {ignore, File, State}; {_Mask, [#file_summary {}]} -> - ok = index_update_ref_count(MsgId, 1, State), + ok = index_update_ref_counter(IndexEts, MsgId, +1), %% Effectively set to 1. State1 = adjust_valid_total_size(File, TotalSize, State), {confirm, File, State1} end; -write_action({_Mask, #msg_location { ref_count = RefCount, file = File }}, - MsgId, State) -> - ok = index_update_ref_count(MsgId, RefCount + 1, State), +write_action({_Mask, #msg_location { file = File }}, + MsgId, State = #msstate{ index_ets = IndexEts }) -> + ok = index_update_ref_counter(IndexEts, MsgId, +1), %% We already know about it, just update counter. Only update %% field otherwise bad interaction with concurrent GC {confirm, File, State}. @@ -1160,7 +1138,7 @@ write_message(MsgId, Msg, CRef, end, CRef, State1) end. 
-remove_message(MsgId, CRef, State) -> +remove_message(MsgId, CRef, State = #msstate{ index_ets = IndexEts }) -> case should_mask_action(CRef, MsgId, State) of {true, _Location} -> State; @@ -1176,8 +1154,7 @@ remove_message(MsgId, CRef, State) -> when RefCount > 0 -> %% only update field, otherwise bad interaction with %% concurrent GC - Dec = fun () -> index_update_ref_count( - MsgId, RefCount - 1, State) end, + Dec = fun () -> index_update_ref_counter(IndexEts, MsgId, -1) end, case RefCount of %% don't remove from cur_file_cache_ets here because %% there may be further writes in the mailbox for the @@ -1208,28 +1185,127 @@ gc_candidate(File, State = #msstate{ gc_candidates = Candidates, gc_candidate(File, State = #msstate{ gc_candidates = Candidates }) -> State#msstate{ gc_candidates = Candidates#{ File => true }}. -write_message(MsgId, Msg, - State0 = #msstate { current_file_handle = CurHdl, - current_file = CurFile, - current_file_offset = CurOffset, - file_summary_ets = FileSummaryEts }) -> - {MaybeFlush, TotalSize} = writer_append(CurHdl, MsgId, Msg), - State = case MaybeFlush of - flush -> internal_sync(State0); - ok -> State0 - end, - ok = index_insert( +%% This value must be smaller enough than ?SCAN_BLOCK_SIZE +%% to ensure we only ever need 2 reads when scanning files. +%% Hence the choice of 4MB here and 4MiB there, the difference +%% in size being more than enough to ensure that property. +-define(LARGE_MESSAGE_THRESHOLD, 4000000). %% 4MB. + +write_message(MsgId, MsgBody, State) -> + MsgBodyBin = term_to_binary(MsgBody), + %% Large messages get written to their own files. + if + byte_size(MsgBodyBin) >= ?LARGE_MESSAGE_THRESHOLD -> + write_large_message(MsgId, MsgBodyBin, State); + true -> + write_small_message(MsgId, MsgBodyBin, State) + end. + +write_small_message(MsgId, MsgBodyBin, + State = #msstate { current_file_handle = CurHdl, + current_file = CurFile, + current_file_offset = CurOffset, + index_ets = IndexEts, + file_summary_ets = FileSummaryEts }) -> + {MaybeFlush, TotalSize} = writer_append(CurHdl, MsgId, MsgBodyBin), + ok = index_insert(IndexEts, #msg_location { msg_id = MsgId, ref_count = 1, file = CurFile, - offset = CurOffset, total_size = TotalSize }, State), + offset = CurOffset, total_size = TotalSize }), [_,_] = ets:update_counter(FileSummaryEts, CurFile, [{#file_summary.valid_total_size, TotalSize}, {#file_summary.file_size, TotalSize}]), - maybe_roll_to_new_file(CurOffset + TotalSize, + flush_or_roll_to_new_file(CurOffset + TotalSize, MaybeFlush, State #msstate { current_file_offset = CurOffset + TotalSize }). -contains_message(MsgId, From, State) -> - MsgLocation = index_lookup_positive_ref_count(MsgId, State), +flush_or_roll_to_new_file( + Offset, _MaybeFlush, + State = #msstate { dir = Dir, + current_file_handle = CurHdl, + current_file = CurFile, + file_summary_ets = FileSummaryEts, + cur_file_cache_ets = CurFileCacheEts, + file_size_limit = FileSizeLimit }) + when Offset >= FileSizeLimit -> + State1 = internal_sync(State), + ok = writer_close(CurHdl), + NextFile = CurFile + 1, + {ok, NextHdl} = writer_open(Dir, NextFile), + true = ets:insert_new(FileSummaryEts, #file_summary { + file = NextFile, + valid_total_size = 0, + file_size = 0, + locked = false }), + %% Delete messages from the cache that were written to disk. + true = ets:match_delete(CurFileCacheEts, {'_', '_', 0}), + State1 #msstate { current_file_handle = NextHdl, + current_file = NextFile, + current_file_offset = 0 }; +%% If we need to flush, do so here. 
+flush_or_roll_to_new_file(_, flush, State) -> + internal_sync(State); +flush_or_roll_to_new_file(_, _, State) -> + State. + +write_large_message(MsgId, MsgBodyBin, + State0 = #msstate { dir = Dir, + current_file_handle = CurHdl, + current_file = CurFile, + current_file_offset = CurOffset, + index_ets = IndexEts, + file_summary_ets = FileSummaryEts, + cur_file_cache_ets = CurFileCacheEts }) -> + {LargeMsgFile, LargeMsgHdl} = case CurOffset of + %% We haven't written in the file yet. Use it. + 0 -> + {CurFile, CurHdl}; + %% Flush the current file and close it. Open a new file. + _ -> + ok = writer_flush(CurHdl), + ok = writer_close(CurHdl), + LargeMsgFile0 = CurFile + 1, + {ok, LargeMsgHdl0} = writer_open(Dir, LargeMsgFile0), + {LargeMsgFile0, LargeMsgHdl0} + end, + %% Write the message directly and close the file. + TotalSize = writer_direct_write(LargeMsgHdl, MsgId, MsgBodyBin), + ok = writer_close(LargeMsgHdl), + %% Update ets with the new information. + ok = index_insert(IndexEts, + #msg_location { msg_id = MsgId, ref_count = 1, file = LargeMsgFile, + offset = 0, total_size = TotalSize }), + _ = case CurFile of + %% We didn't open a new file. We must update the existing value. + LargeMsgFile -> + [_,_] = ets:update_counter(FileSummaryEts, LargeMsgFile, + [{#file_summary.valid_total_size, TotalSize}, + {#file_summary.file_size, TotalSize}]); + %% We opened a new file. We can insert it all at once. + _ -> + true = ets:insert_new(FileSummaryEts, #file_summary { + file = LargeMsgFile, + valid_total_size = TotalSize, + file_size = TotalSize, + locked = false }) + end, + %% Roll over to the next file. + NextFile = LargeMsgFile + 1, + {ok, NextHdl} = writer_open(Dir, NextFile), + true = ets:insert_new(FileSummaryEts, #file_summary { + file = NextFile, + valid_total_size = 0, + file_size = 0, + locked = false }), + %% Delete messages from the cache that were written to disk. + true = ets:match_delete(CurFileCacheEts, {'_', '_', 0}), + %% Process confirms (this won't flush; we already did) and continue. + State = internal_sync(State0), + State #msstate { current_file_handle = NextHdl, + current_file = NextFile, + current_file_offset = 0 }. + +contains_message(MsgId, From, State = #msstate{ index_ets = IndexEts }) -> + MsgLocation = index_lookup_positive_ref_count(IndexEts, MsgId), gen_server2:reply(From, MsgLocation =/= not_found), State. @@ -1283,9 +1359,9 @@ record_pending_confirm(CRef, MsgId, State) -> %% rewrite the msg - rewriting it would make it younger than the death %% msg and thus should be ignored. Note that this (correctly) returns %% false when testing to remove the death msg itself. -should_mask_action(CRef, MsgId, - State = #msstate{dying_clients = DyingClients}) -> - case {maps:find(CRef, DyingClients), index_lookup(MsgId, State)} of +should_mask_action(CRef, MsgId, #msstate{ + index_ets = IndexEts, dying_clients = DyingClients}) -> + case {maps:find(CRef, DyingClients), index_lookup(IndexEts, MsgId)} of {error, Location} -> {false, Location}; {{ok, _}, not_found} -> @@ -1325,8 +1401,7 @@ writer_recover(Dir, Num, Offset) -> ok = file:truncate(Fd), {ok, #writer{fd = Fd, buffer = prim_buffer:new()}}. -writer_append(#writer{buffer = Buffer}, MsgId, MsgBody) -> - MsgBodyBin = term_to_binary(MsgBody), +writer_append(#writer{buffer = Buffer}, MsgId, MsgBodyBin) -> MsgBodyBinSize = byte_size(MsgBodyBin), EntrySize = MsgBodyBinSize + 16, %% Size of MsgId + MsgBodyBin. %% We send an iovec to the buffer instead of building a binary. 
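For the small/large split introduced above: ?SCAN_BLOCK_SIZE is 4,194,304 bytes (4 MiB) while ?LARGE_MESSAGE_THRESHOLD is 4,000,000 bytes, leaving 194,304 bytes of headroom. A small entry adds only the 16-byte message id and 9 bytes of framing on top of the body, so its on-disk footprint stays below 4,000,025 bytes and can never straddle more than two scan blocks; reading it back during a scan therefore needs at most two block reads. A sketch of the routing decision only (not the actual write paths):

-define(LARGE_MESSAGE_THRESHOLD, 4000000). %% same value as above

%% Illustrative classification mirroring write_message/3.
classify(MsgBody) ->
    Bin = term_to_binary(MsgBody),
    case byte_size(Bin) >= ?LARGE_MESSAGE_THRESHOLD of
        true  -> {large, Bin}; %% gets its own file, written directly
        false -> {small, Bin}  %% appended to the current file via the buffer
    end.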
@@ -1354,15 +1429,24 @@ writer_flush(#writer{fd = Fd, buffer = Buffer}) -> file:write(Fd, prim_buffer:read_iovec(Buffer, Size)) end. +%% For large messages we don't buffer anything. Large messages +%% are kept within their own files. +%% +%% This is basically the same as writer_append except no buffering. +writer_direct_write(#writer{fd = Fd}, MsgId, MsgBodyBin) -> + MsgBodyBinSize = byte_size(MsgBodyBin), + EntrySize = MsgBodyBinSize + 16, %% Size of MsgId + MsgBodyBin. + ok = file:write(Fd, [ + <>, + MsgId, + MsgBodyBin, + <<255>> %% OK marker. + ]), + EntrySize + 9. + writer_close(#writer{fd = Fd}) -> file:close(Fd). -open_file(File, Mode) -> - file_handle_cache:open_with_absolute_path( - File, ?BINARY_MODE ++ Mode, - [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}, - {read_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]). - mark_handle_open(FileHandlesEts, File, Ref) -> %% This is fine to fail (already exists). Note it could fail with %% the value being close, and not have it updated to open. @@ -1383,85 +1467,203 @@ list_sorted_filenames(Dir, Ext) -> filelib:wildcard("*" ++ Ext, Dir)). %%---------------------------------------------------------------------------- -%% index +%% file scanning +%%---------------------------------------------------------------------------- + +-define(SCAN_BLOCK_SIZE, 4194304). %% 4MB + +scan_file_for_valid_messages(Dir, FileName) -> + scan_file_for_valid_messages(form_filename(Dir, FileName)). + +scan_file_for_valid_messages(Path) -> + case file:open(Path, [read, binary, raw]) of + {ok, Fd} -> + {ok, FileSize} = file:position(Fd, eof), + {ok, _} = file:position(Fd, bof), + Messages = scan(<<>>, Fd, 0, FileSize, #{}, []), + ok = file:close(Fd), + case Messages of + [] -> + {ok, [], 0}; + [{_, TotalSize, Offset}|_] -> + {ok, Messages, Offset + TotalSize} + end; + {error, enoent} -> + {ok, [], 0}; + {error, Reason} -> + {error, {unable_to_scan_file, + filename:basename(Path), + Reason}} + end. + +scan(Buffer, Fd, Offset, FileSize, MsgIdsFound, Acc) -> + case file:read(Fd, ?SCAN_BLOCK_SIZE) of + eof -> + Acc; + {ok, Data0} -> + Data = case Buffer of + <<>> -> Data0; + _ -> <> + end, + scan_data(Data, Fd, Offset, FileSize, MsgIdsFound, Acc) + end. + +%% Message might have been found. +scan_data(<> = Data, + Fd, Offset, FileSize, MsgIdsFound, Acc) + when Size >= 16 -> + <> = MsgIdAndMsg, + case MsgIdsFound of + %% This MsgId was found already. This data is probably + %% a remnant from a previous compaction, but it might + %% simply be a coincidence. Try the next byte. + #{MsgIdInt := true} -> + <<_, Rest2/bits>> = Data, + scan_data(Rest2, Fd, Offset + 1, FileSize, MsgIdsFound, Acc); + %% Data looks to be a message. + _ -> + %% Avoid sub-binary construction. + MsgId = <>, + TotalSize = Size + 9, + scan_data(Rest, Fd, Offset + TotalSize, FileSize, + MsgIdsFound#{MsgIdInt => true}, + [{MsgId, TotalSize, Offset}|Acc]) + end; +%% This might be the start of a message. +scan_data(<> = Data, Fd, Offset, FileSize, MsgIdsFound, Acc) + when byte_size(Rest) < Size + 1, Size < FileSize - Offset -> + scan(Data, Fd, Offset, FileSize, MsgIdsFound, Acc); +scan_data(Data, Fd, Offset, FileSize, MsgIdsFound, Acc) + when byte_size(Data) < 8 -> + scan(Data, Fd, Offset, FileSize, MsgIdsFound, Acc); +%% This is definitely not a message. Try the next byte. +scan_data(<<_, Rest/bits>>, Fd, Offset, FileSize, MsgIdsFound, Acc) -> + scan_data(Rest, Fd, Offset + 1, FileSize, MsgIdsFound, Acc). 
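The writer and scanner above imply a fixed entry layout: EntrySize counts the 16-byte message id plus the body, the on-disk footprint is EntrySize + 9, and every entry ends in a 255 marker. Assuming the remaining 9 bytes are an 8-byte size prefix plus that 1-byte marker (the field widths are inferred from "EntrySize + 9", not spelled out in this hunk), a self-contained encode/decode pair looks like:

-module(msg_entry_sketch).
-export([encode_entry/2, decode_entry/1]).

%% Inferred framing: <<EntrySize:64, MsgId:16/binary, Body/binary, 255>>,
%% where EntrySize = byte_size(Body) + 16 and the on-disk footprint is
%% EntrySize + 9 (size prefix + OK marker).
encode_entry(MsgId, Body) when byte_size(MsgId) =:= 16, is_binary(Body) ->
    EntrySize = byte_size(Body) + 16,
    {iolist_to_binary([<<EntrySize:64>>, MsgId, Body, <<255>>]), EntrySize + 9}.

decode_entry(<<Size:64, MsgIdAndBody:Size/binary, 255, Rest/binary>>) ->
    <<MsgId:16/binary, Body/binary>> = MsgIdAndBody,
    {MsgId, Body, Rest}.

decode_entry/1 is the same shape scan_data/6 matches on, except that the scanner works incrementally across 4 MiB blocks and tolerates bytes that turn out not to be a message.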
+ +%%---------------------------------------------------------------------------- +%% Ets index %%---------------------------------------------------------------------------- -index_lookup_positive_ref_count(Key, State) -> - case index_lookup(Key, State) of +-define(INDEX_TABLE_NAME, rabbit_msg_store_ets_index). +-define(INDEX_FILE_NAME, "msg_store_index.ets"). + +index_new(Dir) -> + _ = file:delete(filename:join(Dir, ?INDEX_FILE_NAME)), + ets:new(?INDEX_TABLE_NAME, [set, public, {keypos, #msg_location.msg_id}]). + +index_recover(Dir) -> + Path = filename:join(Dir, ?INDEX_FILE_NAME), + case ets:file2tab(Path) of + {ok, IndexEts} -> _ = file:delete(Path), + {ok, IndexEts}; + Error -> Error + end. + +index_lookup(IndexEts, Key) -> + case ets:lookup(IndexEts, Key) of + [] -> not_found; + [Entry] -> Entry + end. + +index_lookup_positive_ref_count(IndexEts, Key) -> + case index_lookup(IndexEts, Key) of not_found -> not_found; #msg_location { ref_count = 0 } -> not_found; #msg_location {} = MsgLocation -> MsgLocation end. -index_update_ref_count(Key, RefCount, State) -> - index_update_fields(Key, {#msg_location.ref_count, RefCount}, State). +%% @todo We currently fetch all and then filter by file. +%% This might lead to too many lookups... How to best +%% optimize this? ets:select didn't seem great. +index_select_from_file(IndexEts, MsgIds, File) -> + All = [index_lookup(IndexEts, Id) || Id <- MsgIds], + [MsgLoc || MsgLoc=#msg_location{file=MsgFile} <- All, MsgFile =:= File]. -index_lookup(Key, #gc_state { index_module = Index, - index_state = State }) -> - Index:lookup(Key, State); +index_insert(IndexEts, Obj) -> + true = ets:insert_new(IndexEts, Obj), + ok. -index_lookup(Key, #client_msstate { index_module = Index, - index_state = State }) -> - Index:lookup(Key, State); +index_update(IndexEts, Obj) -> + true = ets:insert(IndexEts, Obj), + ok. -index_lookup(Key, #msstate { index_module = Index, index_state = State }) -> - Index:lookup(Key, State). +index_update_fields(IndexEts, Key, Updates) -> + true = ets:update_element(IndexEts, Key, Updates), + ok. -index_insert(Obj, #msstate { index_module = Index, index_state = State }) -> - Index:insert(Obj, State). +index_update_ref_counter(IndexEts, Key, RefCount) -> + _ = ets:update_counter(IndexEts, Key, RefCount), + ok. -index_update(Obj, #msstate { index_module = Index, index_state = State }) -> - Index:update(Obj, State). +index_update_ref_counter(IndexEts, Key, RefCount, Default) -> + _ = ets:update_counter(IndexEts, Key, RefCount, Default), + ok. -index_update_fields(Key, Updates, #msstate{ index_module = Index, - index_state = State }) -> - Index:update_fields(Key, Updates, State); -index_update_fields(Key, Updates, #gc_state{ index_module = Index, - index_state = State }) -> - Index:update_fields(Key, Updates, State). +index_delete(IndexEts, Key) -> + true = ets:delete(IndexEts, Key), + ok. -index_delete(Key, #msstate { index_module = Index, index_state = State }) -> - Index:delete(Key, State). +index_delete_object(IndexEts, Obj) -> + true = ets:delete_object(IndexEts, Obj), + ok. -index_delete_object(Obj, #gc_state{ index_module = Index, - index_state = State }) -> - Index:delete_object(Obj, State). +index_clean_up_temporary_reference_count_entries(IndexEts) -> + MatchHead = #msg_location { file = undefined, _ = '_' }, + ets:select_delete(IndexEts, [{MatchHead, [], [true]}]), + ok. 
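The clean-up function above uses a match specification to drop index entries that never received a file. A standalone demonstration of that select_delete pattern on a throwaway table (record values are illustrative):

-module(index_cleanup_sketch).
-export([demo/0]).

-record(msg_location, {msg_id, ref_count = 0, file, offset, total_size}).

demo() ->
    T = ets:new(idx_sketch, [set, {keypos, #msg_location.msg_id}]),
    true = ets:insert(T, #msg_location{msg_id = a, ref_count = 1, file = undefined}),
    true = ets:insert(T, #msg_location{msg_id = b, ref_count = 1, file = 3}),
    %% Same match spec as above: drop entries that never got a file.
    MatchHead = #msg_location{file = undefined, _ = '_'},
    1 = ets:select_delete(T, [{MatchHead, [], [true]}]),
    [#msg_location{msg_id = b}] = ets:tab2list(T),
    true = ets:delete(T),
    ok.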
-index_clean_up_temporary_reference_count_entries( - #msstate { index_module = Index, - index_state = State }) -> - Index:clean_up_temporary_reference_count_entries_without_file(State). +index_terminate(IndexEts, Dir) -> + case ets:tab2file(IndexEts, filename:join(Dir, ?INDEX_FILE_NAME), + [{extended_info, [object_count]}]) of + ok -> ok; + {error, Err} -> + rabbit_log:error("Unable to save message store index" + " for directory ~tp.~nError: ~tp", + [Dir, Err]) + end, + ets:delete(IndexEts). %%---------------------------------------------------------------------------- %% shutdown and recovery %%---------------------------------------------------------------------------- -recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Name) -> - {false, IndexModule:new(Dir), []}; -recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Name) -> +recover_index_and_client_refs(_Recover, undefined, Dir, _Name) -> + {false, index_new(Dir), []}; +recover_index_and_client_refs(false, _ClientRefs, Dir, Name) -> rabbit_log:warning("Message store ~tp: rebuilding indices from scratch", [Name]), - {false, IndexModule:new(Dir), []}; -recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Name) -> + {false, index_new(Dir), []}; +recover_index_and_client_refs(true, ClientRefs, Dir, Name) -> Fresh = fun (ErrorMsg, ErrorArgs) -> rabbit_log:warning("Message store ~tp : " ++ ErrorMsg ++ "~n" "rebuilding indices from scratch", [Name | ErrorArgs]), - {false, IndexModule:new(Dir), []} + {false, index_new(Dir), []} end, case read_recovery_terms(Dir) of {false, Error} -> Fresh("failed to read recovery terms: ~tp", [Error]); {true, Terms} -> RecClientRefs = proplists:get_value(client_refs, Terms, []), - RecIndexModule = proplists:get_value(index_module, Terms), + %% We expect the index module to either be unset or be set + %% to rabbit_msg_store_ets_index. This is needed for graceful + %% upgrade to RabbitMQ 4.0 and above. Starting from 4.0 + %% however RabbitMQ will not save the index module in the + %% recovery terms, so this check can be removed in 4.1 or later. + %% What this effectively does is that for users that had a + %% custom index module in 3.13 we force a dirty recovery + %% to switch them to ets. Others can proceed as normal. + RecIndexModule = proplists:get_value(index_module, Terms, + rabbit_msg_store_ets_index), case (lists:sort(ClientRefs) =:= lists:sort(RecClientRefs) - andalso IndexModule =:= RecIndexModule) of - true -> case IndexModule:recover(Dir) of - {ok, IndexState1} -> - {true, IndexState1, ClientRefs}; + andalso RecIndexModule =:= rabbit_msg_store_ets_index) of + true -> case index_recover(Dir) of + {ok, IndexEts} -> + {true, IndexEts, ClientRefs}; {error, Error} -> Fresh("failed to recover index: ~tp", [Error]) end; + false when RecIndexModule =/= rabbit_msg_store_ets_index -> + Fresh("custom index backends have been removed; using ETS index", []); false -> Fresh("recovery terms differ from present", []) end end. @@ -1497,50 +1699,27 @@ recover_file_summary(true, Dir) -> {error, _Error} -> recover_file_summary(false, Dir) end. -count_msg_refs(Gen, Seed, State) -> +count_msg_refs(Gen, Seed, State = #msstate{ index_ets = IndexEts }) -> case Gen(Seed) of finished -> ok; + %% @todo This is currently required by tests but can't happen otherwise? 
{_MsgId, 0, Next} -> count_msg_refs(Gen, Next, State); - {MsgId, Delta, Next} -> - ok = case index_lookup(MsgId, State) of - not_found -> - index_insert(#msg_location { msg_id = MsgId, - file = undefined, - ref_count = Delta }, - State); - #msg_location { ref_count = RefCount } = StoreEntry -> - NewRefCount = RefCount + Delta, - case NewRefCount of - 0 -> index_delete(MsgId, State); - _ -> index_update(StoreEntry #msg_location { - ref_count = NewRefCount }, - State) - end - end, + %% This clause is kept for v1 compatibility purposes. + %% It can be removed once we no longer support converting from v1 data. + {MsgId, 1, Next} -> + index_update_ref_counter(IndexEts, MsgId, +1, + #msg_location{msg_id=MsgId, file=undefined, ref_count=1}), + count_msg_refs(Gen, Next, State); + {MsgIds, Next} -> + lists:foreach(fun(MsgId) -> + index_update_ref_counter(IndexEts, MsgId, +1, + #msg_location{msg_id=MsgId, file=undefined, ref_count=1}) + end, MsgIds), count_msg_refs(Gen, Next, State) end. -scan_file_for_valid_messages(File) -> - case open_file(File, ?READ_MODE) of - {ok, Hdl} -> Valid = rabbit_msg_file:scan( - Hdl, filelib:file_size(File), - fun scan_fun/2, []), - ok = file_handle_cache:close(Hdl), - Valid; - {error, enoent} -> {ok, [], 0}; - {error, Reason} -> {error, {unable_to_scan_file, - filename:basename(File), - Reason}} - end. - -scan_file_for_valid_messages(Dir, FileName) -> - scan_file_for_valid_messages(form_filename(Dir, FileName)). - -scan_fun({MsgId, TotalSize, Offset, _Msg}, Acc) -> - [{MsgId, TotalSize, Offset} | Acc]. - build_index(true, _StartupFunState, State = #msstate { file_summary_ets = FileSummaryEts }) -> File = ets:last(FileSummaryEts), @@ -1561,26 +1740,27 @@ build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit}, State1, Files)} end. -build_index_worker(Gatherer, State = #msstate { dir = Dir }, +build_index_worker(Gatherer, #msstate { index_ets = IndexEts, dir = Dir }, File, Files) -> FileName = filenum_to_name(File), rabbit_log:debug("Rebuilding message location index from ~ts (~B file(s) remaining)", [form_filename(Dir, FileName), length(Files)]), - {ok, Messages0, FileSize} = + %% The scan function already dealt with duplicate messages + %% within the file. We then get messages in reverse order. + {ok, Messages, FileSize} = scan_file_for_valid_messages(Dir, FileName), - %% The scan gives us messages end-of-file first so we reverse the list - %% in case a compaction had occurred before shutdown to not have to repeat it. - Messages = lists:reverse(Messages0), + %% Valid messages are in file order so the last message is + %% the last message from the list. {ValidMessages, ValidTotalSize} = lists:foldl( fun (Obj = {MsgId, TotalSize, Offset}, {VMAcc, VTSAcc}) -> - %% We only keep the first message in the file. Duplicates (due to compaction) get ignored. - case index_lookup(MsgId, State) of + %% Fan-out may result in the same message data in multiple + %% files so we have to guard against it. 
+ case index_lookup(IndexEts, MsgId) of #msg_location { file = undefined } = StoreEntry -> - ok = index_update(StoreEntry #msg_location { + ok = index_update(IndexEts, StoreEntry #msg_location { file = File, offset = Offset, - total_size = TotalSize }, - State), + total_size = TotalSize }), {[Obj | VMAcc], VTSAcc + TotalSize}; _ -> {VMAcc, VTSAcc} @@ -1594,7 +1774,7 @@ build_index_worker(Gatherer, State = #msstate { dir = Dir }, [] -> case ValidMessages of [] -> 0; _ -> {_MsgId, TotalSize, Offset} = - hd(ValidMessages), + lists:last(ValidMessages), Offset + TotalSize end; [_|_] -> FileSize @@ -1620,11 +1800,12 @@ enqueue_build_index_workers(Gatherer, [File|Files], State) -> enqueue_build_index_workers(Gatherer, Files, State). reduce_index(Gatherer, LastFile, - State = #msstate { file_summary_ets = FileSummaryEts }) -> + State = #msstate { index_ets = IndexEts, + file_summary_ets = FileSummaryEts }) -> case gatherer:out(Gatherer) of empty -> ok = gatherer:stop(Gatherer), - ok = index_clean_up_temporary_reference_count_entries(State), + ok = index_clean_up_temporary_reference_count_entries(IndexEts), Offset = case ets:lookup(FileSummaryEts, LastFile) of [] -> 0; [#file_summary { file_size = FileSize }] -> FileSize @@ -1651,33 +1832,6 @@ rebuild_index(Gatherer, Files, State) -> %% garbage collection / compaction / aggregation -- internal %%---------------------------------------------------------------------------- -maybe_roll_to_new_file( - Offset, - State = #msstate { dir = Dir, - current_file_handle = CurHdl, - current_file = CurFile, - file_summary_ets = FileSummaryEts, - cur_file_cache_ets = CurFileCacheEts, - file_size_limit = FileSizeLimit }) - when Offset >= FileSizeLimit -> - State1 = internal_sync(State), - ok = writer_close(CurHdl), - NextFile = CurFile + 1, - {ok, NextHdl} = writer_open(Dir, NextFile), - true = ets:insert_new(FileSummaryEts, #file_summary { - file = NextFile, - valid_total_size = 0, - file_size = 0, - locked = false }), - %% We only delete messages from the cache that were written to disk - %% in the previous file. - true = ets:match_delete(CurFileCacheEts, {'_', '_', 0}), - State1 #msstate { current_file_handle = NextHdl, - current_file = NextFile, - current_file_offset = 0 }; -maybe_roll_to_new_file(_, State) -> - State. - %% We keep track of files that have seen removes and %% check those periodically for compaction. We only %% compact files that have less than half valid data. @@ -1766,7 +1920,8 @@ delete_file_if_empty(File, State = #msstate { %% We do not try to look at messages that are not the last because we do not want to %% accidentally write over messages that were moved earlier. -compact_file(File, State = #gc_state { file_summary_ets = FileSummaryEts, +compact_file(File, State = #gc_state { index_ets = IndexEts, + file_summary_ets = FileSummaryEts, dir = Dir, msg_store = Server }) -> %% Get metadata about the file. Will be used to calculate @@ -1775,8 +1930,15 @@ compact_file(File, State = #gc_state { file_summary_ets = FileSummaryEts, %% Open the file. FileName = filenum_to_name(File), {ok, Fd} = file:open(form_filename(Dir, FileName), [read, write, binary, raw]), - %% Load the messages. - Messages = load_and_vacuum_message_file(File, State), + %% Load the messages. It's possible to get 0 messages here; + %% that's OK. That means we have little to do as the file is + %% about to be deleted. + {Messages, _} = scan_and_vacuum_message_file(File, State), + %% Blank holes. 
We must do this first otherwise the file is left + %% with data that may confuse the code (for example data that looks + %% like a message, isn't a message, but spans over a real message). + %% We blank more than is likely required but better safe than sorry. + blank_holes_in_file(Fd, Messages), %% Compact the file. {ok, TruncateSize, IndexUpdates} = do_compact_file(Fd, 0, Messages, lists:reverse(Messages), []), %% Sync and close the file. @@ -1786,9 +1948,8 @@ compact_file(File, State = #gc_state { file_summary_ets = FileSummaryEts, %% be there for readers. Note that it's OK if we crash at any point before we %% update the index because the old data is still there until we truncate. lists:foreach(fun ({UpdateMsgId, UpdateOffset}) -> - ok = index_update_fields(UpdateMsgId, - [{#msg_location.offset, UpdateOffset}], - State) + ok = index_update_fields(IndexEts, UpdateMsgId, + [{#msg_location.offset, UpdateOffset}]) end, IndexUpdates), %% We can truncate only if there are no files opened before this timestamp. ThresholdTimestamp = erlang:monotonic_time(), @@ -1821,6 +1982,32 @@ compact_file(File, State = #gc_state { file_summary_ets = FileSummaryEts, garbage_collect(), ok. +%% We must special case the blanking of the beginning of the file. +blank_holes_in_file(Fd, [#msg_location{ offset = Offset }|Tail]) + when Offset =/= 0 -> + Bytes = <<0:Offset/unit:8>>, + ok = file:pwrite(Fd, 0, Bytes), + blank_holes_in_file1(Fd, Tail); +blank_holes_in_file(Fd, Messages) -> + blank_holes_in_file1(Fd, Messages). + +blank_holes_in_file1(Fd, [ + #msg_location{ offset = OneOffset, total_size = OneSize }, + #msg_location{ offset = TwoOffset } = Two + |Tail]) when OneOffset + OneSize < TwoOffset -> + Offset = OneOffset + OneSize, + Size = TwoOffset - Offset, + Bytes = <<0:Size/unit:8>>, + ok = file:pwrite(Fd, Offset, Bytes), + blank_holes_in_file1(Fd, [Two|Tail]); +%% We either have only one message left, or contiguous messages. +blank_holes_in_file1(Fd, [_|Tail]) -> + blank_holes_in_file1(Fd, Tail); +%% No need to blank the hole past the last message as we will +%% not write there (no confusion possible) and truncate afterwards. +blank_holes_in_file1(_, []) -> + ok. + %% If the message at the end fits into the hole we have found, we copy it there. %% We will do the ets:updates after the data is synced to disk. do_compact_file(Fd, Offset, Start = [#msg_location{ offset = StartMsgOffset }|_], @@ -1907,38 +2094,30 @@ delete_file(File, State = #gc_state { file_summary_ets = FileSummaryEts, ok end. -load_and_vacuum_message_file(File, State) -> - Messages0 = index_select_all_from_file(File, State), - %% Cleanup messages that have 0 ref_count. - Messages = lists:foldl(fun - (Entry = #msg_location{ ref_count = 0 }, Acc) -> - ok = index_delete_object(Entry, State), - Acc; - (Entry, Acc) -> - [Entry|Acc] - end, [], Messages0), - lists:keysort(#msg_location.offset, Messages). - -index_select_all_from_file(File, #gc_state { index_module = Index, - index_state = State }) -> - Index:select_all_from_file(File, State). 
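blank_holes_in_file/2 above zeroes the gaps between retained entries (and before the first one) so that a later scan cannot misread stale bytes as a message spanning a real one. A standalone sketch of the per-gap arithmetic (hypothetical offsets; the file is assumed to be open in raw write mode as in compact_file/2):

-module(blank_holes_sketch).
-export([zero_gap/4]).

%% Zero the hole between a retained entry at PrevOffset (of PrevSize bytes)
%% and the next retained entry at NextOffset, if there is one.
zero_gap(Fd, PrevOffset, PrevSize, NextOffset)
  when PrevOffset + PrevSize < NextOffset ->
    GapStart = PrevOffset + PrevSize,
    GapSize = NextOffset - GapStart,
    ok = file:pwrite(Fd, GapStart, <<0:GapSize/unit:8>>);
zero_gap(_Fd, _PrevOffset, _PrevSize, _NextOffset) ->
    ok. %% contiguous entries: nothing to blank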
- -scan_and_vacuum_message_file(File, State = #gc_state { dir = Dir }) -> +scan_and_vacuum_message_file(File, #gc_state{ index_ets = IndexEts, dir = Dir }) -> %% Messages here will be end-of-file at start-of-list {ok, Messages, _FileSize} = scan_file_for_valid_messages(Dir, filenum_to_name(File)), %% foldl will reverse so will end up with msgs in ascending offset order lists:foldl( fun ({MsgId, TotalSize, Offset}, Acc = {List, Size}) -> - case index_lookup(MsgId, State) of + case index_lookup(IndexEts, MsgId) of #msg_location { file = File, total_size = TotalSize, offset = Offset, ref_count = 0 } = Entry -> - ok = index_delete_object(Entry, State), + index_delete_object(IndexEts, Entry), Acc; #msg_location { file = File, total_size = TotalSize, offset = Offset } = Entry -> {[ Entry | List ], TotalSize + Size}; - _ -> + %% Fan-out may remove the entry but also write a new + %% entry in a different file when it needs to write + %% a message and the existing reference is in a file + %% that's about to be deleted. So we explicitly accept + %% these cases and ignore this message. + #msg_location { file = OtherFile, total_size = TotalSize } + when File =/= OtherFile -> + Acc; + not_found -> Acc end end, {[], 0}, Messages). diff --git a/deps/rabbit/src/rabbit_msg_store_ets_index.erl b/deps/rabbit/src/rabbit_msg_store_ets_index.erl deleted file mode 100644 index e7b9cd389854..000000000000 --- a/deps/rabbit/src/rabbit_msg_store_ets_index.erl +++ /dev/null @@ -1,92 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_msg_store_ets_index). - --include_lib("rabbit_common/include/rabbit_msg_store.hrl"). - --behaviour(rabbit_msg_store_index). - --export([new/1, recover/1, - lookup/2, select_from_file/3, select_all_from_file/2, insert/2, update/2, update_fields/3, delete/2, - delete_object/2, clean_up_temporary_reference_count_entries_without_file/1, terminate/1]). - --define(MSG_LOC_NAME, rabbit_msg_store_ets_index). --define(FILENAME, "msg_store_index.ets"). - --record(state, - {table, - %% Stored as binary() as opposed to file:filename() to save memory. - dir :: binary()}). - -new(Dir) -> - _ = file:delete(filename:join(Dir, ?FILENAME)), - Tid = ets:new(?MSG_LOC_NAME, [set, public, {keypos, #msg_location.msg_id}]), - #state { table = Tid, dir = rabbit_file:filename_to_binary(Dir) }. - -recover(Dir) -> - Path = filename:join(Dir, ?FILENAME), - case ets:file2tab(Path) of - {ok, Tid} -> _ = file:delete(Path), - {ok, #state { table = Tid, dir = rabbit_file:filename_to_binary(Dir) }}; - Error -> Error - end. - -lookup(Key, State) -> - case ets:lookup(State #state.table, Key) of - [] -> not_found; - [Entry] -> Entry - end. - -%% @todo We currently fetch all and then filter by file. -%% This might lead to too many lookups... How to best -%% optimize this? ets:select didn't seem great. -select_from_file(MsgIds, File, State) -> - All = [lookup(Id, State) || Id <- MsgIds], - [MsgLoc || MsgLoc=#msg_location{file=MsgFile} <- All, MsgFile =:= File]. - -%% Note that this function is not terribly efficient and should only be -%% used for compaction or similar. -select_all_from_file(File, State) -> - ets:match_object(State #state.table, #msg_location { file = File, _ = '_' }). 
- -insert(Obj, State) -> - true = ets:insert_new(State #state.table, Obj), - ok. - -update(Obj, State) -> - true = ets:insert(State #state.table, Obj), - ok. - -update_fields(Key, Updates, State) -> - true = ets:update_element(State #state.table, Key, Updates), - ok. - -delete(Key, State) -> - true = ets:delete(State #state.table, Key), - ok. - -delete_object(Obj, State) -> - true = ets:delete_object(State #state.table, Obj), - ok. - -clean_up_temporary_reference_count_entries_without_file(State) -> - MatchHead = #msg_location { file = undefined, _ = '_' }, - ets:select_delete(State #state.table, [{MatchHead, [], [true]}]), - ok. - -terminate(#state { table = MsgLocations, dir = DirBin }) -> - Dir = rabbit_file:binary_to_filename(DirBin), - case ets:tab2file(MsgLocations, filename:join(Dir, ?FILENAME), - [{extended_info, [object_count]}]) of - ok -> ok; - {error, Err} -> - rabbit_log:error("Unable to save message store index" - " for directory ~tp.~nError: ~tp", - [Dir, Err]) - end, - ets:delete(MsgLocations). diff --git a/deps/rabbit/src/rabbit_msg_store_gc.erl b/deps/rabbit/src/rabbit_msg_store_gc.erl index 355ce75da5e0..8ede8c1a3f97 100644 --- a/deps/rabbit/src/rabbit_msg_store_gc.erl +++ b/deps/rabbit/src/rabbit_msg_store_gc.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_msg_store_gc). @@ -11,10 +11,8 @@ -export([start_link/1, compact/2, truncate/4, delete/2, stop/1]). --export([set_maximum_since_use/2]). - -export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3, prioritise_cast/3]). + terminate/2, code_change/3]). -record(state, { pending, @@ -53,23 +51,13 @@ delete(Server, File) -> stop(Server) -> gen_server2:call(Server, stop, infinity). --spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'. - -set_maximum_since_use(Pid, Age) -> - gen_server2:cast(Pid, {set_maximum_since_use, Age}). - %%---------------------------------------------------------------------------- init([MsgStoreState]) -> - ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use, - [self()]), {ok, #state { pending = #{}, msg_store_state = MsgStoreState }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. -prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8; -prioritise_cast(_Msg, _Len, _State) -> 0. - handle_call(stop, _From, State) -> {stop, normal, ok, State}. @@ -94,11 +82,7 @@ handle_cast({truncate, File, TruncateSize, ThresholdTimestamp}, State = #state{p handle_cast({delete, File}, State = #state{pending = Pending}) -> %% We drop any pending action because deletion takes precedence over truncation. State1 = State#state{pending = maps:remove(File, Pending)}, - {noreply, attempt_action(delete, [File], State1), hibernate}; - -handle_cast({set_maximum_since_use, Age}, State) -> - ok = file_handle_cache:set_maximum_since_use(Age), - {noreply, State, hibernate}. + {noreply, attempt_action(delete, [File], State1), hibernate}. %% Run all pending actions. 
handle_info({timeout, TimerRef, do_pending}, diff --git a/deps/rabbit/src/rabbit_networking.erl b/deps/rabbit/src/rabbit_networking.erl index 6f702fe25cf0..508e0a0e2b9f 100644 --- a/deps/rabbit/src/rabbit_networking.erl +++ b/deps/rabbit/src/rabbit_networking.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_networking). @@ -34,12 +34,13 @@ force_connection_event_refresh/1, force_non_amqp_connection_event_refresh/1, handshake/2, tcp_host/1, ranch_ref/1, ranch_ref/2, ranch_ref_of_protocol/1, - listener_of_protocol/1, stop_ranch_listener_of_protocol/1]). + listener_of_protocol/1, stop_ranch_listener_of_protocol/1, + list_local_connections_of_protocol/1]). %% Used by TCP-based transports, e.g. STOMP adapter -export([tcp_listener_addresses/1, tcp_listener_spec/9, tcp_listener_spec/10, tcp_listener_spec/11, - ensure_ssl/0, fix_ssl_options/1, poodle_check/1]). + ensure_ssl/0, fix_ssl_options/1]). -export([tcp_listener_started/4, tcp_listener_stopped/4]). @@ -49,9 +50,7 @@ -export([ local_connections/0, - local_non_amqp_connections/0, - %% prefer local_connections/0 - connections_local/0 + local_non_amqp_connections/0 ]). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -127,12 +126,7 @@ boot_tls(NumAcceptors, ConcurrentConnsSupsCount) -> ok; {ok, SslListeners} -> SslOpts = ensure_ssl(), - case poodle_check('AMQP') of - ok -> _ = [start_ssl_listener(L, SslOpts, NumAcceptors, ConcurrentConnsSupsCount) - || L <- SslListeners], - ok; - danger -> ok - end, + _ = [start_ssl_listener(L, SslOpts, NumAcceptors, ConcurrentConnsSupsCount) || L <- SslListeners], ok end. @@ -144,33 +138,6 @@ ensure_ssl() -> {ok, SslOptsConfig0} = application:get_env(rabbit, ssl_options), rabbit_ssl_options:fix(SslOptsConfig0). --spec poodle_check(atom()) -> 'ok' | 'danger'. - -poodle_check(Context) -> - {ok, Vsn} = application:get_key(ssl, vsn), - case rabbit_misc:version_compare(Vsn, "5.3", gte) of %% R16B01 - true -> ok; - false -> case application:get_env(rabbit, ssl_allow_poodle_attack) of - {ok, true} -> ok; - _ -> log_poodle_fail(Context), - danger - end - end. - -log_poodle_fail(Context) -> - rabbit_log:error( - "The installed version of Erlang (~ts) contains the bug OTP-10905,~n" - "which makes it impossible to disable SSLv3. This makes the system~n" - "vulnerable to the POODLE attack. SSL listeners for ~ts have therefore~n" - "been disabled.~n~n" - "You are advised to upgrade to a recent Erlang version; R16B01 is the~n" - "first version in which this bug is fixed, but later is usually~n" - "better.~n~n" - "If you cannot upgrade now and want to re-enable SSL listeners, you can~n" - "set the config item 'ssl_allow_poodle_attack' to 'true' in the~n" - "'rabbit' section of your configuration file.", - [rabbit_misc:otp_release(), Context]). - fix_ssl_options(Config) -> rabbit_ssl_options:fix(Config). @@ -286,6 +253,13 @@ stop_ranch_listener_of_protocol(Protocol) -> ranch:stop_listener(Ref) end. +-spec list_local_connections_of_protocol(atom()) -> [pid()]. +list_local_connections_of_protocol(Protocol) -> + case ranch_ref_of_protocol(Protocol) of + undefined -> []; + AcceptorRef -> ranch:procs(AcceptorRef, connections) + end. 
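A hypothetical call site for the new list_local_connections_of_protocol/1 helper; the helper function below, the protocol atom and the log line are illustrative only, since actual protocol names depend on which listeners are registered:

%% Function-level sketch; not part of the module above.
log_local_connection_count(Protocol) ->
    Pids = rabbit_networking:list_local_connections_of_protocol(Protocol),
    rabbit_log:info("~b local ~ts connection process(es)",
                    [length(Pids), Protocol]).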
+ -spec start_tcp_listener( listener_config(), integer()) -> 'ok' | {'error', term()}. @@ -447,6 +421,8 @@ active_listeners() -> -spec node_listeners(node()) -> [rabbit_types:listener()]. +node_listeners(Node) when node() == Node -> + ets:tab2list(?ETS_TABLE); node_listeners(Node) -> case rabbit_misc:rpc_call(Node, ets, tab2list, [?ETS_TABLE]) of {badrpc, _} -> @@ -478,19 +454,15 @@ register_connection(Pid) -> pg_local:join(rabbit_connections, Pid). unregister_connection(Pid) -> pg_local:leave(rabbit_connections, Pid). -spec connections() -> [rabbit_types:connection()]. - connections() -> Nodes = rabbit_nodes:list_running(), - rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_networking, connections_local, [], ?RPC_TIMEOUT). + rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_networking, local_connections, [], ?RPC_TIMEOUT). -spec local_connections() -> [rabbit_types:connection()]. -%% @doc Returns pids of AMQP 0-9-1 and AMQP 1.0 connections local to this node. local_connections() -> - connections_local(). - --spec connections_local() -> [rabbit_types:connection()]. -%% @deprecated Prefer {@link local_connections} -connections_local() -> pg_local:get_members(rabbit_connections). + Amqp091Pids = pg_local:get_members(rabbit_connections), + Amqp10Pids = rabbit_amqp1_0:list_local(), + Amqp10Pids ++ Amqp091Pids. -spec register_non_amqp_connection(pid()) -> ok. @@ -540,21 +512,16 @@ emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) -> emit_connection_info_local(Items, Ref, AggregatorPid) -> rabbit_control_misc:emitting_map_with_exit_handler( AggregatorPid, Ref, fun(Q) -> connection_info(Q, Items) end, - connections_local()). + local_connections()). -spec close_connection(pid(), string()) -> 'ok'. - close_connection(Pid, Explanation) -> - case lists:member(Pid, connections()) of - true -> - Res = rabbit_reader:shutdown(Pid, Explanation), - rabbit_log:info("Closing connection ~tp because ~tp", [Pid, Explanation]), - Res; - false -> - rabbit_log:warning("Asked to close connection ~tp (reason: ~tp) " - "but no running cluster node reported it as an active connection. Was it already closed? ", - [Pid, Explanation]), - ok + rabbit_log:info("Closing connection ~tp because ~tp", + [Pid, Explanation]), + try rabbit_reader:shutdown(Pid, Explanation) + catch exit:{Reason, _Location} -> + rabbit_log:warning("Could not close connection ~tp (reason: ~tp): ~p", + [Pid, Explanation, Reason]) end. -spec close_connections([pid()], string()) -> 'ok'. @@ -593,7 +560,7 @@ failed_to_recv_proxy_header(Ref, Error) -> end, rabbit_log:debug(Msg, [Error]), % The following call will clean up resources then exit - _ = ranch:handshake(Ref), + _ = catch ranch:handshake(Ref), exit({shutdown, failed_to_recv_proxy_header}). handshake(Ref, ProxyProtocolEnabled) -> @@ -605,20 +572,24 @@ handshake(Ref, ProxyProtocolEnabled) -> {error, protocol_error, Error} -> failed_to_recv_proxy_header(Ref, Error); {ok, ProxyInfo} -> - {ok, Sock} = ranch:handshake(Ref), - setup_socket(Sock), - {ok, {rabbit_proxy_socket, Sock, ProxyInfo}} + case catch ranch:handshake(Ref) of + {'EXIT', normal} -> + {error, handshake_failed}; + {ok, Sock} -> + ok = tune_buffer_size(Sock), + {ok, {rabbit_proxy_socket, Sock, ProxyInfo}} + end end; false -> - {ok, Sock} = ranch:handshake(Ref), - setup_socket(Sock), - {ok, Sock} + case catch ranch:handshake(Ref) of + {'EXIT', normal} -> + {error, handshake_failed}; + {ok, Sock} -> + ok = tune_buffer_size(Sock), + {ok, Sock} + end end. 
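The handshake clauses above rely on the classic catch Expr shape: when the peer closes the socket mid-handshake and ranch exits with reason normal, the whole expression evaluates to an {'EXIT', Reason} tuple instead of killing the acceptor, and that tuple is mapped to {error, handshake_failed}. A quick shell illustration of that semantics (values are illustrative):

1> catch exit(normal).
{'EXIT',normal}
2> catch {ok, fake_socket}.
{ok,fake_socket}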
-setup_socket(Sock) -> - ok = tune_buffer_size(Sock), - ok = file_handle_cache:obtain(). - tune_buffer_size(Sock) -> case tune_buffer_size1(Sock) of ok -> ok; diff --git a/deps/rabbit/src/rabbit_networking_store.erl b/deps/rabbit/src/rabbit_networking_store.erl index b7b96a61d98a..6c8dcf46aafe 100644 --- a/deps/rabbit/src/rabbit_networking_store.erl +++ b/deps/rabbit/src/rabbit_networking_store.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_networking_store). @@ -13,7 +13,7 @@ %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3, format_status/2]). + terminate/2, code_change/3]). -define(SERVER, ?MODULE). @@ -41,6 +41,3 @@ terminate(_Reason, _State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. - -format_status(_Opt, Status) -> - Status. diff --git a/deps/rabbit/src/rabbit_node_monitor.erl b/deps/rabbit/src/rabbit_node_monitor.erl index fb4b94ff236a..0c3fe24e95a8 100644 --- a/deps/rabbit/src/rabbit_node_monitor.erl +++ b/deps/rabbit/src/rabbit_node_monitor.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_node_monitor). @@ -14,6 +14,7 @@ cluster_status_filename/0, coordination_filename/0, stream_filename/0, quorum_filename/0, default_quorum_filename/0, + classic_filename/0, prepare_cluster_status_files/0, write_cluster_status/1, read_cluster_status/0, update_cluster_status/0, reset_cluster_status/0]). @@ -34,6 +35,7 @@ -define(NODE_REPLY_TIMEOUT, 5000). -define(RABBIT_UP_RPC_TIMEOUT, 2000). -define(RABBIT_DOWN_PING_INTERVAL, 1000). +-define(NODE_DISCONNECTION_TIMEOUT, 1000). -record(state, {monitors, partitions, subscribers, down_ping_timer, keepalive_timer, autoheal, guid, node_guids}). @@ -82,6 +84,9 @@ quorum_filename() -> default_quorum_filename() -> filename:join(rabbit:data_dir(), "quorum"). +classic_filename() -> + filename:join(rabbit:data_dir(), "msg_stores"). + -spec prepare_cluster_status_files() -> 'ok' | no_return(). prepare_cluster_status_files() -> @@ -167,7 +172,7 @@ notify_node_up() -> notify_joined_cluster() -> NewMember = node(), - Nodes = rabbit_nodes:list_running() -- [NewMember], + Nodes = alive_rabbit_nodes(rabbit_nodes:list_consistent_members()) -- [NewMember], gen_server:abcast(Nodes, ?SERVER, {joined_cluster, node(), rabbit_db_cluster:node_type()}), @@ -176,7 +181,7 @@ notify_joined_cluster() -> -spec notify_left_cluster(node()) -> 'ok'. notify_left_cluster(Node) -> - Nodes = rabbit_nodes:list_running(), + Nodes = alive_rabbit_nodes(), gen_server:abcast(Nodes, ?SERVER, {left_cluster, Node}), ok. @@ -384,17 +389,27 @@ init([]) -> %% happen. process_flag(trap_exit, true), _ = net_kernel:monitor_nodes(true, [nodedown_reason]), - {ok, _} = mnesia:subscribe(system), - %% If the node has been restarted, Mnesia can trigger a system notification - %% before the monitor subscribes to receive them. 
To avoid autoheal blocking due to - %% the inconsistent database event never arriving, we being monitoring all running - %% nodes as early as possible. The rest of the monitoring ops will only be triggered - %% when notifications arrive. - Nodes = possibly_partitioned_nodes(), - startup_log(Nodes), - Monitors = lists:foldl(fun(Node, Monitors0) -> - pmon:monitor({rabbit, Node}, Monitors0) - end, pmon:new(), Nodes), + Monitors = case rabbit_khepri:is_enabled() of + true -> + startup_log(), + pmon:new(); + false -> + {ok, _} = mnesia:subscribe(system), + + %% If the node has been restarted, Mnesia can trigger a + %% system notification before the monitor subscribes to + %% receive them. To avoid autoheal blocking due to the + %% inconsistent database event never arriving, we being + %% monitoring all running nodes as early as possible. + %% The rest of the monitoring ops will only be + %% triggered when notifications arrive. + Nodes = possibly_partitioned_nodes(), + startup_log(Nodes), + lists:foldl( + fun(Node, Monitors0) -> + pmon:monitor({rabbit, Node}, Monitors0) + end, pmon:new(), Nodes) + end, {ok, ensure_keepalive_timer(#state{monitors = Monitors, subscribers = pmon:new(), partitions = [], @@ -557,13 +572,18 @@ handle_cast({partial_partition_disconnect, Other}, State) -> handle_cast({node_up, Node, NodeType}, State = #state{monitors = Monitors}) -> rabbit_log:info("rabbit on node ~tp up", [Node]), - {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), - write_cluster_status({add_node(Node, AllNodes), - case NodeType of - disc -> add_node(Node, DiscNodes); - ram -> DiscNodes - end, - add_node(Node, RunningNodes)}), + case rabbit_khepri:is_enabled() of + true -> + ok; + false -> + {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), + write_cluster_status({add_node(Node, AllNodes), + case NodeType of + disc -> add_node(Node, DiscNodes); + ram -> DiscNodes + end, + add_node(Node, RunningNodes)}) + end, ok = handle_live_rabbit(Node), Monitors1 = case pmon:is_monitored({rabbit, Node}, Monitors) of true -> @@ -574,21 +594,33 @@ handle_cast({node_up, Node, NodeType}, {noreply, maybe_autoheal(State#state{monitors = Monitors1})}; handle_cast({joined_cluster, Node, NodeType}, State) -> - {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), - write_cluster_status({add_node(Node, AllNodes), - case NodeType of - disc -> add_node(Node, DiscNodes); - ram -> DiscNodes - end, - RunningNodes}), + case rabbit_khepri:is_enabled() of + true -> + ok; + false -> + {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), + write_cluster_status({add_node(Node, AllNodes), + case NodeType of + disc -> add_node(Node, DiscNodes); + ram -> DiscNodes + end, + RunningNodes}) + end, rabbit_log:debug("Node '~tp' has joined the cluster", [Node]), rabbit_event:notify(node_added, [{node, Node}]), {noreply, State}; handle_cast({left_cluster, Node}, State) -> - {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), - write_cluster_status({del_node(Node, AllNodes), del_node(Node, DiscNodes), - del_node(Node, RunningNodes)}), + case rabbit_khepri:is_enabled() of + true -> + ok; + false -> + {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), + write_cluster_status( + {del_node(Node, AllNodes), del_node(Node, DiscNodes), + del_node(Node, RunningNodes)}) + end, + rabbit_event:notify(node_deleted, [{node, Node}]), {noreply, State}; handle_cast({subscribe, Pid}, State = #state{subscribers = Subscribers}) -> @@ -603,8 +635,14 @@ handle_cast(_Msg, State) -> handle_info({'DOWN', _MRef, 
process, {rabbit, Node}, _Reason}, State = #state{monitors = Monitors, subscribers = Subscribers}) -> rabbit_log:info("rabbit on node ~tp down", [Node]), - {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), - write_cluster_status({AllNodes, DiscNodes, del_node(Node, RunningNodes)}), + case rabbit_khepri:is_enabled() of + true -> + ok; + false -> + {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(), + write_cluster_status( + {AllNodes, DiscNodes, del_node(Node, RunningNodes)}) + end, _ = [P ! {node_down, Node} || P <- pmon:monitored(Subscribers)], {noreply, handle_dead_rabbit( Node, @@ -614,24 +652,13 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason}, State = #state{subscribers = Subscribers}) -> {noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}}; -handle_info({nodedown, Node, Info}, State = #state{guid = MyGUID, - node_guids = GUIDs}) -> +handle_info({nodedown, Node, Info}, State) -> rabbit_log:info("node ~tp down: ~tp", [Node, proplists:get_value(nodedown_reason, Info)]), - Check = fun (N, CheckGUID, DownGUID) -> - cast(N, {check_partial_partition, - Node, node(), DownGUID, CheckGUID, MyGUID}) - end, - _ = case maps:find(Node, GUIDs) of - {ok, DownGUID} -> Alive = rabbit_mnesia:cluster_nodes(running) - -- [node(), Node], - [case maps:find(N, GUIDs) of - {ok, CheckGUID} -> Check(N, CheckGUID, DownGUID); - error -> ok - end || N <- Alive]; - error -> ok - end, - {noreply, handle_dead_node(Node, State)}; + case rabbit_khepri:is_enabled() of + true -> {noreply, State}; + false -> handle_nodedown_using_mnesia(Node, State) + end; handle_info({nodeup, Node, _Info}, State) -> rabbit_log:info("node ~tp up", [Node]), @@ -703,6 +730,23 @@ code_change(_OldVsn, State, _Extra) -> %% Functions that call the module specific hooks when nodes go up/down %%---------------------------------------------------------------------------- +handle_nodedown_using_mnesia(Node, State = #state{guid = MyGUID, + node_guids = GUIDs}) -> + Check = fun (N, CheckGUID, DownGUID) -> + cast(N, {check_partial_partition, + Node, node(), DownGUID, CheckGUID, MyGUID}) + end, + _ = case maps:find(Node, GUIDs) of + {ok, DownGUID} -> Alive = rabbit_mnesia:cluster_nodes(running) + -- [node(), Node], + [case maps:find(N, GUIDs) of + {ok, CheckGUID} -> Check(N, CheckGUID, DownGUID); + error -> ok + end || N <- Alive]; + error -> ok + end, + {noreply, handle_dead_node(Node, State)}. + handle_dead_node(Node, State = #state{autoheal = Autoheal}) -> %% In general in rabbit_node_monitor we care about whether the %% rabbit application is up rather than the node; we do this so @@ -807,15 +851,22 @@ wait_for_cluster_recovery(Condition) -> wait_for_cluster_recovery(Condition) end. -handle_dead_rabbit(Node, State = #state{partitions = Partitions, - autoheal = Autoheal}) -> +handle_dead_rabbit(Node, State) -> %% TODO: This may turn out to be a performance hog when there are %% lots of nodes. We really only need to execute some of these %% statements on *one* node, rather than all of them. ok = rabbit_amqqueue:on_node_down(Node), ok = rabbit_alarm:on_node_down(Node), - ok = rabbit_mnesia:on_node_down(Node), ok = rabbit_quorum_queue_periodic_membership_reconciliation:on_node_down(Node), + State1 = case rabbit_khepri:is_enabled() of + true -> State; + false -> on_node_down_using_mnesia(Node, State) + end, + ensure_ping_timer(State1). 
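handle_dead_rabbit/2 and its siblings above all branch the same way: Mnesia-era cluster-status bookkeeping only runs when Khepri is not the metadata store. A minimal sketch of that dispatch pattern (the module, helper name and fun are hypothetical):

-module(metadata_store_dispatch_sketch).
-export([maybe_run_mnesia_bookkeeping/1]).

maybe_run_mnesia_bookkeeping(Fun) when is_function(Fun, 0) ->
    case rabbit_khepri:is_enabled() of
        true  -> ok;    %% Khepri tracks cluster membership itself
        false -> Fun()  %% legacy cluster-status file updates
    end.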
+ +on_node_down_using_mnesia(Node, State = #state{partitions = Partitions, + autoheal = Autoheal}) -> + ok = rabbit_mnesia:on_node_down(Node), %% If we have been partitioned, and we are now in the only remaining %% partition, we no longer care about partitions - forget them. Note %% that we do not attempt to deal with individual (other) partitions @@ -827,9 +878,8 @@ handle_dead_rabbit(Node, State = #state{partitions = Partitions, [] -> []; _ -> Partitions end, - ensure_ping_timer( - State#state{partitions = Partitions1, - autoheal = rabbit_autoheal:rabbit_down(Node, Autoheal)}). + State#state{partitions = Partitions1, + autoheal = rabbit_autoheal:rabbit_down(Node, Autoheal)}. ensure_ping_timer(State) -> rabbit_misc:ensure_timer( @@ -844,13 +894,26 @@ ensure_keepalive_timer(State) -> handle_live_rabbit(Node) -> ok = rabbit_amqqueue:on_node_up(Node), ok = rabbit_alarm:on_node_up(Node), - ok = rabbit_mnesia:on_node_up(Node), + case rabbit_khepri:is_enabled() of + true -> ok; + false -> on_node_up_using_mnesia(Node) + end, + ok = rabbit_vhosts:on_node_up(Node), ok = rabbit_quorum_queue_periodic_membership_reconciliation:on_node_up(Node). -maybe_autoheal(State = #state{partitions = []}) -> +on_node_up_using_mnesia(Node) -> + ok = rabbit_mnesia:on_node_up(Node). + +maybe_autoheal(State) -> + case rabbit_khepri:is_enabled() of + true -> State; + false -> maybe_autoheal1(State) + end. + +maybe_autoheal1(State = #state{partitions = []}) -> State; -maybe_autoheal(State = #state{autoheal = AState}) -> +maybe_autoheal1(State = #state{autoheal = AState}) -> case all_nodes_up() of true -> State#state{autoheal = rabbit_autoheal:maybe_start(AState)}; false -> State @@ -894,13 +957,23 @@ upgrade_to_full_partition(Proxy) -> %% detect a very short partition. So we want to force a slightly %% longer disconnect. Unfortunately we don't have a way to blacklist %% individual nodes; the best we can do is turn off auto-connect -%% altogether. +%% altogether. If Node is not already part of the connected nodes, then +%% there's no need to repeat disabling dist_auto_connect and executing +%% disconnect_node/1, which can result in application_controller +%% timeouts and crash node monitor process. This also implies that +%% the already disconnected node was already processed. In an +%% unstable network, if we get consecutive 'up' and 'down' messages, +%% then we expect disconnect_node/1 to be executed. disconnect(Node) -> - application:set_env(kernel, dist_auto_connect, never), - erlang:disconnect_node(Node), - timer:sleep(1000), - application:unset_env(kernel, dist_auto_connect), - ok. + case lists:member(Node, nodes()) of + true -> + application:set_env(kernel, dist_auto_connect, never), + erlang:disconnect_node(Node), + timer:sleep(?NODE_DISCONNECTION_TIMEOUT), + application:unset_env(kernel, dist_auto_connect); + false -> + ok + end. %%-------------------------------------------------------------------- @@ -953,11 +1026,13 @@ alive_nodes() -> rabbit_nodes:list_reachable(). alive_nodes(Nodes) -> rabbit_nodes:filter_reachable(Nodes). -alive_rabbit_nodes() -> rabbit_nodes:list_running(). +alive_rabbit_nodes() -> + alive_rabbit_nodes(rabbit_nodes:list_members()). -spec alive_rabbit_nodes([node()]) -> [node()]. alive_rabbit_nodes(Nodes) -> + ok = ping(Nodes), rabbit_nodes:filter_running(Nodes). %% This one is allowed to connect! @@ -965,14 +1040,24 @@ alive_rabbit_nodes(Nodes) -> -spec ping_all() -> 'ok'. ping_all() -> - [net_adm:ping(N) || N <- rabbit_nodes:list_members()], + ping(rabbit_nodes:list_members()). 
+ +ping(Nodes) -> + _ = [net_adm:ping(N) || N <- Nodes], ok. possibly_partitioned_nodes() -> alive_rabbit_nodes() -- rabbit_mnesia:cluster_nodes(running). -startup_log([]) -> - rabbit_log:info("Starting rabbit_node_monitor", []); +startup_log() -> + rabbit_log:info("Starting rabbit_node_monitor (partition handling strategy unapplicable with Khepri)", []). + startup_log(Nodes) -> - rabbit_log:info("Starting rabbit_node_monitor, might be partitioned from ~tp", - [Nodes]). + {ok, M} = application:get_env(rabbit, cluster_partition_handling), + startup_log(Nodes, M). + +startup_log([], PartitionHandling) -> + rabbit_log:info("Starting rabbit_node_monitor (in ~tp mode)", [PartitionHandling]); +startup_log(Nodes, PartitionHandling) -> + rabbit_log:info("Starting rabbit_node_monitor (in ~tp mode), might be partitioned from ~tp", + [PartitionHandling, Nodes]). diff --git a/deps/rabbit/src/rabbit_nodes.erl b/deps/rabbit/src/rabbit_nodes.erl index bfc5b9521659..03c56afb173c 100644 --- a/deps/rabbit/src/rabbit_nodes.erl +++ b/deps/rabbit/src/rabbit_nodes.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_nodes). @@ -15,7 +15,7 @@ is_running/2, is_process_running/2, cluster_name/0, set_cluster_name/1, set_cluster_name/2, ensure_epmd/0, all_running/0, - is_member/1, list_members/0, + is_member/1, list_members/0, list_consistent_members/0, filter_members/1, is_reachable/1, list_reachable/0, list_unreachable/0, filter_reachable/1, filter_unreachable/1, @@ -35,7 +35,6 @@ -deprecated({all, 0, "Use rabbit_nodes:list_members/0 instead"}). -deprecated({all_running, 0, "Use rabbit_nodes:list_running/0 instead"}). --include_lib("kernel/include/inet.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -define(SAMPLING_INTERVAL, 1000). @@ -183,6 +182,14 @@ is_member(Node) when is_atom(Node) -> list_members() -> rabbit_db_cluster:members(). +-spec list_consistent_members() -> Nodes when + Nodes :: [node()]. +%% @doc Returns the list of nodes in the cluster as reported by the leader. +%% + +list_consistent_members() -> + rabbit_db_cluster:consistent_members(). + -spec filter_members(Nodes) -> Nodes when Nodes :: [node()]. %% @doc Filters the given list of nodes to only select those belonging to the @@ -366,9 +373,10 @@ filter_not_running(Nodes) -> do_filter_running(Members) -> %% All clustered members where `rabbit' is running, regardless if they are %% under maintenance or not. + ReachableMembers = do_filter_reachable(Members), Rets = erpc:multicall( - Members, rabbit, is_running, [], ?FILTER_RPC_TIMEOUT), - RetPerMember = lists:zip(Members, Rets), + ReachableMembers, rabbit, is_running, [], ?FILTER_RPC_TIMEOUT), + RetPerMember = lists:zip(ReachableMembers, Rets), lists:filtermap( fun ({Member, {ok, true}}) -> diff --git a/deps/rabbit/src/rabbit_observer_cli.erl b/deps/rabbit/src/rabbit_observer_cli.erl index dde43155d23e..f5c77d0f7150 100644 --- a/deps/rabbit/src/rabbit_observer_cli.erl +++ b/deps/rabbit/src/rabbit_observer_cli.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_observer_cli). diff --git a/deps/rabbit/src/rabbit_observer_cli_classic_queues.erl b/deps/rabbit/src/rabbit_observer_cli_classic_queues.erl index 505395f15adb..23f8c9063554 100644 --- a/deps/rabbit/src/rabbit_observer_cli_classic_queues.erl +++ b/deps/rabbit/src/rabbit_observer_cli_classic_queues.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_observer_cli_classic_queues). diff --git a/deps/rabbit/src/rabbit_observer_cli_quorum_queues.erl b/deps/rabbit/src/rabbit_observer_cli_quorum_queues.erl index cd77ef240183..3577644ca964 100644 --- a/deps/rabbit/src/rabbit_observer_cli_quorum_queues.erl +++ b/deps/rabbit/src/rabbit_observer_cli_quorum_queues.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_observer_cli_quorum_queues). @@ -110,9 +110,9 @@ sheet_header() -> #{title => "", width => 6, shortcut => "LW"}, #{title => "", width => 5, shortcut => "CL"} ]. - + sheet_body(PrevState) -> - RaStates = ets:tab2list(ra_state), + {_, RaStates} = rabbit_quorum_queue:all_replica_states(), Body = [begin #resource{name = Name, virtual_host = Vhost} = R = amqqueue:get_name(Q), case rabbit_amqqueue:pid_of(Q) of @@ -134,15 +134,17 @@ sheet_body(PrevState) -> [ Pid, QName, - case proplists:get_value(InternalName, RaStates) of + case maps:get(InternalName, RaStates, undefined) of leader -> "L"; follower -> "F"; + promotable -> "f"; %% temporary non-voter + non_voter -> "-"; %% permanent non-voter _ -> "?" end, format_int(proplists:get_value(memory, ProcInfo)), format_int(proplists:get_value(message_queue_len, ProcInfo)), format_int(maps:get(commands, QQCounters)), - case proplists:get_value(InternalName, RaStates) of + case maps:get(InternalName, RaStates, undefined) of leader -> format_int(maps:get(snapshots_written, QQCounters)); follower -> format_int(maps:get(snapshot_installed, QQCounters)); _ -> "?" diff --git a/deps/rabbit/src/rabbit_osiris_metrics.erl b/deps/rabbit/src/rabbit_osiris_metrics.erl index 928da30bcb0f..a29ea4c65754 100644 --- a/deps/rabbit/src/rabbit_osiris_metrics.erl +++ b/deps/rabbit/src/rabbit_osiris_metrics.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_osiris_metrics). @@ -27,7 +27,8 @@ members, memory, readers, - consumers + consumers, + segments ]). -record(state, {timeout :: non_neg_integer()}). 
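In the observer_cli quorum-queue sheet above, the per-queue Ra state now comes from the map returned (as the second tuple element) by rabbit_quorum_queue:all_replica_states/0 rather than from the ra_state ETS table, and the two non-voter states get their own shortcuts. An illustrative function-level sketch mirroring that lookup (the helper name is hypothetical):

state_shortcut(RaStates, Name) ->
    case maps:get(Name, RaStates, undefined) of
        leader     -> "L";
        follower   -> "F";
        promotable -> "f";  %% temporary non-voter
        non_voter  -> "-";  %% permanent non-voter
        _          -> "?"
    end.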
diff --git a/deps/rabbit/src/rabbit_parameter_validation.erl b/deps/rabbit/src/rabbit_parameter_validation.erl index 504e8f7a2573..9b41fe35d315 100644 --- a/deps/rabbit/src/rabbit_parameter_validation.erl +++ b/deps/rabbit/src/rabbit_parameter_validation.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_parameter_validation). diff --git a/deps/rabbit/src/rabbit_peer_discovery.erl b/deps/rabbit/src/rabbit_peer_discovery.erl index a7cf9b6cf252..a55e9a5e38e3 100644 --- a/deps/rabbit/src/rabbit_peer_discovery.erl +++ b/deps/rabbit/src/rabbit_peer_discovery.erl @@ -2,12 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_peer_discovery). -include_lib("kernel/include/logger.hrl"). +-include_lib("stdlib/include/assert.hrl"). -include_lib("rabbit_common/include/logging.hrl"). @@ -15,21 +16,30 @@ %% API %% --export([maybe_init/0, maybe_create_cluster/1, discover_cluster_nodes/0, - backend/0, node_type/0, - normalize/1, format_discovered_nodes/1, log_configured_backend/0, - register/0, unregister/0, maybe_register/0, maybe_unregister/0, - lock/0, unlock/1, discovery_retries/0]). --export([append_node_prefix/1, node_prefix/0, locking_retry_timeout/0, - lock_acquisition_failure_mode/0]). +-export([maybe_init/0, + sync_desired_cluster/0, + maybe_register/0, + maybe_unregister/0, + discover_cluster_nodes/0]). +-export([backend/0, + node_type/0, + normalize/1, + append_node_prefix/1, + node_prefix/0]). +-export([do_query_node_props/1, + group_leader_proxy/2]). -ifdef(TEST). --export([maybe_create_cluster/3]). +-export([query_node_props/1, + sort_nodes_and_props/1, + join_selected_node/3]). -endif. --type create_cluster_callback() :: fun((node(), - rabbit_db_cluster:node_type()) - -> ok). +-type backend() :: atom(). +-type node_and_props() :: {node(), + [node()], + non_neg_integer(), + boolean() | undefined}. -define(DEFAULT_BACKEND, rabbit_peer_discovery_classic_config). @@ -41,43 +51,36 @@ -define(DEFAULT_PREFIX, "rabbit"). %% default discovery retries and interval. --define(DEFAULT_DISCOVERY_RETRY_COUNT, 10). --define(DEFAULT_DISCOVERY_RETRY_INTERVAL_MS, 500). +-define(DEFAULT_DISCOVERY_RETRY_COUNT, 30). +-define(DEFAULT_DISCOVERY_RETRY_INTERVAL_MS, 1000). -define(NODENAME_PART_SEPARATOR, "@"). --spec backend() -> atom(). +-define(PT_PEER_DISC_BACKEND, {?MODULE, backend}). -backend() -> - case application:get_env(rabbit, cluster_formation) of - {ok, Proplist} -> - proplists:get_value(peer_discovery_backend, Proplist, ?DEFAULT_BACKEND); - undefined -> - ?DEFAULT_BACKEND - end. +-compile({no_auto_import, [register/1, unregister/1]}). +-spec backend() -> backend(). +backend() -> + case application:get_env(rabbit, cluster_formation) of + {ok, Proplist} -> + Backend = proplists:get_value( + peer_discovery_backend, Proplist, ?DEFAULT_BACKEND), + ?assert(is_atom(Backend)), + Backend; + undefined -> + ?DEFAULT_BACKEND + end. 
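backend/0 above reads the peer discovery backend from the cluster_formation section of the rabbit application environment. For readers unfamiliar with that setting, an advanced.config along these lines (illustrative values, not part of this patch) is what makes backend/0 return the classic config backend:

    %% Illustrative advanced.config: selects the classic config backend and a
    %% static node list. backend/0 would return
    %% rabbit_peer_discovery_classic_config with this configuration.
    [
     {rabbit,
      [{cluster_formation,
        [{peer_discovery_backend, rabbit_peer_discovery_classic_config}]},
       {cluster_nodes, {['rabbit@node1', 'rabbit@node2'], disc}}]}
    ].
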
-spec node_type() -> rabbit_types:node_type(). node_type() -> - case application:get_env(rabbit, cluster_formation) of - {ok, Proplist} -> - proplists:get_value(node_type, Proplist, ?DEFAULT_NODE_TYPE); - undefined -> - ?DEFAULT_NODE_TYPE - end. - --spec locking_retry_timeout() -> {Retries :: integer(), Timeout :: integer()}. - -locking_retry_timeout() -> case application:get_env(rabbit, cluster_formation) of {ok, Proplist} -> - Retries = proplists:get_value(lock_retry_limit, Proplist, 10), - Timeout = proplists:get_value(lock_retry_timeout, Proplist, 30000), - {Retries, Timeout}; + proplists:get_value(node_type, Proplist, ?DEFAULT_NODE_TYPE); undefined -> - {10, 30000} + ?DEFAULT_NODE_TYPE end. -spec lock_acquisition_failure_mode() -> ignore | fail. @@ -90,283 +93,957 @@ lock_acquisition_failure_mode() -> fail end. --spec log_configured_backend() -> ok. - -log_configured_backend() -> - rabbit_log:info("Configured peer discovery backend: ~ts", [backend()]). +-spec maybe_init() -> ok. +%% @doc Initializes the peer discovery subsystem. maybe_init() -> Backend = backend(), + ?LOG_INFO( + "Peer discovery: configured backend: ~tp", + [Backend], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + + %% We cache the configured backend as well. This is used by + %% `sync_desired_cluster/0' and `maybe_unregister/0', to ensure that the + %% same backend is used to create/sync the cluster and (un)register the + %% node, even if the configuration changed in between. + persistent_term:put(?PT_PEER_DISC_BACKEND, Backend), + _ = code:ensure_loaded(Backend), case erlang:function_exported(Backend, init, 0) of true -> - rabbit_log:debug("Peer discovery backend supports initialisation"), + ?LOG_DEBUG( + "Peer discovery: backend supports initialisation", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), case Backend:init() of ok -> - rabbit_log:debug("Peer discovery backend initialisation succeeded"), + ?LOG_DEBUG( + "Peer discovery: backend initialisation succeeded", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), ok; - {error, Error} -> - rabbit_log:warning("Peer discovery backend initialisation failed: ~tp.", [Error]), + {error, _Reason} = Error -> + ?LOG_WARNING( + "Peer discovery: backend initialisation failed: ~tp.", + [Error], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), ok end; false -> - rabbit_log:debug("Peer discovery backend does not support initialisation"), + ?LOG_DEBUG( + "Peer discovery: backend does not support initialisation", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), ok end. -maybe_create_cluster(CreateClusterCallback) -> - {Retries, Timeout} = locking_retry_timeout(), - maybe_create_cluster(Retries, Timeout, CreateClusterCallback). - -maybe_create_cluster(0, _, CreateClusterCallback) - when is_function(CreateClusterCallback, 2) -> - case lock_acquisition_failure_mode() of - ignore -> - ?LOG_WARNING( - "Peer discovery: Could not acquire a peer discovery lock, " - "out of retries", [], +-spec sync_desired_cluster() -> ok. +%% @doc Creates or synchronizes the cluster membership of this node based on a +%% peer discovery backend. +%% +%% If the peer discovery backend finds nodes that this node should cluster +%% with, this function calls {@link rabbit_db_cluster:join/2} to join one of +%% these nodes. +%% +%% This function always returns `ok', regardless if this node joined a cluster +%% or it should boot as a standalone node. +%% +%% Currently, it only expands the cluster. It won't take care of kicking +%% members that are not listed by the backend. 
+ +sync_desired_cluster() -> + Backend = persistent_term:get(?PT_PEER_DISC_BACKEND), + + %% We handle retries at the top level: steps are followed sequentially and + %% if one of them fails, we retry the whole process. + {Retries, RetryDelay} = discovery_retries(), + + sync_desired_cluster(Backend, Retries, RetryDelay). + +-spec sync_desired_cluster(Backend, RetriesLeft, RetryDelay) -> ok when + Backend :: backend(), + RetriesLeft :: non_neg_integer(), + RetryDelay :: non_neg_integer(). +%% @private + +sync_desired_cluster(Backend, RetriesLeft, RetryDelay) -> + %% The peer discovery process follows the following steps: + %% 1. It uses the configured backend to query the nodes that should form + %% a cluster. It takes care of checking the validity of the returned + %% values: the list of nodes is made of atoms and the node type is + %% valid. + %% 2. It queries some properties for each node in the list. This is used + %% to filter out unreachable nodes and to sort the final list. The + %% sorting is important because it determines which node it will try + %% to join. + %% 3. It joins the selected node using a regular `join_cluster' if the + %% selected node's DB layer is ready. This step is protected by a + %% lock if the backend supports this mechanism. However, if it is not + %% ready, we retry the whole process. + case discover_cluster_nodes(Backend) of + {ok, {[ThisNode], _NodeType}} when ThisNode =:= node() -> + ?LOG_DEBUG( + "Peer discovery: no nodes to cluster with according to " + "backend; proceeding as a standalone node", #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - run_peer_discovery(CreateClusterCallback), - maybe_register(); - fail -> - exit(cannot_acquire_startup_lock) - end; -maybe_create_cluster(Retries, Timeout, CreateClusterCallback) - when is_function(CreateClusterCallback, 2) -> - LockResult = lock(), + ok; + {ok, {DiscoveredNodes, NodeType}} -> + NodeAlreadySelected = is_atom(DiscoveredNodes), + NodesAndProps = case NodeAlreadySelected of + true -> + ?LOG_DEBUG( + "Peer discovery: node '~ts' already " + "selected by backend", + [DiscoveredNodes], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + query_node_props([DiscoveredNodes]); + false -> + query_node_props(DiscoveredNodes) + end, + CanUse = ( + NodeAlreadySelected orelse + can_use_discovered_nodes(DiscoveredNodes, NodesAndProps)), + case CanUse of + true -> + case select_node_to_join(NodesAndProps) of + SelectedNode when SelectedNode =/= false -> + Ret = join_selected_node( + Backend, SelectedNode, NodeType), + case Ret of + ok -> + %% TODO: Check if there are multiple + %% "concurrent" clusters in + %% `NodesAndProps' instead of one, or + %% standalone ready nodes that joined no + %% one. + %% + %% TODO: After some delay, re-evaluate + %% peer discovery, in case there are again + %% multiple clusters or standalone ready + %% nodes. + %% + %% TODO: Remove members which are not in + %% the list returned by the backend. + ok; + {error, _Reason} -> + retry_sync_desired_cluster( + Backend, RetriesLeft, RetryDelay) + end; + false -> + retry_sync_desired_cluster( + Backend, RetriesLeft, RetryDelay) + end; + false -> + retry_sync_desired_cluster( + Backend, RetriesLeft, RetryDelay) + end; + {error, _Reason} -> + retry_sync_desired_cluster(Backend, RetriesLeft, RetryDelay) + end. + +-spec retry_sync_desired_cluster(Backend, RetriesLeft, RetryDelay) -> ok when + Backend :: backend(), + RetriesLeft :: non_neg_integer(), + RetryDelay :: non_neg_integer(). 
+%% @private + +retry_sync_desired_cluster(Backend, RetriesLeft, RetryDelay) + when RetriesLeft > 0 -> + RetriesLeft1 = RetriesLeft - 1, ?LOG_DEBUG( - "Peer discovery: rabbit_peer_discovery:lock/0 returned ~tp", - [LockResult], + "Peer discovery: retrying to create/sync cluster in ~b ms " + "(~b attempts left)", + [RetryDelay, RetriesLeft1], #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - case LockResult of - not_supported -> - run_peer_discovery(CreateClusterCallback), - maybe_register(); - {ok, Data} -> + timer:sleep(RetryDelay), + sync_desired_cluster(Backend, RetriesLeft1, RetryDelay); +retry_sync_desired_cluster(_Backend, 0, _RetryDelay) -> + ?LOG_ERROR( + "Peer discovery: could not discover and join another node; " + "proceeding as a standalone node", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + ok. + +-spec discover_cluster_nodes() -> {ok, Discovery} when + Discovery :: {DiscoveredNodes | SelectedNode, NodeType}, + DiscoveredNodes :: [node()], + SelectedNode :: node(), + NodeType :: rabbit_types:node_type(). +%% @doc Queries the peer discovery backend to discover nodes. +%% +%% This is used by the CLI. + +discover_cluster_nodes() -> + Backend = persistent_term:get(?PT_PEER_DISC_BACKEND, backend()), + discover_cluster_nodes(Backend). + +-spec discover_cluster_nodes(Backend) -> Ret when + Backend :: backend(), + Ret :: {ok, Discovery} | {error, Reason}, + Discovery :: {DiscoveredNodes | SelectedNode, NodeType}, + DiscoveredNodes :: [node()], + SelectedNode :: node(), + NodeType :: rabbit_types:node_type(), + Reason :: any(). +%% @private + +discover_cluster_nodes(Backend) -> + %% The returned list of nodes and the node type are only sanity-checked by + %% this function. In other words, the list contains atoms and the node + %% type is valid. Nodes availability and inter-node compatibility are + %% taken care of later. + Ret = Backend:list_nodes(), + ?LOG_DEBUG( + "Peer discovery: backend returned the following configuration:~n" + " ~tp", + [Ret], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + case normalize(Ret) of + {ok, {DiscoveredNodes, NodeType} = Discovery} -> + check_discovered_nodes_list_validity(DiscoveredNodes, NodeType), + {ok, Discovery}; + {error, _} = Error -> + ?LOG_ERROR( + "Peer discovery: failed to query the list of nodes from the " + "backend: ~0tp", + [Error], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + Error + end. + +-spec check_discovered_nodes_list_validity(DiscoveredNodes, NodeType) -> + Ret when + DiscoveredNodes :: [node()] | node(), + NodeType :: rabbit_types:node_type(), + Ret :: ok. +%% @private + +check_discovered_nodes_list_validity(DiscoveredNodes, NodeType) + when is_list(DiscoveredNodes) andalso + NodeType =:= disc orelse NodeType =:= disk orelse NodeType =:= ram -> + BadNodenames = lists:filter( + fun(Nodename) -> not is_atom(Nodename) end, + DiscoveredNodes), + case BadNodenames of + [] -> ok; + _ -> e({invalid_cluster_node_names, BadNodenames}) + end; +check_discovered_nodes_list_validity(SelectedNode, NodeType) + when NodeType =:= disc orelse NodeType =:= disk orelse NodeType =:= ram -> + case is_atom(SelectedNode) of + true -> ok; + false -> e({invalid_cluster_node_names, SelectedNode}) + end; +check_discovered_nodes_list_validity(DiscoveredNodes, BadNodeType) + when is_list(DiscoveredNodes) -> + e({invalid_cluster_node_type, BadNodeType}). + +-spec query_node_props(Nodes) -> NodesAndProps when + Nodes :: [node()], + NodesAndProps :: [node_and_props()]. +%% @doc Queries properties for each node in `Nodes' and sorts the list using +%% these properties. 
+%% +%% The following properties are queried: +%% <ul> +%% <li>the cluster membership of the node, i.e. the list of nodes it is +%% clustered with, including itself</li> +%% <li>the node's Erlang VM start time</li> +%% </ul> +%%
+%% If a node can't be queried because it is unavailable, it is excluded from +%% the returned list. +%% +%% These properties are then used to sort the list of nodes according to the +%% following criterias: +%% <ol> +%% <li>Nodes are sorted by cluster size, from the bigger to the smaller.</li> +%% <li>For nodes with the same cluster size, nodes are sorted by start time, +%% from the oldest node to the youngest.</li> +%% <li>For nodes with the same cluster size and start time, nodes are sorted by +%% names, alphabetically.</li> +%% </ol> +%%
+%% The goal is that every nodes returned by the backend will select the same +%% node to join (the first in the list). The cluster size criteria is here to +%% make sure the node joins the cluster that is being expanded instead of +%% another standalone node. The start time is used because it's a better +%% criteria to sort nodes in a deterministic way than their name in case nodes +%% start in an arbitrary number and the first node in alphabetical order +%% becomes available later. +%%
+%% An example of what we want to avoid is e.g. node 4 joining node 1, then node +%% 1 joining node 2. Indeed, now that peer discovery uses {@link +%% rabbit_db_cluster:join/2} instead of its own code path, there is the risk +%% that node 1 kicks node 4 out of the cluster by joining node 2 because +%% `join_cluster' includes a reset. +%% +%% @private +
+query_node_props(Nodes) when Nodes =/= [] -> + {Prefix, Suffix} = rabbit_nodes_common:parts(node()), + PeerName = peer:random_name(Prefix), + %% We go through a temporary hidden node to query all other discovered + %% peers properties, instead of querying them directly. + %% + %% The reason is that we don't want that Erlang automatically connect all + %% nodes together as a side effect (to form the full mesh network by + %% default). If we let Erlang do that, we may interfere with the Feature + %% flags controller which is globally registered when it performs an + %% operation. If all nodes become connected, it's possible two or more + %% globally registered controllers end up connected before they are ready + %% to be clustered, and thus in the same "namespace". `global' will kill + %% all but one of them. + %% + %% By using a temporary intermediate hidden node, we ask Erlang not to + %% connect everyone automatically. 
+ Context = rabbit_prelaunch:get_context(), + VMArgs0 = ["-hidden"], + VMArgs1 = case init:get_argument(boot) of + {ok, [[BootFileArg]]} -> + ["-boot", BootFileArg | VMArgs0]; + _ -> + %% Note: start_clean is the default boot file + %% defined in rabbitmq-defaults / CLEAN_BOOT_FILE + ["-boot", "start_clean" | VMArgs0] + end, + VMArgs2 = case Context of + #{erlang_cookie := ErlangCookie, + var_origins := #{erlang_cookie := environment}} -> + ["-setcookie", atom_to_list(ErlangCookie) | VMArgs1]; + _ -> + case init:get_argument(setcookie) of + {ok, [[SetCookieArg]]} -> + ["-setcookie", SetCookieArg | VMArgs1]; + _ -> + VMArgs1 + end + end, + VMArgs3 = maybe_add_proto_dist_arguments(VMArgs2), + VMArgs4 = maybe_add_inetrc_arguments(VMArgs3), + VMArgs5 = maybe_add_tls_arguments(VMArgs4), + PeerStartArg0 = #{name => PeerName, + args => VMArgs5, + connection => standard_io, + wait_boot => infinity}, + PeerStartArg = case Context of + #{nodename_type := longnames} -> + PeerStartArg0#{host => Suffix, + longnames => true}; + _ -> + PeerStartArg0 + end, + ?LOG_DEBUG("Peer discovery: peer node arguments: ~tp", + [PeerStartArg]), + case peer:start(PeerStartArg) of + {ok, Pid, Peer} -> + ?LOG_DEBUG( + "Peer discovery: using temporary hidden node '~ts' to query " + "discovered peers properties", + [Peer], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), try - run_peer_discovery(CreateClusterCallback), - maybe_register() + peer:call(Pid, ?MODULE, do_query_node_props, [Nodes], 180000) after - unlock(Data) + peer:stop(Pid) end; - {error, _Reason} -> - timer:sleep(Timeout), - maybe_create_cluster( - Retries - 1, Timeout, CreateClusterCallback) + {error, _} = Error -> + ?LOG_ERROR( + "Peer discovery: failed to start temporary hidden node to " + "query discovered peers' properties", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + throw(Error) + end; +query_node_props([]) -> + []. + +maybe_add_proto_dist_arguments(VMArgs) -> + case init:get_argument(proto_dist) of + {ok, [[Val]]} -> + %% See net_kernel.erl / protocol_childspecs/1. + Mod = list_to_existing_atom(Val ++ "_dist"), + ModDir = filename:dirname(code:which(Mod)), + ["-proto_dist", Val, "-pa", ModDir | VMArgs]; + _ -> + VMArgs end. --spec run_peer_discovery(CreateClusterCallback) -> Ret when - CreateClusterCallback :: create_cluster_callback(), - Ret :: ok | {Nodes, NodeType}, - Nodes :: [node()], - NodeType :: rabbit_db_cluster:node_type(). +maybe_add_inetrc_arguments(VMArgs) -> + %% If an inetrc file is configured, we need to use it for the temporary + %% hidden node too. + case application:get_env(kernel, inetrc) of + {ok, Val} -> + maybe_add_inetrc_arguments1(VMArgs, Val); + undefined -> + case os:getenv("ERL_INETRC") of + Val when is_list(Val) -> + maybe_add_inetrc_arguments1(VMArgs, Val); + false -> + VMArgs + end + end. -run_peer_discovery(CreateClusterCallback) -> - {RetriesLeft, DelayInterval} = discovery_retries(), - run_peer_discovery_with_retries( - RetriesLeft, DelayInterval, CreateClusterCallback). +maybe_add_inetrc_arguments1(VMArgs, Val) -> + %% The filename argument must be passed as a quoted string so that the + %% command line is correctly parsed as an Erlang string by the temporary + %% hidden node. + ValString = rabbit_misc:format("~0p", [Val]), + ["-kernel", "inetrc", ValString | VMArgs]. --spec run_peer_discovery_with_retries( - Retries, DelayInterval, CreateClusterCallback) -> ok when - CreateClusterCallback :: create_cluster_callback(), - Retries :: non_neg_integer(), - DelayInterval :: non_neg_integer(). 
+maybe_add_tls_arguments(VMArgs) -> + %% In the next case, RabbitMQ has been configured with additional Erlang VM + %% arguments such as this: + %% + %% SERVER_ADDITIONAL_ERL_ARGS="-pa $ERL_SSL_PATH -proto_dist inet_tls + %% -ssl_dist_opt server_cacertfile /etc/rabbitmq/ca_certificate.pem + %% -ssl_dist_opt server_certfile /etc/rabbitmq/server_certificate.pem + %% -ssl_dist_opt server_keyfile /etc/rabbitmq/server_key.pem + %% -ssl_dist_opt server_verify verify_peer + %% -ssl_dist_opt server_fail_if_no_peer_cert true + %% -ssl_dist_opt client_cacertfile /etc/rabbitmq/ca_certificate.pem + %% -ssl_dist_opt client_certfile /etc/rabbitmq/client_certificate.pem + %% -ssl_dist_opt client_keyfile /etc/rabbitmq/client_key.pem + %% -ssl_dist_opt client_verify verify_peer" + %% + %% `init:get_argument(ssl_dist_opt)' returns the following data structure: + %% + %% (rabbit@rmq0.local)1> init:get_argument(ssl_dist_opt). + %% {ok,[["server_cacertfile", "/etc/rabbitmq/ca_certificate.pem"], + %% ["server_certfile", "/etc/rabbitmq/server_certificate.pem"], + %% ["server_keyfile","/etc/rabbitmq/server_key.pem"], + %% ["server_verify","verify_peer"], + %% ["server_fail_if_no_peer_cert","true"], + %% ["client_cacertfile","/etc/rabbitmq/ca_certificate.pem"], + %% ["client_certfile", "/etc/rabbitmq/client_certificate.pem"], + %% ["client_keyfile","/etc/rabbitmq/client_key.pem"], + %% ["client_verify","verify_peer"]]} + %% + %% Which is then translated into arguments to `peer:start/1': + %% #{args => + %% ["-ssl_dist_opt","server_cacertfile", + %% "/etc/rabbitmq/ca_certificate.pem", + %% "-ssl_dist_opt","server_certfile", + %% "/etc/rabbitmq/server_rmq2.local_certificate.pem", + %% "-ssl_dist_opt","server_keyfile", + %% "/etc/rabbitmq/server_rmq2.local_key.pem", + %% "-ssl_dist_opt","server_verify", + %% "verify_peer","-ssl_dist_opt", + %% "server_fail_if_no_peer_cert", + %% "true","-ssl_dist_opt", + %% "client_cacertfile", + %% "/etc/rabbitmq/ca_certificate.pem", + %% "-ssl_dist_opt","client_certfile", + %% "/etc/rabbitmq/client_rmq2.local_certificate.pem", + %% "-ssl_dist_opt","client_keyfile", + %% "/etc/rabbitmq/client_rmq2.local_key.pem", + %% "-ssl_dist_opt","client_verify", + %% "verify_peer","-pa", + %% "/usr/local/lib/erlang/lib/ssl-11.0.3/ebin", + %% "-proto_dist","inet_tls","-boot", + %% "no_dot_erlang","-hidden"], + VMArgs1 = case init:get_argument(ssl_dist_opt) of + {ok, SslDistOpts0} -> + SslDistOpts1 = [["-ssl_dist_opt" | SslDistOpt] + || SslDistOpt <- SslDistOpts0], + SslDistOpts2 = lists:concat(SslDistOpts1), + SslDistOpts2 ++ VMArgs; + _ -> + VMArgs + end, + %% In the next case, RabbitMQ has been configured with additional Erlang VM + %% arguments such as this: + %% + %% SERVER_ADDITIONAL_ERL_ARGS="-pa $ERL_SSL_PATH -proto_dist inet_tls + %% -ssl_dist_optfile /etc/rabbitmq/inter_node_tls.config" + %% + %% This code adds the `ssl_dist_optfile' argument to the peer node's + %% argument list. + VMArgs2 = case init:get_argument(ssl_dist_optfile) of + {ok, [[SslDistOptfileArg]]} -> + ["-ssl_dist_optfile", SslDistOptfileArg | VMArgs1]; + _ -> + VMArgs1 + end, + VMArgs2. + +do_query_node_props(Nodes) when Nodes =/= [] -> + %% Make sure all log messages are forwarded from this temporary hidden + %% node to the upstream node, regardless of their level. + _ = logger:set_primary_config(level, debug), + + %% The group leader for all processes on this temporary hidden node is the + %% calling process' group leader on the upstream node. 
+ %% + %% When we use `erpc:call/4' (or the multicall equivalent) to execute code + %% on one of the `Nodes', the remotely executed code will also use the + %% calling process' group leader by default. + %% + %% We use this temporary hidden node to ensure the downstream node will + %% not connected to the upstream node. Therefore, we must change the group + %% leader as well, otherwise any I/O from the downstream node will send a + %% message to the upstream node's group leader and thus open a connection. + %% This would defeat the entire purpose of this temporary hidden node. + %% + %% To avoid this, we start a proxy process which we will use as a group + %% leader. This process will send all messages it receives to the group + %% leader on the upstream node. + %% + %% There is one caveat: the logger (local to the temporary hidden node) + %% forwards log messages to the upstream logger (on the upstream node) + %% only if the group leader of that message is a remote PID. Because we + %% set a local PID, it stops forwarding log messages originating from that + %% temporary hidden node. That's why we use `with_group_leader_proxy/2' to + %% set the group leader to our proxy only around the use of `erpc'. + %% + %% That's a lot just to keep logging working while not reveal the upstream + %% node to the downstream node... + Parent = self(), + UpstreamGroupLeader = erlang:group_leader(), + ProxyGroupLeader = spawn_link( + ?MODULE, group_leader_proxy, + [Parent, UpstreamGroupLeader]), + + %% TODO: Replace with `rabbit_nodes:list_members/0' when the oldest + %% supported version has it. + MembersPerNode = with_group_leader_proxy( + ProxyGroupLeader, + fun() -> + erpc:multicall(Nodes, rabbit_nodes, all, []) + end), + query_node_props1(Nodes, MembersPerNode, [], ProxyGroupLeader). + +with_group_leader_proxy(ProxyGroupLeader, Fun) -> + UpstreamGroupLeader = erlang:group_leader(), + try + true = erlang:group_leader(ProxyGroupLeader, self()), + Fun() + after + true = erlang:group_leader(UpstreamGroupLeader, self()) + end. -run_peer_discovery_with_retries( - 0, _DelayInterval, _CreateClusterCallback) -> - ok; -run_peer_discovery_with_retries( - RetriesLeft, DelayInterval, CreateClusterCallback) -> - FindBadNodeNames = fun - (Name, BadNames) when is_atom(Name) -> BadNames; - (Name, BadNames) -> [Name | BadNames] - end, - {DiscoveredNodes0, NodeType} = - case discover_cluster_nodes() of - {error, Reason} -> - RetriesLeft1 = RetriesLeft - 1, - ?LOG_ERROR( - "Peer discovery: Failed to discover nodes: ~tp. " - "Will retry after a delay of ~b ms, ~b retries left...", - [Reason, DelayInterval, RetriesLeft1], - #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - timer:sleep(DelayInterval), - run_peer_discovery_with_retries( - RetriesLeft1, DelayInterval, CreateClusterCallback); - {ok, {Nodes, Type} = Config} - when is_list(Nodes) andalso - (Type == disc orelse Type == disk orelse Type == ram) -> - case lists:foldr(FindBadNodeNames, [], Nodes) of - [] -> Config; - BadNames -> e({invalid_cluster_node_names, BadNames}) - end; - {ok, {_, BadType}} when BadType /= disc andalso BadType /= ram -> - e({invalid_cluster_node_type, BadType}); - {ok, _} -> - e(invalid_cluster_nodes_conf) - end, - DiscoveredNodes = lists:usort(DiscoveredNodes0), - ?LOG_INFO( - "Peer discovery: All discovered existing cluster peers: ~ts", - [format_discovered_nodes(DiscoveredNodes)], +group_leader_proxy(Parent, UpstreamGroupLeader) -> + receive + stop_proxy -> + erlang:unlink(Parent), + Parent ! proxy_stopped; + Message -> + UpstreamGroupLeader ! 
Message, + group_leader_proxy(Parent, UpstreamGroupLeader) + end. + +query_node_props1( + [Node | Nodes], [{ok, Members} | MembersPerNode], NodesAndProps, + ProxyGroupLeader) -> + NodeAndProps = {Node, Members}, + NodesAndProps1 = [NodeAndProps | NodesAndProps], + query_node_props1(Nodes, MembersPerNode, NodesAndProps1, ProxyGroupLeader); +query_node_props1( + [Node | Nodes], [{error, _} = Error | MembersPerNode], NodesAndProps, + ProxyGroupLeader) -> + %% We consider that an error means the remote node is unreachable or not + %% ready. Therefore, we exclude it from the list of discovered nodes as we + %% won't be able to join it anyway. + ?LOG_DEBUG( + "Peer discovery: failed to query cluster members of node '~ts': ~0tp~n" + "Peer discovery: node '~ts' excluded from the discovered nodes", + [Node, Error, Node], #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - Peers = rabbit_nodes:nodes_excl_me(DiscoveredNodes), - case Peers of - [] -> - ?LOG_INFO( - "Peer discovery: Discovered no peer nodes to cluster with. " - "Some discovery backends can filter nodes out based on a " - "readiness criteria. " - "Enabling debug logging might help troubleshoot.", - #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - CreateClusterCallback(none, disc); - _ -> - ?LOG_INFO( - "Peer discovery: Peer nodes we can cluster with: ~ts", - [format_discovered_nodes(Peers)], + query_node_props1(Nodes, MembersPerNode, NodesAndProps, ProxyGroupLeader); +query_node_props1([], [], NodesAndProps, ProxyGroupLeader) -> + NodesAndProps1 = lists:reverse(NodesAndProps), + query_node_props2(NodesAndProps1, [], ProxyGroupLeader). + +query_node_props2([{Node, Members} | Rest], NodesAndProps, ProxyGroupLeader) -> + try + erpc:call( + Node, logger, debug, + ["Peer discovery: temporary hidden node '~ts' queries properties " + "from node '~ts'", [node(), Node]]), + StartTime = get_node_start_time(Node, microsecond, ProxyGroupLeader), + IsReady = is_node_db_ready(Node, ProxyGroupLeader), + NodeAndProps = {Node, Members, StartTime, IsReady}, + NodesAndProps1 = [NodeAndProps | NodesAndProps], + query_node_props2(Rest, NodesAndProps1, ProxyGroupLeader) + catch + _:Error:_ -> + %% If one of the erpc calls we use to get the start time fails, + %% there is something wrong with the remote node because it + %% doesn't depend on RabbitMQ. We exclude it from the discovered + %% nodes. + ?LOG_DEBUG( + "Peer discovery: failed to query start time of node '~ts': " + "~0tp~n" + "Peer discovery: node '~ts' excluded from the discovered nodes", + [Node, Error, Node], #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - join_discovered_peers(Peers, NodeType, CreateClusterCallback) + query_node_props2(Rest, NodesAndProps, ProxyGroupLeader) + end; +query_node_props2([], NodesAndProps, ProxyGroupLeader) -> + NodesAndProps1 = lists:reverse(NodesAndProps), + NodesAndProps2 = sort_nodes_and_props(NodesAndProps1), + %% Wait for the proxy group leader to flush its inbox. + ProxyGroupLeader ! stop_proxy, + receive + proxy_stopped -> + ok + after 120_000 -> + ok + end, + ?assertEqual([], nodes()), + ?assert(length(NodesAndProps2) =< length(nodes(hidden))), + NodesAndProps2. + +-spec get_node_start_time(Node, Unit, ProxyGroupLeader) -> StartTime when + Node :: node(), + Unit :: erlang:time_unit(), + ProxyGroupLeader :: pid(), + StartTime :: non_neg_integer(). +%% @doc Returns the start time of the given `Node' in `Unit'. +%% +%% The start time is an arbitrary point in time (in the past or the future), +%% expressed native time unit. It is a monotonic time that is specific to that +%% node. 
It can't be compared as is with other nodes' start time. To convert it +%% to a system time so that we can compare it, we must add the node's time +%% offset. +%% +%% Both the start time and the time offset are expressed in native time unit. +%% Again, this can't be compared to other nodes' native time unit values. We +%% must convert it to a common time unit first. +%% +%% See the documentation of {@link erlang:time_offset/0} at +%% https://www.erlang.org/doc/man/erlang#time_offset-0 to get the full +%% explanation of the computation. +%% +%% @private + +get_node_start_time(Node, Unit, ProxyGroupLeader) -> + with_group_leader_proxy( + ProxyGroupLeader, + fun() -> + NativeStartTime = erpc:call( + Node, erlang, system_info, [start_time]), + TimeOffset = erpc:call(Node, erlang, time_offset, []), + SystemStartTime = NativeStartTime + TimeOffset, + StartTime = erpc:call( + Node, erlang, convert_time_unit, + [SystemStartTime, native, Unit]), + StartTime + end). + +-spec is_node_db_ready(Node, ProxyGroupLeader) -> IsReady when + Node :: node(), + ProxyGroupLeader :: pid(), + IsReady :: boolean() | undefined. +%% @doc Returns if the node's DB layer is ready or not. +%% +%% @private + +is_node_db_ready(Node, ProxyGroupLeader) -> + %% This code is running from a temporary hidden node. We derive the real + %% node interested in the properties from the group leader. + UpstreamGroupLeader = erlang:group_leader(), + ThisNode = node(UpstreamGroupLeader), + case Node of + ThisNode -> + %% The current node is running peer discovery, thus way before we + %% mark the DB layer as ready. Consider it ready in this case, + %% otherwise if the current node is selected, it will loop forever + %% waiting for itself to be ready. + true; + _ -> + with_group_leader_proxy( + ProxyGroupLeader, + fun() -> + try + erpc:call(Node, rabbit_db, is_init_finished, []) + catch + _:{exception, undef, + [{rabbit_db, is_init_finished, _, _} | _]} -> + undefined + end + end) end. --spec e(any()) -> no_return(). +-spec sort_nodes_and_props(NodesAndProps) -> + SortedNodesAndProps when + NodesAndProps :: [node_and_props()], + SortedNodesAndProps :: [node_and_props()]. +%% @doc Sorts the list of nodes according to their properties. +%% +%% See {@link query_node_props/1} for an explanation of the criterias used to +%% sort the list. +%% +%% @see query_node_props/1. +%% +%% @private + +sort_nodes_and_props(NodesAndProps) -> + NodesAndProps1 = lists:sort( + fun( + {NodeA, MembersA, StartTimeA, _IsReadyA}, + {NodeB, MembersB, StartTimeB, _IsReadyB}) -> + length(MembersA) > length(MembersB) orelse + (length(MembersA) =:= length(MembersB) andalso + StartTimeA < StartTimeB) orelse + (length(MembersA) =:= length(MembersB) andalso + StartTimeA =:= StartTimeB andalso + NodeA =< NodeB) + end, NodesAndProps), + ?LOG_DEBUG( + lists:flatten( + ["Peer discovery: sorted list of nodes and their properties " + "considered to create/sync the cluster:"] ++ + ["~n - ~0tp" || _ <- NodesAndProps1]), + NodesAndProps1, + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + NodesAndProps1. + +-spec can_use_discovered_nodes(DiscoveredNodes, NodesAndProps) -> CanUse when + DiscoveredNodes :: [node()], + NodesAndProps :: [node_and_props()], + CanUse :: boolean(). +%% @doc Indicates if the list of discovered nodes is good enough to proceed +%% with peer discovery. +%% +%% It is possible that we queried the backend early enough that it doesn't yet +%% know about the nodes that should form a cluster. 
To reduce the chance of a +%% list of nodes which makes little sense, we checks two criterias: +%% <ul> +%% <li>We want that this node is part of the list.</li> +%% <li>If we have a cluster size hint and the expected size is greater than 1, +%% we want the list to have at least two nodes. The cluster size hint is +%% computed from the configured target cluster size hint and the length of the +%% nodes list returned by the backend. This function picks the maximum of the +%% two. This is useful for backends such as the classic config one where the +%% returned list is static (i.e. it can be used as the cluster size hint).</li> +%% </ul>
      +%% +%% @private -e(Tag) -> throw({error, {Tag, error_description(Tag)}}). +can_use_discovered_nodes(DiscoveredNodes, NodesAndProps) + when NodesAndProps =/= [] -> + Nodes = [Node || {Node, _Members, _StartTime, _IsReady} <- NodesAndProps], -error_description({invalid_cluster_node_names, BadNames}) -> - "In the 'cluster_nodes' configuration key, the following node names " - "are invalid: " ++ lists:flatten(io_lib:format("~tp", [BadNames])); -error_description({invalid_cluster_node_type, BadType}) -> - "In the 'cluster_nodes' configuration key, the node type is invalid " - "(expected 'disc' or 'ram'): " ++ - lists:flatten(io_lib:format("~tp", [BadType])); -error_description(invalid_cluster_nodes_conf) -> - "The 'cluster_nodes' configuration key is invalid, it must be of the " - "form {[Nodes], Type}, where Nodes is a list of node names and " - "Type is either 'disc' or 'ram'". - -%% Attempts to join discovered, reachable and compatible (in terms of Mnesia -%% internal protocol version and such) cluster peers in order. -join_discovered_peers(TryNodes, NodeType, CreateClusterCallback) -> - {RetriesLeft, DelayInterval} = discovery_retries(), - join_discovered_peers_with_retries( - TryNodes, NodeType, RetriesLeft, DelayInterval, CreateClusterCallback). - -join_discovered_peers_with_retries( - TryNodes, _NodeType, 0, _DelayInterval, CreateClusterCallback) -> + ThisNode = node(), + ThisNodeIsIncluded = lists:member(ThisNode, Nodes), + case ThisNodeIsIncluded of + true -> + ok; + false -> + ?LOG_DEBUG( + "Peer discovery: not satisfyied with discovered peers: the " + "list does not contain this node", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}) + end, + + %% We consider that the list of nodes returned by the backend can be a + %% cluster size hint too. That's why we pick the maximum between the + %% configured one and the list length. + ClusterSizeHint = erlang:max( + rabbit_nodes:target_cluster_size_hint(), + length(DiscoveredNodes)), + HasEnoughNodes = ClusterSizeHint =< 1 orelse length(Nodes) >= 2, + case HasEnoughNodes of + true -> + ok; + false -> + ?LOG_DEBUG( + "Peer discovery: not satisfyied with discovered peers: the " + "list should contain at least two nodes with a configured " + "cluster size hint of ~b nodes", + [ClusterSizeHint], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}) + end, + + ThisNodeIsIncluded andalso HasEnoughNodes; +can_use_discovered_nodes(_DiscoveredNodes, []) -> + ?LOG_INFO( + "Peer discovery: discovered no peer nodes to cluster with. " + "Some discovery backends can filter nodes out based on a " + "readiness criteria. " + "Enabling debug logging might help troubleshoot.", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + false. + +-spec select_node_to_join(NodesAndProps) -> SelectedNode when + NodesAndProps :: nonempty_list(node_and_props()), + SelectedNode :: node() | false. +%% @doc Selects the node to join among the sorted list of nodes. +%% +%% The selection is simple: we take the first entry. It corresponds to the +%% oldest node we could reach, clustered with the greatest number of nodes. +%% +%% However if the node's DB layer is not ready, we return `false'. This will +%% tell the calling function to retry the whole process. 
+%% +%% @private + +select_node_to_join([{Node, _Members, _StartTime, _IsReady} | _]) + when Node =:= node() -> + ?LOG_INFO( + "Peer discovery: node '~ts' selected for auto-clustering", + [Node], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + Node; +select_node_to_join([{Node, _Members, _StartTime, IsReady} | _]) + when IsReady =/= false -> + ?LOG_INFO( + "Peer discovery: node '~ts' selected for auto-clustering", + [Node], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + Node; +select_node_to_join([{Node, _Members, _StartTime, false} | _]) -> ?LOG_INFO( - "Peer discovery: Could not successfully contact any node of: ~ts " - "(as in Erlang distribution). " - "Starting as a blank standalone node...", - [string:join(lists:map(fun atom_to_list/1, TryNodes), ",")], + "Peer discovery: node '~ts' selected for auto-clustering but its " + "DB layer is not ready; waiting before retrying...", + [Node], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + false. + +-spec join_selected_node(Backend, Node, NodeType) -> Ret when + Backend :: backend(), + Node :: node(), + NodeType :: rabbit_types:node_type(), + Ret :: ok | {error, Reason}, + Reason :: any(). +%% @doc Joins the selected node. +%% +%% This function relies on {@link rabbit_db_cluster:join/2}. It acquires a lock +%% before proceeding with the join if the backend provides such a mechanism. +%% +%% If the selected node is this node, this is a no-op and no lock is acquired. +%% +%% @private + +join_selected_node(_Backend, ThisNode, _NodeType) when ThisNode =:= node() -> + ?LOG_DEBUG( + "Peer discovery: the selected node is this node; proceed with boot", #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - init_single_node(CreateClusterCallback); -join_discovered_peers_with_retries( - TryNodes, NodeType, RetriesLeft, DelayInterval, CreateClusterCallback) -> - case find_reachable_peer_to_cluster_with(TryNodes) of - {ok, Node} -> - ?LOG_INFO( - "Peer discovery: Node '~ts' selected for auto-clustering", - [Node], + ok; +join_selected_node(Backend, SelectedNode, NodeType) -> + ?LOG_DEBUG( + "Peer discovery: trying to acquire lock", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + LockResult = lock(Backend, SelectedNode), + ?LOG_DEBUG( + "Peer discovery: rabbit_peer_discovery:lock/0 returned ~0tp", + [LockResult], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + case LockResult of + not_supported -> + ?LOG_DEBUG( + "Peer discovery: no lock acquired", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + join_selected_node_locked(SelectedNode, NodeType); + {ok, Data} -> + ?LOG_DEBUG( + "Peer discovery: lock acquired", #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - create_cluster(Node, NodeType, CreateClusterCallback); - none -> - RetriesLeft1 = RetriesLeft - 1, - ?LOG_INFO( - "Peer discovery: Trying to join discovered peers failed. " - "Will retry after a delay of ~b ms, ~b retries left...", - [DelayInterval, RetriesLeft1], + try + join_selected_node_locked(SelectedNode, NodeType) + after + ?LOG_DEBUG( + "Peer discovery: lock released", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + unlock(Backend, Data) + end; + {error, _Reason} = Error -> + ?LOG_WARNING( + "Peer discovery: failed to acquire a lock: ~0tp", + [Error], #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - timer:sleep(DelayInterval), - join_discovered_peers_with_retries( - TryNodes, NodeType, RetriesLeft1, DelayInterval, - CreateClusterCallback) + case lock_acquisition_failure_mode() of + ignore -> join_selected_node_locked(SelectedNode, NodeType); + fail -> Error + end end. 
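To make the selection rule concrete, here is a hypothetical worked example; the node names, membership lists and start times are invented. sort_nodes_and_props/1 is exported under -ifdef(TEST), so a check along these lines can be run from a test shell:

    %% Largest cluster first, then earliest start time, then node name.
    Candidates = [{'rabbit@c', ['rabbit@c'],             300, true},
                  {'rabbit@a', ['rabbit@a', 'rabbit@b'], 200, true},
                  {'rabbit@b', ['rabbit@a', 'rabbit@b'], 100, true}],
    [{'rabbit@b', _, _, _} | _] =
        rabbit_peer_discovery:sort_nodes_and_props(Candidates),
    %% select_node_to_join/1 therefore picks 'rabbit@b': it belongs to the
    %% largest cluster and is the oldest node within that cluster.
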
-find_reachable_peer_to_cluster_with([]) -> - none; -find_reachable_peer_to_cluster_with([Node | Nodes]) when Node =/= node() -> - case rabbit_db_cluster:check_compatibility(Node) of - ok -> - {ok, Node}; - Error -> +-spec join_selected_node_locked(Node, NodeType) -> Ret when + Node :: node(), + NodeType :: rabbit_types:node_type(), + Ret :: ok | {error, Reason}, + Reason :: any(). + +join_selected_node_locked(Node, NodeType) -> + %% We used to synchronize feature flags here before we updated the cluster + %% membership. We don't do it anymore because the `join_cluster' code + %% resets the joining node and copies the feature flags states from the + %% cluster. + try + Ret = rabbit_db_cluster:join(Node, NodeType), + ?assertNotEqual({ok, already_member}, Ret), + case Ret of + ok -> + ?LOG_INFO( + "Peer discovery: this node (~ts) successfully joined " + "node '~ts' cluster", + [node(), Node], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}); + Error1 -> + ?LOG_WARNING( + "Peer discovery: could not auto-cluster with node '~ts': " + "~0tp", + [Node, Error1], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}) + end, + Ret + catch + throw:Error2 -> ?LOG_WARNING( - "Peer discovery: Could not auto-cluster with node ~ts: ~0p", - [Node, Error], + "Peer discovery: could not auto-cluster with node '~ts': ~0tp", + [Node, Error2], #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), - find_reachable_peer_to_cluster_with(Nodes) - end; -find_reachable_peer_to_cluster_with([Node | Nodes]) when Node =:= node() -> - find_reachable_peer_to_cluster_with(Nodes). - -init_single_node(CreateClusterCallback) -> - IsVirgin = rabbit_db:is_virgin_node(), - rabbit_db_cluster:ensure_feature_flags_are_in_sync([], IsVirgin), - CreateClusterCallback(none, disc), - ok. - -create_cluster(RemoteNode, NodeType, CreateClusterCallback) -> - %% We want to synchronize feature flags first before we update the cluster - %% membership. This is needed to ensure the local list of Mnesia tables - %% matches the rest of the cluster for example, in case a feature flag - %% adds or removes tables. - %% - %% For instance, a feature flag may remove a table (so it's gone from the - %% cluster). If we were to wait for that table locally before - %% synchronizing feature flags, we would wait forever; indeed the feature - %% flag being disabled before sync, `rabbit_table:definitions()' would - %% return the old table. - %% - %% Feature flags need to be synced before any change to Mnesia membership. - %% If enabling feature flags fails, Mnesia could remain in an inconsistent - %% state that prevents later joining the nodes. - IsVirgin = rabbit_db:is_virgin_node(), - rabbit_db_cluster:ensure_feature_flags_are_in_sync([RemoteNode], IsVirgin), - CreateClusterCallback(RemoteNode, NodeType), - rabbit_node_monitor:notify_joined_cluster(), - ok. - -%% This module doesn't currently sanity-check the return value of -%% `Backend:list_nodes()`. Therefore, it could return something invalid: -%% thus the `{œk, any()} in the spec. -%% -%% `rabbit_mnesia:init_from_config()` does some verifications. + Error2 + end. --spec discover_cluster_nodes() -> - {ok, {Nodes :: [node()], NodeType :: rabbit_types:node_type()} | any()} | - {error, Reason :: string()}. +-spec e(any()) -> no_return(). -discover_cluster_nodes() -> - Backend = backend(), - normalize(Backend:list_nodes()). +e(Tag) -> throw({error, {Tag, error_description(Tag)}}). 
+error_description({invalid_cluster_node_names, BadNames}) -> + "In the 'cluster_nodes' configuration key, the following node names " + "are invalid: " ++ lists:flatten(io_lib:format("~tp", [BadNames])); +error_description({invalid_cluster_node_type, BadType}) -> + "In the 'cluster_nodes' configuration key, the node type is invalid " + "(expected 'disc' or 'ram'): " ++ + lists:flatten(io_lib:format("~tp", [BadType])). -spec maybe_register() -> ok. maybe_register() -> - Backend = backend(), - case Backend:supports_registration() of - true -> - register(), - Backend:post_registration(); - false -> - rabbit_log:info("Peer discovery backend ~ts does not support registration, skipping registration.", [Backend]), - ok - end. - + Backend = persistent_term:get(?PT_PEER_DISC_BACKEND, backend()), + case Backend:supports_registration() of + true -> + ?LOG_DEBUG( + "Peer discovery: registering this node", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + register(Backend), + _ = Backend:post_registration(), + ok; + false -> + ?LOG_DEBUG( + "Peer discovery: registration unsupported, skipping register", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + ok + end. -spec maybe_unregister() -> ok. maybe_unregister() -> - Backend = backend(), - case Backend:supports_registration() of - true -> - unregister(); - false -> - rabbit_log:info("Peer discovery backend ~ts does not support registration, skipping unregistration.", [Backend]), - ok - end. + Backend = persistent_term:get(?PT_PEER_DISC_BACKEND), + case Backend:supports_registration() of + true -> + ?LOG_DEBUG( + "Peer discovery: unregistering this node", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + unregister(Backend); + false -> + ?LOG_DEBUG( + "Peer discovery: registration unsupported, skipping unregister", + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + ok + end. --spec discovery_retries() -> {Retries :: integer(), Interval :: integer()}. +-spec discovery_retries() -> {Retries, RetryDelay} when + Retries :: non_neg_integer(), + RetryDelay :: non_neg_integer(). discovery_retries() -> case application:get_env(rabbit, cluster_formation) of @@ -378,57 +1055,105 @@ discovery_retries() -> {?DEFAULT_DISCOVERY_RETRY_COUNT, ?DEFAULT_DISCOVERY_RETRY_INTERVAL_MS} end. --spec register() -> ok. +-spec register(Backend) -> ok when + Backend :: backend(). -register() -> - Backend = backend(), - rabbit_log:info("Will register with peer discovery backend ~ts", [Backend]), - case Backend:register() of - ok -> ok; - {error, Error} -> - rabbit_log:error("Failed to register with peer discovery backend ~ts: ~tp", - [Backend, Error]), - ok - end. +register(Backend) -> + ?LOG_INFO( + "Peer discovery: will register with peer discovery backend ~ts", + [Backend], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + case Backend:register() of + ok -> + ok; + {error, _Reason} = Error -> + ?LOG_ERROR( + "Peer discovery: failed to register with peer discovery " + "backend ~ts: ~tp", + [Backend, Error], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + ok + end. --spec unregister() -> ok. +-spec unregister(Backend) -> ok when + Backend :: backend(). 
-unregister() -> - Backend = backend(), - rabbit_log:info("Will unregister with peer discovery backend ~ts", [Backend]), +unregister(Backend) -> + ?LOG_INFO( + "Peer discovery: will unregister with peer discovery backend ~ts", + [Backend], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), case Backend:unregister() of - ok -> ok; - {error, Error} -> - rabbit_log:error("Failed to unregister with peer discovery backend ~ts: ~tp", - [Backend, Error]), + ok -> + ok; + {error, _Reason} = Error -> + ?LOG_ERROR( + "Peer discovery: failed to unregister with peer discovery " + "backend ~ts: ~tp", + [Backend, Error], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), ok end. --spec lock() -> {ok, Data :: term()} | not_supported | {error, Reason :: string()}. +-spec lock(Backend, SelectedNode) -> Ret when + Backend :: backend(), + SelectedNode :: node(), + Ret :: {ok, Data} | not_supported | {error, Reason}, + Data :: any(), + Reason :: string(). -lock() -> - Backend = backend(), - rabbit_log:info("Will try to lock with peer discovery backend ~ts", [Backend]), - case Backend:lock(node()) of +lock(Backend, SelectedNode) -> + ?LOG_INFO( + "Peer discovery: will try to lock with peer discovery backend ~ts", + [Backend], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), + %% We want to acquire a lock for two nodes: this one and the selected + %% node. This protects against concurrent cluster joins. + %% + %% Some backends used to use the entire list of discovered nodes and used + %% `global' as the lock implementation. This was a problem because a side + %% effect was that all discovered Erlang nodes were connected to each + %% other. This led to conflicts in the global process name registry and + %% thus processes killed randomly. This was the case with the feature + %% flags controller for instance. + %% + %% Peer discovery shouldn't connect to all discovered nodes before it is + %% ready to actually join another node. And it should only connect to that + %% specific node, not all of them. + ThisNode = node(), + NodesToLock = [ThisNode, SelectedNode], + case Backend:lock(NodesToLock) of {error, Reason} = Error -> - rabbit_log:error("Failed to lock with peer discovery backend ~ts: ~tp", - [Backend, Reason]), + ?LOG_ERROR( + "Peer discovery: failed to lock with peer discovery " + "backend ~ts: ~0tp", + [Backend, Reason], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), Error; Any -> Any end. --spec unlock(Data :: term()) -> ok | {error, Reason :: string()}. +-spec unlock(Backend, Data) -> Ret when + Backend :: backend(), + Data :: any(), + Ret :: ok | {error, Reason}, + Reason :: string(). 
-unlock(Data) -> - Backend = backend(), - rabbit_log:info("Will try to unlock with peer discovery backend ~ts", [Backend]), +unlock(Backend, Data) -> + ?LOG_INFO( + "Peer discovery: will try to unlock with peer discovery " + "backend ~ts", + [Backend], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), case Backend:unlock(Data) of {error, Reason} = Error -> - rabbit_log:error("Failed to unlock with peer discovery backend ~ts: ~tp, " - "lock data: ~tp", - [Backend, Reason, Data]), + ?LOG_ERROR( + "Peer discovery: failed to unlock with peer discovery " + "backend ~ts: ~0tp, lock data: ~0tp", + [Backend, Reason, Data], + #{domain => ?RMQLOG_DOMAIN_PEER_DISC}), Error; Any -> Any @@ -442,10 +1167,10 @@ unlock(Data) -> {Nodes :: [node()], NodeType :: rabbit_types:node_type()} | {ok, Nodes :: [node()]} | - {ok, {Nodes :: [node()], + {ok, {Nodes :: [node()] | node(), NodeType :: rabbit_types:node_type()}} | {error, Reason :: string()}) -> - {ok, {Nodes :: [node()], NodeType :: rabbit_types:node_type()}} | + {ok, {Nodes :: [node()] | node(), NodeType :: rabbit_types:node_type()}} | {error, Reason :: string()}. normalize(Nodes) when is_list(Nodes) -> @@ -456,19 +1181,12 @@ normalize({ok, Nodes}) when is_list(Nodes) -> {ok, {Nodes, disc}}; normalize({ok, {Nodes, NodeType}}) when is_list(Nodes) andalso is_atom(NodeType) -> {ok, {Nodes, NodeType}}; +normalize({ok, {Node, NodeType}}) + when is_atom(Node) andalso is_atom(NodeType) -> + {ok, {Node, NodeType}}; normalize({error, Reason}) -> {error, Reason}. --spec format_discovered_nodes(Nodes :: list()) -> string(). - -format_discovered_nodes(Nodes) -> - %% NOTE: in OTP 21 string:join/2 is deprecated but still available. - %% Its recommended replacement is not a drop-in one, though, so - %% we will not be switching just yet. - string:join(lists:map(fun rabbit_data_coercion:to_list/1, Nodes), ", "). - - - -spec node_prefix() -> string(). node_prefix() -> @@ -477,8 +1195,6 @@ node_prefix() -> [_] -> ?DEFAULT_PREFIX end. - - -spec append_node_prefix(Value :: binary() | string()) -> string(). append_node_prefix(Value) when is_binary(Value) orelse is_list(Value) -> diff --git a/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl b/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl index 14cb059cf650..6aa50602c673 100644 --- a/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl +++ b/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl @@ -2,14 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_peer_discovery_classic_config). -behaviour(rabbit_peer_discovery_backend). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([list_nodes/0, supports_registration/0, register/0, unregister/0, post_registration/0, lock/1, unlock/1]). @@ -22,15 +20,49 @@ list_nodes() -> case application:get_env(rabbit, cluster_nodes, {[], disc}) of - {_Nodes, _NodeType} = Pair -> {ok, Pair}; - Nodes when is_list(Nodes) -> {ok, {Nodes, disc}} + {Nodes, NodeType} -> + check_local_node(Nodes), + check_duplicates(Nodes), + {ok, {add_this_node(Nodes), NodeType}}; + Nodes when is_list(Nodes) -> + check_local_node(Nodes), + check_duplicates(Nodes), + {ok, {add_this_node(Nodes), disc}} + end. 
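A hypothetical shell session (node names invented) showing the effect of the rewritten list_nodes/0 above: the local node is now added to the configured list when it is missing, and check_local_node/1 logs a warning about the omission:

    %% Running on 'rabbit@node1' with the classic config backend:
    1> application:set_env(rabbit, cluster_nodes, {['rabbit@node2', 'rabbit@node3'], disc}).
    ok
    2> rabbit_peer_discovery_classic_config:list_nodes().
    {ok,{['rabbit@node1','rabbit@node2','rabbit@node3'],disc}}
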
+ +add_this_node(Nodes) -> + ThisNode = node(), + case lists:member(ThisNode, Nodes) of + true -> Nodes; + false -> [ThisNode | Nodes] + end. + +check_duplicates(Nodes) -> + case (length(lists:usort(Nodes)) == length(Nodes)) of + true -> + ok; + false -> + rabbit_log:warning("Classic peer discovery backend: list of " + "nodes contains duplicates ~0tp", + [Nodes]) + end. + +check_local_node(Nodes) -> + case lists:member(node(), Nodes) of + true -> + ok; + false -> + rabbit_log:warning("Classic peer discovery backend: list of " + "nodes does not contain the local node ~0tp", + [Nodes]) end. --spec lock(Node :: node()) -> {ok, {{ResourceId :: string(), LockRequesterId :: node()}, Nodes :: [node()]}} | - {error, Reason :: string()}. +-spec lock(Nodes :: [node()]) -> + {ok, {{ResourceId :: string(), LockRequesterId :: node()}, Nodes :: [node()]}} | + {error, Reason :: string()}. -lock(Node) -> - {ok, {Nodes, _NodeType}} = list_nodes(), +lock(Nodes) -> + Node = node(), case lists:member(Node, Nodes) of false when Nodes =/= [] -> rabbit_log:warning("Local node ~ts is not part of configured nodes ~tp. " diff --git a/deps/rabbit/src/rabbit_peer_discovery_dns.erl b/deps/rabbit/src/rabbit_peer_discovery_dns.erl index 88634d139957..a550ae91f3f8 100644 --- a/deps/rabbit/src/rabbit_peer_discovery_dns.erl +++ b/deps/rabbit/src/rabbit_peer_discovery_dns.erl @@ -2,14 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_peer_discovery_dns). -behaviour(rabbit_peer_discovery_backend). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([list_nodes/0, supports_registration/0, register/0, unregister/0, post_registration/0, lock/1, unlock/1]). %% for tests @@ -63,9 +61,9 @@ unregister() -> post_registration() -> ok. --spec lock(Node :: atom()) -> not_supported. +-spec lock(Nodes :: [node()]) -> not_supported. -lock(_Node) -> +lock(_Nodes) -> not_supported. -spec unlock(Data :: term()) -> ok. diff --git a/deps/rabbit/src/rabbit_plugins.erl b/deps/rabbit/src/rabbit_plugins.erl index 5ebd968e8cbb..959a1a6de7cf 100644 --- a/deps/rabbit/src/rabbit_plugins.erl +++ b/deps/rabbit/src/rabbit_plugins.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_plugins). -include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("stdlib/include/zip.hrl"). - -export([setup/0, active/0, read_enabled/1, list/1, list/2, dependencies/3, running_plugins/0]). -export([ensure/1]). -export([validate_plugins/1, format_invalid_plugins/1]). diff --git a/deps/rabbit/src/rabbit_policies.erl b/deps/rabbit/src/rabbit_policies.erl index a6677e8a534e..66224ce6aa1b 100644 --- a/deps/rabbit/src/rabbit_policies.erl +++ b/deps/rabbit/src/rabbit_policies.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_policies). @@ -25,7 +25,7 @@ register() -> %% Note: there are more validators registered from other modules, - %% such as rabbit_mirror_queue_misc + %% such as rabbit_quorum_queue [rabbit_registry:register(Class, Name, ?MODULE) || {Class, Name} <- [{policy_validator, <<"alternate-exchange">>}, {policy_validator, <<"consumer-timeout">>}, @@ -54,13 +54,17 @@ register() -> {operator_policy_validator, <<"max-in-memory-length">>}, {operator_policy_validator, <<"max-in-memory-bytes">>}, {operator_policy_validator, <<"delivery-limit">>}, + {operator_policy_validator, <<"queue-version">>}, + {operator_policy_validator, <<"overflow">>}, {policy_merge_strategy, <<"expires">>}, {policy_merge_strategy, <<"message-ttl">>}, {policy_merge_strategy, <<"max-length">>}, {policy_merge_strategy, <<"max-length-bytes">>}, {policy_merge_strategy, <<"max-in-memory-length">>}, {policy_merge_strategy, <<"max-in-memory-bytes">>}, - {policy_merge_strategy, <<"delivery-limit">>}]], + {policy_merge_strategy, <<"delivery-limit">>}, + {policy_merge_strategy, <<"queue-version">>}, + {policy_merge_strategy, <<"overflow">>}]], ok. -spec validate_policy([{binary(), term()}]) -> rabbit_policy_validator:validate_results(). @@ -211,5 +215,7 @@ merge_policy_value(<<"max-in-memory-length">>, Val, OpVal) -> min(Val, OpVal); merge_policy_value(<<"max-in-memory-bytes">>, Val, OpVal) -> min(Val, OpVal); merge_policy_value(<<"expires">>, Val, OpVal) -> min(Val, OpVal); merge_policy_value(<<"delivery-limit">>, Val, OpVal) -> min(Val, OpVal); +merge_policy_value(<<"queue-version">>, _Val, OpVal) -> OpVal; +merge_policy_value(<<"overflow">>, _Val, OpVal) -> OpVal; %% use operator policy value for booleans merge_policy_value(_Key, Val, OpVal) when is_boolean(Val) andalso is_boolean(OpVal) -> OpVal. diff --git a/deps/rabbit/src/rabbit_policy.erl b/deps/rabbit/src/rabbit_policy.erl index c4446e72fb8d..61302855e6a6 100644 --- a/deps/rabbit/src/rabbit_policy.erl +++ b/deps/rabbit/src/rabbit_policy.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_policy). @@ -39,7 +39,7 @@ list_formatted/1, list_formatted/3, info_keys/0]). -export([parse_set_op/7, set_op/7, delete_op/3, lookup_op/2, list_op/0, list_op/1, list_op/2, list_formatted_op/1, list_formatted_op/3, - match_all/2, match_as_map/1, match_op_as_map/1, definition_keys/1, + match_all/1, match_all/2, match_as_map/1, match_op_as_map/1, definition_keys/1, list_in/1, list_in/2, list_as_maps/0, list_as_maps/1, list_op_as_maps/0, list_op_as_maps/1 ]). -export([sort_by_priority/1]). @@ -185,27 +185,38 @@ get(Name, EntityName = #resource{virtual_host = VHost}) -> match(EntityName, list(VHost)), match(EntityName, list_op(VHost))). +%% It's exported, so give it a default until all khepri transformation is sorted match(NameOrQueue, Policies) -> - case match_all(NameOrQueue, Policies) of + match(NameOrQueue, Policies, is_policy_applicable). 
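For context, a rough sketch (not the module's actual API) of the matching shape used here: a queue name is tested against each policy's regular expression and the highest-priority match wins, which is what the match_all/priority_comparator pair in this module does. Policies is assumed to be a list of proplists carrying pattern and priority keys.

pick_policy(QueueNameBin, Policies) ->
    %% keep only the policies whose pattern matches the queue name
    Matching = [P || P <- Policies,
                     match =:= re:run(QueueNameBin,
                                      proplists:get_value(pattern, P),
                                      [{capture, none}])],
    %% sort by priority, highest first, and take the winner if any
    case lists:sort(fun(A, B) ->
                            proplists:get_value(priority, A) >=
                                proplists:get_value(priority, B)
                    end, Matching) of
        []           -> undefined;
        [Winner | _] -> Winner
    end.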
+ +match(NameOrQueue, Policies, Function) -> + case match_all(NameOrQueue, Policies, Function) of [] -> undefined; [Policy | _] -> Policy end. +%% It's exported, so give it a default until all khepri transformation is sorted +match_all(NameOrQueue) -> + match_all(NameOrQueue, list()). + match_all(NameOrQueue, Policies) -> - lists:sort(fun priority_comparator/2, [P || P <- Policies, matches(NameOrQueue, P)]). + match_all(NameOrQueue, Policies, is_policy_applicable). + +match_all(NameOrQueue, Policies, Function) -> + lists:sort(fun priority_comparator/2, [P || P <- Policies, matches(NameOrQueue, P, Function)]). -matches(Q, Policy) when ?is_amqqueue(Q) -> +matches(Q, Policy, Function) when ?is_amqqueue(Q) -> #resource{name = Name, virtual_host = VHost} = amqqueue:get_name(Q), matches_queue_type(queue, amqqueue:get_type(Q), pget('apply-to', Policy)) andalso - is_applicable(Q, pget(definition, Policy)) andalso + is_applicable(Q, pget(definition, Policy), Function) andalso match =:= re:run(Name, pget(pattern, Policy), [{capture, none}]) andalso VHost =:= pget(vhost, Policy); -matches(#resource{kind = queue} = Resource, Policy) -> +matches(#resource{kind = queue} = Resource, Policy, Function) -> {ok, Q} = rabbit_amqqueue:lookup(Resource), - matches(Q, Policy); -matches(#resource{name = Name, kind = Kind, virtual_host = VHost} = Resource, Policy) -> + matches(Q, Policy, Function); +matches(#resource{name = Name, kind = Kind, virtual_host = VHost} = Resource, Policy, Function) -> matches_type(Kind, pget('apply-to', Policy)) andalso - is_applicable(Resource, pget(definition, Policy)) andalso + is_applicable(Resource, pget(definition, Policy), Function) andalso match =:= re:run(Name, pget(pattern, Policy), [{capture, none}]) andalso VHost =:= pget(vhost, Policy). @@ -389,9 +400,6 @@ notify_clear(VHost, <<"operator_policy">>, Name, ActingUser) -> %%---------------------------------------------------------------------------- -%% [1] We need to prevent this from becoming O(n^2) in a similar -%% manner to rabbit_binding:remove_for_{source,destination}. So see -%% the comment in rabbit_binding:lock_route_tables/0 for more rationale. %% [2] We could be here in a post-tx fun after the vhost has been %% deleted; in which case it's fine to do nothing. update_matched_objects(VHost, PolicyDef, ActingUser) -> @@ -492,11 +500,11 @@ matches_queue_type(queue, _, _) -> false. priority_comparator(A, B) -> pget(priority, A) >= pget(priority, B). -is_applicable(Q, Policy) when ?is_amqqueue(Q) -> - rabbit_amqqueue:is_policy_applicable(Q, rabbit_data_coercion:to_list(Policy)); -is_applicable(#resource{kind = queue} = Resource, Policy) -> - rabbit_amqqueue:is_policy_applicable(Resource, rabbit_data_coercion:to_list(Policy)); -is_applicable(_, _) -> +is_applicable(Q, Policy, Function) when ?is_amqqueue(Q) -> + rabbit_amqqueue:Function(Q, rabbit_data_coercion:to_list(Policy)); +is_applicable(#resource{kind = queue} = Resource, Policy, Function) -> + rabbit_amqqueue:Function(Resource, rabbit_data_coercion:to_list(Policy)); +is_applicable(_, _, _) -> true. %%---------------------------------------------------------------------------- diff --git a/deps/rabbit/src/rabbit_policy_merge_strategy.erl b/deps/rabbit/src/rabbit_policy_merge_strategy.erl index 21bb1f759881..3a2a838d8214 100644 --- a/deps/rabbit/src/rabbit_policy_merge_strategy.erl +++ b/deps/rabbit/src/rabbit_policy_merge_strategy.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_policy_merge_strategy). diff --git a/deps/rabbit/src/rabbit_prelaunch_cluster.erl b/deps/rabbit/src/rabbit_prelaunch_cluster.erl index 7effd20cc4c1..61aa77ccd7ea 100644 --- a/deps/rabbit/src/rabbit_prelaunch_cluster.erl +++ b/deps/rabbit/src/rabbit_prelaunch_cluster.erl @@ -6,27 +6,21 @@ -export([setup/1]). -setup(Context) -> +setup(_Context) -> ?LOG_DEBUG( "~n== Clustering ==", [], #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), - ?LOG_DEBUG( - "Preparing cluster status files", [], - #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), - rabbit_node_monitor:prepare_cluster_status_files(), - case Context of - #{initial_pass := true} -> - %% Renaming a node was partially handled by `rabbit_upgrade', the - %% old upgrade mechanism used before we introduced feature flags. - %% The following call to `rabbit_mnesia_rename' was part of - %% `rabbit_upgrade:maybe_upgrade_mnesia()'. + + case rabbit_khepri:is_enabled() of + true -> + ok; + false -> ?LOG_DEBUG( - "Finish node renaming (if any)", [], + "Preparing cluster status files", [], #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), - ok = rabbit_mnesia_rename:maybe_finish(); - _ -> - ok + rabbit_node_monitor:prepare_cluster_status_files() end, + ?LOG_DEBUG( "Checking cluster consistency", [], #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), diff --git a/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl b/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl index d8380b425409..255616556ee1 100644 --- a/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl +++ b/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_prelaunch_enabled_plugins_file). diff --git a/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl b/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl index 56cd4d7e8042..cc8918a6b085 100644 --- a/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl +++ b/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_prelaunch_feature_flags). diff --git a/deps/rabbit/src/rabbit_prelaunch_logging.erl b/deps/rabbit/src/rabbit_prelaunch_logging.erl index 0132c549584a..15d9bad7fa2f 100644 --- a/deps/rabbit/src/rabbit_prelaunch_logging.erl +++ b/deps/rabbit/src/rabbit_prelaunch_logging.erl @@ -2,11 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2019-2023 VMware, Inc. or its affiliates. +%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc %% This module manages the configuration of the Erlang Logger facility. In diff --git a/deps/rabbit/src/rabbit_prequeue.erl b/deps/rabbit/src/rabbit_prequeue.erl deleted file mode 100644 index 27aac858532c..000000000000 --- a/deps/rabbit/src/rabbit_prequeue.erl +++ /dev/null @@ -1,100 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_prequeue). - -%% This is the initial gen_server that all queue processes start off -%% as. It handles the decision as to whether we need to start a new -%% mirror, a new master/unmirrored, or whether we are restarting (and -%% if so, as what). Thus a crashing queue process can restart from here -%% and always do the right thing. - --export([start_link/3]). - --export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3]). - --behaviour(gen_server2). - --include_lib("rabbit_common/include/rabbit.hrl"). --include("amqqueue.hrl"). - -%%---------------------------------------------------------------------------- - --export_type([start_mode/0]). - --type start_mode() :: 'declare' | 'recovery' | 'slave'. - -%%---------------------------------------------------------------------------- - --spec start_link(amqqueue:amqqueue(), start_mode(), pid()) - -> rabbit_types:ok_pid_or_error(). - -start_link(Q, StartMode, Marker) -> - gen_server2:start_link(?MODULE, {Q, StartMode, Marker}, []). - -%%---------------------------------------------------------------------------- - -init({Q, StartMode, Marker}) -> - init(Q, case {is_process_alive(Marker), StartMode} of - {true, slave} -> slave; - {true, _} -> master; - {false, _} -> restart - end). - -init(Q, master) -> rabbit_amqqueue_process:init(Q); -init(Q, slave) -> rabbit_mirror_queue_slave:init(Q); - -init(Q0, restart) when ?is_amqqueue(Q0) -> - QueueName = amqqueue:get_name(Q0), - {ok, Q1} = rabbit_amqqueue:lookup(QueueName), - QPid = amqqueue:get_pid(Q1), - SPids = amqqueue:get_slave_pids(Q1), - LocalOrMasterDown = node(QPid) =:= node() - orelse not rabbit_process:on_running_node(QPid), - Slaves = [SPid || SPid <- SPids, rabbit_process:is_process_alive(SPid)], - case rabbit_process:is_process_alive(QPid) of - true -> false = LocalOrMasterDown, %% assertion - rabbit_mirror_queue_slave:go(self(), async), - rabbit_mirror_queue_slave:init(Q1); %% [1] - false -> case LocalOrMasterDown andalso Slaves =:= [] of - true -> crash_restart(Q1); %% [2] - false -> timer:sleep(25), - init(Q1, restart) %% [3] - end - end. -%% [1] There is a master on another node. Regardless of whether we -%% were originally a master or a mirror, we are now a new slave. -%% -%% [2] Nothing is alive. We are the last best hope. Try to restart as a master. -%% -%% [3] The current master is dead but either there are alive mirrors to -%% take over or it's all happening on a different node anyway. This is -%% not a stable situation. Sleep and wait for somebody else to make a -%% move. 
- -crash_restart(Q0) when ?is_amqqueue(Q0) -> - QueueName = amqqueue:get_name(Q0), - rabbit_log:error("Restarting crashed ~ts.", [rabbit_misc:rs(QueueName)]), - gen_server2:cast(self(), init), - Q1 = amqqueue:set_pid(Q0, self()), - rabbit_amqqueue_process:init(Q1). - -%%---------------------------------------------------------------------------- - -%% This gen_server2 always hands over to some other module at the end -%% of init/1. --spec handle_call(_, _, _) -> no_return(). -handle_call(_Msg, _From, _State) -> exit(unreachable). --spec handle_cast(_, _) -> no_return(). -handle_cast(_Msg, _State) -> exit(unreachable). --spec handle_info(_, _) -> no_return(). -handle_info(_Msg, _State) -> exit(unreachable). --spec terminate(_, _) -> no_return(). -terminate(_Reason, _State) -> exit(unreachable). --spec code_change(_, _, _) -> no_return(). -code_change(_OldVsn, _State, _Extra) -> exit(unreachable). diff --git a/deps/rabbit/src/rabbit_priority_queue.erl b/deps/rabbit/src/rabbit_priority_queue.erl index d649773190d5..6e08a44f565f 100644 --- a/deps/rabbit/src/rabbit_priority_queue.erl +++ b/deps/rabbit/src/rabbit_priority_queue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2015-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_priority_queue). @@ -26,11 +26,10 @@ -export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1, purge/1, purge_acks/1, - publish/6, publish_delivered/5, discard/4, drain_confirmed/1, - batch_publish/4, batch_publish_delivered/4, + publish/5, publish_delivered/4, discard/3, drain_confirmed/1, dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3, len/1, is_empty/1, depth/1, - set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1, + update_rates/1, needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1, msg_rates/1, info/2, invoke/3, is_duplicate/2, set_queue_mode/2, set_queue_version/2, @@ -130,7 +129,9 @@ priorities(Q) when ?is_amqqueue(Q) -> case lists:member(Type, Ints) of false -> none; true -> - Max = min(RequestedMax, ?MAX_SUPPORTED_PRIORITY), + %% make sure the value is no greater than ?MAX_SUPPORTED_PRIORITY but + %% also is not negative + Max = max(1, min(RequestedMax, ?MAX_SUPPORTED_PRIORITY)), lists:reverse(lists:seq(0, Max)) end; _ -> none @@ -199,54 +200,23 @@ purge_acks(State = #state{bq = BQ}) -> purge_acks(State = #passthrough{bq = BQ, bqs = BQS}) -> ?passthrough1(purge_acks(BQS)). -publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State = #state{bq = BQ}) -> +publish(Msg, MsgProps, IsDelivered, ChPid, State = #state{bq = BQ}) -> pick1(fun (_P, BQSN) -> - BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQSN) + BQ:publish(Msg, MsgProps, IsDelivered, ChPid, BQSN) end, Msg, State); -publish(Msg, MsgProps, IsDelivered, ChPid, Flow, +publish(Msg, MsgProps, IsDelivered, ChPid, State = #passthrough{bq = BQ, bqs = BQS}) -> - ?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS)). 
- -batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) -> - PubMap = partition_publish_batch(Publishes, MaxP), - lists:foldl( - fun ({Priority, Pubs}, St) -> - pick1(fun (_P, BQSN) -> - BQ:batch_publish(Pubs, ChPid, Flow, BQSN) - end, Priority, St) - end, State, maps:to_list(PubMap)); -batch_publish(Publishes, ChPid, Flow, - State = #passthrough{bq = BQ, bqs = BQS}) -> - ?passthrough1(batch_publish(Publishes, ChPid, Flow, BQS)). - -publish_delivered(Msg, MsgProps, ChPid, Flow, State = #state{bq = BQ}) -> + ?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, BQS)). + +publish_delivered(Msg, MsgProps, ChPid, State = #state{bq = BQ}) -> pick2(fun (P, BQSN) -> {AckTag, BQSN1} = BQ:publish_delivered( - Msg, MsgProps, ChPid, Flow, BQSN), + Msg, MsgProps, ChPid, BQSN), {{P, AckTag}, BQSN1} end, Msg, State); -publish_delivered(Msg, MsgProps, ChPid, Flow, +publish_delivered(Msg, MsgProps, ChPid, State = #passthrough{bq = BQ, bqs = BQS}) -> - ?passthrough2(publish_delivered(Msg, MsgProps, ChPid, Flow, BQS)). - -batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) -> - PubMap = partition_publish_delivered_batch(Publishes, MaxP), - {PrioritiesAndAcks, State1} = - lists:foldl( - fun ({Priority, Pubs}, {PriosAndAcks, St}) -> - {PriosAndAcks1, St1} = - pick2(fun (P, BQSN) -> - {AckTags, BQSN1} = - BQ:batch_publish_delivered( - Pubs, ChPid, Flow, BQSN), - {priority_on_acktags(P, AckTags), BQSN1} - end, Priority, St), - {[PriosAndAcks1 | PriosAndAcks], St1} - end, {[], State}, maps:to_list(PubMap)), - {lists:reverse(PrioritiesAndAcks), State1}; -batch_publish_delivered(Publishes, ChPid, Flow, - State = #passthrough{bq = BQ, bqs = BQS}) -> - ?passthrough2(batch_publish_delivered(Publishes, ChPid, Flow, BQS)). + ?passthrough2(publish_delivered(Msg, MsgProps, ChPid, BQS)). %% TODO this is a hack. The BQ api does not give us enough information %% here - if we had the Msg we could look at its priority and forward @@ -256,14 +226,14 @@ batch_publish_delivered(Publishes, ChPid, Flow, %% are talking to VQ*. discard/4 is used by HA, but that's "above" us %% (if in use) so we don't break that either, just some hypothetical %% alternate BQ implementation. -discard(_MsgId, _ChPid, _Flow, State = #state{}) -> +discard(_MsgId, _ChPid, State = #state{}) -> State; %% We should have something a bit like this here: %% pick1(fun (_P, BQSN) -> - %% BQ:discard(MsgId, ChPid, Flow, BQSN) + %% BQ:discard(MsgId, ChPid, BQSN) %% end, Msg, State); -discard(MsgId, ChPid, Flow, State = #passthrough{bq = BQ, bqs = BQS}) -> - ?passthrough1(discard(MsgId, ChPid, Flow, BQS)). +discard(MsgId, ChPid, State = #passthrough{bq = BQ, bqs = BQS}) -> + ?passthrough1(discard(MsgId, ChPid, BQS)). drain_confirmed(State = #state{bq = BQ}) -> fold_append2(fun (_P, BQSN) -> BQ:drain_confirmed(BQSN) end, State); @@ -358,18 +328,10 @@ depth(#state{bq = BQ, bqss = BQSs}) -> depth(#passthrough{bq = BQ, bqs = BQS}) -> BQ:depth(BQS). -set_ram_duration_target(DurationTarget, State = #state{bq = BQ}) -> - foreach1(fun (_P, BQSN) -> - BQ:set_ram_duration_target(DurationTarget, BQSN) - end, State); -set_ram_duration_target(DurationTarget, - State = #passthrough{bq = BQ, bqs = BQS}) -> - ?passthrough1(set_ram_duration_target(DurationTarget, BQS)). - -ram_duration(State = #state{bq = BQ}) -> - fold_min2(fun (_P, BQSN) -> BQ:ram_duration(BQSN) end, State); -ram_duration(State = #passthrough{bq = BQ, bqs = BQS}) -> - ?passthrough2(ram_duration(BQS)). 
+update_rates(State = #state{bq = BQ}) -> + foreach1(fun (_P, BQSN) -> BQ:update_rates(BQSN) end, State); +update_rates(State = #passthrough{bq = BQ, bqs = BQS}) -> + ?passthrough1(update_rates(BQS)). needs_timeout(#state{bq = BQ, bqss = BQSs}) -> fold0(fun (_P, _BQSN, timed) -> timed; @@ -413,6 +375,8 @@ info(backing_queue_status, #state{bq = BQ, bqss = BQSs}) -> end, nothing, BQSs); info(head_message_timestamp, #state{bq = BQ, bqss = BQSs}) -> find_head_message_timestamp(BQ, BQSs, ''); +info(oldest_message_received_timestamp, #state{bq = BQ, bqss = BQSs}) -> + find_oldest_message_received_timestamp(BQ, BQSs); info(online, _) -> ''; info(Item, #state{bq = BQ, bqss = BQSs}) -> @@ -526,13 +490,6 @@ fold_add2(Fun, State) -> {add_maybe_infinity(Res, Acc), BQSN1} end, 0, State). -%% Fold over results assuming results are numbers and we want the minimum -fold_min2(Fun, State) -> - fold2(fun (P, BQSN, Acc) -> - {Res, BQSN1} = Fun(P, BQSN), - {erlang:min(Res, Acc), BQSN1} - end, infinity, State). - %% Fold over results assuming results are lists and we want to append %% them, and also that we have some AckTags we want to pass in to each %% invocation. @@ -597,10 +554,6 @@ a(State = #state{bqss = BQSs}) -> end. %%---------------------------------------------------------------------------- -partition_publish_batch(Publishes, MaxP) -> - partition_publishes( - Publishes, fun ({Msg, _, _}) -> Msg end, MaxP). - partition_publish_delivered_batch(Publishes, MaxP) -> partition_publishes( Publishes, fun ({Msg, _}) -> Msg end, MaxP). @@ -689,6 +642,28 @@ find_head_message_timestamp(BQ, [{_, BQSN} | Rest], Timestamp) -> find_head_message_timestamp(_, [], Timestamp) -> Timestamp. +find_oldest_message_received_timestamp(BQ, BQs) -> + %% Oldest message timestamp among all priority queues + Timestamps = + lists:foldl( + fun({_, BQSN}, Acc) -> + case oldest_message_received_timestamp(BQ, BQSN) of + '' -> Acc; + Ts -> [Ts | Acc] + end + end, [], BQs), + case Timestamps of + [] -> ''; + _ -> lists:min(Timestamps) + end. + +oldest_message_received_timestamp(BQ, BQSN) -> + MsgCount = BQ:len(BQSN) + BQ:info(messages_unacknowledged_ram, BQSN), + if + MsgCount =/= 0 -> BQ:info(oldest_message_received_timestamp, BQSN); + true -> '' + end. + zip_msgs_and_acks(Pubs, AckTags) -> lists:zipwith( fun ({Msg, _Props}, AckTag) -> diff --git a/deps/rabbit/src/rabbit_process.erl b/deps/rabbit/src/rabbit_process.erl index 0fe093ff7fe8..c7ec5d0ed477 100644 --- a/deps/rabbit/src/rabbit_process.erl +++ b/deps/rabbit/src/rabbit_process.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_process). diff --git a/deps/rabbit/src/rabbit_queue_consumers.erl b/deps/rabbit/src/rabbit_queue_consumers.erl index c89f7b85bc2d..a36efe3cb94c 100644 --- a/deps/rabbit/src/rabbit_queue_consumers.erl +++ b/deps/rabbit/src/rabbit_queue_consumers.erl @@ -2,18 +2,19 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. %% -module(rabbit_queue_consumers). -export([new/0, max_active_priority/1, inactive/1, all/1, all/3, count/0, - unacknowledged_message_count/0, add/11, remove/3, erase_ch/2, - send_drained/1, deliver/5, record_ack/3, subtract_acks/3, + unacknowledged_message_count/0, add/9, remove/4, erase_ch/2, + deliver/5, record_ack/3, subtract_acks/3, possibly_unblock/3, resume_fun/0, notify_sent_fun/1, activate_limit_fun/0, - credit/7, utilisation/1, capacity/1, is_same/3, get_consumer/1, get/3, - consumer_tag/1, get_infos/1]). + drained/3, process_credit/5, get_link_state/2, + utilisation/1, capacity/1, is_same/3, get_consumer/1, get/3, + consumer_tag/1, get_infos/1, parse_prefetch_count/1]). -export([deactivate_limit_fun/0]). @@ -21,7 +22,8 @@ -define(QUEUE, lqueue). --define(UNSENT_MESSAGE_LIMIT, 200). +-define(KEY_UNSENT_MESSAGE_LIMIT, classic_queue_consumer_unsent_message_limit). +-define(DEFAULT_UNSENT_MESSAGE_LIMIT, 200). %% Utilisation average calculations are all in μs. -define(USE_AVG_HALF_LIFE, 1000000.0). @@ -30,18 +32,26 @@ -record(consumer, {tag, ack_required, prefetch, args, user}). +%% AMQP 1.0 link flow control state, see §2.6.7 +%% Delete atom credit_api_v1 when feature flag rabbitmq_4.0.0 becomes required. +-record(link_state, {delivery_count :: rabbit_queue_type:delivery_count() | credit_api_v1, + credit :: rabbit_queue_type:credit()}). + %% These are held in our process dictionary +%% channel record -record(cr, {ch_pid, monitor_ref, - acktags, - consumer_count, + acktags :: ?QUEUE:?QUEUE({ack(), rabbit_types:ctag() | none}), + consumer_count :: non_neg_integer(), %% Queue of {ChPid, #consumer{}} for consumers which have %% been blocked (rate/prefetch limited) for any reason blocked_consumers, %% The limiter itself limiter, %% Internal flow control for queue -> writer - unsent_message_count}). + unsent_message_count :: non_neg_integer(), + link_states :: #{rabbit_types:ctag() => #link_state{}} + }). %%---------------------------------------------------------------------------- @@ -63,10 +73,15 @@ -spec new() -> state(). -new() -> #state{consumers = priority_queue:new(), - use = {active, - erlang:monotonic_time(micro_seconds), - 1.0}}. +new() -> + Val = application:get_env(rabbit, + ?KEY_UNSENT_MESSAGE_LIMIT, + ?DEFAULT_UNSENT_MESSAGE_LIMIT), + persistent_term:put(?KEY_UNSENT_MESSAGE_LIMIT, Val), + #state{consumers = priority_queue:new(), + use = {active, + erlang:monotonic_time(microsecond), + 1.0}}. -spec max_active_priority(state()) -> integer() | 'infinity' | 'empty'. @@ -120,57 +135,87 @@ count() -> lists:sum([Count || #cr{consumer_count = Count} <- all_ch_record()]). unacknowledged_message_count() -> lists:sum([?QUEUE:len(C#cr.acktags) || C <- all_ch_record()]). --spec add(rabbit_amqqueue:name(), ch(), rabbit_types:ctag(), boolean(), pid() | none, boolean(), - non_neg_integer(), rabbit_framing:amqp_table(), boolean(), - rabbit_types:username(), state()) - -> state(). - -add(QName, ChPid, CTag, NoAck, LimiterPid, LimiterActive, Prefetch, Args, IsEmpty, +-spec add(ch(), rabbit_types:ctag(), boolean(), pid() | none, boolean(), + %% credit API v1 + SimplePrefetch :: non_neg_integer() | + %% credit API v2 + {simple_prefetch, non_neg_integer()} | {credited, rabbit_queue_type:delivery_count()}, + rabbit_framing:amqp_table(), + rabbit_types:username(), state()) -> + state(). 
+ +add(ChPid, CTag, NoAck, LimiterPid, LimiterActive, + ModeOrPrefetch, Args, Username, State = #state{consumers = Consumers, use = CUInfo}) -> - C = #cr{consumer_count = Count, - limiter = Limiter} = ch_record(ChPid, LimiterPid), + C0 = #cr{consumer_count = Count, + limiter = Limiter, + link_states = LinkStates} = ch_record(ChPid, LimiterPid), Limiter1 = case LimiterActive of true -> rabbit_limiter:activate(Limiter); false -> Limiter end, - C1 = C#cr{consumer_count = Count + 1, limiter = Limiter1}, - update_ch_record( - case parse_credit_args(Prefetch, Args) of - {0, auto} -> C1; - {_Credit, auto} when NoAck -> C1; - {Credit, Mode} -> credit_and_drain(QName, - C1, CTag, Credit, Mode, IsEmpty) - end), + C1 = C0#cr{consumer_count = Count + 1, + limiter = Limiter1}, + C = case parse_credit_mode(ModeOrPrefetch, Args) of + {0, auto} -> + C1; + {Credit, auto = Mode} -> + case NoAck of + true -> + C1; + false -> + Limiter2 = rabbit_limiter:credit(Limiter1, CTag, Credit, Mode), + C1#cr{limiter = Limiter2} + end; + {InitialDeliveryCount, manual} -> + C1#cr{link_states = LinkStates#{CTag => #link_state{ + credit = 0, + delivery_count = InitialDeliveryCount}}} + end, + update_ch_record(C), Consumer = #consumer{tag = CTag, ack_required = not NoAck, - prefetch = Prefetch, + prefetch = parse_prefetch_count(ModeOrPrefetch), args = Args, - user = Username}, + user = Username}, State#state{consumers = add_consumer({ChPid, Consumer}, Consumers), use = update_use(CUInfo, active)}. --spec remove(ch(), rabbit_types:ctag(), state()) -> - 'not_found' | state(). - -remove(ChPid, CTag, State = #state{consumers = Consumers}) -> +-spec remove(ch(), rabbit_types:ctag(), rabbit_queue_type:cancel_reason(), state()) -> + not_found | {[ack()], state()}. +remove(ChPid, CTag, Reason, State = #state{consumers = Consumers}) -> case lookup_ch(ChPid) of not_found -> not_found; - C = #cr{consumer_count = Count, - limiter = Limiter, - blocked_consumers = Blocked} -> - Blocked1 = remove_consumer(ChPid, CTag, Blocked), + C = #cr{acktags = AckTags0, + consumer_count = Count, + limiter = Limiter, + blocked_consumers = Blocked, + link_states = LinkStates} -> + {Acks, AckTags} = case Reason of + remove -> + AckTags1 = ?QUEUE:to_list(AckTags0), + {AckTags2, AckTags3} = lists:partition( + fun({_, Tag}) -> + Tag =:= CTag + end, AckTags1), + {lists:map(fun({Ack, _}) -> Ack end, AckTags2), + ?QUEUE:from_list(AckTags3)}; + _ -> + {[], AckTags0} + end, Limiter1 = case Count of 1 -> rabbit_limiter:deactivate(Limiter); _ -> Limiter end, Limiter2 = rabbit_limiter:forget_consumer(Limiter1, CTag), - update_ch_record(C#cr{consumer_count = Count - 1, - limiter = Limiter2, - blocked_consumers = Blocked1}), - State#state{consumers = - remove_consumer(ChPid, CTag, Consumers)} + update_ch_record(C#cr{acktags = AckTags, + consumer_count = Count - 1, + limiter = Limiter2, + blocked_consumers = remove_consumer(ChPid, CTag, Blocked), + link_states = maps:remove(CTag, LinkStates)}), + {Acks, State#state{consumers = remove_consumer(ChPid, CTag, Consumers)}} end. -spec erase_ch(ch(), state()) -> @@ -192,11 +237,6 @@ erase_ch(ChPid, State = #state{consumers = Consumers}) -> State#state{consumers = remove_consumers(ChPid, Consumers)}} end. --spec send_drained(rabbit_amqqueue:name()) -> 'ok'. -send_drained(QName) -> - [update_ch_record(send_drained(QName, C)) || C <- all_ch_record()], - ok. 
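An illustrative sketch (names invented for the example) of how the new remove/4 above partitions a channel's pending acks when a single consumer is cancelled with reason remove, assuming the ack tags are stored as {AckTag, ConsumerTag} pairs as in the acktags queue:

split_acks_for(CTag, AckTags) ->
    %% separate the acks belonging to the cancelled consumer from the rest
    {ForConsumer, Remaining} =
        lists:partition(fun({_Ack, Tag}) -> Tag =:= CTag end, AckTags),
    {[Ack || {Ack, _} <- ForConsumer], Remaining}.

%% split_acks_for(<<"ctag-1">>, [{1, <<"ctag-1">>}, {2, <<"ctag-2">>}])
%%   returns {[1], [{2, <<"ctag-2">>}]}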
- -spec deliver(fun ((boolean()) -> {fetch_result(), T}), rabbit_amqqueue:name(), state(), boolean(), none | {ch(), rabbit_types:ctag()} | {ch(), consumer()}) -> @@ -209,10 +249,12 @@ deliver(FetchFun, QName, State, SingleActiveConsumerIsOn, ActiveConsumer) -> deliver(_FetchFun, _QName, false, State, true, none) -> {undelivered, false, State#state{use = update_use(State#state.use, inactive)}}; -deliver(FetchFun, QName, false, State = #state{consumers = Consumers}, true, SingleActiveConsumer) -> +deliver(FetchFun, QName, false, State = #state{consumers = Consumers}, true, + SingleActiveConsumer) -> {ChPid, Consumer} = SingleActiveConsumer, - %% blocked (rate/prefetch limited) consumers are removed from the queue state, but not the exclusive_consumer field, - %% so we need to do this check to avoid adding the exclusive consumer to the channel record + %% blocked (rate/prefetch limited) consumers are removed from the queue state, + %% but not the exclusive_consumer field, so we need to do this check to + %% avoid adding the exclusive consumer to the channel record %% over and over case is_blocked(SingleActiveConsumer) of true -> @@ -246,23 +288,50 @@ deliver(FetchFun, QName, ConsumersChanged, end end. -deliver_to_consumer(FetchFun, E = {ChPid, Consumer}, QName) -> - C = lookup_ch(ChPid), - case is_ch_blocked(C) of - true -> - block_consumer(C, E), - undelivered; - false -> case rabbit_limiter:can_send(C#cr.limiter, - Consumer#consumer.ack_required, - Consumer#consumer.tag) of - {suspend, Limiter} -> - block_consumer(C#cr{limiter = Limiter}, E), - undelivered; - {continue, Limiter} -> - {delivered, deliver_to_consumer( - FetchFun, Consumer, - C#cr{limiter = Limiter}, QName)} - end +deliver_to_consumer(FetchFun, + E = {ChPid, Consumer = #consumer{tag = CTag}}, + QName) -> + C = #cr{link_states = LinkStates} = lookup_ch(ChPid), + case LinkStates of + #{CTag := #link_state{delivery_count = DeliveryCount0, + credit = Credit} = LinkState0} -> + %% bypass credit flow for link credit consumers + %% as it is handled separately + case Credit > 0 of + true -> + DeliveryCount = case DeliveryCount0 of + credit_api_v1 -> + DeliveryCount0; + _ -> + serial_number:add(DeliveryCount0, 1) + end, + LinkState = LinkState0#link_state{delivery_count = DeliveryCount, + credit = Credit - 1}, + C1 = C#cr{link_states = maps:update(CTag, LinkState, LinkStates)}, + {delivered, deliver_to_consumer(FetchFun, Consumer, C1, QName)}; + false -> + block_consumer(C, E), + undelivered + end; + _ -> + %% not a link credit consumer, use credit flow + case is_ch_blocked(C) of + true -> + block_consumer(C, E), + undelivered; + false -> + case rabbit_limiter:can_send(C#cr.limiter, + Consumer#consumer.ack_required, + CTag) of + {suspend, Limiter} -> + block_consumer(C#cr{limiter = Limiter}, E), + undelivered; + {continue, Limiter} -> + {delivered, deliver_to_consumer( + FetchFun, Consumer, + C#cr{limiter = Limiter}, QName)} + end + end end. deliver_to_consumer(FetchFun, @@ -349,11 +418,21 @@ possibly_unblock(Update, ChPid, State) -> end end. 
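A simplified sketch of the credit bookkeeping the new deliver_to_consumer/3 clause performs for a consumer in credited (link credit) mode: each delivery consumes one credit and advances the delivery count, and a consumer with no credit is blocked. The real code keeps this state in #link_state{} and uses serial-number arithmetic for the delivery count; plain integer addition is used here for brevity.

try_deliver(#{credit := Credit, delivery_count := DC} = LinkState)
  when Credit > 0 ->
    %% one message can go out: consume a credit, advance the delivery count
    {deliver, LinkState#{credit := Credit - 1, delivery_count := DC + 1}};
try_deliver(LinkState) ->
    %% no link credit left: block until the client grants more credit
    {blocked, LinkState}.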
-unblock(C = #cr{blocked_consumers = BlockedQ, limiter = Limiter}, +unblock(C = #cr{blocked_consumers = BlockedQ, + limiter = Limiter, + link_states = LinkStates}, State = #state{consumers = Consumers, use = Use}) -> case lists:partition( fun({_P, {_ChPid, #consumer{tag = CTag}}}) -> - rabbit_limiter:is_consumer_blocked(Limiter, CTag) + case maps:find(CTag, LinkStates) of + {ok, #link_state{credit = Credits}} + when Credits > 0 -> + false; + {ok, _Exhausted} -> + true; + error -> + rabbit_limiter:is_consumer_blocked(Limiter, CTag) + end end, priority_queue:to_list(BlockedQ)) of {_, []} -> update_ch_record(C), @@ -395,28 +474,61 @@ deactivate_limit_fun() -> C#cr{limiter = rabbit_limiter:deactivate(Limiter)} end. --spec credit(rabbit_amqqueue:name(), boolean(), integer(), boolean(), ch(), - rabbit_types:ctag(), - state()) -> 'unchanged' | {'unblocked', state()}. +-spec drained(rabbit_queue_type:delivery_count() | credit_api_v1, ch(), rabbit_types:ctag()) -> + ok. +drained(AdvancedDeliveryCount, ChPid, CTag) -> + case lookup_ch(ChPid) of + C0 = #cr{link_states = LinkStates = #{CTag := LinkState0}} -> + LinkState = LinkState0#link_state{delivery_count = AdvancedDeliveryCount, + credit = 0}, + C = C0#cr{link_states = maps:update(CTag, LinkState, LinkStates)}, + update_ch_record(C); + _ -> + ok + end. -credit(QName, IsEmpty, Credit, Drain, ChPid, CTag, State) -> +-spec process_credit(rabbit_queue_type:delivery_count() | credit_api_v1, + rabbit_queue_type:credit(), ch(), rabbit_types:ctag(), state()) -> + 'unchanged' | {'unblocked', state()}. +process_credit(DeliveryCountRcv, LinkCredit, ChPid, CTag, State) -> case lookup_ch(ChPid) of - not_found -> - unchanged; - #cr{limiter = Limiter} = C -> - C1 = #cr{limiter = Limiter1} = - credit_and_drain(QName, C, CTag, Credit, drain_mode(Drain), IsEmpty), - case is_ch_blocked(C1) orelse - (not rabbit_limiter:is_consumer_blocked(Limiter, CTag)) orelse - rabbit_limiter:is_consumer_blocked(Limiter1, CTag) of - true -> update_ch_record(C1), - unchanged; - false -> unblock(C1, State) - end + #cr{link_states = LinkStates = #{CTag := LinkState = + #link_state{delivery_count = DeliveryCountSnd, + credit = OldLinkCreditSnd}}, + unsent_message_count = _Count} = C0 -> + LinkCreditSnd = case DeliveryCountSnd of + credit_api_v1 -> + %% LinkCredit refers to LinkCreditSnd + LinkCredit; + _ -> + %% credit API v2 + %% LinkCredit refers to LinkCreditRcv + amqp10_util:link_credit_snd( + DeliveryCountRcv, LinkCredit, DeliveryCountSnd) + end, + C = C0#cr{link_states = maps:update(CTag, LinkState#link_state{credit = LinkCreditSnd}, LinkStates)}, + case OldLinkCreditSnd > 0 orelse + LinkCreditSnd < 1 of + true -> + update_ch_record(C), + unchanged; + false -> + unblock(C, State) + end; + _ -> + unchanged end. -drain_mode(true) -> drain; -drain_mode(false) -> manual. +-spec get_link_state(pid(), rabbit_types:ctag()) -> + {rabbit_queue_type:delivery_count() | credit_api_v1, rabbit_queue_type:credit()} | not_found. +get_link_state(ChPid, CTag) -> + case lookup_ch(ChPid) of + #cr{link_states = #{CTag := #link_state{delivery_count = DeliveryCount, + credit = Credit}}} -> + {DeliveryCount, Credit}; + _ -> + not_found + end. -spec utilisation(state()) -> ratio(). 
utilisation(State) -> @@ -465,14 +577,39 @@ consumer_tag(#consumer{tag = CTag}) -> %%---------------------------------------------------------------------------- -parse_credit_args(Default, Args) -> +%% credit API v2 uses mode +parse_prefetch_count({simple_prefetch, Prefetch}) -> + Prefetch; +parse_prefetch_count({credited, _InitialDeliveryCount}) -> + 0; +%% credit API v1 uses prefetch +parse_prefetch_count(Prefetch) + when is_integer(Prefetch) -> + Prefetch. + +-spec parse_credit_mode(rabbit_queue_type:consume_mode(), rabbit_framing:amqp_table()) -> + {Prefetch :: non_neg_integer(), auto | manual}. + +%% credit API v2 +parse_credit_mode({simple_prefetch, Prefetch}, _Args) -> + {Prefetch, auto}; +parse_credit_mode({credited, InitialDeliveryCount}, _Args) -> + {InitialDeliveryCount, manual}; +%% credit API v1 +%% i.e. below function clause should be deleted when feature flag rabbitmq_4.0.0 becomes required: +parse_credit_mode(Prefetch, Args) + when is_integer(Prefetch) -> case rabbit_misc:table_lookup(Args, <<"x-credit">>) of - {table, T} -> case {rabbit_misc:table_lookup(T, <<"credit">>), - rabbit_misc:table_lookup(T, <<"drain">>)} of - {{long, C}, {bool, D}} -> {C, drain_mode(D)}; - _ -> {Default, auto} - end; - undefined -> {Default, auto} + {table, T} -> + case {rabbit_misc:table_lookup(T, <<"credit">>), + rabbit_misc:table_lookup(T, <<"drain">>)} of + {{long, 0}, {bool, false}} -> + {credit_api_v1, manual}; + _ -> + {Prefetch, auto} + end; + undefined -> + {Prefetch, auto} end. lookup_ch(ChPid) -> @@ -492,7 +629,8 @@ ch_record(ChPid, LimiterPid) -> consumer_count = 0, blocked_consumers = priority_queue:new(), limiter = Limiter, - unsent_message_count = 0}, + unsent_message_count = 0, + link_states = #{}}, put(Key, C), C; C = #cr{} -> C @@ -522,33 +660,17 @@ block_consumer(C = #cr{blocked_consumers = Blocked}, QEntry) -> update_ch_record(C#cr{blocked_consumers = add_consumer(QEntry, Blocked)}). is_ch_blocked(#cr{unsent_message_count = Count, limiter = Limiter}) -> - Count >= ?UNSENT_MESSAGE_LIMIT orelse rabbit_limiter:is_suspended(Limiter). - -send_drained(QName, C = #cr{ch_pid = ChPid, limiter = Limiter}) -> - case rabbit_limiter:drained(Limiter) of - {[], Limiter} -> C; - {CTagCredits, Limiter2} -> - ok = rabbit_classic_queue:send_drained(ChPid, QName, CTagCredits), - C#cr{limiter = Limiter2} - end. - -credit_and_drain(QName, C = #cr{ch_pid = ChPid, limiter = Limiter}, - CTag, Credit, Mode, IsEmpty) -> - case rabbit_limiter:credit(Limiter, CTag, Credit, Mode, IsEmpty) of - {true, Limiter1} -> - ok = rabbit_classic_queue:send_drained(ChPid, QName, [{CTag, Credit}]), - C#cr{limiter = Limiter1}; - {false, Limiter1} -> C#cr{limiter = Limiter1} - end. + UnsentMessageLimit = persistent_term:get(?KEY_UNSENT_MESSAGE_LIMIT), + Count >= UnsentMessageLimit orelse rabbit_limiter:is_suspended(Limiter). tags(CList) -> [CTag || {_P, {_ChPid, #consumer{tag = CTag}}} <- CList]. -add_consumer({ChPid, Consumer = #consumer{args = Args}}, Queue) -> +add_consumer(Key = {_ChPid, #consumer{args = Args}}, Queue) -> Priority = case rabbit_misc:table_lookup(Args, <<"x-priority">>) of {_, P} -> P; _ -> 0 end, - priority_queue:in({ChPid, Consumer}, Priority, Queue). + priority_queue:in(Key, Priority, Queue). 
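Note that the unsent message limit consulted by is_ch_blocked/1 above is no longer the hard-coded 200: new/0 reads it from the rabbit application environment under classic_queue_consumer_unsent_message_limit and caches it in a persistent term. If the default ever needs changing, something along these lines in advanced.config should work (512 is only an example value):

[
 {rabbit, [
   {classic_queue_consumer_unsent_message_limit, 512}
 ]}
].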
remove_consumer(ChPid, CTag, Queue) -> priority_queue:filter(fun ({CP, #consumer{tag = CT}}) -> diff --git a/deps/rabbit/src/rabbit_queue_decorator.erl b/deps/rabbit/src/rabbit_queue_decorator.erl index 4edc93d7e70c..7e672c0dd901 100644 --- a/deps/rabbit/src/rabbit_queue_decorator.erl +++ b/deps/rabbit/src/rabbit_queue_decorator.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_queue_decorator). --include_lib("rabbit_common/include/rabbit.hrl"). -include("amqqueue.hrl"). -export([select/1, set/1, register/2, unregister/1]). @@ -72,5 +71,5 @@ maybe_recover(Q0) when ?is_amqqueue(Q0) -> _ -> %% TODO LRB JSP 160169569 should startup be passed Q1 here? _ = [M:startup(Q0) || M <- New -- Old], - rabbit_amqqueue:update_decorators(Name) + rabbit_amqqueue:update_decorators(Name, Decs1) end. diff --git a/deps/rabbit/src/rabbit_queue_index.erl b/deps/rabbit/src/rabbit_queue_index.erl index 689ff730730e..77c47c42df2d 100644 --- a/deps/rabbit/src/rabbit_queue_index.erl +++ b/deps/rabbit/src/rabbit_queue_index.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_queue_index). @@ -313,7 +313,9 @@ init_for_conversion(#resource{ virtual_host = VHost } = Name, OnSyncFun, OnSyncM 'undefined' | non_neg_integer(), qistate()}. recover(#resource{ virtual_host = VHost } = Name, Terms, MsgStoreRecovered, - ContainsCheckFun, OnSyncFun, OnSyncMsgFun, Context) -> + ContainsCheckFun, OnSyncFun, OnSyncMsgFun, + %% We only allow using this module when converting to v2. + convert) -> #{segment_entry_count := SegmentEntryCount} = rabbit_vhost:read_config(VHost), put(segment_entry_count, SegmentEntryCount), VHostDir = rabbit_vhost:msg_store_dir_path(VHost), @@ -323,10 +325,10 @@ recover(#resource{ virtual_host = VHost } = Name, Terms, MsgStoreRecovered, CleanShutdown = Terms /= non_clean_shutdown, case CleanShutdown andalso MsgStoreRecovered of true -> case proplists:get_value(segments, Terms, non_clean_shutdown) of - non_clean_shutdown -> init_dirty(false, ContainsCheckFun, State1, Context); + non_clean_shutdown -> init_dirty(false, ContainsCheckFun, State1); RecoveredCounts -> init_clean(RecoveredCounts, State1) end; - false -> init_dirty(CleanShutdown, ContainsCheckFun, State1, Context) + false -> init_dirty(CleanShutdown, ContainsCheckFun, State1) end. -spec terminate(rabbit_types:vhost(), [any()], qistate()) -> qistate(). @@ -644,7 +646,7 @@ init_clean(RecoveredCounts, State) -> -define(RECOVER_BYTES, 2). -define(RECOVER_COUNTER_SIZE, 2). -init_dirty(CleanShutdown, ContainsCheckFun, State, Context) -> +init_dirty(CleanShutdown, ContainsCheckFun, State) -> %% Recover the journal completely. This will also load segments %% which have entries in the journal and remove duplicates. 
The %% counts will correctly reflect the combination of the segment @@ -679,84 +681,7 @@ init_dirty(CleanShutdown, ContainsCheckFun, State, Context) -> %% recovery fails with a crash. State2 = flush_journal(State1 #qistate { segments = Segments1, dirty_count = DirtyCount }), - case Context of - convert -> - {Count, Bytes, State2}; - main -> - %% We try to see if there are segment files from the v2 index. - case rabbit_file:wildcard(".*\\.qi", Dir) of - %% We are recovering a dirty queue that was using the v2 index or in - %% the process of converting from v2 to v1. - [_|_] -> - #resource{virtual_host = VHost, name = QName} = State2#qistate.queue_name, - rabbit_log:info("Queue ~ts in vhost ~ts recovered ~b total messages before resuming convert", - [QName, VHost, Count]), - CountersRef = counters:new(?RECOVER_COUNTER_SIZE, []), - State3 = recover_index_v2_dirty(State2, ContainsCheckFun, CountersRef), - {Count + counters:get(CountersRef, ?RECOVER_COUNT), - Bytes + counters:get(CountersRef, ?RECOVER_BYTES), - State3}; - %% Otherwise keep default values. - [] -> - {Count, Bytes, State2} - end - end. - -recover_index_v2_dirty(State0 = #qistate { queue_name = Name, - on_sync = OnSyncFun, - on_sync_msg = OnSyncMsgFun }, - ContainsCheckFun, CountersRef) -> - #resource{virtual_host = VHost, name = QName} = Name, - rabbit_log:info("Converting queue ~ts in vhost ~ts from v2 to v1 after unclean shutdown", [QName, VHost]), - %% We cannot use the counts/bytes because some messages may be in both - %% the v1 and v2 indexes after a crash. - {_, _, V2State} = rabbit_classic_queue_index_v2:recover(Name, non_clean_shutdown, true, - ContainsCheckFun, OnSyncFun, OnSyncMsgFun, - convert), - State = recover_index_v2_common(State0, V2State, CountersRef), - rabbit_log:info("Queue ~ts in vhost ~ts converted ~b total messages from v2 to v1", - [QName, VHost, counters:get(CountersRef, ?RECOVER_COUNT)]), - State. - -%% At this point all messages are persistent because transient messages -%% were dropped during the v2 index recovery. -recover_index_v2_common(State0 = #qistate { queue_name = Name, dir = Dir }, - V2State, CountersRef) -> - %% Use a temporary per-queue store state to read embedded messages. - StoreState0 = rabbit_classic_queue_store_v2:init(Name), - %% Go through the v2 index and publish messages to v1 index. - {LoSeqId, HiSeqId, _} = rabbit_classic_queue_index_v2:bounds(V2State), - %% When resuming after a crash we need to double check the messages that are both - %% in the v1 and v2 index (effectively the messages below the upper bound of the - %% v1 index that are about to be written to it). - {_, V1HiSeqId, _} = bounds(State0), - SkipFun = fun - (SeqId, FunState0) when SeqId < V1HiSeqId -> - case read(SeqId, SeqId + 1, FunState0) of - %% Message already exists, skip. - {[_], FunState} -> - {skip, FunState}; - %% Message doesn't exist, write. - {[], FunState} -> - {write, FunState} - end; - %% Message is out of bounds of the v1 index. - (_, FunState) -> - {write, FunState} - end, - %% We use a common function also used with conversion on policy change. - {State1, _StoreState} = rabbit_variable_queue:convert_from_v2_to_v1_loop(Name, State0, V2State, StoreState0, - {CountersRef, ?RECOVER_COUNT, ?RECOVER_BYTES}, - LoSeqId, HiSeqId, SkipFun), - %% Delete any remaining v2 index files. - OldFiles = rabbit_file:wildcard(".*\\.qi", Dir) - ++ rabbit_file:wildcard(".*\\.qs", Dir), - _ = [rabbit_file:delete(filename:join(Dir, F)) || F <- OldFiles], - %% Ensure that everything in the v1 index is written to disk. 
- State = flush(State1), - %% Clean up all the garbage that we have surely been creating. - garbage_collect(), - State. + {Count, Bytes, State2}. terminate(State = #qistate { journal_handle = JournalHdl, segments = Segments }) -> @@ -997,8 +922,6 @@ append_journal_to_segment(#segment { journal_entries = JEntries, case array:sparse_size(JEntries) of 0 -> Segment; _ -> - file_handle_cache_stats:update(queue_index_write), - {ok, Hdl} = file_handle_cache:open_with_absolute_path( Path, ?WRITE_MODE, [{write_buffer, infinity}]), @@ -1247,7 +1170,6 @@ load_segment(KeepAcked, #segment { path = Path }) -> case rabbit_file:is_file(Path) of false -> Empty; true -> Size = rabbit_file:file_size(Path), - file_handle_cache_stats:update(queue_index_read), {ok, Hdl} = file_handle_cache:open_with_absolute_path( Path, ?READ_MODE, []), {ok, 0} = file_handle_cache:position(Hdl, bof), diff --git a/deps/rabbit/src/rabbit_queue_location.erl b/deps/rabbit/src/rabbit_queue_location.erl index 6471a0e4ab28..b2561dd477f5 100644 --- a/deps/rabbit/src/rabbit_queue_location.erl +++ b/deps/rabbit/src/rabbit_queue_location.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_queue_location). @@ -10,9 +10,28 @@ -include("amqqueue.hrl"). -export([queue_leader_locators/0, - select_leader_and_followers/2]). + select_leader_and_followers/2, + master_locator_permitted/0]). --define(QUEUE_LEADER_LOCATORS_DEPRECATED, [<<"random">>, <<"least-leaders">>]). +%% these are needed because of they are called with ?MODULE: +%% to allow mecking them in tests +-export([node/0, + queues_per_node/2]). + +-ifdef(TEST). +-export([select_members/7, leader_node/6, leader_locator/1]). +-endif. + +-rabbit_deprecated_feature( + {queue_master_locator, + #{deprecation_phase => permitted_by_default, + messages => + #{when_permitted => + "queue-master-locator is deprecated. " + "queue-leader-locator should be used instead (allowed values are 'client-local' and 'balanced')"}} + }). + +-define(QUEUE_LEADER_LOCATORS_DEPRECATED, [<<"random">>, <<"least-leaders">>, <<"min-masters">>]). -define(QUEUE_LEADER_LOCATORS, [<<"client-local">>, <<"balanced">>] ++ ?QUEUE_LEADER_LOCATORS_DEPRECATED). -define(QUEUE_COUNT_START_RANDOM_SELECTION, 1_000). @@ -26,21 +45,31 @@ queue_leader_locators() -> -spec select_leader_and_followers(amqqueue:amqqueue(), pos_integer()) -> {Leader :: node(), Followers :: [node()]}. select_leader_and_followers(Q, Size) - when (?amqqueue_is_quorum(Q) orelse ?amqqueue_is_stream(Q)) andalso is_integer(Size) -> + when (?amqqueue_is_quorum(Q) orelse ?amqqueue_is_stream(Q) orelse ?amqqueue_is_classic(Q)) andalso is_integer(Size) -> + LeaderLocator = leader_locator(Q), + QueueType = amqqueue:get_type(Q), + do_select_leader_and_followers(Size, QueueType, LeaderLocator). + +-spec do_select_leader_and_followers(pos_integer(), atom(), queue_leader_locator()) -> + {Leader :: node(), Followers :: [node()]}. 
+do_select_leader_and_followers(1, _, <<"client-local">>) -> + %% optimisation for classic queues + {?MODULE:node(), []}; +do_select_leader_and_followers(Size, QueueType, LeaderLocator) -> AllNodes = rabbit_nodes:list_members(), RunningNodes = rabbit_nodes:filter_running(AllNodes), - true = lists:member(node(), AllNodes), - QueueType = amqqueue:get_type(Q), + true = lists:member(?MODULE:node(), AllNodes), GetQueues0 = get_queues_for_type(QueueType), + %% TODO do we always need the queue count? it can be expensive, check if it can be skipped! + %% for example, for random QueueCount = rabbit_amqqueue:count(), QueueCountStartRandom = application:get_env(rabbit, queue_count_start_random_selection, ?QUEUE_COUNT_START_RANDOM_SELECTION), - {Replicas, GetQueues} = select_replicas(Size, AllNodes, RunningNodes, + {Members, GetQueues} = select_members(Size, QueueType, AllNodes, RunningNodes, QueueCount, QueueCountStartRandom, GetQueues0), - LeaderLocator = leader_locator(Q), - Leader = leader_node(LeaderLocator, Replicas, RunningNodes, + Leader = leader_node(LeaderLocator, Members, RunningNodes, QueueCount, QueueCountStartRandom, GetQueues), - Followers = lists:delete(Leader, Replicas), + Followers = lists:delete(Leader, Members), {Leader, Followers}. -spec leader_locator(amqqueue:amqqueue()) -> @@ -51,7 +80,15 @@ leader_locator(Q) -> fun (PolVal, _ArgVal) -> PolVal end, Q) of undefined -> - application:get_env(rabbit, queue_leader_locator, undefined); + case rabbit_queue_type_util:args_policy_lookup( + <<"queue-master-locator">>, + fun (PolVal, _ArgVal) -> PolVal end, + Q) of + undefined -> + application:get_env(rabbit, queue_leader_locator, undefined); + Val -> + Val + end; Val -> Val end, @@ -61,37 +98,48 @@ leader_locator0(<<"client-local">>) -> <<"client-local">>; leader_locator0(<<"balanced">>) -> <<"balanced">>; -%% 'random' and 'least-leaders' are deprecated +%% 'random', 'least-leaders' and 'min-masters' are deprecated leader_locator0(<<"random">>) -> <<"balanced">>; leader_locator0(<<"least-leaders">>) -> <<"balanced">>; +leader_locator0(<<"min-masters">>) -> + <<"balanced">>; leader_locator0(_) -> %% default <<"client-local">>. --spec select_replicas(pos_integer(), [node(),...], [node(),...], +-spec select_members(pos_integer(), rabbit_queue_type:queue_type(), [node(),...], [node(),...], non_neg_integer(), non_neg_integer(), function()) -> {[node(),...], function()}. -select_replicas(Size, AllNodes, _, _, _, Fun) +select_members(Size, _, AllNodes, _, _, _, Fun) when length(AllNodes) =< Size -> {AllNodes, Fun}; +%% Classic queues: above the threshold, pick a random node +%% For classic queues, when there's a lot of queues, if we knew that the +%% distribution of queues between nodes is relatively even, it'd be better +%% to declare this queue locally rather than randomly. However, currently, +%% counting queues on each node is relatively expensive. Users can use +%% the client-local strategy if they know their connections are well balanced +select_members(1, rabbit_classic_queue, _, RunningNodes, _, _, GetQueues) -> + {RunningNodes, GetQueues}; +%% Quorum queues and streams %% Select nodes in the following order: %% 1. Local node to have data locality for declaring client. %% 2. Running nodes. -%% 3.1. If there are many queues: Randomly to avoid expensive calculation of counting replicas +%% 3.1. If there are many queues: Randomly to avoid expensive calculation of counting members %% per node. Random replica selection is good enough for most use cases. -%% 3.2. 
If there are few queues: Nodes with least replicas to have a "balanced" RabbitMQ cluster. -select_replicas(Size, AllNodes, RunningNodes, QueueCount, QueueCountStartRandom, GetQueues) +%% 3.2. If there are few queues: Nodes with least members to have a "balanced" RabbitMQ cluster. +select_members(Size, _, AllNodes, RunningNodes, QueueCount, QueueCountStartRandom, GetQueues) when QueueCount >= QueueCountStartRandom -> - L0 = shuffle(lists:delete(node(), AllNodes)), + L0 = shuffle(lists:delete(?MODULE:node(), AllNodes)), L1 = lists:sort(fun(X, _Y) -> lists:member(X, RunningNodes) end, L0), {L, _} = lists:split(Size - 1, L1), - {[node() | L], GetQueues}; -select_replicas(Size, AllNodes, RunningNodes, _, _, GetQueues) -> - Counters0 = maps:from_list([{N, 0} || N <- lists:delete(node(), AllNodes)]), + {[?MODULE:node() | L], GetQueues}; +select_members(Size, _, AllNodes, RunningNodes, _, _, GetQueues) -> + Counters0 = maps:from_list([{N, 0} || N <- lists:delete(?MODULE:node(), AllNodes)]), Queues = GetQueues(), Counters = lists:foldl(fun(Q, Acc) -> #{nodes := Nodes} = amqqueue:get_type_state(Q), @@ -116,46 +164,34 @@ select_replicas(Size, AllNodes, RunningNodes, _, _, GetQueues) -> end, L0), {L2, _} = lists:split(Size - 1, L1), L = lists:map(fun({N, _}) -> N end, L2), - {[node() | L], fun() -> Queues end}. + {[?MODULE:node() | L], fun() -> Queues end}. -spec leader_node(queue_leader_locator(), [node(),...], [node(),...], non_neg_integer(), non_neg_integer(), function()) -> node(). leader_node(<<"client-local">>, _, _, _, _, _) -> - node(); + ?MODULE:node(); leader_node(<<"balanced">>, Nodes0, RunningNodes, QueueCount, QueueCountStartRandom, _) when QueueCount >= QueueCountStartRandom -> Nodes = potential_leaders(Nodes0, RunningNodes), lists:nth(rand:uniform(length(Nodes)), Nodes); -leader_node(<<"balanced">>, Nodes0, RunningNodes, _, _, GetQueues) +leader_node(<<"balanced">>, Members0, RunningNodes, _, _, GetQueues) when is_function(GetQueues, 0) -> - Nodes = potential_leaders(Nodes0, RunningNodes), - Counters0 = maps:from_list([{N, 0} || N <- Nodes]), - Counters = lists:foldl(fun(Q, Acc) -> - case amqqueue:get_pid(Q) of - {RaName, LeaderNode} - when is_atom(RaName), is_atom(LeaderNode), is_map_key(LeaderNode, Acc) -> - maps:update_with(LeaderNode, fun(C) -> C+1 end, Acc); - StreamLeaderPid - when is_pid(StreamLeaderPid), is_map_key(node(StreamLeaderPid), Acc) -> - maps:update_with(node(StreamLeaderPid), fun(C) -> C+1 end, Acc); - _ -> - Acc - end - end, Counters0, GetQueues()), + Members = potential_leaders(Members0, RunningNodes), + Counters = ?MODULE:queues_per_node(Members, GetQueues), {Node, _} = hd(lists:keysort(2, maps:to_list(Counters))), Node. -potential_leaders(Replicas, RunningNodes) -> +potential_leaders(Members, RunningNodes) -> case lists:filter(fun(R) -> lists:member(R, RunningNodes) - end, Replicas) of + end, Members) of [] -> - Replicas; - RunningReplicas -> - case rabbit_maintenance:filter_out_drained_nodes_local_read(RunningReplicas) of + Members; + RunningMembers -> + case rabbit_maintenance:filter_out_drained_nodes_local_read(RunningMembers) of [] -> - RunningReplicas; + RunningMembers; Filtered -> Filtered end @@ -170,3 +206,25 @@ shuffle(L0) when is_list(L0) -> L1 = lists:map(fun(E) -> {rand:uniform(), E} end, L0), L = lists:keysort(1, L1), lists:map(fun({_, E}) -> E end, L). 
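A condensed sketch of the two selection regimes implemented by select_members/7 above for quorum queues and streams: above the queue-count threshold a cheap random pick is used, below it the nodes currently holding the fewest members are preferred. Threshold corresponds to queue_count_start_random_selection and CountMembersFun stands in for the per-node member counting done via GetQueues; both names are illustrative only, and the running-node preference applied by the real code is omitted here.

pick_members(Size, AllNodes, QueueCount, Threshold, _CountMembersFun)
  when QueueCount >= Threshold ->
    %% many queues: a random pick is cheap and spreads members well enough
    Others = shuffle(lists:delete(node(), AllNodes)),
    [node() | lists:sublist(Others, Size - 1)];
pick_members(Size, AllNodes, _QueueCount, _Threshold, CountMembersFun) ->
    %% few queues: place new members on the nodes holding the fewest members
    Counted = lists:keysort(2, [{N, CountMembersFun(N)}
                                || N <- lists:delete(node(), AllNodes)]),
    [node() | [N || {N, _} <- lists:sublist(Counted, Size - 1)]].

shuffle(L) ->
    [E || {_, E} <- lists:keysort(1, [{rand:uniform(), E} || E <- L])].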
+ +queues_per_node(Nodes, GetQueues) -> + Counters0 = maps:from_list([{N, 0} || N <- Nodes]), + lists:foldl(fun(Q, Acc) -> + case amqqueue:get_pid(Q) of + {RaName, LeaderNode} %% quorum queues + when is_atom(RaName), is_atom(LeaderNode), is_map_key(LeaderNode, Acc) -> + maps:update_with(LeaderNode, fun(C) -> C+1 end, Acc); + Pid %% classic queues and streams + when is_pid(Pid), is_map_key(node(Pid), Acc) -> + maps:update_with(node(Pid), fun(C) -> C+1 end, Acc); + _ -> + Acc + end + end, Counters0, GetQueues()). + +%% for unit testing +-spec node() -> node(). +node() -> erlang:node(). + +master_locator_permitted() -> + rabbit_deprecated_features:is_permitted(queue_master_locator). diff --git a/deps/rabbit/src/rabbit_queue_location_client_local.erl b/deps/rabbit/src/rabbit_queue_location_client_local.erl deleted file mode 100644 index d380ce0ec5f4..000000000000 --- a/deps/rabbit/src/rabbit_queue_location_client_local.erl +++ /dev/null @@ -1,39 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_queue_location_client_local). --behaviour(rabbit_queue_master_locator). - --include_lib("rabbit_common/include/rabbit.hrl"). --include("amqqueue.hrl"). - --export([description/0, queue_master_location/1]). - --rabbit_boot_step({?MODULE, - [{description, "locate queue master client local"}, - {mfa, {rabbit_registry, register, - [queue_master_locator, - <<"client-local">>, ?MODULE]}}, - {requires, rabbit_registry}, - {enables, kernel_ready}]}). - - -%%--------------------------------------------------------------------------- -%% Queue Master Location Callbacks -%%--------------------------------------------------------------------------- - -description() -> - [{description, <<"Locate queue master node as the client local node">>}]. - -queue_master_location(Q) when ?is_amqqueue(Q) -> - %% unlike with other locator strategies we do not check node maintenance - %% status for two reasons: - %% - %% * nodes in maintenance mode will drop their client connections - %% * with other strategies, if no nodes are available, the current node - %% is returned but this strategy already does just that - {ok, node()}. diff --git a/deps/rabbit/src/rabbit_queue_location_min_masters.erl b/deps/rabbit/src/rabbit_queue_location_min_masters.erl deleted file mode 100644 index a821914a29c8..000000000000 --- a/deps/rabbit/src/rabbit_queue_location_min_masters.erl +++ /dev/null @@ -1,70 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_queue_location_min_masters). --behaviour(rabbit_queue_master_locator). - --include_lib("rabbit_common/include/rabbit.hrl"). --include("amqqueue.hrl"). - --export([description/0, queue_master_location/1]). - --rabbit_boot_step({?MODULE, - [{description, "locate queue master min bound queues"}, - {mfa, {rabbit_registry, register, - [queue_master_locator, - <<"min-masters">>, ?MODULE]}}, - {requires, rabbit_registry}, - {enables, kernel_ready}]}). 
- -%%--------------------------------------------------------------------------- -%% Queue Master Location Callbacks -%%--------------------------------------------------------------------------- - -description() -> - [{description, - <<"Locate queue master node from cluster node with least bound queues">>}]. - -queue_master_location(Q) when ?is_amqqueue(Q) -> - Cluster = rabbit_queue_master_location_misc:all_nodes(Q), - QueueNames = rabbit_amqqueue:list_names(), - MastersPerNode0 = lists:foldl( - fun(#resource{virtual_host = VHost, name = QueueName}, NodeMasters) -> - case rabbit_queue_master_location_misc:lookup_master(QueueName, VHost) of - {ok, Master} when is_atom(Master) -> - case maps:is_key(Master, NodeMasters) of - true -> maps:update_with(Master, - fun(N) -> N + 1 end, - NodeMasters); - false -> NodeMasters - end; - _ -> NodeMasters - end - end, - maps:from_list([{N, 0} || N <- Cluster]), - QueueNames), - - MastersPerNode = maps:filter(fun (Node, _N) -> - not rabbit_maintenance:is_being_drained_local_read(Node) - end, MastersPerNode0), - - case map_size(MastersPerNode) > 0 of - true -> - {MinNode, _NMasters} = maps:fold( - fun(Node, NMasters, init) -> - {Node, NMasters}; - (Node, NMasters, {MinNode, MinMasters}) -> - case NMasters < MinMasters of - true -> {Node, NMasters}; - false -> {MinNode, MinMasters} - end - end, - init, MastersPerNode), - {ok, MinNode}; - false -> - undefined - end. diff --git a/deps/rabbit/src/rabbit_queue_location_random.erl b/deps/rabbit/src/rabbit_queue_location_random.erl deleted file mode 100644 index fd9499cc3384..000000000000 --- a/deps/rabbit/src/rabbit_queue_location_random.erl +++ /dev/null @@ -1,42 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_queue_location_random). --behaviour(rabbit_queue_master_locator). - --include_lib("rabbit_common/include/rabbit.hrl"). --include("amqqueue.hrl"). - --export([description/0, queue_master_location/1]). - --rabbit_boot_step({?MODULE, - [{description, "locate queue master random"}, - {mfa, {rabbit_registry, register, - [queue_master_locator, - <<"random">>, ?MODULE]}}, - {requires, rabbit_registry}, - {enables, kernel_ready}]}). - -%%--------------------------------------------------------------------------- -%% Queue Master Location Callbacks -%%--------------------------------------------------------------------------- - -description() -> - [{description, - <<"Locate queue master node from cluster in a random manner">>}]. - -queue_master_location(Q) when ?is_amqqueue(Q) -> - Cluster0 = rabbit_queue_master_location_misc:all_nodes(Q), - Cluster = rabbit_maintenance:filter_out_drained_nodes_local_read(Cluster0), - case Cluster of - [] -> - undefined; - Candidates when is_list(Candidates) -> - RandomPos = erlang:phash2(erlang:monotonic_time(), length(Candidates)), - MasterNode = lists:nth(RandomPos + 1, Candidates), - {ok, MasterNode} - end. diff --git a/deps/rabbit/src/rabbit_queue_location_validator.erl b/deps/rabbit/src/rabbit_queue_location_validator.erl deleted file mode 100644 index 33899ebec8d9..000000000000 --- a/deps/rabbit/src/rabbit_queue_location_validator.erl +++ /dev/null @@ -1,67 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. 
If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_queue_location_validator). --behaviour(rabbit_policy_validator). - --include_lib("rabbit_common/include/rabbit.hrl"). --include("amqqueue.hrl"). - --export([validate_policy/1, validate_strategy/1]). - --rabbit_boot_step({?MODULE, - [{description, "Queue location policy validation"}, - {mfa, {rabbit_registry, register, - [policy_validator, - <<"queue-master-locator">>, - ?MODULE]}}, - {requires, rabbit_registry}, - {enables, recovery}]}). - -validate_policy(KeyList) -> - case proplists:lookup(<<"queue-master-locator">> , KeyList) of - {_, Strategy} -> case validate_strategy(Strategy) of - {error, _, _} = Er -> Er; - _ -> ok - end; - _ -> {error, "queue-master-locator undefined"} - end. - -validate_strategy(Strategy) -> - case module(Strategy) of - R = {ok, _M} -> R; - _ -> - {error, "~tp invalid queue-master-locator value", [Strategy]} - end. - -policy(Policy, Q) -> - case rabbit_policy:get(Policy, Q) of - undefined -> none; - P -> P - end. - -module(Q) when ?is_amqqueue(Q) -> - case policy(<<"queue-master-locator">>, Q) of - undefined -> no_location_strategy; - Mode -> module(Mode) - end; -module(Strategy) when is_binary(Strategy) -> - case rabbit_registry:binary_to_type(Strategy) of - {error, not_found} -> no_location_strategy; - T -> - case rabbit_registry:lookup_module(queue_master_locator, T) of - {ok, Module} -> - case code:which(Module) of - non_existing -> no_location_strategy; - _ -> {ok, Module} - end; - _ -> - no_location_strategy - end - end; -module(Strategy) -> - module(rabbit_data_coercion:to_binary(Strategy)). diff --git a/deps/rabbit/src/rabbit_queue_master_location_misc.erl b/deps/rabbit/src/rabbit_queue_master_location_misc.erl deleted file mode 100644 index cf353fbccb0a..000000000000 --- a/deps/rabbit/src/rabbit_queue_master_location_misc.erl +++ /dev/null @@ -1,108 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_queue_master_location_misc). - --include_lib("rabbit_common/include/rabbit.hrl"). --include("amqqueue.hrl"). - --export([lookup_master/2, - lookup_queue/2, - get_location/1, - get_location_mod_by_config/1, - get_location_mod_by_args/1, - get_location_mod_by_policy/1, - all_nodes/1]). - --spec lookup_master(binary(), binary()) -> {ok, node()} | {error, not_found}. -lookup_master(QueueNameBin, VHostPath) when is_binary(QueueNameBin), - is_binary(VHostPath) -> - QueueR = rabbit_misc:r(VHostPath, queue, QueueNameBin), - case rabbit_amqqueue:lookup(QueueR) of - {ok, Queue} when ?amqqueue_has_valid_pid(Queue) -> - Pid = amqqueue:get_pid(Queue), - {ok, node(Pid)}; - Error -> Error - end. - -lookup_queue(QueueNameBin, VHostPath) when is_binary(QueueNameBin), - is_binary(VHostPath) -> - QueueR = rabbit_misc:r(VHostPath, queue, QueueNameBin), - case rabbit_amqqueue:lookup(QueueR) of - Reply = {ok, Queue} when ?is_amqqueue(Queue) -> - Reply; - Error -> - Error - end. 
- -get_location(Queue) when ?is_amqqueue(Queue) -> - Reply1 = case get_location_mod_by_args(Queue) of - _Err1 = {error, _} -> - case get_location_mod_by_policy(Queue) of - _Err2 = {error, _} -> - case get_location_mod_by_config(Queue) of - Err3 = {error, _} -> Err3; - Reply0 = {ok, _Module} -> Reply0 - end; - Reply0 = {ok, _Module} -> Reply0 - end; - Reply0 = {ok, _Module} -> Reply0 - end, - - case Reply1 of - {ok, CB} -> CB:queue_master_location(Queue); - Error -> Error - end. - -get_location_mod_by_args(Queue) when ?is_amqqueue(Queue) -> - Args = amqqueue:get_arguments(Queue), - case rabbit_misc:table_lookup(Args, <<"x-queue-master-locator">>) of - {_Type, Strategy} -> - case rabbit_queue_location_validator:validate_strategy(Strategy) of - Reply = {ok, _CB} -> Reply; - Error -> Error - end; - _ -> {error, "x-queue-master-locator undefined"} - end. - -get_location_mod_by_policy(Queue) when ?is_amqqueue(Queue) -> - case rabbit_policy:get(<<"queue-master-locator">> , Queue) of - undefined -> {error, "queue-master-locator policy undefined"}; - Strategy -> - case rabbit_queue_location_validator:validate_strategy(Strategy) of - Reply = {ok, _CB} -> Reply; - Error -> Error - end - end. - -get_location_mod_by_config(Queue) when ?is_amqqueue(Queue) -> - case application:get_env(rabbit, queue_master_locator) of - {ok, Strategy} -> - case rabbit_queue_location_validator:validate_strategy(Strategy) of - Reply = {ok, _CB} -> Reply; - Error -> Error - end; - _ -> {error, "queue_master_locator undefined"} - end. - -all_nodes(Queue) when ?is_amqqueue(Queue) -> - handle_is_mirrored_ha_nodes(rabbit_mirror_queue_misc:is_mirrored_ha_nodes(Queue), Queue). - -handle_is_mirrored_ha_nodes(false, _Queue) -> - % Note: ha-mode is NOT 'nodes' - it is either exactly or all, which means - % that any node in the cluster is eligible to be the new queue master node - rabbit_nodes:list_serving(); -handle_is_mirrored_ha_nodes(true, Queue) -> - % Note: ha-mode is 'nodes', which explicitly specifies allowed nodes. - % We must use suggested_queue_nodes to get that list of nodes as the - % starting point for finding the queue master location - handle_suggested_queue_nodes(rabbit_mirror_queue_misc:suggested_queue_nodes(Queue)). - -handle_suggested_queue_nodes({_MNode, []}) -> - rabbit_nodes:list_serving(); -handle_suggested_queue_nodes({MNode, SNodes}) -> - [MNode | SNodes]. diff --git a/deps/rabbit/src/rabbit_queue_master_locator.erl b/deps/rabbit/src/rabbit_queue_master_locator.erl deleted file mode 100644 index cb948e7e978a..000000000000 --- a/deps/rabbit/src/rabbit_queue_master_locator.erl +++ /dev/null @@ -1,19 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_queue_master_locator). - --behaviour(rabbit_registry_class). - --export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]). - --callback description() -> [proplists:property()]. --callback queue_master_location(amqqueue:amqqueue()) -> - {'ok', node()} | {'error', term()}. - -added_to_rabbit_registry(_Type, _ModuleName) -> ok. -removed_from_rabbit_registry(_Type) -> ok. 
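With rabbit_queue_master_locator and its strategy modules deleted, every locator name now funnels through the leader_locator0/1 normalisation shown earlier. As a hedged reference, the effective mapping is sketched below; normalise_locator is a made-up name and the clauses simply mirror leader_locator0/1:

%% Sketch: effective mapping of locator names after this change.
normalise_locator(<<"client-local">>)  -> <<"client-local">>;
normalise_locator(<<"balanced">>)      -> <<"balanced">>;
normalise_locator(<<"random">>)        -> <<"balanced">>;       %% deprecated
normalise_locator(<<"least-leaders">>) -> <<"balanced">>;       %% deprecated
normalise_locator(<<"min-masters">>)   -> <<"balanced">>;       %% deprecated
normalise_locator(_Other)              -> <<"client-local">>.   %% default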
diff --git a/deps/rabbit/src/rabbit_queue_type.erl b/deps/rabbit/src/rabbit_queue_type.erl index 9a269fab25a5..23e588c99e34 100644 --- a/deps/rabbit/src/rabbit_queue_type.erl +++ b/deps/rabbit/src/rabbit_queue_type.erl @@ -2,22 +2,27 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_queue_type). +-feature(maybe_expr, enable). -behaviour(rabbit_registry_class). -include("amqqueue.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("amqp10_common/include/amqp10_types.hrl"). -export([ init/0, close/1, discover/1, + short_alias_of/1, feature_flag_name/1, + to_binary/1, default/0, + fallback/0, is_enabled/1, is_compatible/4, declare/2, @@ -27,6 +32,7 @@ purge/1, policy_changed/1, stat/1, + format/2, remove/2, info/2, state_info/1, @@ -36,13 +42,14 @@ %% stateful client API new/2, consume/3, - cancel/5, + cancel/3, handle_down/4, handle_event/3, module/2, deliver/4, settle/5, - credit/5, + credit_v1/5, + credit/6, dequeue/5, fold_state/3, is_policy_applicable/2, @@ -62,29 +69,36 @@ -type queue_name() :: rabbit_amqqueue:name(). -type queue_state() :: term(). --type msg_tag() :: term(). +%% sequence number typically +-type correlation() :: term(). -type arguments() :: queue_arguments | consumer_arguments. --type queue_type() :: rabbit_classic_queue | rabbit_quorum_queue | rabbit_stream_queue. - --export_type([queue_type/0]). +-type queue_type() :: rabbit_classic_queue | rabbit_quorum_queue | rabbit_stream_queue | module(). +%% see AMQP 1.0 §2.6.7 +-type delivery_count() :: sequence_no(). +-type credit() :: uint(). -define(STATE, ?MODULE). -%% Recoverable mirrors shouldn't really be a generic one, but let's keep it here until -%% mirrored queues are deprecated. --define(DOWN_KEYS, [name, durable, auto_delete, arguments, pid, recoverable_slaves, type, state]). +-define(DOWN_KEYS, [name, durable, auto_delete, arguments, pid, type, state]). %% TODO resolve all registered queue types from registry -define(QUEUE_MODULES, [rabbit_classic_queue, rabbit_quorum_queue, rabbit_stream_queue]). -define(KNOWN_QUEUE_TYPES, [<<"classic">>, <<"quorum">>, <<"stream">>]). +-type credit_reply_action() :: {credit_reply, rabbit_types:ctag(), delivery_count(), credit(), + Available :: non_neg_integer(), Drain :: boolean()}. + %% anything that the host process needs to do on behalf of the queue type session -type action() :: %% indicate to the queue type module that a message has been delivered %% fully to the queue - {settled, Success :: boolean(), [msg_tag()]} | + {settled, queue_name(), [correlation()]} | {deliver, rabbit_types:ctag(), boolean(), [rabbit_amqqueue:qmsg()]} | - {block | unblock, QueueName :: term()}. + {block | unblock, QueueName :: term()} | + credit_reply_action() | + %% credit API v1 + {credit_reply_v1, rabbit_types:ctag(), credit(), + Available :: non_neg_integer(), Drain :: boolean()}. -type actions() :: [action()]. @@ -93,44 +107,57 @@ term(). 
-record(ctx, {module :: module(), - %% "publisher confirm queue accounting" - %% queue type implementation should emit a: - %% {settle, Success :: boolean(), msg_tag()} - %% to either settle or reject the delivery of a - %% message to the queue instance - %% The queue type module will then emit a {confirm | reject, [msg_tag()} - %% action to the channel or channel like process when a msg_tag - %% has reached its conclusion state :: queue_state()}). - -record(?STATE, {ctxs = #{} :: #{queue_name() => #ctx{}} }). -opaque state() :: #?STATE{}. +%% Delete atom 'credit_api_v1' when feature flag rabbitmq_4.0.0 becomes required. +-type consume_mode() :: {simple_prefetch, Prefetch :: non_neg_integer()} | + {credited, Initial :: delivery_count() | credit_api_v1}. -type consume_spec() :: #{no_ack := boolean(), channel_pid := pid(), limiter_pid => pid() | none, limiter_active => boolean(), - prefetch_count => non_neg_integer(), + mode := consume_mode(), consumer_tag := rabbit_types:ctag(), exclusive_consume => boolean(), args => rabbit_framing:amqp_table(), ok_msg := term(), - acting_user := rabbit_types:username()}. - --type delivery_options() :: #{correlation => term(), %% sequence no typically + acting_user := rabbit_types:username()}. +-type cancel_reason() :: cancel | remove. +-type cancel_spec() :: #{consumer_tag := rabbit_types:ctag(), + reason => cancel_reason(), + ok_msg => term(), + user := rabbit_types:username()}. + +-type delivery_options() :: #{correlation => correlation(), atom() => term()}. --type settle_op() :: 'complete' | 'requeue' | 'discard'. +-type settle_op() :: complete | + requeue | + discard | + {modify, + DeliveryFailed :: boolean(), + UndeliverableHere :: boolean(), + Annotations :: mc:annotations()}. -export_type([state/0, + consume_mode/0, consume_spec/0, + cancel_reason/0, + cancel_spec/0, delivery_options/0, + credit_reply_action/0, action/0, actions/0, - settle_op/0]). + settle_op/0, + queue_type/0, + credit/0, + correlation/0, + delivery_count/0]). -callback is_enabled() -> boolean(). @@ -169,7 +196,8 @@ -callback is_stateful() -> boolean(). %% intitialise and return a queue type specific session context --callback init(amqqueue:amqqueue()) -> {ok, queue_state()} | {error, Reason :: term()}. +-callback init(amqqueue:amqqueue()) -> + {ok, queue_state()} | {error, Reason :: term()}. -callback close(queue_state()) -> ok. %% update the queue type state from amqqrecord @@ -178,13 +206,12 @@ -callback consume(amqqueue:amqqueue(), consume_spec(), queue_state()) -> - {ok, queue_state(), actions()} | {error, term()} | + {ok, queue_state(), actions()} | + {error, term()} | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. -callback cancel(amqqueue:amqqueue(), - rabbit_types:ctag(), - term(), - rabbit_types:username(), + cancel_spec(), queue_state()) -> {ok, queue_state()} | {error, term()}. @@ -206,8 +233,12 @@ {queue_state(), actions()} | {'protocol_error', Type :: atom(), Reason :: string(), Args :: term()}. --callback credit(queue_name(), rabbit_types:ctag(), - non_neg_integer(), Drain :: boolean(), queue_state()) -> +%% Delete this callback when feature flag rabbitmq_4.0.0 becomes required. +-callback credit_v1(queue_name(), rabbit_types:ctag(), credit(), Drain :: boolean(), queue_state()) -> + {queue_state(), actions()}. + +-callback credit(queue_name(), rabbit_types:ctag(), delivery_count(), credit(), + Drain :: boolean(), queue_state()) -> {queue_state(), actions()}. 
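The new credit/6 callback carries the AMQP 1.0 delivery-count alongside the credit grant. For context, the flow-control arithmetic from AMQP 1.0 §2.6.7 (referenced by the included amqp10_common types) reduces to the sketch below; it is illustrative only and ignores 32-bit serial-number wrap-around:

%% link-credit(snd) := delivery-count(rcv) + link-credit(rcv) - delivery-count(snd)
link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd) ->
    DeliveryCountRcv + LinkCreditRcv - DeliveryCountSnd.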
-callback dequeue(queue_name(), NoAck :: boolean(), LimiterPid :: pid(), @@ -228,24 +259,58 @@ -callback stat(amqqueue:amqqueue()) -> {'ok', non_neg_integer(), non_neg_integer()}. +-callback format(amqqueue:amqqueue(), Context :: map()) -> + [{atom(), term()}]. + -callback capabilities() -> #{atom() := term()}. -callback notify_decorators(amqqueue:amqqueue()) -> ok. +-spec discover(binary() | atom()) -> queue_type(). +discover(<<"undefined">>) -> + fallback(); +discover(undefined) -> + fallback(); %% TODO: should this use a registry that's populated on boot? discover(<<"quorum">>) -> rabbit_quorum_queue; +discover(rabbit_quorum_queue) -> + rabbit_quorum_queue; discover(<<"classic">>) -> rabbit_classic_queue; +discover(rabbit_classic_queue) -> + rabbit_classic_queue; +discover(rabbit_stream_queue) -> + rabbit_stream_queue; discover(<<"stream">>) -> rabbit_stream_queue; +discover(Other) when is_atom(Other) -> + discover(rabbit_data_coercion:to_binary(Other)); discover(Other) when is_binary(Other) -> T = rabbit_registry:binary_to_type(Other), + rabbit_log:debug("Queue type discovery: will look up a module for type '~tp'", [T]), {ok, Mod} = rabbit_registry:lookup_module(queue, T), Mod. +-spec short_alias_of(queue_type()) -> binary(). +%% The opposite of discover/1: returns a short alias given a module name +short_alias_of(<<"rabbit_quorum_queue">>) -> + <<"quorum">>; +short_alias_of(rabbit_quorum_queue) -> + <<"quorum">>; +short_alias_of(<<"rabbit_classic_queue">>) -> + <<"classic">>; +short_alias_of(rabbit_classic_queue) -> + <<"classic">>; +short_alias_of(<<"rabbit_stream_queue">>) -> + <<"stream">>; +short_alias_of(rabbit_stream_queue) -> + <<"stream">>; +short_alias_of(_Other) -> + undefined. + feature_flag_name(<<"quorum">>) -> quorum_queue; feature_flag_name(<<"classic">>) -> @@ -255,9 +320,30 @@ feature_flag_name(<<"stream">>) -> feature_flag_name(_) -> undefined. -default() -> +%% If the client does not specify the type, the virtual host does not have any +%% metadata default, and rabbit.default_queue_type is not set in the application env, +%% use this type as the last resort. +-spec fallback() -> queue_type(). +fallback() -> rabbit_classic_queue. +-spec default() -> queue_type(). +default() -> + V = rabbit_misc:get_env(rabbit, + default_queue_type, + fallback()), + rabbit_data_coercion:to_atom(V). + +-spec to_binary(module()) -> binary(). +to_binary(rabbit_classic_queue) -> + <<"classic">>; +to_binary(rabbit_quorum_queue) -> + <<"quorum">>; +to_binary(rabbit_stream_queue) -> + <<"stream">>; +to_binary(Other) -> + atom_to_binary(Other). + %% is a specific queue type implementation enabled -spec is_enabled(module()) -> boolean(). is_enabled(Type) -> @@ -272,11 +358,17 @@ is_compatible(Type, Durable, Exclusive, AutoDelete) -> {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} | {'absent', amqqueue:amqqueue(), rabbit_amqqueue:absent_reason()} | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()} | + {'error', Type :: atom(), Reason :: string(), Args :: term()} | {'error', Err :: term() }. declare(Q0, Node) -> Q = rabbit_queue_decorator:set(rabbit_policy:set(Q0)), Mod = amqqueue:get_type(Q), - Mod:declare(Q, Node). + case check_queue_limits(Q) of + ok -> + Mod:declare(Q, Node); + Error -> + Error + end. -spec delete(amqqueue:amqqueue(), boolean(), boolean(), rabbit_types:username()) -> @@ -304,6 +396,12 @@ stat(Q) -> Mod = amqqueue:get_type(Q), Mod:stat(Q). +-spec format(amqqueue:amqqueue(), map()) -> + [{atom(), term()}]. 
+format(Q, Context) -> + Mod = amqqueue:get_type(Q), + Mod:format(Q, Context). + -spec remove(queue_name(), state()) -> state(). remove(QRef, #?STATE{ctxs = Ctxs0} = State) -> case maps:take(QRef, Ctxs0) of @@ -351,7 +449,6 @@ i_down(durable, Q, _) -> amqqueue:is_durable(Q); i_down(auto_delete, Q, _) -> amqqueue:is_auto_delete(Q); i_down(arguments, Q, _) -> amqqueue:get_arguments(Q); i_down(pid, Q, _) -> amqqueue:get_pid(Q); -i_down(recoverable_slaves, Q, _) -> amqqueue:get_recoverable_slaves(Q); i_down(type, Q, _) -> amqqueue:get_type(Q); i_down(state, _Q, DownReason) -> DownReason; i_down(_K, _Q, _DownReason) -> ''. @@ -404,7 +501,9 @@ new(Q, State) when ?is_amqqueue(Q) -> set_ctx(Q, Ctx, State). -spec consume(amqqueue:amqqueue(), consume_spec(), state()) -> - {ok, state()} | {error, term()}. + {ok, state()} | + {error, term()} | + {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. consume(Q, Spec, State) -> #ctx{state = CtxState0} = Ctx = get_ctx(Q, State), Mod = amqqueue:get_type(Q), @@ -415,17 +514,14 @@ consume(Q, Spec, State) -> Err end. -%% TODO switch to cancel spec api -spec cancel(amqqueue:amqqueue(), - rabbit_types:ctag(), - term(), - rabbit_types:username(), + cancel_spec(), state()) -> {ok, state()} | {error, term()}. -cancel(Q, Tag, OkMsg, ActiveUser, Ctxs) -> +cancel(Q, Spec, Ctxs) -> #ctx{state = State0} = Ctx = get_ctx(Q, Ctxs), Mod = amqqueue:get_type(Q), - case Mod:cancel(Q, Tag, OkMsg, ActiveUser, State0) of + case Mod:cancel(Q, Spec, State0) of {ok, State} -> {ok, set_ctx(Q, Ctx#ctx{state = State}, Ctxs)}; Err -> @@ -619,15 +715,23 @@ settle(#resource{kind = queue} = QRef, Op, CTag, MsgIds, Ctxs) -> end end. --spec credit(amqqueue:amqqueue() | queue_name(), - rabbit_types:ctag(), non_neg_integer(), - boolean(), state()) -> {ok, state(), actions()}. -credit(Q, CTag, Credit, Drain, Ctxs) -> +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. +-spec credit_v1(queue_name(), rabbit_types:ctag(), credit(), boolean(), state()) -> + {ok, state(), actions()}. +credit_v1(QName, CTag, LinkCreditSnd, Drain, Ctxs) -> #ctx{state = State0, - module = Mod} = Ctx = get_ctx(Q, Ctxs), - QName = amqqueue:get_name(Q), - {State, Actions} = Mod:credit(QName, CTag, Credit, Drain, State0), - {ok, set_ctx(Q, Ctx#ctx{state = State}, Ctxs), Actions}. + module = Mod} = Ctx = get_ctx(QName, Ctxs), + {State, Actions} = Mod:credit_v1(QName, CTag, LinkCreditSnd, Drain, State0), + {ok, set_ctx(QName, Ctx#ctx{state = State}, Ctxs), Actions}. + +%% credit API v2 +-spec credit(queue_name(), rabbit_types:ctag(), delivery_count(), credit(), boolean(), state()) -> + {ok, state(), actions()}. +credit(QName, CTag, DeliveryCount, Credit, Drain, Ctxs) -> + #ctx{state = State0, + module = Mod} = Ctx = get_ctx(QName, Ctxs), + {State, Actions} = Mod:credit(QName, CTag, DeliveryCount, Credit, Drain, State0), + {ok, set_ctx(QName, Ctx#ctx{state = State}, Ctxs), Actions}. -spec dequeue(amqqueue:amqqueue(), boolean(), pid(), rabbit_types:ctag(), state()) -> @@ -718,3 +822,43 @@ known_queue_type_names() -> {QueueTypes, _} = lists:unzip(Registered), QTypeBins = lists:map(fun(X) -> atom_to_binary(X) end, QueueTypes), ?KNOWN_QUEUE_TYPES ++ QTypeBins. + +-spec check_queue_limits(amqqueue:amqqueue()) -> + ok | + {error, queue_limit_exceeded, Reason :: string(), Args :: term()}. +check_queue_limits(Q) -> + maybe + ok ?= check_vhost_queue_limit(Q), + ok ?= check_cluster_queue_limit(Q) + end. 
+ +check_vhost_queue_limit(Q) -> + #resource{name = QueueName} = amqqueue:get_name(Q), + VHost = amqqueue:get_vhost(Q), + case rabbit_vhost_limit:is_over_queue_limit(VHost) of + false -> + ok; + {true, Limit} -> + queue_limit_error("cannot declare queue '~ts': " + "queue limit in vhost '~ts' (~tp) is reached", + [QueueName, VHost, Limit]) + end. + +check_cluster_queue_limit(Q) -> + #resource{name = QueueName} = amqqueue:get_name(Q), + case rabbit_misc:get_env(rabbit, cluster_queue_limit, infinity) of + infinity -> + ok; + Limit -> + case rabbit_db_queue:count() >= Limit of + true -> + queue_limit_error("cannot declare queue '~ts': " + "queue limit in cluster (~tp) is reached", + [QueueName, Limit]); + false -> + ok + end + end. + +queue_limit_error(Reason, ReasonArgs) -> + {error, queue_limit_exceeded, Reason, ReasonArgs}. diff --git a/deps/rabbit/src/rabbit_queue_type_util.erl b/deps/rabbit/src/rabbit_queue_type_util.erl index e7e4549c99bf..733d61a6540f 100644 --- a/deps/rabbit/src/rabbit_queue_type_util.erl +++ b/deps/rabbit/src/rabbit_queue_type_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_queue_type_util). @@ -12,7 +12,8 @@ check_auto_delete/1, check_exclusive/1, check_non_durable/1, - run_checks/2]). + run_checks/2, + erpc_call/5]). -include_lib("rabbit_common/include/rabbit.hrl"). -include("amqqueue.hrl"). @@ -70,3 +71,30 @@ run_checks([C | Checks], Q) -> Err -> Err end. + +-spec erpc_call(node(), module(), atom(), list(), non_neg_integer()) -> + term() | {error, term()}. +erpc_call(Node, M, F, A, _Timeout) + when Node =:= node() -> + %% Only timeout 'infinity' optimises the local call in OTP 23-25 avoiding a new process being spawned: + %% https://github.com/erlang/otp/blob/47f121af8ee55a0dbe2a8c9ab85031ba052bad6b/lib/kernel/src/erpc.erl#L121 + try erpc:call(Node, M, F, A, infinity) of + Result -> + Result + catch + error:Err -> + {error, Err} + end; +erpc_call(Node, M, F, A, Timeout) -> + case lists:member(Node, nodes()) of + true -> + try erpc:call(Node, M, F, A, Timeout) of + Result -> + Result + catch + error:Err -> + {error, Err} + end; + false -> + {error, noconnection} + end. diff --git a/deps/rabbit/src/rabbit_quorum_memory_manager.erl b/deps/rabbit/src/rabbit_quorum_memory_manager.erl index 61a85c59bf2e..93851d09c789 100644 --- a/deps/rabbit/src/rabbit_quorum_memory_manager.erl +++ b/deps/rabbit/src/rabbit_quorum_memory_manager.erl @@ -2,14 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_quorum_memory_manager). -behaviour(gen_event). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). -export([register/0, unregister/0]). 
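The erpc_call/5 helper added above uses an infinity timeout for local calls (avoiding an intermediary process) and short-circuits with {error, noconnection} when the target node is not connected. A small usage sketch with made-up arguments:

%% Sketch: fetch total memory usage from a remote node with a 1 second
%% timeout; failures come back as {error, Reason} rather than an exception.
remote_total_memory(Node) ->
    case rabbit_queue_type_util:erpc_call(Node, erlang, memory, [total], 1000) of
        Bytes when is_integer(Bytes) -> {ok, Bytes};
        {error, Reason}              -> {error, Reason}
    end.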
diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index 6ebd3ec6cc55..eb7e0def33ec 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_quorum_queue). @@ -17,15 +17,16 @@ handle_event/3]). -export([is_recoverable/1, recover/2, + system_recover/1, stop/1, start_server/1, restart_server/1, stop_server/1, delete/4, delete_immediately/1]). --export([state_info/1, info/2, stat/1, infos/1]). --export([settle/5, dequeue/5, consume/3, cancel/5]). --export([credit/5]). +-export([state_info/1, info/2, stat/1, infos/1, infos/2]). +-export([settle/5, dequeue/5, consume/3, cancel/3]). +-export([credit_v1/5, credit/6]). -export([purge/1]). -export([stateless_deliver/2, deliver/3]). -export([dead_letter_publish/5]). @@ -33,25 +34,28 @@ -export([update_consumer_handler/8, update_consumer/9]). -export([cancel_consumer_handler/2, cancel_consumer/3]). -export([become_leader/2, handle_tick/3, spawn_deleter/1]). --export([rpc_delete_metrics/1]). --export([format/1]). +-export([rpc_delete_metrics/1, + key_metrics_rpc/1]). +-export([format/2]). -export([open_files/1]). -export([peek/2, peek/3]). --export([add_member/4, add_member/2]). +-export([add_member/2, + add_member/3, + add_member/4, + add_member/5]). -export([delete_member/3, delete_member/2]). -export([requeue/3]). -export([policy_changed/1]). -export([format_ra_event/3]). -export([cleanup_data_dir/0]). -export([shrink_all/1, - grow/4]). + grow/4, + grow/5]). -export([transfer_leadership/2, get_replicas/1, queue_length/1]). --export([file_handle_leader_reservation/1, - file_handle_other_reservation/0]). --export([file_handle_release_reservation/0]). -export([list_with_minimum_quorum/0, - filter_quorum_critical/1, - filter_quorum_critical/2, + list_with_local_promotable/0, + list_with_local_promotable_for_cli/0, + filter_quorum_critical/3, all_replica_states/0]). -export([capabilities/0]). -export([repair_amqqueue_nodes/1, @@ -72,8 +76,18 @@ -export([force_shrink_member_to_current_member/2, force_all_queues_shrink_member_to_current_member/0]). +%% for backwards compatibility +-export([file_handle_leader_reservation/1, + file_handle_other_reservation/0, + file_handle_release_reservation/0]). + +-ifdef(TEST). +-export([filter_promotable/2]). +-endif. + -import(rabbit_queue_type_util, [args_policy_lookup/3, - qname_to_internal_name/1]). + qname_to_internal_name/1, + erpc_call/5]). -include_lib("stdlib/include/qlc.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -82,10 +96,20 @@ -type msg_id() :: non_neg_integer(). -type qmsg() :: {rabbit_types:r('queue'), pid(), msg_id(), boolean(), mc:state()}. +-type membership() :: voter | non_voter | promotable. %% see ra_membership() in Ra. +-type replica_states() :: #{atom() => replica_state()}. +-type replica_state() :: leader | follower | non_voter | promotable. -define(RA_SYSTEM, quorum_queues). -define(RA_WAL_NAME, ra_log_wal). +-define(DEFAULT_DELIVERY_LIMIT, 20). + +-define(INFO(Str, Args), + rabbit_log:info("[~s:~s/~b] " Str, + [?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY | Args])). 
+ + -define(STATISTICS_KEYS, [policy, operator_policy, @@ -111,11 +135,13 @@ -define(RPC_TIMEOUT, 1000). -define(START_CLUSTER_TIMEOUT, 5000). --define(START_CLUSTER_RPC_TIMEOUT, 7000). %% needs to be longer than START_CLUSTER_TIMEOUT --define(TICK_TIMEOUT, 5000). %% the ra server tick time +-define(START_CLUSTER_RPC_TIMEOUT, 60_000). %% needs to be longer than START_CLUSTER_TIMEOUT +-define(TICK_INTERVAL, 5000). %% the ra server tick time -define(DELETE_TIMEOUT, 5000). --define(ADD_MEMBER_TIMEOUT, 5000). +-define(MEMBER_CHANGE_TIMEOUT, 20_000). -define(SNAPSHOT_INTERVAL, 8192). %% the ra default is 4096 +% -define(UNLIMITED_PREFETCH_COUNT, 2000). %% something large for ra +-define(MIN_CHECKPOINT_INTERVAL, 8192). %% the ra default is 16384 %%----------- QQ policies --------------------------------------------------- @@ -162,7 +188,7 @@ is_compatible(_, _, _) -> init(Q) when ?is_amqqueue(Q) -> {ok, SoftLimit} = application:get_env(rabbit, quorum_commands_soft_limit), {Name, _} = MaybeLeader = amqqueue:get_pid(Q), - Leader = case ra_leaderboard:lookup_leader(Name) of + Leader = case find_leader(Q) of undefined -> %% leader from queue record will have to suffice MaybeLeader; @@ -224,22 +250,23 @@ start_cluster(Q) -> {error, {too_long, N}} -> rabbit_data_coercion:to_atom(ra:new_uid(N)) end, - {Leader, Followers} = rabbit_queue_location:select_leader_and_followers(Q, QuorumSize), - LeaderId = {RaName, Leader}, + {LeaderNode, FollowerNodes} = + rabbit_queue_location:select_leader_and_followers(Q, QuorumSize), + LeaderId = {RaName, LeaderNode}, NewQ0 = amqqueue:set_pid(Q, LeaderId), - NewQ1 = amqqueue:set_type_state(NewQ0, #{nodes => [Leader | Followers]}), + NewQ1 = amqqueue:set_type_state(NewQ0, + #{nodes => [LeaderNode | FollowerNodes]}), rabbit_log:debug("Will start up to ~w replicas for quorum ~ts with leader on node '~ts'", - [QuorumSize, rabbit_misc:rs(QName), Leader]), + [QuorumSize, rabbit_misc:rs(QName), LeaderNode]), case rabbit_amqqueue:internal_declare(NewQ1, false) of {created, NewQ} -> - TickTimeout = application:get_env(rabbit, quorum_tick_interval, - ?TICK_TIMEOUT), - SnapshotInterval = application:get_env(rabbit, quorum_snapshot_interval, - ?SNAPSHOT_INTERVAL), - RaConfs = [make_ra_conf(NewQ, ServerId, TickTimeout, SnapshotInterval) + RaConfs = [make_ra_conf(NewQ, ServerId) || ServerId <- members(NewQ)], - try erpc_call(Leader, ra, start_cluster, + + %% khepri projections on remote nodes are eventually consistent + wait_for_projections(LeaderNode, QName), + try erpc_call(LeaderNode, ra, start_cluster, [?RA_SYSTEM, RaConfs, ?START_CLUSTER_TIMEOUT], ?START_CLUSTER_RPC_TIMEOUT) of {ok, _, _} -> @@ -251,7 +278,7 @@ start_cluster(Q) -> %% config cannot be updated ok = rabbit_fifo_client:update_machine_state(LeaderId, ra_machine_config(NewQ)), - notify_decorators(QName, startup), + notify_decorators(NewQ, startup), rabbit_quorum_queue_periodic_membership_reconciliation:queue_created(NewQ), rabbit_event:notify(queue_created, [{name, QName}, @@ -264,13 +291,18 @@ start_cluster(Q) -> ActingUser}]), {new, NewQ}; {error, Error} -> - declare_queue_error(Error, NewQ, Leader, ActingUser) + declare_queue_error(Error, NewQ, LeaderNode, ActingUser) catch error:Error -> - declare_queue_error(Error, NewQ, Leader, ActingUser) + declare_queue_error(Error, NewQ, LeaderNode, ActingUser) end; {existing, _} = Ex -> - Ex + Ex; + {error, timeout} -> + {protocol_error, internal_error, + "Could not declare quorum ~ts on node '~ts' because the metadata " + "store operation timed out", + 
[rabbit_misc:rs(QName), node()]} end. declare_queue_error(Error, Queue, Leader, ActingUser) -> @@ -290,10 +322,15 @@ ra_machine_config(Q) when ?is_amqqueue(Q) -> OverflowBin = args_policy_lookup(<<"overflow">>, fun policyHasPrecedence/2, Q), Overflow = overflow(OverflowBin, drop_head, QName), MaxBytes = args_policy_lookup(<<"max-length-bytes">>, fun min/2, Q), - MaxMemoryLength = args_policy_lookup(<<"max-in-memory-length">>, fun min/2, Q), - MaxMemoryBytes = args_policy_lookup(<<"max-in-memory-bytes">>, fun min/2, Q), - DeliveryLimit = args_policy_lookup(<<"delivery-limit">>, fun min/2, Q), - Expires = args_policy_lookup(<<"expires">>, fun policyHasPrecedence/2, Q), + DeliveryLimit = case args_policy_lookup(<<"delivery-limit">>, fun min/2, Q) of + undefined -> + rabbit_log:info("~ts: delivery_limit not set, defaulting to ~b", + [rabbit_misc:rs(QName), ?DEFAULT_DELIVERY_LIMIT]), + ?DEFAULT_DELIVERY_LIMIT; + DL -> + DL + end, + Expires = args_policy_lookup(<<"expires">>, fun min/2, Q), MsgTTL = args_policy_lookup(<<"message-ttl">>, fun min/2, Q), #{name => Name, queue_resource => QName, @@ -301,8 +338,6 @@ ra_machine_config(Q) when ?is_amqqueue(Q) -> become_leader_handler => {?MODULE, become_leader, [QName]}, max_length => MaxLength, max_bytes => MaxBytes, - max_in_memory_length => MaxMemoryLength, - max_in_memory_bytes => MaxMemoryBytes, single_active_consumer_on => single_active_consumer_on(Q), delivery_limit => DeliveryLimit, overflow_strategy => Overflow, @@ -357,70 +392,89 @@ local_or_remote_handler(ChPid, Module, Function, Args) -> end. become_leader(QName, Name) -> + %% as this function is called synchronously when a ra node becomes leader + %% we need to ensure there is no chance of blocking as else the ra node + %% may not be able to establish its leadership + spawn(fun () -> become_leader0(QName, Name) end). + +become_leader0(QName, Name) -> Fun = fun (Q1) -> amqqueue:set_state( amqqueue:set_pid(Q1, {Name, node()}), live) end, - %% as this function is called synchronously when a ra node becomes leader - %% we need to ensure there is no chance of blocking as else the ra node - %% may not be able to establish its leadership - spawn(fun() -> - _ = rabbit_amqqueue:update(QName, Fun), - case rabbit_amqqueue:lookup(QName) of - {ok, Q0} when ?is_amqqueue(Q0) -> - Nodes = get_nodes(Q0), - [_ = erpc_call(Node, ?MODULE, rpc_delete_metrics, - [QName], ?RPC_TIMEOUT) - || Node <- Nodes, Node =/= node()]; - _ -> - ok - end - end). + _ = rabbit_amqqueue:update(QName, Fun), + case rabbit_amqqueue:lookup(QName) of + {ok, Q0} when ?is_amqqueue(Q0) -> + Nodes = get_nodes(Q0), + _ = [_ = erpc_call(Node, ?MODULE, rpc_delete_metrics, + [QName], ?RPC_TIMEOUT) + || Node <- Nodes, Node =/= node()], + ok; + _ -> + ok + end. -spec all_replica_states() -> {node(), #{atom() => atom()}}. all_replica_states() -> - Rows = ets:tab2list(ra_state), + Rows0 = ets:tab2list(ra_state), + Rows = lists:map(fun + ({K, follower, promotable}) -> + {K, promotable}; + ({K, follower, non_voter}) -> + {K, non_voter}; + ({K, S, _}) -> + %% voter or unknown + {K, S}; + (T) -> + T + end, Rows0), {node(), maps:from_list(Rows)}. -spec list_with_minimum_quorum() -> [amqqueue:amqqueue()]. list_with_minimum_quorum() -> - filter_quorum_critical( - rabbit_amqqueue:list_local_quorum_queues()). - --spec filter_quorum_critical([amqqueue:amqqueue()]) -> [amqqueue:amqqueue()]. 
-filter_quorum_critical(Queues) -> - %% Example map of QQ replica states: - %% #{rabbit@warp10 => - %% #{'%2F_qq.636' => leader,'%2F_qq.243' => leader, - %% '%2F_qq.1939' => leader,'%2F_qq.1150' => leader, - %% '%2F_qq.1109' => leader,'%2F_qq.1654' => leader, - %% '%2F_qq.1679' => leader,'%2F_qq.1003' => leader, - %% '%2F_qq.1593' => leader,'%2F_qq.1765' => leader, - %% '%2F_qq.933' => leader,'%2F_qq.38' => leader, - %% '%2F_qq.1357' => leader,'%2F_qq.1345' => leader, - %% '%2F_qq.1694' => leader,'%2F_qq.994' => leader, - %% '%2F_qq.490' => leader,'%2F_qq.1704' => leader, - %% '%2F_qq.58' => leader,'%2F_qq.564' => leader, - %% '%2F_qq.683' => leader,'%2F_qq.386' => leader, - %% '%2F_qq.753' => leader,'%2F_qq.6' => leader, - %% '%2F_qq.1590' => leader,'%2F_qq.1363' => leader, - %% '%2F_qq.882' => leader,'%2F_qq.1161' => leader,...}} - ReplicaStates = maps:from_list( - rabbit_misc:append_rpc_all_nodes(rabbit_nodes:list_running(), - ?MODULE, all_replica_states, [])), - filter_quorum_critical(Queues, ReplicaStates). - --spec filter_quorum_critical([amqqueue:amqqueue()], #{node() => #{atom() => atom()}}) -> [amqqueue:amqqueue()]. - -filter_quorum_critical(Queues, ReplicaStates) -> + Queues = rabbit_amqqueue:list_local_quorum_queues(), + ReplicaStates = get_replica_states(rabbit_nodes:list_running()), + filter_quorum_critical(Queues, ReplicaStates, node()). + +-spec list_with_local_promotable() -> [amqqueue:amqqueue()]. +list_with_local_promotable() -> + Queues = rabbit_amqqueue:list_local_quorum_queues(), + #{node() := ReplicaStates} = get_replica_states([node()]), + filter_promotable(Queues, ReplicaStates). + +-spec list_with_local_promotable_for_cli() -> [#{binary() => any()}]. +list_with_local_promotable_for_cli() -> + Qs = list_with_local_promotable(), + lists:map(fun amqqueue:to_printable/1, Qs). + +-spec get_replica_states([node()]) -> #{node() => replica_states()}. +get_replica_states(Nodes) -> + maps:from_list( + rabbit_misc:append_rpc_all_nodes(Nodes, ?MODULE, all_replica_states, [])). + +-spec filter_promotable([amqqueue:amqqueue()], replica_states()) -> + [amqqueue:amqqueue()]. +filter_promotable(Queues, ReplicaStates) -> + lists:filter(fun (Q) -> + {RaName, _Node} = amqqueue:get_pid(Q), + State = maps:get(RaName, ReplicaStates), + State == promotable + end, Queues). + +-spec filter_quorum_critical([amqqueue:amqqueue()], #{node() => replica_states()}, node()) -> + [amqqueue:amqqueue()]. +filter_quorum_critical(Queues, ReplicaStates, Self) -> lists:filter(fun (Q) -> - MemberNodes = rabbit_amqqueue:get_quorum_nodes(Q), + MemberNodes = get_nodes(Q), {Name, _Node} = amqqueue:get_pid(Q), AllUp = lists:filter(fun (N) -> - {Name, _} = amqqueue:get_pid(Q), case maps:get(N, ReplicaStates, undefined) of - #{Name := State} when State =:= follower orelse State =:= leader -> + #{Name := State} + when State =:= follower orelse + State =:= leader orelse + (State =:= promotable andalso N =:= Self) orelse + (State =:= non_voter andalso N =:= Self) -> true; _ -> false end @@ -444,7 +498,7 @@ capabilities() -> <<"x-single-active-consumer">>, <<"x-queue-type">>, <<"x-quorum-initial-group-size">>, <<"x-delivery-limit">>, <<"x-message-ttl">>, <<"x-queue-leader-locator">>], - consumer_arguments => [<<"x-priority">>, <<"x-credit">>], + consumer_arguments => [<<"x-priority">>], server_named => false}. rpc_delete_metrics(QName) -> @@ -463,11 +517,12 @@ spawn_notify_decorators(QName, Fun, Args) -> catch notify_decorators(QName, Fun, Args). 
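all_replica_states/0 now collapses the extra ra_state column into promotable / non_voter, and filter_promotable/2 keeps only queues whose local member is still catching up. A rough sketch of the data involved (Ra names and values are made up):

%% Sketch: per-queue states reported for the local node, and the
%% promotable selection performed the way filter_promotable/2 does it.
{_ThisNode, States} = {node(), #{'%2F_qq.1' => leader,
                                 '%2F_qq.2' => promotable,
                                 '%2F_qq.3' => follower}},
Promotable = [RaName || {RaName, promotable} <- maps:to_list(States)].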
handle_tick(QName, - #{config := #{name := Name}, + #{config := #{name := Name} = Cfg, num_active_consumers := NumConsumers, num_checked_out := NumCheckedOut, num_ready_messages := NumReadyMsgs, num_messages := NumMessages, + num_enqueuers := NumEnqueuers, enqueue_message_bytes := EnqueueBytes, checkout_message_bytes := CheckoutBytes, num_discarded := NumDiscarded, @@ -482,6 +537,7 @@ handle_tick(QName, spawn( fun() -> try + {ok, Q} = rabbit_amqqueue:lookup(QName), Reductions = reductions(Name), rabbit_core_metrics:queue_stats(QName, NumReadyMsgs, NumCheckedOut, NumMessages, @@ -490,7 +546,9 @@ handle_tick(QName, 0 -> 0; _ -> rabbit_fifo:usage(Name) end, - Keys = ?STATISTICS_KEYS -- [consumers, + + Keys = ?STATISTICS_KEYS -- [leader, + consumers, messages_dlx, message_bytes_dlx, single_active_consumer_pid, @@ -498,11 +556,25 @@ handle_tick(QName, ], {SacTag, SacPid} = maps:get(single_active_consumer_id, Overview, {'', ''}), + Infos0 = maps:fold( + fun(num_ready_messages_high, V, Acc) -> + [{messages_ready_high, V} | Acc]; + (num_ready_messages_normal, V, Acc) -> + [{messages_ready_normal, V} | Acc]; + (num_ready_messages_return, V, Acc) -> + [{messages_ready_returned, V} | Acc]; + (_, _, Acc) -> + Acc + end, info(Q, Keys), Overview), MsgBytesDiscarded = DiscardBytes + DiscardCheckoutBytes, MsgBytes = EnqueueBytes + CheckoutBytes + MsgBytesDiscarded, Infos = [{consumers, NumConsumers}, + {publishers, NumEnqueuers}, {consumer_capacity, Util}, {consumer_utilisation, Util}, + {messages, NumMessages}, + {messages_ready, NumReadyMsgs}, + {messages_unacknowledged, NumCheckedOut}, {message_bytes_ready, EnqueueBytes}, {message_bytes_unacknowledged, CheckoutBytes}, {message_bytes, MsgBytes}, @@ -511,44 +583,61 @@ handle_tick(QName, {messages_dlx, NumDiscarded + NumDiscardedCheckedOut}, {message_bytes_dlx, MsgBytesDiscarded}, {single_active_consumer_tag, SacTag}, - {single_active_consumer_pid, SacPid} - | infos(QName, Keys)], + {single_active_consumer_pid, SacPid}, + {leader, node()}, + {delivery_limit, case maps:get(delivery_limit, Cfg, + undefined) of + undefined -> + unlimited; + Limit -> + Limit + end} + | Infos0], rabbit_core_metrics:queue_stats(QName, Infos), - ok = repair_leader_record(QName, Self), + ok = repair_leader_record(Q, Self), + case repair_amqqueue_nodes(Q) of + ok -> + ok; + repaired -> + rabbit_log:debug("Repaired quorum queue ~ts amqqueue record", [rabbit_misc:rs(QName)]) + end, ExpectedNodes = rabbit_nodes:list_members(), case Nodes -- ExpectedNodes of [] -> ok; - Stale -> - rabbit_log:debug("~ts: stale nodes detected. Purging ~w", + Stale when length(ExpectedNodes) > 0 -> + %% rabbit_nodes:list_members/0 returns [] when there + %% is an error so we need to handle that case + rabbit_log:debug("~ts: stale nodes detected in quorum " + "queue state. Purging ~w", [rabbit_misc:rs(QName), Stale]), %% pipeline purge command - {ok, Q} = rabbit_amqqueue:lookup(QName), ok = ra:pipeline_command(amqqueue:get_pid(Q), rabbit_fifo:make_purge_nodes(Stale)), - + ok; + _ -> ok end catch _:Err -> rabbit_log:debug("~ts: handle tick failed with ~p", - [rabbit_misc:rs(QName), Err]), + [rabbit_misc:rs(QName), Err]), ok end end). 
-repair_leader_record(QName, Self) -> - {ok, Q} = rabbit_amqqueue:lookup(QName), +repair_leader_record(Q, Self) -> Node = node(), case amqqueue:get_pid(Q) of {_, Node} -> %% it's ok - we don't need to do anything ok; _ -> + QName = amqqueue:get_name(Q), rabbit_log:debug("~ts: repairing leader record", [rabbit_misc:rs(QName)]), {_, Name} = erlang:process_info(Self, registered_name), - become_leader(QName, Name), + ok = become_leader0(QName, Name), ok end, ok. @@ -564,8 +653,8 @@ repair_amqqueue_nodes(QName = #resource{}) -> repair_amqqueue_nodes(Q0); repair_amqqueue_nodes(Q0) -> QName = amqqueue:get_name(Q0), - Leader = amqqueue:get_pid(Q0), - {ok, Members, _} = ra:members(Leader), + {Name, _} = amqqueue:get_pid(Q0), + Members = ra_leaderboard:lookup_members(Name), RaNodes = [N || {_, N} <- Members], #{nodes := Nodes} = amqqueue:get_type_state(Q0), case lists:sort(RaNodes) =:= lists:sort(Nodes) of @@ -597,17 +686,31 @@ is_recoverable(Q) when ?is_amqqueue(Q) and ?amqqueue_is_quorum(Q) -> Nodes = get_nodes(Q), lists:member(Node, Nodes). +system_recover(quorum_queues) -> + case rabbit:is_booted() of + true -> + Queues = rabbit_amqqueue:list_local_quorum_queues(), + ?INFO("recovering ~b queues", [length(Queues)]), + {Recovered, Failed} = recover(<<>>, Queues), + ?INFO("recovered ~b queues, " + "failed to recover ~b queues", + [length(Recovered), length(Failed)]), + ok; + false -> + ?INFO("rabbit not booted, skipping queue recovery", []), + ok + end. + -spec recover(binary(), [amqqueue:amqqueue()]) -> {[amqqueue:amqqueue()], [amqqueue:amqqueue()]}. recover(_Vhost, Queues) -> lists:foldl( fun (Q0, {R0, F0}) -> {Name, _} = amqqueue:get_pid(Q0), + ServerId = {Name, node()}, QName = amqqueue:get_name(Q0), - Nodes = get_nodes(Q0), - Formatter = {?MODULE, format_ra_event, [QName]}, - Res = case ra:restart_server(?RA_SYSTEM, {Name, node()}, - #{ra_event_formatter => Formatter}) of + MutConf = make_mutable_config(Q0), + Res = case ra:restart_server(?RA_SYSTEM, ServerId, MutConf) of ok -> % queue was restarted, good ok; @@ -616,13 +719,10 @@ recover(_Vhost, Queues) -> Err1 == name_not_registered -> rabbit_log:warning("Quorum queue recovery: configured member of ~ts was not found on this node. Starting member as a new one. " "Context: ~s", - [rabbit_misc:rs(QName), Err1]), + [rabbit_misc:rs(QName), Err1]), % queue was never started on this node % so needs to be started from scratch. - Machine = ra_machine(Q0), - RaNodes = [{Name, Node} || Node <- Nodes], - case ra:start_server(?RA_SYSTEM, Name, {Name, node()}, - Machine, RaNodes) of + case start_server(make_ra_conf(Q0, ServerId)) of ok -> ok; Err2 -> rabbit_log:warning("recover: quorum queue ~w could not" @@ -700,6 +800,9 @@ delete(Q, _IfUnused, _IfEmpty, ActingUser) when ?amqqueue_is_quorum(Q) -> MRef = erlang:monitor(process, Leader), receive {'DOWN', MRef, process, _, _} -> + %% leader is down, + %% force delete remaining members + ok = force_delete_queue(lists:delete(Leader, Servers)), ok after Timeout -> erlang:demonitor(MRef, [flush]), @@ -763,10 +866,16 @@ settle(_QName, complete, CTag, MsgIds, QState) -> settle(_QName, requeue, CTag, MsgIds, QState) -> rabbit_fifo_client:return(quorum_ctag(CTag), MsgIds, QState); settle(_QName, discard, CTag, MsgIds, QState) -> - rabbit_fifo_client:discard(quorum_ctag(CTag), MsgIds, QState). 
+ rabbit_fifo_client:discard(quorum_ctag(CTag), MsgIds, QState); +settle(_QName, {modify, DelFailed, Undel, Anns}, CTag, MsgIds, QState) -> + rabbit_fifo_client:modify(quorum_ctag(CTag), MsgIds, DelFailed, Undel, + Anns, QState). -credit(_QName, CTag, Credit, Drain, QState) -> - rabbit_fifo_client:credit(quorum_ctag(CTag), Credit, Drain, QState). +credit_v1(_QName, CTag, Credit, Drain, QState) -> + rabbit_fifo_client:credit_v1(quorum_ctag(CTag), Credit, Drain, QState). + +credit(_QName, CTag, DeliveryCount, Credit, Drain, QState) -> + rabbit_fifo_client:credit(quorum_ctag(CTag), DeliveryCount, Credit, Drain, QState). -spec dequeue(rabbit_amqqueue:name(), NoAck :: boolean(), pid(), rabbit_types:ctag(), rabbit_fifo_client:state()) -> @@ -794,7 +903,7 @@ consume(Q, #{limiter_active := true}, _State) consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) -> #{no_ack := NoAck, channel_pid := ChPid, - prefetch_count := ConsumerPrefetchCount, + mode := Mode, consumer_tag := ConsumerTag0, exclusive_consume := ExclusiveConsume, args := Args, @@ -805,35 +914,28 @@ consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) -> QName = amqqueue:get_name(Q), maybe_send_reply(ChPid, OkMsg), ConsumerTag = quorum_ctag(ConsumerTag0), - %% A prefetch count of 0 means no limitation, - %% let's make it into something large for ra - Prefetch0 = case ConsumerPrefetchCount of - 0 -> 2000; - Other -> Other - end, %% consumer info is used to describe the consumer properties AckRequired = not NoAck, - ConsumerMeta = #{ack => AckRequired, - prefetch => ConsumerPrefetchCount, - args => Args, - username => ActingUser}, - - {CreditMode, Credit, Drain} = parse_credit_args(Prefetch0, Args), - %% if the mode is credited we should send a separate credit command - %% after checkout and give 0 credits initally - Prefetch = case CreditMode of - credited -> 0; - simple_prefetch -> Prefetch0 + Prefetch = case Mode of + {simple_prefetch, Declared} -> + Declared; + _ -> + 0 end, - {ok, QState1} = rabbit_fifo_client:checkout(ConsumerTag, Prefetch, - CreditMode, ConsumerMeta, - QState0), - QState = case CreditMode of - credited when Credit > 0 -> - rabbit_fifo_client:credit(ConsumerTag, Credit, Drain, - QState1); - _ -> QState1 + Priority = case rabbit_misc:table_lookup(Args, <<"x-priority">>) of + {_Key, Value} -> + Value; + _ -> + 0 end, + ConsumerMeta = #{ack => AckRequired, + prefetch => Prefetch, + args => Args, + username => ActingUser, + priority => Priority}, + {ok, _Infos, QState} = rabbit_fifo_client:checkout(ConsumerTag, + Mode, ConsumerMeta, + QState0), case single_active_consumer_on(Q) of true -> %% get the leader from state @@ -848,7 +950,7 @@ consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) -> rabbit_core_metrics:consumer_created( ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName, - ConsumerPrefetchCount, ActivityStatus == single_active, %% Active + Prefetch, ActivityStatus == single_active, %% Active ActivityStatus, Args), emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName, Prefetch, @@ -863,7 +965,7 @@ consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) -> rabbit_core_metrics:consumer_created( ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName, - ConsumerPrefetchCount, true, %% Active + Prefetch, true, %% Active up, Args), emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName, Prefetch, @@ -871,9 +973,10 @@ consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) -> {ok, QState} end. 
-cancel(_Q, ConsumerTag, OkMsg, _ActingUser, State) -> - maybe_send_reply(self(), OkMsg), - rabbit_fifo_client:cancel_checkout(quorum_ctag(ConsumerTag), State). +cancel(_Q, #{consumer_tag := ConsumerTag} = Spec, State) -> + maybe_send_reply(self(), maps:get(ok_msg, Spec, undefined)), + Reason = maps:get(reason, Spec, cancel), + rabbit_fifo_client:cancel_checkout(quorum_ctag(ConsumerTag), Reason, State). emit_consumer_created(ChPid, CTag, Exclusive, AckRequired, QName, PrefetchCount, Args, Ref, ActingUser) -> rabbit_event:notify(consumer_created, @@ -951,7 +1054,7 @@ info(Q, Items) -> lists:foldr(fun(totals, Acc) -> i_totals(Q) ++ Acc; (type_specific, Acc) -> - format(Q) ++ Acc; + format(Q, #{}) ++ Acc; (Item, Acc) -> [{Item, i(Item, Q)} | Acc] end, [], Items). @@ -1001,12 +1104,12 @@ cleanup_data_dir() -> ok. maybe_delete_data_dir(UId) -> + _ = ra_directory:unregister_name(?RA_SYSTEM, UId), Dir = ra_env:server_data_dir(?RA_SYSTEM, UId), {ok, Config} = ra_log:read_config(Dir), case maps:get(machine, Config) of {module, rabbit_fifo, _} -> - ra_lib:recursive_delete(Dir), - ra_directory:unregister_name(?RA_SYSTEM, UId); + ra_lib:recursive_delete(Dir); _ -> ok end. @@ -1029,7 +1132,7 @@ cluster_state(Name) -> case whereis(Name) of undefined -> down; _ -> - case ets_lookup_element(ra_state, Name, 2, undefined) of + case ets:lookup_element(ra_state, Name, 2, undefined) of recover -> recovering; _ -> @@ -1037,6 +1140,10 @@ cluster_state(Name) -> end end. +key_metrics_rpc(ServerId) -> + Metrics = ra:key_metrics(ServerId), + Metrics#{machine_version := rabbit_fifo:version()}. + -spec status(rabbit_types:vhost(), Name :: rabbit_misc:resource_name()) -> [[{binary(), term()}]] | {error, term()}. status(Vhost, QueueName) -> @@ -1047,34 +1154,67 @@ status(Vhost, QueueName) -> {error, classic_queue_not_supported}; {ok, Q} when ?amqqueue_is_quorum(Q) -> {RName, _} = amqqueue:get_pid(Q), - Nodes = get_nodes(Q), + Nodes = lists:sort(get_nodes(Q)), [begin - case get_sys_status({RName, N}) of - {ok, Sys} -> - {_, M} = lists:keyfind(ra_server_state, 1, Sys), - {_, RaftState} = lists:keyfind(raft_state, 1, Sys), - #{commit_index := Commit, - machine_version := MacVer, - current_term := Term, - log := #{last_index := Last, - snapshot_index := SnapIdx}} = M, + ServerId = {RName, N}, + case erpc_call(N, ?MODULE, key_metrics_rpc, [ServerId], ?RPC_TIMEOUT) of + #{state := RaftState, + membership := Membership, + commit_index := Commit, + term := Term, + last_index := Last, + last_applied := LastApplied, + last_written_index := LastWritten, + snapshot_index := SnapIdx, + machine_version := MacVer} -> [{<<"Node Name">>, N}, {<<"Raft State">>, RaftState}, - {<<"Log Index">>, Last}, + {<<"Membership">>, Membership}, + {<<"Last Log Index">>, Last}, + {<<"Last Written">>, LastWritten}, + {<<"Last Applied">>, LastApplied}, {<<"Commit Index">>, Commit}, {<<"Snapshot Index">>, SnapIdx}, {<<"Term">>, Term}, {<<"Machine Version">>, MacVer} ]; - {error, Err} -> - [{<<"Node Name">>, N}, - {<<"Raft State">>, Err}, - {<<"Log Index">>, <<>>}, - {<<"Commit Index">>, <<>>}, - {<<"Snapshot Index">>, <<>>}, - {<<"Term">>, <<>>}, - {<<"Machine Version">>, <<>>} - ] + {error, _} -> + %% try the old method + case get_sys_status(ServerId) of + {ok, Sys} -> + {_, M} = lists:keyfind(ra_server_state, 1, Sys), + {_, RaftState} = lists:keyfind(raft_state, 1, Sys), + #{commit_index := Commit, + machine_version := MacVer, + current_term := Term, + last_applied := LastApplied, + log := #{last_index := Last, + last_written_index_term := 
{LastWritten, _}, + snapshot_index := SnapIdx}} = M, + [{<<"Node Name">>, N}, + {<<"Raft State">>, RaftState}, + {<<"Membership">>, voter}, + {<<"Last Log Index">>, Last}, + {<<"Last Written">>, LastWritten}, + {<<"Last Applied">>, LastApplied}, + {<<"Commit Index">>, Commit}, + {<<"Snapshot Index">>, SnapIdx}, + {<<"Term">>, Term}, + {<<"Machine Version">>, MacVer} + ]; + {error, Err} -> + [{<<"Node Name">>, N}, + {<<"Raft State">>, Err}, + {<<"Membership">>, <<>>}, + {<<"LastLog Index">>, <<>>}, + {<<"Last Written">>, <<>>}, + {<<"Last Applied">>, <<>>}, + {<<"Commit Index">>, <<>>}, + {<<"Snapshot Index">>, <<>>}, + {<<"Term">>, <<>>}, + {<<"Machine Version">>, <<>>} + ] + end end end || N <- Nodes]; {ok, _Q} -> @@ -1094,10 +1234,10 @@ get_sys_status(Proc) -> end. - -add_member(VHost, Name, Node, Timeout) -> +add_member(VHost, Name, Node, Membership, Timeout) when is_binary(VHost) -> QName = #resource{virtual_host = VHost, name = Name, kind = queue}, - rabbit_log:debug("Asked to add a replica for queue ~ts on node ~ts", [rabbit_misc:rs(QName), Node]), + rabbit_log:debug("Asked to add a replica for queue ~ts on node ~ts", + [rabbit_misc:rs(QName), Node]), case rabbit_amqqueue:lookup(QName) of {ok, Q} when ?amqqueue_is_classic(Q) -> {error, classic_queue_not_supported}; @@ -1110,10 +1250,11 @@ add_member(VHost, Name, Node, Timeout) -> case lists:member(Node, QNodes) of true -> %% idempotent by design - rabbit_log:debug("Quorum ~ts already has a replica on node ~ts", [rabbit_misc:rs(QName), Node]), + rabbit_log:debug("Quorum ~ts already has a replica on node ~ts", + [rabbit_misc:rs(QName), Node]), ok; false -> - add_member(Q, Node, Timeout) + add_member(Q, Node, Membership, Timeout) end end; {ok, _Q} -> @@ -1123,21 +1264,31 @@ add_member(VHost, Name, Node, Timeout) -> end. add_member(Q, Node) -> - add_member(Q, Node, ?ADD_MEMBER_TIMEOUT). -add_member(Q, Node, Timeout) when ?amqqueue_is_quorum(Q) -> + add_member(Q, Node, promotable). + +add_member(Q, Node, Membership) -> + add_member(Q, Node, Membership, ?MEMBER_CHANGE_TIMEOUT). + +add_member(VHost, Name, Node, Timeout) when is_binary(VHost) -> + %% NOTE needed to pass mixed cluster tests. 
+ add_member(VHost, Name, Node, promotable, Timeout); +add_member(Q, Node, Membership, Timeout) when ?amqqueue_is_quorum(Q) -> {RaName, _} = amqqueue:get_pid(Q), QName = amqqueue:get_name(Q), %% TODO parallel calls might crash this, or add a duplicate in quorum_nodes ServerId = {RaName, Node}, Members = members(Q), - TickTimeout = application:get_env(rabbit, quorum_tick_interval, - ?TICK_TIMEOUT), - SnapshotInterval = application:get_env(rabbit, quorum_snapshot_interval, - ?SNAPSHOT_INTERVAL), - Conf = make_ra_conf(Q, ServerId, TickTimeout, SnapshotInterval), + Conf = make_ra_conf(Q, ServerId, Membership), case ra:start_server(?RA_SYSTEM, Conf) of ok -> - case ra:add_member(Members, ServerId, Timeout) of + ServerIdSpec = + case rabbit_feature_flags:is_enabled(quorum_queue_non_voters) of + true -> + maps:with([id, uid, membership], Conf); + false -> + maps:get(id, Conf) + end, + case ra:add_member(Members, ServerIdSpec, Timeout) of {ok, _, Leader} -> Fun = fun(Q1) -> Q2 = update_type_state( @@ -1193,8 +1344,11 @@ delete_member(Q, Node) when ?amqqueue_is_quorum(Q) -> %% deleting the last member is not allowed {error, last_node}; Members -> - case ra:remove_member(Members, ServerId) of - {ok, _, _Leader} -> + case ra:remove_member(Members, ServerId, ?MEMBER_CHANGE_TIMEOUT) of + Res when element(1, Res) == ok orelse + Res == {error, not_member} -> + %% if not a member we can still proceed with updating the + %% mnesia record and clean up server if still running Fun = fun(Q1) -> update_type_state( Q1, @@ -1236,6 +1390,23 @@ shrink_all(Node) -> case delete_member(Q, Node) of ok -> {QName, {ok, Size-1}}; + {error, cluster_change_not_permitted} -> + %% this could be timing related and due to a new leader just being + %% elected but it's noop command not been committed yet. + %% lets sleep and retry once + rabbit_log:info("~ts: failed to remove member (replica) on node ~w " + "as cluster change is not permitted. " + "retrying once in 500ms", + [rabbit_misc:rs(QName), Node]), + timer:sleep(500), + case delete_member(Q, Node) of + ok -> + {QName, {ok, Size-1}}; + {error, Err} -> + rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w", + [rabbit_misc:rs(QName), Node, Err]), + {QName, {error, Size, Err}} + end; {error, Err} -> rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w", [rabbit_misc:rs(QName), Node, Err]), @@ -1245,17 +1416,21 @@ shrink_all(Node) -> amqqueue:get_type(Q) == ?MODULE, lists:member(Node, get_nodes(Q))]. --spec grow(node(), binary(), binary(), all | even) -> + +grow(Node, VhostSpec, QueueSpec, Strategy) -> + grow(Node, VhostSpec, QueueSpec, Strategy, promotable). + +-spec grow(node(), binary(), binary(), all | even, membership()) -> [{rabbit_amqqueue:name(), {ok, pos_integer()} | {error, pos_integer(), term()}}]. - grow(Node, VhostSpec, QueueSpec, Strategy) -> +grow(Node, VhostSpec, QueueSpec, Strategy, Membership) -> Running = rabbit_nodes:list_running(), [begin Size = length(get_nodes(Q)), QName = amqqueue:get_name(Q), rabbit_log:info("~ts: adding a new member (replica) on node ~w", [rabbit_misc:rs(QName), Node]), - case add_member(Q, Node, ?ADD_MEMBER_TIMEOUT) of + case add_member(Q, Node, Membership) of ok -> {QName, {ok, Size + 1}}; {error, Err} -> @@ -1316,17 +1491,6 @@ matches_strategy(even, Members) -> is_match(Subj, E) -> nomatch /= re:run(Subj, E). 
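Note: the add_member and grow variants above take an explicit membership argument, so a new replica can join as a promotable (non-voter) member and be promoted by Ra once it has caught up. A hedged usage sketch; the vhost, queue name, node and timeout values are placeholders and it assumes these arities are exported:

    VHost = <<"/">>,
    QName = <<"qq.orders">>,
    Node = 'rabbit@node3',
    %% add a single replica as a promotable (non-voter) member
    ok = rabbit_quorum_queue:add_member(VHost, QName, Node, promotable, 60000),
    %% or grow every matching quorum queue onto that node
    _Results = rabbit_quorum_queue:grow(Node, <<".*">>, <<".*">>, all, promotable).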
-file_handle_leader_reservation(QName) -> - {ok, Q} = rabbit_amqqueue:lookup(QName), - ClusterSize = length(get_nodes(Q)), - file_handle_cache:set_reservation(2 + ClusterSize). - -file_handle_other_reservation() -> - file_handle_cache:set_reservation(2). - -file_handle_release_reservation() -> - file_handle_cache:release_reservation(). - -spec reclaim_memory(rabbit_types:vhost(), Name :: rabbit_misc:resource_name()) -> ok | {error, term()}. reclaim_memory(Vhost, QueueName) -> QName = #resource{virtual_host = Vhost, name = QueueName, kind = queue}, @@ -1428,10 +1592,10 @@ i(messages, Q) when ?is_amqqueue(Q) -> quorum_messages(QName); i(messages_ready, Q) when ?is_amqqueue(Q) -> QName = amqqueue:get_name(Q), - ets_lookup_element(queue_coarse_metrics, QName, 2, 0); + ets:lookup_element(queue_coarse_metrics, QName, 2, 0); i(messages_unacknowledged, Q) when ?is_amqqueue(Q) -> QName = amqqueue:get_name(Q), - ets_lookup_element(queue_coarse_metrics, QName, 3, 0); + ets:lookup_element(queue_coarse_metrics, QName, 3, 0); i(policy, Q) -> case rabbit_policy:name(Q) of none -> ''; @@ -1449,7 +1613,7 @@ i(effective_policy_definition, Q) -> end; i(consumers, Q) when ?is_amqqueue(Q) -> QName = amqqueue:get_name(Q), - Consumers = ets_lookup_element(queue_metrics, QName, 2, []), + Consumers = ets:lookup_element(queue_metrics, QName, 2, []), proplists:get_value(consumers, Consumers, 0); i(memory, Q) when ?is_amqqueue(Q) -> {Name, _} = amqqueue:get_pid(Q), @@ -1471,7 +1635,7 @@ i(state, Q) when ?is_amqqueue(Q) -> end; i(local_state, Q) when ?is_amqqueue(Q) -> {Name, _} = amqqueue:get_pid(Q), - ets_lookup_element(ra_state, Name, 2, not_member); + ets:lookup_element(ra_state, Name, 2, not_member); i(garbage_collection, Q) when ?is_amqqueue(Q) -> {Name, _} = amqqueue:get_pid(Q), try @@ -1547,15 +1711,26 @@ open_files(Name) -> case whereis(Name) of undefined -> {node(), 0}; - Pid -> - {node(), ets_lookup_element(ra_open_file_metrics, Pid, 2, 0)} + _ -> + case ra_counters:counters({Name, node()}, [open_segments]) of + #{open_segments := Num} -> + {node(), Num}; + _ -> + {node(), 0} + end end. leader(Q) when ?is_amqqueue(Q) -> - {Name, Leader} = amqqueue:get_pid(Q), - case is_process_alive(Name, Leader) of - true -> Leader; - false -> '' + case find_leader(Q) of + undefined -> + ''; + {Name, LeaderNode} -> + case is_process_alive(Name, LeaderNode) of + true -> + LeaderNode; + false -> + '' + end end. peek(Vhost, Queue, Pos) -> @@ -1577,8 +1752,8 @@ peek(Pos, Q) when ?is_amqqueue(Q) andalso ?amqqueue_is_quorum(Q) -> _ -> 0 end, Msg = mc:set_annotation(<<"x-delivery-count">>, Count, Msg0), - XName = mc:get_annotation(exchange, Msg), - RoutingKeys = mc:get_annotation(routing_keys, Msg), + XName = mc:exchange(Msg), + RoutingKeys = mc:routing_keys(Msg), AmqpLegacyMsg = mc:prepare(read, mc:convert(mc_amqpl, Msg)), Content = mc:protocol_state(AmqpLegacyMsg), {ok, rabbit_basic:peek_fmt_message(XName, RoutingKeys, Content)}; @@ -1595,22 +1770,46 @@ peek(_Pos, Q) when ?is_amqqueue(Q) -> online(Q) when ?is_amqqueue(Q) -> Nodes = get_connected_nodes(Q), {Name, _} = amqqueue:get_pid(Q), - [Node || Node <- Nodes, is_process_alive(Name, Node)]. - -format(Q) when ?is_amqqueue(Q) -> - Nodes = get_nodes(Q), - [{members, Nodes}, {online, online(Q)}, {leader, leader(Q)}]. - -is_process_alive(Name, Node) -> - %% don't attempt rpc if node is not already connected - %% as this function is used for metrics and stats and the additional - %% latency isn't warranted - erlang:is_pid(erpc_call(Node, erlang, whereis, [Name], ?RPC_TIMEOUT)). 
+ [node(Pid) || {ok, Pid} <- + erpc:multicall(Nodes, erlang, whereis, [Name]), + is_pid(Pid)]. + +format(Q, Ctx) when ?is_amqqueue(Q) -> + %% TODO: this should really just be voters + Nodes = lists:sort(get_nodes(Q)), + Running = case Ctx of + #{running_nodes := Running0} -> + Running0; + _ -> + %% WARN: slow + rabbit_nodes:list_running() + end, + Online = [N || N <- Nodes, lists:member(N, Running)], + {_, LeaderNode} = amqqueue:get_pid(Q), + State = case is_minority(Nodes, Online) of + true when length(Online) == 0 -> + down; + true -> + minority; + false -> + case lists:member(LeaderNode, Online) of + true -> + running; + false -> + down + end + end, + [{type, quorum}, + {state, State}, + {node, LeaderNode}, + {members, Nodes}, + {leader, LeaderNode}, + {online, Online}]. -spec quorum_messages(rabbit_amqqueue:name()) -> non_neg_integer(). quorum_messages(QName) -> - ets_lookup_element(queue_coarse_metrics, QName, 4, 0). + ets:lookup_element(queue_coarse_metrics, QName, 4, 0). quorum_ctag(Int) when is_integer(Int) -> integer_to_binary(Int); @@ -1637,25 +1836,54 @@ members(Q) when ?amqqueue_is_quorum(Q) -> format_ra_event(ServerId, Evt, QRef) -> {'$gen_cast', {queue_event, QRef, {ServerId, Evt}}}. -make_ra_conf(Q, ServerId, TickTimeout, SnapshotInterval) -> +make_ra_conf(Q, ServerId) -> + make_ra_conf(Q, ServerId, voter). + +make_ra_conf(Q, ServerId, Membership) -> + TickTimeout = application:get_env(rabbit, quorum_tick_interval, + ?TICK_INTERVAL), + SnapshotInterval = application:get_env(rabbit, quorum_snapshot_interval, + ?SNAPSHOT_INTERVAL), + CheckpointInterval = application:get_env(rabbit, + quorum_min_checkpoint_interval, + ?MIN_CHECKPOINT_INTERVAL), + make_ra_conf(Q, ServerId, TickTimeout, + SnapshotInterval, CheckpointInterval, Membership). + +make_ra_conf(Q, ServerId, TickTimeout, + SnapshotInterval, CheckpointInterval, Membership) -> QName = amqqueue:get_name(Q), RaMachine = ra_machine(Q), [{ClusterName, _} | _] = Members = members(Q), UId = ra:new_uid(ra_lib:to_binary(ClusterName)), FName = rabbit_misc:rs(QName), Formatter = {?MODULE, format_ra_event, [QName]}, - #{cluster_name => ClusterName, - id => ServerId, - uid => UId, - friendly_name => FName, - metrics_key => QName, - initial_members => Members, - log_init_args => #{uid => UId, - snapshot_interval => SnapshotInterval}, - tick_timeout => TickTimeout, - machine => RaMachine, + LogCfg = #{uid => UId, + snapshot_interval => SnapshotInterval, + min_checkpoint_interval => CheckpointInterval, + max_checkpoints => 3}, + rabbit_misc:maps_put_truthy(membership, Membership, + #{cluster_name => ClusterName, + id => ServerId, + uid => UId, + friendly_name => FName, + metrics_key => QName, + initial_members => Members, + log_init_args => LogCfg, + tick_timeout => TickTimeout, + machine => RaMachine, + ra_event_formatter => Formatter}). + +make_mutable_config(Q) -> + QName = amqqueue:get_name(Q), + TickTimeout = application:get_env(rabbit, quorum_tick_interval, + ?TICK_INTERVAL), + Formatter = {?MODULE, format_ra_event, [QName]}, + #{tick_timeout => TickTimeout, ra_event_formatter => Formatter}. + + get_nodes(Q) when ?is_amqqueue(Q) -> #{nodes := Nodes} = amqqueue:get_type_state(Q), Nodes. @@ -1676,20 +1904,6 @@ overflow(<<"reject-publish-dlx">> = V, Def, QName) -> [V, rabbit_misc:rs(QName)]), Def. 
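Note: make_ra_conf/3 above now reads the Ra server's tick, snapshot and checkpoint intervals from the rabbit application environment (quorum_tick_interval, quorum_snapshot_interval, quorum_min_checkpoint_interval). A sketch of overriding them via advanced.config; the values are purely illustrative, not recommendations:

    [
     {rabbit, [
       {quorum_tick_interval, 5000},
       {quorum_snapshot_interval, 8192},
       {quorum_min_checkpoint_interval, 64}
     ]}
    ].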
-parse_credit_args(Default, Args) -> - case rabbit_misc:table_lookup(Args, <<"x-credit">>) of - {table, T} -> - case {rabbit_misc:table_lookup(T, <<"credit">>), - rabbit_misc:table_lookup(T, <<"drain">>)} of - {{long, C}, {bool, D}} -> - {credited, C, D}; - _ -> - {simple_prefetch, Default, false} - end; - undefined -> - {simple_prefetch, Default, false} - end. - -spec notify_decorators(amqqueue:amqqueue()) -> 'ok'. notify_decorators(Q) when ?is_amqqueue(Q) -> QName = amqqueue:get_name(Q), @@ -1704,6 +1918,10 @@ notify_decorators(Q) when ?is_amqqueue(Q) -> notify_decorators(QName, Event) -> notify_decorators(QName, Event, []). +notify_decorators(Q, F, A) when ?is_amqqueue(Q) -> + Ds = amqqueue:get_decorators(Q), + [ok = apply(M, F, [Q|A]) || M <- rabbit_queue_decorator:select(Ds)], + ok; notify_decorators(QName, F, A) -> %% Look up again in case policy and hence decorators have changed case rabbit_amqqueue:lookup(QName) of @@ -1715,39 +1933,6 @@ notify_decorators(QName, F, A) -> ok end. -ets_lookup_element(Tbl, Key, Pos, Default) -> - try ets:lookup_element(Tbl, Key, Pos) of - V -> V - catch - _:badarg -> - Default - end. - -erpc_call(Node, M, F, A, _Timeout) - when Node =:= node() -> - %% Only timeout 'infinity' optimises the local call in OTP 23-25 avoiding a new process being spawned: - %% https://github.com/erlang/otp/blob/47f121af8ee55a0dbe2a8c9ab85031ba052bad6b/lib/kernel/src/erpc.erl#L121 - try erpc:call(Node, M, F, A, infinity) of - Result -> - Result - catch - error:Err -> - {error, Err} - end; -erpc_call(Node, M, F, A, Timeout) -> - case lists:member(Node, nodes()) of - true -> - try erpc:call(Node, M, F, A, Timeout) of - Result -> - Result - catch - error:Err -> - {error, Err} - end; - false -> - {error, noconnection} - end. - is_stateful() -> true. force_shrink_member_to_current_member(VHost, Name) -> @@ -1787,3 +1972,64 @@ force_all_queues_shrink_member_to_current_member() -> end || Q <- rabbit_amqqueue:list(), amqqueue:get_type(Q) == ?MODULE], rabbit_log:warning("Disaster recovery procedure: shrinking finished"), ok. + +is_minority(All, Up) -> + MinQuorum = length(All) div 2 + 1, + length(Up) < MinQuorum. + +wait_for_projections(Node, QName) -> + case rabbit_feature_flags:is_enabled(khepri_db) andalso + Node =/= node() of + true -> + wait_for_projections(Node, QName, 256); + false -> + ok + end. + +wait_for_projections(Node, QName, 0) -> + exit({wait_for_projections_timed_out, Node, QName}); +wait_for_projections(Node, QName, N) -> + case erpc_call(Node, rabbit_amqqueue, lookup, [QName], 100) of + {ok, _} -> + ok; + _ -> + timer:sleep(100), + wait_for_projections(Node, QName, N - 1) + end. + +find_leader(Q) when ?is_amqqueue(Q) -> + %% the get_pid field in the queue record is updated async after a leader + %% change, so is likely to be the more stale than the leaderboard + {Name, _Node} = MaybeLeader = amqqueue:get_pid(Q), + Leaders = case ra_leaderboard:lookup_leader(Name) of + undefined -> + %% leader from queue record will have to suffice + [MaybeLeader]; + LikelyLeader -> + [LikelyLeader, MaybeLeader] + end, + Nodes = [node() | nodes()], + case lists:search(fun ({_Nm, Nd}) -> + lists:member(Nd, Nodes) + end, Leaders) of + {value, Leader} -> + Leader; + false -> + undefined + end. + +is_process_alive(Name, Node) -> + %% don't attempt rpc if node is not already connected + %% as this function is used for metrics and stats and the additional + %% latency isn't warranted + erlang:is_pid(erpc_call(Node, erlang, whereis, [Name], ?RPC_TIMEOUT)). 
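Note: the is_minority/2 helper added above drives the state reported by format/2: a queue is considered in minority when fewer than a strict majority of its members are online. Two illustrative evaluations of the same arithmetic (node names are placeholders):

    %% 3 members, 1 online: 1 < (3 div 2 + 1) = 2, so the queue is in minority.
    true = is_minority([n1, n2, n3], [n1]),
    %% 3 members, 2 online: 2 < 2 is false, so quorum is available.
    false = is_minority([n1, n2, n3], [n1, n2]).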
+ +%% backwards compat +file_handle_leader_reservation(_QName) -> + ok. + +file_handle_other_reservation() -> + ok. + +file_handle_release_reservation() -> + ok. diff --git a/deps/rabbit/src/rabbit_quorum_queue_periodic_membership_reconciliation.erl b/deps/rabbit/src/rabbit_quorum_queue_periodic_membership_reconciliation.erl index 34ed8a275995..81029c9b145c 100644 --- a/deps/rabbit/src/rabbit_quorum_queue_periodic_membership_reconciliation.erl +++ b/deps/rabbit/src/rabbit_quorum_queue_periodic_membership_reconciliation.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_quorum_queue_periodic_membership_reconciliation). @@ -101,7 +101,7 @@ handle_cast(_Msg, State) -> handle_info(?EVAL_MSG, #state{interval = Interval, trigger_interval = TriggerInterval} = State) -> - Res = reconclitiate_quorum_queue_membership(State), + Res = reconciliate_quorum_queue_membership(State), NewTimeout = case Res of noop -> Interval; @@ -125,15 +125,20 @@ code_change(_OldVsn, State, _Extra) -> %% Internal functions %%---------------------------------------------------------------------------- -reconclitiate_quorum_queue_membership(State) -> +reconciliate_quorum_queue_membership(State) -> LocalLeaders = rabbit_amqqueue:list_local_leaders(), ExpectedNodes = rabbit_nodes:list_members(), Running = rabbit_nodes:list_running(), - reconclitiate_quorum_members(ExpectedNodes, Running, LocalLeaders, State, noop). + reconciliate_quorum_members(ExpectedNodes, Running, LocalLeaders, State, noop). -reconclitiate_quorum_members(_ExpectedNodes, _Running, [], _State, Result) -> +reconciliate_quorum_members([], _Running, _, _State, Result) -> + %% if there are no expected nodes rabbit_nodes:list_running/0 encountered + %% an error during query and returned the empty list which is case we need + %% to handle Result; -reconclitiate_quorum_members(ExpectedNodes, Running, [Q | LocalLeaders], +reconciliate_quorum_members(_ExpectedNodes, _Running, [], _State, Result) -> + Result; +reconciliate_quorum_members(ExpectedNodes, Running, [Q | LocalLeaders], #state{target_group_size = TargetSize} = State, OldResult) -> Result = @@ -158,7 +163,7 @@ reconclitiate_quorum_members(ExpectedNodes, Running, [Q | LocalLeaders], _ -> noop end, - reconclitiate_quorum_members(ExpectedNodes, Running, LocalLeaders, State, + reconciliate_quorum_members(ExpectedNodes, Running, LocalLeaders, State, update_result(OldResult, Result)). maybe_remove(_, #state{auto_remove = false}) -> diff --git a/deps/rabbit/src/rabbit_ra_registry.erl b/deps/rabbit/src/rabbit_ra_registry.erl index 19e6944e5d0f..a3046a8f94af 100644 --- a/deps/rabbit/src/rabbit_ra_registry.erl +++ b/deps/rabbit/src/rabbit_ra_registry.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ra_registry). @@ -13,4 +13,4 @@ %% take them into account in operations such as memory calculation and data cleanup. 
%% Hardcoded atm list_not_quorum_clusters() -> - [rabbit_stream_coordinator]. + [rabbit_stream_coordinator, rabbit_khepri:get_ra_cluster_name()]. diff --git a/deps/rabbit/src/rabbit_ra_systems.erl b/deps/rabbit/src/rabbit_ra_systems.erl index daf3df61ba7b..08e15ecb53ba 100644 --- a/deps/rabbit/src/rabbit_ra_systems.erl +++ b/deps/rabbit/src/rabbit_ra_systems.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ra_systems). @@ -14,12 +14,16 @@ -export([setup/0, setup/1, all_ra_systems/0, - ensure_ra_system_started/1]). + are_running/0, + ensure_ra_system_started/1, + ensure_started/0, + ensure_stopped/0]). -type ra_system_name() :: atom(). -define(COORD_WAL_MAX_SIZE_B, 64_000_000). -define(QUORUM_AER_MAX_RPC_SIZE, 16). +-define(QUORUM_DEFAULT_WAL_MAX_ENTRIES, 500_000). -spec setup() -> ok | no_return(). @@ -29,9 +33,7 @@ setup() -> -spec setup(Context :: map()) -> ok | no_return(). setup(_) -> - ?LOG_DEBUG("Starting Ra systems"), - lists:foreach(fun ensure_ra_system_started/1, all_ra_systems()), - ?LOG_DEBUG("Ra systems started"), + ensure_started(), ok. -spec all_ra_systems() -> [ra_system_name()]. @@ -40,6 +42,41 @@ all_ra_systems() -> [quorum_queues, coordination]. +-spec are_running() -> AreRunning when + AreRunning :: boolean(). + +are_running() -> + try + %% FIXME: We hard-code the name of an internal Ra process here. + Children = supervisor:which_children(ra_systems_sup), + lists:all( + fun(RaSystem) -> + is_ra_system_running(Children, RaSystem) + end, + all_ra_systems()) + catch + exit:{noproc, _} -> + false + end. + +is_ra_system_running(Children, RaSystem) -> + case lists:keyfind(RaSystem, 1, Children) of + {RaSystem, Child, _, _} -> is_pid(Child); + false -> false + end. + +-spec ensure_started() -> ok | no_return(). + +ensure_started() -> + ?LOG_DEBUG( + "Starting Ra systems", + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + lists:foreach(fun ensure_ra_system_started/1, all_ra_systems()), + ?LOG_DEBUG( + "Ra systems started", + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ok. + -spec ensure_ra_system_started(ra_system_name()) -> ok | no_return(). 
ensure_ra_system_started(RaSystem) -> @@ -75,15 +112,26 @@ get_config(quorum_queues = RaSystem) -> DefaultConfig = get_default_config(), Checksums = application:get_env(rabbit, quorum_compute_checksums, true), WalChecksums = application:get_env(rabbit, quorum_wal_compute_checksums, Checksums), - SegmentChecksums = application:get_env(rabbit, quorum_segment_compute_checksums, Checksums), + SegmentChecksums = application:get_env(rabbit, quorum_segment_compute_checksums, + Checksums), + WalMaxEntries = case DefaultConfig of + #{wal_max_entries := MaxEntries} + when is_integer(MaxEntries) -> + MaxEntries; + _ -> + ?QUORUM_DEFAULT_WAL_MAX_ENTRIES + end, AERBatchSize = application:get_env(rabbit, quorum_max_append_entries_rpc_batch_size, ?QUORUM_AER_MAX_RPC_SIZE), - CompressMemTables = application:get_env(rabbit, quorum_compress_mem_tables, false), + CompressMemTables = application:get_env(rabbit, quorum_compress_mem_tables, true), DefaultConfig#{name => RaSystem, default_max_append_entries_rpc_batch_size => AERBatchSize, wal_compute_checksums => WalChecksums, + wal_max_entries => WalMaxEntries, segment_compute_checksums => SegmentChecksums, - compress_mem_tables => CompressMemTables}; + compress_mem_tables => CompressMemTables, + server_recovery_strategy => {rabbit_quorum_queue, + system_recover, []}}; get_config(coordination = RaSystem) -> DefaultConfig = get_default_config(), CoordDataDir = filename:join( @@ -98,3 +146,29 @@ get_config(coordination = RaSystem) -> get_default_config() -> ra_system:default_config(). + +-spec ensure_stopped() -> ok | no_return(). + +ensure_stopped() -> + ?LOG_DEBUG( + "Stopping Ra systems", + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + lists:foreach(fun ensure_ra_system_stopped/1, all_ra_systems()), + ?LOG_DEBUG( + "Ra systems stopped", + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ok. + +-spec ensure_ra_system_stopped(ra_system_name()) -> ok | no_return(). + +ensure_ra_system_stopped(RaSystem) -> + case ra_system:stop(RaSystem) of + ok -> + ok; + {error, _} = Error -> + ?LOG_ERROR( + "Failed to stop Ra system \"~ts\": ~tp", + [RaSystem, Error], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + throw(Error) + end. diff --git a/deps/rabbit/src/rabbit_reader.erl b/deps/rabbit/src/rabbit_reader.erl index 637b3c1a1626..228d12ba2ac9 100644 --- a/deps/rabbit/src/rabbit_reader.erl +++ b/deps/rabbit/src/rabbit_reader.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_reader). @@ -60,6 +60,8 @@ %% from connection storms and DoS. -define(SILENT_CLOSE_DELAY, 3). -define(CHANNEL_MIN, 1). +%% AMQP 1.0 §5.3 +-define(PROTOCOL_ID_SASL, 3). 
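Note: ?PROTOCOL_ID_SASL is the AMQP 1.0 protocol id (3) that selects the SASL security layer during version negotiation (AMQP 1.0 §2.2 and §5.3). For reference, the 8-byte header a conforming client sends to request it, written as an Erlang binary (a sketch, not code from this patch):

    %% "AMQP" followed by protocol id 3, major 1, minor 0, revision 0
    SaslHeader = <<"AMQP", 3, 1, 0, 0>>.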
%%-------------------------------------------------------------------------- @@ -78,7 +80,9 @@ %% pre_init | securing | running | blocking | blocked | closing | closed | {become, F} connection_state, %% see comment in rabbit_connection_sup:start_link/0 - helper_sup, + helper_sup :: {HelperSupAmqp091 :: pid(), + HelperSupAmqp10 :: pid()} % pre version negotiation + | pid(), % post version negotiation %% takes care of cleaning up exclusive queues, %% see rabbit_queue_collector queue_collector, @@ -145,11 +149,10 @@ %%-------------------------------------------------------------------------- --spec start_link(pid(), any()) -> rabbit_types:ok(pid()). - -start_link(HelperSup, Ref) -> - Pid = proc_lib:spawn_link(?MODULE, init, [self(), HelperSup, Ref]), - +-spec start_link({pid(), pid()}, ranch:ref()) -> + rabbit_types:ok(pid()). +start_link(HelperSups, Ref) -> + Pid = proc_lib:spawn_link(?MODULE, init, [self(), HelperSups, Ref]), {ok, Pid}. -spec shutdown(pid(), string()) -> 'ok'. @@ -157,14 +160,18 @@ start_link(HelperSup, Ref) -> shutdown(Pid, Explanation) -> gen_server:call(Pid, {shutdown, Explanation}, infinity). --spec init(pid(), pid(), any()) -> no_return(). - -init(Parent, HelperSup, Ref) -> +-spec init(pid(), {pid(), pid()}, ranch:ref()) -> + no_return(). +init(Parent, HelperSups, Ref) -> ?LG_PROCESS_TYPE(reader), + %% Note: + %% This function could return an error if the handshake times out. + %% It is less likely to happen here as compared to MQTT, so + %% crashing with a `badmatch` seems appropriate. {ok, Sock} = rabbit_networking:handshake(Ref, application:get_env(rabbit, proxy_protocol, false)), Deb = sys:debug_options([]), - start_connection(Parent, HelperSup, Ref, Deb, Sock). + start_connection(Parent, HelperSups, Ref, Deb, Sock). -spec system_continue(_,_,{[binary()], non_neg_integer(), #v1{}}) -> any(). @@ -230,7 +237,7 @@ server_properties(Protocol) -> NormalizedConfigServerProps = [{<<"capabilities">>, table, server_capabilities(Protocol)} | [case X of - {KeyAtom, Value} -> {list_to_binary(atom_to_list(KeyAtom)), + {KeyAtom, Value} -> {atom_to_binary(KeyAtom), longstr, maybe_list_to_binary(Value)}; {BinKey, Type, Value} -> {BinKey, Type, Value} @@ -291,10 +298,10 @@ socket_op(Sock, Fun) -> exit(normal) end. --spec start_connection(pid(), pid(), ranch:ref(), any(), rabbit_net:socket()) -> +-spec start_connection(pid(), {pid(), pid()}, ranch:ref(), any(), rabbit_net:socket()) -> no_return(). 
-start_connection(Parent, HelperSup, RanchRef, Deb, Sock) -> +start_connection(Parent, HelperSups, RanchRef, Deb, Sock) -> process_flag(trap_exit, true), RealSocket = rabbit_net:unwrap_socket(Sock), Name = case rabbit_net:connection_string(Sock, inbound) of @@ -337,7 +344,7 @@ start_connection(Parent, HelperSup, RanchRef, Deb, Sock) -> pending_recv = false, connection_state = pre_init, queue_collector = undefined, %% started on tune-ok - helper_sup = HelperSup, + helper_sup = HelperSups, heartbeater = none, channel_sup_sup_pid = none, channel_count = 0, @@ -356,24 +363,20 @@ start_connection(Parent, HelperSup, RanchRef, Deb, Sock) -> %% connection was closed cleanly by the client #v1{connection = #connection{user = #user{username = Username}, vhost = VHost}} -> - rabbit_log_connection:info("closing AMQP connection ~tp (~ts, vhost: '~ts', user: '~ts')", - [self(), dynamic_connection_name(Name), VHost, Username]); + rabbit_log_connection:info("closing AMQP connection (~ts, vhost: '~ts', user: '~ts')", + [dynamic_connection_name(Name), VHost, Username]); %% just to be more defensive _ -> - rabbit_log_connection:info("closing AMQP connection ~tp (~ts)", - [self(), dynamic_connection_name(Name)]) + rabbit_log_connection:info("closing AMQP connection (~ts)", + [dynamic_connection_name(Name)]) end catch Ex -> - log_connection_exception(dynamic_connection_name(Name), Ex) + log_connection_exception(dynamic_connection_name(Name), Ex) after %% We don't call gen_tcp:close/1 here since it waits for %% pending output to be sent, which results in unnecessary - %% delays. We could just terminate - the reader is the - %% controlling process and hence its termination will close - %% the socket. However, to keep the file_handle_cache - %% accounting as accurate as possible we ought to close the - %% socket w/o delay before termination. + %% delays. rabbit_net:fast_close(RealSocket), rabbit_networking:unregister_connection(self()), rabbit_core_metrics:connection_closed(self()), @@ -431,6 +434,12 @@ log_connection_exception(Severity, Name, {handshake_error, tuning, _Channel, log_connection_exception_with_severity(Severity, "closing AMQP connection ~tp (~ts):~nfailed to negotiate connection parameters: ~ts", [self(), Name, Explanation]); +log_connection_exception(Severity, Name, {sasl_required, ProtocolId}) -> + log_connection_exception_with_severity( + Severity, + "closing AMQP 1.0 connection (~ts): RabbitMQ requires SASL " + "security layer (expected protocol ID 3, but client sent protocol ID ~b)", + [Name, ProtocolId]); %% old exception structure log_connection_exception(Severity, Name, connection_closed_abruptly) -> log_connection_exception_with_severity(Severity, @@ -499,8 +508,8 @@ mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock, %% %% The goal is to not log TCP healthchecks (a connection %% with no data received) unless specified otherwise. 
- Fmt = "accepting AMQP connection ~tp (~ts)", - Args = [self(), ConnName], + Fmt = "accepting AMQP connection ~ts", + Args = [ConnName], case Recv of closed -> _ = rabbit_log_connection:debug(Fmt, Args); _ -> _ = rabbit_log_connection:info(Fmt, Args) @@ -904,7 +913,7 @@ create_channel(Channel, capabilities = Capabilities, user = #user{username = Username} = User} } = State) -> - case rabbit_auth_backend_internal:is_over_channel_limit(Username) of + case is_over_limits(Username) of false -> {ok, _ChSupPid, {ChPid, AState}} = rabbit_channel_sup_sup:start_channel( @@ -915,11 +924,45 @@ create_channel(Channel, put({ch_pid, ChPid}, {Channel, MRef}), put({channel, Channel}, {ChPid, AState}), {ok, {ChPid, AState}, State#v1{channel_count = ChannelCount + 1}}; + {true, Limit, Fmt} -> + {error, rabbit_misc:amqp_error( + not_allowed, + Fmt, + [node(), Limit], 'none')} + end. + +is_over_limits(Username) -> + case rabbit_auth_backend_internal:is_over_channel_limit(Username) of + false -> + case is_over_node_channel_limit() of + false -> + false; + {true, Limit} -> + Fmt = + "number of channels opened on node '~ts' has reached " + "the maximum allowed limit of (~w)", + {true, Limit, Fmt} + end; {true, Limit} -> - {error, rabbit_misc:amqp_error(not_allowed, - "number of channels opened for user '~ts' has reached " - "the maximum allowed user limit of (~w)", - [Username, Limit], 'none')} + Fmt = + "number of channels opened for user '~ts' has reached " + "the maximum allowed user limit of (~w)", + {true, Limit, Fmt} + end. + +is_over_node_channel_limit() -> + case rabbit_misc:get_env(rabbit, channel_max_per_node, infinity) of + infinity -> + false; + NodeLimit -> + %% Only fetch this if a limit is set + CurrNodeChannels = rabbit_channel_tracking:channel_count_on_node(node()), + case CurrNodeChannels < NodeLimit of + true -> + false; + false -> + {true, NodeLimit} + end end. channel_cleanup(ChPid, State = #v1{channel_count = ChannelCount}) -> @@ -1044,76 +1087,63 @@ handle_input({frame_payload, Type, Channel, PayloadSize}, Data, State) -> Type, Channel, Payload, State) end; handle_input(handshake, <<"AMQP", A, B, C, D, Rest/binary>>, State) -> - {Rest, handshake({A, B, C, D}, State)}; + {Rest, version_negotiation({A, B, C, D}, State)}; handle_input(handshake, <>, #v1{sock = Sock}) -> refuse_connection(Sock, {bad_header, Other}); handle_input(Callback, Data, _State) -> throw({bad_input, Callback, Data}). -%% The two rules pertaining to version negotiation: -%% -%% * If the server cannot support the protocol specified in the -%% protocol header, it MUST respond with a valid protocol header and -%% then close the socket connection. -%% -%% * The server MUST provide a protocol version that is lower than or -%% equal to that requested by the client in the protocol header. -handshake({0, 0, 9, 1}, State) -> - start_connection({0, 9, 1}, rabbit_framing_amqp_0_9_1, State); - -%% This is the protocol header for 0-9, which we can safely treat as -%% though it were 0-9-1. -handshake({1, 1, 0, 9}, State) -> - start_connection({0, 9, 0}, rabbit_framing_amqp_0_9_1, State); - -%% This is what most clients send for 0-8. The 0-8 spec, confusingly, -%% defines the version as 8-0. -handshake({1, 1, 8, 0}, State) -> - start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State); - -%% The 0-8 spec as on the AMQP web site actually has this as the -%% protocol header; some libraries e.g., py-amqplib, send it when they -%% want 0-8. 
-handshake({1, 1, 9, 1}, State) -> - start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State); - -%% ... and finally, the 1.0 spec is crystal clear! -handshake({Id, 1, 0, 0}, State) -> - become_1_0(Id, State); - -handshake(Vsn, #v1{sock = Sock}) -> +%% AMQP 1.0 §2.2 +version_negotiation({?PROTOCOL_ID_SASL, 1, 0, 0}, State) -> + become_10(State); +version_negotiation({ProtocolId, 1, 0, 0}, #v1{sock = Sock}) -> + %% AMQP 1.0 figure 2.13: We require SASL security layer. + refuse_connection(Sock, {sasl_required, ProtocolId}); +version_negotiation({0, 0, 9, 1}, State) -> + start_091_connection({0, 9, 1}, rabbit_framing_amqp_0_9_1, State); +version_negotiation({1, 1, 0, 9}, State) -> + %% This is the protocol header for 0-9, which we can safely treat as though it were 0-9-1. + start_091_connection({0, 9, 0}, rabbit_framing_amqp_0_9_1, State); +version_negotiation(Vsn = {0, 0, Minor, _}, #v1{sock = Sock}) + when Minor >= 9 -> + refuse_connection(Sock, {bad_version, Vsn}, {0, 0, 9, 1}); +version_negotiation(Vsn, #v1{sock = Sock}) -> refuse_connection(Sock, {bad_version, Vsn}). %% Offer a protocol version to the client. Connection.start only %% includes a major and minor version number, Luckily 0-9 and 0-9-1 %% are similar enough that clients will be happy with either. -start_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision}, - Protocol, - State = #v1{sock = Sock, connection = Connection}) -> +start_091_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision}, + Protocol, + #v1{parent = Parent, + sock = Sock, + helper_sup = {HelperSup091, _HelperSup10}, + connection = Connection} = State0) -> + ok = rabbit_connection_sup:remove_connection_helper_sup(Parent, helper_sup_amqp_10), rabbit_networking:register_connection(self()), Start = #'connection.start'{ - version_major = ProtocolMajor, - version_minor = ProtocolMinor, - server_properties = server_properties(Protocol), - mechanisms = auth_mechanisms_binary(Sock), - locales = <<"en_US">> }, + version_major = ProtocolMajor, + version_minor = ProtocolMinor, + server_properties = server_properties(Protocol), + mechanisms = auth_mechanisms_binary(Sock), + locales = <<"en_US">> }, ok = send_on_channel0(Sock, Start, Protocol), - switch_callback(State#v1{connection = Connection#connection{ - timeout_sec = ?NORMAL_TIMEOUT, - protocol = Protocol}, - connection_state = starting}, - frame_header, 7). + State = State0#v1{connection = Connection#connection{ + timeout_sec = ?NORMAL_TIMEOUT, + protocol = Protocol}, + connection_state = starting, + helper_sup = HelperSup091}, + switch_callback(State, frame_header, 7). + +-spec refuse_connection(rabbit_net:socket(), any()) -> no_return(). +refuse_connection(Sock, Exception) -> + refuse_connection(Sock, Exception, {?PROTOCOL_ID_SASL, 1, 0, 0}). -spec refuse_connection(_, _, _) -> no_return(). refuse_connection(Sock, Exception, {A, B, C, D}) -> ok = inet_op(fun () -> rabbit_net:send(Sock, <<"AMQP",A,B,C,D>>) end), throw(Exception). --spec refuse_connection(rabbit_net:socket(), any()) -> no_return(). - -refuse_connection(Sock, Exception) -> - refuse_connection(Sock, Exception, {0, 0, 9, 1}). 
- ensure_stats_timer(State = #v1{connection_state = running}) -> rabbit_event:ensure_stats_timer(State, #v1.stats_timer, emit_stats); ensure_stats_timer(State) -> @@ -1249,9 +1279,8 @@ handle_method0(#'connection.open'{virtual_host = VHost}, rabbit_event:notify(connection_created, Infos), maybe_emit_stats(State1), rabbit_log_connection:info( - "connection ~tp (~ts): " - "user '~ts' authenticated and granted access to vhost '~ts'", - [self(), dynamic_connection_name(ConnName), Username, VHost]), + "connection ~ts: user '~ts' authenticated and granted access to vhost '~ts'", + [dynamic_connection_name(ConnName), Username, VHost]), State1; handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) -> lists:foreach(fun rabbit_channel:shutdown/1, all_channels()), @@ -1275,9 +1304,9 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas log_name = ConnName} = Conn, sock = Sock}) when ?IS_RUNNING(State) -> rabbit_log_connection:debug( - "connection ~tp (~ts) of user '~ts': " - "asked to update secret, reason: ~ts", - [self(), dynamic_connection_name(ConnName), Username, Reason]), + "connection ~ts of user '~ts': " + "asked to update secret, reason: ~ts", + [dynamic_connection_name(ConnName), Username, Reason]), case rabbit_access_control:update_state(User, NewSecret) of {ok, User1} -> %% User/auth backend state has been updated. Now we can propagate it to channels @@ -1292,9 +1321,8 @@ handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reas end, all_channels()), ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol), rabbit_log_connection:info( - "connection ~tp (~ts): " - "user '~ts' updated secret, reason: ~ts", - [self(), dynamic_connection_name(ConnName), Username, Reason]), + "connection ~ts: user '~ts' updated secret, reason: ~ts", + [dynamic_connection_name(ConnName), Username, Reason]), State#v1{connection = Conn#connection{user = User1}}; {refused, Message} -> rabbit_log_connection:error("Secret update was refused for user '~ts': ~tp", @@ -1348,7 +1376,12 @@ is_over_vhost_connection_limit(VHostPath, User) -> [VHostPath, User#user.username, Limit]) catch throw:{error, {no_such_vhost, VHostPath}} -> - rabbit_misc:protocol_error(not_allowed, "vhost ~ts not found", [VHostPath]) + rabbit_misc:protocol_error(not_allowed, "vhost ~ts not found", [VHostPath]); + throw:{error, {cannot_get_limit, VHostPath, timeout}} -> + rabbit_misc:protocol_error(not_allowed, + "access to vhost '~ts' refused for user '~ts': " + "connection limit cannot be queried, timeout", + [VHostPath, User#user.username]) end. is_over_user_connection_limit(#user{username = Username}) -> @@ -1603,33 +1636,26 @@ emit_stats(State) -> State1 = rabbit_event:reset_stats_timer(State, #v1.stats_timer), ensure_stats_timer(State1). -%% 1.0 stub --spec become_1_0(non_neg_integer(), #v1{}) -> no_return(). - -become_1_0(Id, State = #v1{sock = Sock}) -> - case code:is_loaded(rabbit_amqp1_0_reader) of - false -> refuse_connection(Sock, amqp1_0_plugin_not_enabled); - _ -> Mode = case Id of - 0 -> amqp; - 3 -> sasl; - _ -> refuse_connection( - Sock, {unsupported_amqp1_0_protocol_id, Id}, - {3, 1, 0, 0}) - end, - F = fun (_Deb, Buf, BufLen, S) -> - {rabbit_amqp1_0_reader, init, - [Mode, pack_for_1_0(Buf, BufLen, S)]} - end, - State#v1{connection_state = {become, F}} - end. +become_10(State) -> + Fun = fun(_Deb, Buf, BufLen, State0) -> + {rabbit_amqp_reader, init, + [pack_for_1_0(Buf, BufLen, State0)]} + end, + State#v1{connection_state = {become, Fun}}. 
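Note: earlier in this file the reader gains a per-node channel limit via is_over_node_channel_limit/0, read from the channel_max_per_node application setting (unlimited, i.e. infinity, by default). A sketch of setting it through advanced.config; the value is illustrative only:

    [
     {rabbit, [
       {channel_max_per_node, 5000}
     ]}
    ].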
-pack_for_1_0(Buf, BufLen, #v1{parent = Parent, - sock = Sock, - recv_len = RecvLen, +pack_for_1_0(Buf, BufLen, #v1{sock = Sock, pending_recv = PendingRecv, - helper_sup = SupPid, - proxy_socket = ProxySocket}) -> - {Parent, Sock, RecvLen, PendingRecv, SupPid, Buf, BufLen, ProxySocket}. + helper_sup = {_HelperSup091, HelperSup10}, + proxy_socket = ProxySocket, + connection = #connection{ + name = Name, + host = Host, + peer_host = PeerHost, + port = Port, + peer_port = PeerPort, + connected_at = ConnectedAt}}) -> + {Sock, PendingRecv, HelperSup10, Buf, BufLen, ProxySocket, + Name, Host, PeerHost, Port, PeerPort, ConnectedAt}. respond_and_close(State, Channel, Protocol, Reason, LogErr) -> log_hard_error(State, Channel, LogErr), @@ -1763,7 +1789,8 @@ augment_connection_log_name(#connection{name = Name} = Connection) -> Connection; UserSpecifiedName -> LogName = <>, - rabbit_log_connection:info("connection ~tp (~ts) has a client-provided name: ~ts", [self(), Name, UserSpecifiedName]), + rabbit_log_connection:info("connection ~ts has a client-provided name: ~ts", + [Name, UserSpecifiedName]), ?store_proc_name(LogName), Connection#connection{log_name = LogName} end. diff --git a/deps/rabbit/src/rabbit_recovery_terms.erl b/deps/rabbit/src/rabbit_recovery_terms.erl index fcaf9d645b0a..8eecfa991bdf 100644 --- a/deps/rabbit/src/rabbit_recovery_terms.erl +++ b/deps/rabbit/src/rabbit_recovery_terms.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% We use a gen_server simply so that during the terminate/2 call diff --git a/deps/rabbit/src/rabbit_release_series.erl b/deps/rabbit/src/rabbit_release_series.erl index 20ab366848f2..55c164e093ae 100644 --- a/deps/rabbit/src/rabbit_release_series.erl +++ b/deps/rabbit/src/rabbit_release_series.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_release_series). @@ -10,30 +10,16 @@ -define(EOL_DATE_KEY, release_series_eol_date). -export([ - eol_date/0, is_currently_supported/0, readable_support_status/0 ]). --spec eol_date() -> rabbit_types:maybe(calendar:date()). -eol_date() -> - case application:get_env(rabbit, ?EOL_DATE_KEY) of - undefined -> none; - {ok, none} -> none; - {ok, {_Y, _M, _D} = Date} -> Date; - _ -> none - end. - +%% Retained for backwards compatibility with older CLI tools. -spec is_currently_supported() -> boolean(). is_currently_supported() -> - case eol_date() of - none -> true; - Date -> not rabbit_date_time:is_in_the_past(Date) - end. + true. +%% Retained for backwards compatibility with older CLI tools. -spec readable_support_status() -> binary(). readable_support_status() -> - case is_currently_supported() of - false -> <<"out of support">>; - _ -> <<"supported">> - end. \ No newline at end of file + <<"supported">>. 
diff --git a/deps/rabbit/src/rabbit_restartable_sup.erl b/deps/rabbit/src/rabbit_restartable_sup.erl index 551ea3051d76..c1e575269508 100644 --- a/deps/rabbit/src/rabbit_restartable_sup.erl +++ b/deps/rabbit/src/rabbit_restartable_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_restartable_sup). diff --git a/deps/rabbit/src/rabbit_router.erl b/deps/rabbit/src/rabbit_router.erl index 4e0051b178d1..b26c5e004b7f 100644 --- a/deps/rabbit/src/rabbit_router.erl +++ b/deps/rabbit/src/rabbit_router.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_router). -include_lib("stdlib/include/qlc.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([match_bindings/2, match_routing_key/2]). %%---------------------------------------------------------------------------- diff --git a/deps/rabbit/src/rabbit_runtime_parameters.erl b/deps/rabbit/src/rabbit_runtime_parameters.erl index 99db88861939..3a9af73c265b 100644 --- a/deps/rabbit/src/rabbit_runtime_parameters.erl +++ b/deps/rabbit/src/rabbit_runtime_parameters.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_runtime_parameters). @@ -44,10 +44,10 @@ -export([parse_set/5, set/5, set_any/5, clear/4, clear_any/4, list/0, list/1, list_component/1, list/2, list_formatted/1, list_formatted/3, - lookup/3, value/3, value/4, info_keys/0, clear_vhost/2, + lookup/3, value/3, info_keys/0, clear_vhost/2, clear_component/2]). --export([parse_set_global/3, set_global/3, value_global/1, value_global/2, +-export([parse_set_global/3, set_global/3, value_global/1, list_global/0, list_global_formatted/0, list_global_formatted/2, lookup_global/1, global_info_keys/0, clear_global/2]). @@ -165,7 +165,11 @@ is_within_limit(Component) -> Limit = proplists:get_value(Component, Limits, -1), case Limit < 0 orelse count_component(Component) < Limit of true -> ok; - false -> {errors, [{"component ~ts is limited to ~tp per node", [Component, Limit]}]} + false -> + ErrorMsg = "Limit reached: component ~ts is limited to ~tp per node", + ErrorArgs = [Component, Limit], + rabbit_log:error(ErrorMsg, ErrorArgs), + {errors, [{"component ~ts is limited to ~tp per node", [Component, Limit]}]} end. count_component(Component) -> length(list_component(Component)). @@ -203,8 +207,28 @@ clear_global(Key, ActingUser) -> {user_who_performed_action, ActingUser}]) end. -clear_vhost(VHostName, _ActingUser) when is_binary(VHostName) -> - ok = rabbit_db_rtparams:delete(VHostName, '_', '_'). 
+clear_vhost(VHostName, ActingUser) when is_binary(VHostName) -> + case rabbit_db_rtparams:delete_vhost(VHostName) of + {ok, DeletedParams} -> + lists:foreach( + fun(#runtime_parameters{key = {_VHost, Component, Name}}) -> + case lookup_component(Component) of + {ok, Mod} -> + event_notify( + parameter_cleared, VHostName, Component, + [{name, Name}, + {user_who_performed_action, ActingUser}]), + Mod:notify_clear( + VHostName, Component, Name, ActingUser), + ok; + _ -> + ok + end + end, DeletedParams), + ok; + {error, _} = Err -> + Err + end. clear_component(<<"policy">>, _) -> {error_string, "policies may not be cleared using this method"}; @@ -323,39 +347,23 @@ lookup_global(Name) -> value(VHost, Comp, Name) -> value0({VHost, Comp, Name}). --spec value(rabbit_types:vhost(), binary(), binary(), term()) -> term(). - -value(VHost, Comp, Name, Def) -> value0({VHost, Comp, Name}, Def). - -spec value_global(atom()) -> term() | 'not_found'. value_global(Key) -> value0(Key). --spec value_global(atom(), term()) -> term(). - -value_global(Key, Default) -> - value0(Key, Default). - value0(Key) -> case lookup0(Key, rabbit_misc:const(not_found)) of not_found -> not_found; Params -> Params#runtime_parameters.value end. -value0(Key, Default) -> - Params = lookup0(Key, fun () -> lookup_missing(Key, Default) end), - Params#runtime_parameters.value. - lookup0(Key, DefaultFun) -> case rabbit_db_rtparams:get(Key) of undefined -> DefaultFun(); Record -> Record end. -lookup_missing(Key, Default) -> - rabbit_db_rtparams:get_or_set(Key, Default). - p(#runtime_parameters{key = {VHost, Component, Name}, value = Value}) -> [{vhost, VHost}, {component, Component}, diff --git a/deps/rabbit/src/rabbit_ssl.erl b/deps/rabbit/src/rabbit_ssl.erl index abd94f4ba733..ffb56cd08c7b 100644 --- a/deps/rabbit/src/rabbit_ssl.erl +++ b/deps/rabbit/src/rabbit_ssl.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ssl). diff --git a/deps/rabbit/src/rabbit_stream_coordinator.erl b/deps/rabbit/src/rabbit_stream_coordinator.erl index 0cffabb7f08c..0846dd58d1e0 100644 --- a/deps/rabbit/src/rabbit_stream_coordinator.erl +++ b/deps/rabbit/src/rabbit_stream_coordinator.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_coordinator). @@ -11,6 +11,7 @@ -export([format_ra_event/2]). +%% machine callbacks -export([init/1, apply/3, state_enter/2, @@ -21,42 +22,58 @@ which_module/1, overview/1]). --export([recover/0, +-export([update_config/2, + policy_changed/1]). + +%% coordinator API +-export([process_command/1, + recover/0, stop/0, - add_replica/2, - delete_replica/2, - register_listener/1, - register_local_member_listener/1]). + transfer_leadership/1, + forget_node/1, + status/0, + member_overview/0 + ]). +%% stream API -export([new_stream/2, restart_stream/1, restart_stream/2, delete_stream/2, - transfer_leadership/1]). 
- --export([policy_changed/1]). + add_replica/2, + delete_replica/2, + register_listener/1, + register_local_member_listener/1 + ]). -export([local_pid/1, writer_pid/1, members/1, stream_overview/1]). + +%% machine queries -export([query_local_pid/3, query_writer_pid/2, query_members/2, query_stream_overview/2]). --export([log_overview/1]). --export([replay/1]). +-export([log_overview/1, + key_metrics_rpc/1 + ]). %% for SAC coordinator --export([process_command/1, - sac_state/1]). +-export([sac_state/1]). %% for testing and debugging -export([eval_listeners/3, + replay/1, state/0]). +-import(rabbit_queue_type_util, [ + erpc_call/5 + ]). + -rabbit_boot_step({?MODULE, [{description, "Restart stream coordinator"}, {mfa, {?MODULE, recover, []}}, @@ -157,19 +174,14 @@ restart_stream(QRes, Options) restart_stream(Q, Options) when ?is_amqqueue(Q) andalso ?amqqueue_is_stream(Q) -> - case rabbit_feature_flags:is_enabled(restart_streams) of - true -> - rabbit_log:info("restarting stream ~s in vhost ~s with options ~p", - [maps:get(name, amqqueue:get_type_state(Q)), amqqueue:get_vhost(Q), Options]), - #{name := StreamId} = amqqueue:get_type_state(Q), - case process_command({restart_stream, StreamId, Options}) of - {ok, {ok, LeaderPid}, _} -> - {ok, node(LeaderPid)}; - Err -> - Err - end; - false -> - {error, {feature_flag_not_enabled, restart_stream}} + rabbit_log:info("restarting stream ~s in vhost ~s with options ~p", + [maps:get(name, amqqueue:get_type_state(Q)), amqqueue:get_vhost(Q), Options]), + #{name := StreamId} = amqqueue:get_type_state(Q), + case process_command({restart_stream, StreamId, Options}) of + {ok, {ok, LeaderPid}, _} -> + {ok, node(LeaderPid)}; + Err -> + Err end. delete_stream(Q, ActingUser) @@ -221,7 +233,34 @@ delete_replica(StreamId, Node) -> policy_changed(Q) when ?is_amqqueue(Q) -> StreamId = maps:get(name, amqqueue:get_type_state(Q)), - process_command({policy_changed, StreamId, #{queue => Q}}). + Config = rabbit_stream_queue:update_stream_conf(Q, #{}), + case update_config(Q, Config) of + {ok, ok, _} = Res -> + Res; + {error, feature_not_enabled} -> + %% backwards compatibility + %% TODO: remove in future + process_command({policy_changed, StreamId, #{queue => Q}}); + Err -> + Err + end. + +-spec update_config(amqqueue:amqqueue(), #{atom() => term()}) -> + {ok, ok, ra:server_id()} | {error, not_supported | term()}. +update_config(Q, Config) + when ?is_amqqueue(Q) andalso is_map(Config) -> + %% there are the only a few configuration keys that are safe to + %% update + StreamId = maps:get(name, amqqueue:get_type_state(Q)), + case maps:with([filter_size, + retention, + writer_mod, + replica_mod], Config) of + Conf when map_size(Conf) > 0 -> + process_command({update_config, StreamId, Conf}); + _ -> + {error, no_updatable_keys} + end. sac_state(#?MODULE{single_active_consumer = SacState}) -> SacState. @@ -239,6 +278,8 @@ writer_pid(StreamId) when is_list(StreamId) -> MFA = {?MODULE, query_writer_pid, [StreamId]}, query_pid(StreamId, MFA). +-spec local_pid(string()) -> + {ok, pid()} | {error, not_found | term()}. local_pid(StreamId) when is_list(StreamId) -> MFA = {?MODULE, query_local_pid, [StreamId, node()]}, query_pid(StreamId, MFA). @@ -388,23 +429,32 @@ process_command([Server | Servers], Cmd) -> process_command(Servers, Cmd); {error, noproc} -> process_command(Servers, Cmd); + {error, nodedown} -> + process_command(Servers, Cmd); Reply -> Reply end. 
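Note: the new update_config/2 API above only forwards a small whitelist of stream configuration keys (filter_size, retention, writer_mod, replica_mod) to the coordinator, returning {error, no_updatable_keys} otherwise. A hedged usage sketch; the queue lookup, key values and retention spec are placeholders:

    %% Q is an amqqueue record for an existing stream (illustrative lookup).
    {ok, Q} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, <<"events">>)),
    Config = #{filter_size => 32,
               retention => [{max_bytes, 50000000000}]},
    case rabbit_stream_coordinator:update_config(Q, Config) of
        {ok, ok, _LeaderId} -> ok;
        {error, no_updatable_keys} -> ok;  %% nothing updatable in the map
        {error, _} = Err -> Err
    end.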
ensure_coordinator_started() -> Local = {?MODULE, node()}, - AllNodes = all_coord_members(), + ExpectedMembers = expected_coord_members(), case whereis(?MODULE) of undefined -> global:set_lock(?STREAM_COORDINATOR_STARTUP), Nodes = case ra:restart_server(?RA_SYSTEM, Local) of {error, Reason} when Reason == not_started orelse Reason == name_not_registered -> - OtherNodes = all_coord_members() -- [Local], + OtherNodes = ExpectedMembers -- [Local], + %% this could potentially be slow if some expected + %% members are on nodes that have recently terminated + %% and have left a dangling TCP connection + %% I suspect this very rarely happens as the local coordinator + %% server is started in recover/0 case lists:filter( fun({_, N}) -> - erpc:call(N, erlang, whereis, [?MODULE]) =/= undefined + is_pid(erpc_call(N, erlang, + whereis, [?MODULE], + 1000)) end, OtherNodes) of [] -> start_coordinator_cluster(); @@ -412,16 +462,28 @@ ensure_coordinator_started() -> OtherNodes end; ok -> - AllNodes; + %% TODO: it may be better to do a leader call + %% here as the local member may not have caught up + %% yet + locally_known_members(); {error, {already_started, _}} -> - AllNodes; + locally_known_members(); _ -> - AllNodes + locally_known_members() end, global:del_lock(?STREAM_COORDINATOR_STARTUP), Nodes; _ -> - AllNodes + locally_known_members() + end. + +locally_known_members() -> + %% TODO: use ra_leaderboard and fallback if leaderboard not populated + case ra:members({local, {?MODULE, node()}}) of + {_, Members, _} -> + Members; + Err -> + exit({error_fetching_locally_known_coordinator_members, Err}) end. start_coordinator_cluster() -> @@ -438,9 +500,13 @@ start_coordinator_cluster() -> [] end. -all_coord_members() -> - Nodes = rabbit_nodes:list_running() -- [node()], - [{?MODULE, Node} || Node <- [node() | Nodes]]. +expected_coord_members() -> + Nodes = rabbit_nodes:list_members(), + [{?MODULE, Node} || Node <- Nodes]. + +reachable_coord_members() -> + Nodes = rabbit_nodes:list_reachable(), + [{?MODULE, Node} || Node <- Nodes]. version() -> 4. @@ -679,61 +745,127 @@ all_member_nodes(Streams) -> tick(_Ts, _State) -> [{aux, maybe_resize_coordinator_cluster}]. +members() -> + %% TODO: this can be replaced with a ra_leaderboard + %% lookup after Ra 2.7.3_ + LocalServerId = {?MODULE, node()}, + case whereis(?MODULE) of + undefined -> + %% no local member running, we need to try the cluster + OtherMembers = lists:delete(LocalServerId, reachable_coord_members()), + case ra:members(OtherMembers) of + {_, Members, Leader} -> + {ok, Members, Leader}; + Err -> + Err + end; + _Pid -> + case ra:members({local, LocalServerId}) of + {_, Members, Leader} -> + {ok, Members, Leader}; + Err -> + Err + end + end. 
+ maybe_resize_coordinator_cluster() -> spawn(fun() -> - case ra:members({?MODULE, node()}) of - {_, Members, _} -> + RabbitIsRunning = rabbit:is_running(), + case members() of + {ok, Members, Leader} when RabbitIsRunning -> MemberNodes = [Node || {_, Node} <- Members], - Running = rabbit_nodes:list_running(), - All = rabbit_nodes:list_members(), - case Running -- MemberNodes of + %% TODO: in the future replace with + %% rabbit_presence:list_present/0 + Present = rabbit_nodes:list_running(), + RabbitNodes = rabbit_nodes:list_members(), + AddableNodes = [N || N <- RabbitNodes, + lists:member(N, Present)], + case AddableNodes -- MemberNodes of [] -> ok; - New -> + [New | _] -> + %% any remaining members will be added + %% next tick rabbit_log:info("~ts: New rabbit node(s) detected, " "adding : ~w", [?MODULE, New]), - add_members(Members, New) + add_member(Members, New) end, - case MemberNodes -- All of + case MemberNodes -- RabbitNodes of [] -> ok; - Old -> + [Old | _] -> + %% this ought to be rather rare as the stream + %% coordinator member is now removed as part + %% of the forget_cluster_node command rabbit_log:info("~ts: Rabbit node(s) removed from the cluster, " "deleting: ~w", [?MODULE, Old]), - remove_members(Members, Old) + remove_member(Leader, Members, Old) end; _ -> ok end end). -add_members(_, []) -> - ok; -add_members(Members, [Node | Nodes]) -> +add_member(Members, Node) -> Conf = make_ra_conf(Node, [N || {_, N} <- Members]), + ServerId = {?MODULE, Node}, case ra:start_server(?RA_SYSTEM, Conf) of ok -> - case ra:add_member(Members, {?MODULE, Node}) of - {ok, NewMembers, _} -> - add_members(NewMembers, Nodes); - _ -> - add_members(Members, Nodes) + case ra:add_member(Members, ServerId) of + {ok, _, _} -> + ok; + {error, Err} -> + rabbit_log:warning("~ts: Failed to add member, reason ~w" + "deleting started server on ~w", + [?MODULE, Err, Node]), + case ra:force_delete_server(?RA_SYSTEM, ServerId) of + ok -> + ok; + Err -> + rabbit_log:warning("~ts: Failed to delete server " + "on ~w, reason ~w", + [?MODULE, Node, Err]), + ok + end + end; + {error, {already_started, _}} -> + case lists:member(ServerId, Members) of + true -> + %% this feels like an unlikely scenario but best to handle + %% it just in case + ok; + false -> + %% there is a server running but is not a member of the + %% stream coordinator cluster + %% In this case it needs to be deleted + rabbit_log:warning("~ts: server already running on ~w but not + part of cluster, " + "deleting started server", + [?MODULE, Node]), + case ra:force_delete_server(?RA_SYSTEM, ServerId) of + ok -> + ok; + Err -> + rabbit_log:warning("~ts: Failed to delete server " + "on ~w, reason ~w", + [?MODULE, Node, Err]), + ok + end end; Error -> - rabbit_log:warning("Stream coordinator failed to start on node ~ts : ~W", + rabbit_log:warning("Stream coordinator server failed to start on node ~ts : ~W", [Node, Error, 10]), - add_members(Members, Nodes) + ok end. -remove_members(_, []) -> - ok; -remove_members(Members, [Node | Nodes]) -> - case ra:remove_member(Members, {?MODULE, Node}) of - {ok, NewMembers, _} -> - remove_members(NewMembers, Nodes); - _ -> - remove_members(Members, Nodes) +remove_member(Leader, Members, Node) -> + ToRemove = {?MODULE, Node}, + case lists:member(ToRemove, Members) of + true -> + ra:leave_and_delete_server(?RA_SYSTEM, Leader, ToRemove); + false -> + ok end. 
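The resize decision in maybe_resize_coordinator_cluster/0 reduces to two list subtractions: cluster members that are present but not yet coordinator members are candidates to add (one per tick), and coordinator members whose node has left the RabbitMQ cluster are candidates to remove. A toy example with atoms standing in for node names:

    1> MemberNodes = [n1, n2], RabbitNodes = [n1, n2, n3], Present = [n1, n3].
    2> [N || N <- RabbitNodes, lists:member(N, Present)] -- MemberNodes.
    [n3]    %% one addable node, added this tick
    3> MemberNodes -- RabbitNodes.
    []      %% nothing to remove in this example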
-record(aux, {actions = #{} :: @@ -898,7 +1030,7 @@ phase_start_replica(StreamId, #{epoch := Epoch, {error, already_present} -> %% need to remove child record if this is the case %% can it ever happen? - _ = osiris_replica:stop(Node, Conf0), + _ = osiris:stop_member(Node, Conf0), send_action_failed(StreamId, starting, Args); {error, {already_started, Pid}} -> %% TODO: we need to check that the current epoch is the same @@ -929,25 +1061,33 @@ send_self_command(Cmd) -> phase_delete_member(StreamId, #{node := Node} = Arg, Conf) -> fun() -> - try osiris_server_sup:delete_child(Node, Conf) of - ok -> - rabbit_log:info("~ts: Member deleted for ~ts : on node ~ts", + case rabbit_nodes:is_member(Node) of + true -> + try osiris:delete_member(Node, Conf) of + ok -> + rabbit_log:info("~ts: Member deleted for ~ts : on node ~ts", + [?MODULE, StreamId, Node]), + send_self_command({member_deleted, StreamId, Arg}); + _ -> + send_action_failed(StreamId, deleting, Arg) + catch _:E -> + rabbit_log:warning("~ts: Error while deleting member for ~ts : on node ~ts ~W", + [?MODULE, StreamId, Node, E, 10]), + maybe_sleep(E), + send_action_failed(StreamId, deleting, Arg) + end; + false -> + %% node is no longer a cluster member, we return success to avoid + %% trying to delete the member indefinitely + rabbit_log:info("~ts: Member deleted/forgotten for ~ts : node ~ts is no longer a cluster member", [?MODULE, StreamId, Node]), - send_self_command({member_deleted, StreamId, Arg}); - _ -> - send_action_failed(StreamId, deleting, Arg) - catch _:E -> - rabbit_log:warning("~ts: Error while deleting member for ~ts : on node ~ts ~W", - [?MODULE, StreamId, Node, E, 10]), - maybe_sleep(E), - send_action_failed(StreamId, deleting, Arg) + send_self_command({member_deleted, StreamId, Arg}) end end. -phase_stop_member(StreamId, #{node := Node, - epoch := Epoch} = Arg0, Conf) -> +phase_stop_member(StreamId, #{node := Node, epoch := Epoch} = Arg0, Conf) -> fun() -> - try osiris_server_sup:stop_child(Node, StreamId) of + try osiris_member:stop(Node, Conf) of ok -> %% get tail try get_replica_tail(Node, Conf) of @@ -966,13 +1106,7 @@ phase_stop_member(StreamId, #{node := Node, [?MODULE, StreamId, Node, Epoch, Err]), maybe_sleep(Err), send_action_failed(StreamId, stopping, Arg0) - end; - Err -> - rabbit_log:warning("~ts: failed to stop " - "member ~ts ~w Error: ~w", - [?MODULE, StreamId, Node, Err]), - maybe_sleep(Err), - send_action_failed(StreamId, stopping, Arg0) + end catch _:Err -> rabbit_log:warning("~ts: failed to stop member ~ts ~w Error: ~w", [?MODULE, StreamId, Node, Err]), @@ -981,10 +1115,9 @@ phase_stop_member(StreamId, #{node := Node, end end. 
-phase_start_writer(StreamId, #{epoch := Epoch, - node := Node} = Args0, Conf) -> +phase_start_writer(StreamId, #{epoch := Epoch, node := Node} = Args0, Conf) -> fun() -> - try osiris_writer:start(Conf) of + try osiris:start_writer(Conf) of {ok, Pid} -> Args = Args0#{epoch => Epoch, pid => Pid}, rabbit_log:info("~ts: started writer ~ts on ~w in ~b", @@ -1072,7 +1205,8 @@ phase_update_mnesia(StreamId, Args, #{reference := QName, amqqueue:set_pid(Q, LeaderPid), Conf); Ts -> S = maps:get(name, Ts, undefined), - rabbit_log:debug("~ts: refusing mnesia update for stale stream id ~ts, current ~ts", + %% TODO log as side-effect + rabbit_log:debug("~ts: refusing mnesia update for stale stream id ~s, current ~s", [?MODULE, StreamId, S]), %% if the stream id isn't a match this is a stale %% update from a previous stream incarnation for the @@ -1097,7 +1231,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName, #{name := S} when S == StreamId -> rabbit_log:debug("~ts: initializing queue record for stream id ~ts", [?MODULE, StreamId]), - _ = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q)), + ok = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q)), ok; _ -> ok @@ -1483,12 +1617,13 @@ update_stream0(#{system_time := _Ts} = _Meta, M end, Members0), Stream0#stream{members = Members}; -update_stream0(#{system_time := _Ts}, - {policy_changed, _StreamId, #{queue := Q}}, - #stream{conf = Conf0, - members = _Members0} = Stream0) -> +update_stream0(_Meta, {policy_changed, _StreamId, #{queue := Q}}, + #stream{conf = Conf0} = Stream0) -> Conf = rabbit_stream_queue:update_stream_conf(Q, Conf0), Stream0#stream{conf = Conf}; +update_stream0(_Meta, {update_config, _StreamId, Conf}, + #stream{conf = Conf0} = Stream0) -> + Stream0#stream{conf = maps:merge(Conf0, Conf)}; update_stream0(_Meta, _Cmd, undefined) -> undefined. @@ -1890,7 +2025,6 @@ make_writer_conf(Node, #stream{epoch = Epoch, replica_nodes => lists:delete(Node, Nodes), epoch => Epoch}. - find_leader(Members) -> case lists:partition( fun ({_, #member{target = deleted}}) -> @@ -2090,5 +2224,106 @@ transfer_leadership([Destination | _] = _TransferCandidates) -> {ok, undefined} end. +-spec forget_node(node()) -> ok | {error, term()}. +forget_node(Node) when is_atom(Node) -> + case ra_directory:uid_of(?RA_SYSTEM, ?MODULE) of + undefined -> + %% if there is no local stream coordinator registered it is likely that the + %% system does not use streams at all and we just return ok + %% here. The alternative would be to do a cluster wide rpc here + %% to check but given there is a fallback + ok; + _ -> + IsRunning = rabbit_nodes:is_running(Node), + ExpectedMembers = expected_coord_members(), + ToRemove = {?MODULE, Node}, + case ra:members(ExpectedMembers) of + {ok, Members, Leader} -> + case lists:member(ToRemove, Members) of + true -> + case ra:remove_member(Leader, ToRemove) of + {ok, _, _} when IsRunning -> + _ = ra:force_delete_server(?RA_SYSTEM, ToRemove), + ok; + {ok, _, _} -> + ok; + {error, _} = Err -> + Err + end; + false -> + ok + end; + Err -> + Err + end + end. + + +-spec member_overview() -> + {ok, map()} | {error, term()}. +member_overview() -> + case whereis(?MODULE) of + undefined -> + {error, local_stream_coordinator_not_running}; + _ -> + case ra:member_overview({?MODULE, node()}) of + {ok, Result, _} -> + {ok, maps:remove(system_config, Result)}; + Err -> + Err + end + end. + +-spec status() -> + [[{binary(), term()}]] | {error, term()}. 
+status() -> + case members() of + {ok, Members, _} -> + [begin + case erpc_call(N, ?MODULE, key_metrics_rpc, [ServerId], ?RPC_TIMEOUT) of + #{state := RaftState, + membership := Membership, + commit_index := Commit, + term := Term, + last_index := Last, + last_applied := LastApplied, + last_written_index := LastWritten, + snapshot_index := SnapIdx, + machine_version := MacVer} -> + [{<<"Node Name">>, N}, + {<<"Raft State">>, RaftState}, + {<<"Membership">>, Membership}, + {<<"Last Log Index">>, Last}, + {<<"Last Written">>, LastWritten}, + {<<"Last Applied">>, LastApplied}, + {<<"Commit Index">>, Commit}, + {<<"Snapshot Index">>, SnapIdx}, + {<<"Term">>, Term}, + {<<"Machine Version">>, MacVer} + ]; + {error, Err} -> + [{<<"Node Name">>, N}, + {<<"Raft State">>, Err}, + {<<"Membership">>, <<>>}, + {<<"LastLog Index">>, <<>>}, + {<<"Last Written">>, <<>>}, + {<<"Last Applied">>, <<>>}, + {<<"Commit Index">>, <<>>}, + {<<"Snapshot Index">>, <<>>}, + {<<"Term">>, <<>>}, + {<<"Machine Version">>, <<>>} + ] + end + end || {_, N} = ServerId <- Members]; + {error, {no_more_servers_to_try, _}} -> + {error, coordinator_not_started_or_available}; + Err -> + Err + end. + +key_metrics_rpc(ServerId) -> + Metrics = ra:key_metrics(ServerId), + Metrics#{machine_version => rabbit_fifo:version()}. + maps_to_list(M) -> lists:sort(maps:to_list(M)). diff --git a/deps/rabbit/src/rabbit_stream_coordinator.hrl b/deps/rabbit/src/rabbit_stream_coordinator.hrl index ea83dafeb533..630a95e1290e 100644 --- a/deps/rabbit/src/rabbit_stream_coordinator.hrl +++ b/deps/rabbit/src/rabbit_stream_coordinator.hrl @@ -5,6 +5,7 @@ -define(PHASE_RETRY_TIMEOUT, 10000). -define(CMD_TIMEOUT, 30000). -define(RA_SYSTEM, coordination). +-define(RPC_TIMEOUT, 1000). -type stream_id() :: string(). -type stream() :: #{conf := osiris:config(), diff --git a/deps/rabbit/src/rabbit_stream_queue.erl b/deps/rabbit/src/rabbit_stream_queue.erl index b9d9a72e4741..e36ad708eb9a 100644 --- a/deps/rabbit/src/rabbit_stream_queue.erl +++ b/deps/rabbit/src/rabbit_stream_queue.erl @@ -2,10 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_queue). +-include("mc.hrl"). -behaviour(rabbit_queue_type). @@ -18,11 +19,12 @@ recover/2, is_recoverable/1, consume/3, - cancel/5, + cancel/3, handle_event/3, deliver/3, settle/5, - credit/5, + credit_v1/5, + credit/6, dequeue/5, info/2, queue_length/1, @@ -33,6 +35,7 @@ update/2, state_info/1, stat/1, + format/2, capabilities/0, notify_decorators/1, is_stateful/0]). @@ -42,7 +45,8 @@ -export([set_retention_policy/3]). -export([restart_stream/3, add_replica/3, - delete_replica/3]). + delete_replica/3, + delete_all_replicas/1]). -export([format_osiris_event/2]). -export([update_stream_conf/2]). -export([readers/1]). @@ -62,34 +66,41 @@ -define(INFO_KEYS, [name, durable, auto_delete, arguments, leader, members, online, state, messages, messages_ready, messages_unacknowledged, committed_offset, policy, operator_policy, effective_policy_definition, type, memory, - consumers]). + consumers, segments]). -type appender_seq() :: non_neg_integer(). --type msg_id() :: non_neg_integer(). -type msg() :: term(). 
%% TODO: refine --record(stream, {credit :: integer(), - max :: non_neg_integer(), +-record(stream, {mode :: rabbit_queue_type:consume_mode(), + delivery_count :: none | rabbit_queue_type:delivery_count(), + credit :: rabbit_queue_type:credit(), + ack :: boolean(), start_offset = 0 :: non_neg_integer(), listening_offset = 0 :: non_neg_integer(), + last_consumed_offset = 0 :: non_neg_integer(), log :: undefined | osiris_log:state(), + chunk_iterator :: undefined | osiris_log:chunk_iterator(), + %% These messages were already read ahead from the Osiris log, + %% were part of an uncompressed sub batch, and are buffered in + %% reversed order until the consumer has more credits to consume them. + buffer_msgs_rev = [] :: [rabbit_amqqueue:qmsg()], reader_options :: map()}). -record(stream_client, {stream_id :: string(), - name :: term(), + name :: rabbit_amqqueue:name(), leader :: pid(), local_pid :: undefined | pid(), next_seq = 1 :: non_neg_integer(), - correlation = #{} :: #{appender_seq() => {msg_id(), msg()}}, + correlation = #{} :: #{appender_seq() => {rabbit_queue_type:correlation(), msg()}}, soft_limit :: non_neg_integer(), slow = false :: boolean(), - readers = #{} :: #{term() => #stream{}}, - writer_id :: binary(), - filtering_supported :: boolean() + readers = #{} :: #{rabbit_types:ctag() => #stream{}}, + writer_id :: binary() }). -import(rabbit_queue_type_util, [args_policy_lookup/3]). +-import(rabbit_misc, [queue_resource/2]). -type client() :: #stream_client{}. @@ -166,25 +177,37 @@ create_stream(Q0) -> case rabbit_stream_coordinator:new_stream(Q, Leader) of {ok, {ok, LeaderPid}, _} -> %% update record with leader pid - set_leader_pid(LeaderPid, amqqueue:get_name(Q)), - rabbit_event:notify(queue_created, - [{name, QName}, - {durable, true}, - {auto_delete, false}, - {arguments, Arguments}, - {type, amqqueue:get_type(Q1)}, - {user_who_performed_action, - ActingUser}]), - {new, Q}; + case set_leader_pid(LeaderPid, amqqueue:get_name(Q)) of + ok -> + rabbit_event:notify(queue_created, + [{name, QName}, + {durable, true}, + {auto_delete, false}, + {arguments, Arguments}, + {type, amqqueue:get_type(Q1)}, + {user_who_performed_action, + ActingUser}]), + {new, Q}; + {error, timeout} -> + {protocol_error, internal_error, + "Could not set leader PID for ~ts on node '~ts' " + "because the metadata store operation timed out", + [rabbit_misc:rs(QName), node()]} + end; Error -> _ = rabbit_amqqueue:internal_delete(Q, ActingUser), - {protocol_error, internal_error, "Cannot declare a queue '~ts' on node '~ts': ~255p", + {protocol_error, internal_error, "Cannot declare ~ts on node '~ts': ~255p", [rabbit_misc:rs(QName), node(), Error]} end; {existing, Q} -> {existing, Q}; {absent, Q, Reason} -> - {absent, Q, Reason} + {absent, Q, Reason}; + {error, timeout} -> + {protocol_error, internal_error, + "Could not declare ~ts on node '~ts' because the metadata store " + "operation timed out", + [rabbit_misc:rs(QName), node()]} end. -spec delete(amqqueue:amqqueue(), boolean(), @@ -224,55 +247,94 @@ stat(Q) -> {ok, i(messages, Q), 0} end. 
-consume(Q, #{prefetch_count := 0}, _) +format(Q, Ctx) -> + case amqqueue:get_pid(Q) of + Pid when is_pid(Pid) -> + LeaderNode = node(Pid), + Nodes = lists:sort(get_nodes(Q)), + Running = case Ctx of + #{running_nodes := Running0} -> + Running0; + _ -> + %% WARN: slow + rabbit_nodes:list_running() + end, + Online = [N || N <- Nodes, lists:member(N, Running)], + State = case is_minority(Nodes, Online) of + true when length(Online) == 0 -> + down; + true -> + minority; + false -> + case lists:member(LeaderNode, Online) of + true -> + running; + false -> + down + end + end, + [{type, stream}, + {state, State}, + {leader, LeaderNode}, + {online, Online}, + {members, Nodes}, + {node, node(Pid)}]; + _ -> + [{type, stream}, + {state, down}] + end. + +consume(Q, #{mode := {simple_prefetch, 0}}, _) when ?amqqueue_is_stream(Q) -> - {protocol_error, precondition_failed, "consumer prefetch count is not set for '~ts'", + {protocol_error, precondition_failed, "consumer prefetch count is not set for stream ~ts", [rabbit_misc:rs(amqqueue:get_name(Q))]}; -consume(Q, #{no_ack := true}, _) +consume(Q, #{no_ack := true, + mode := {simple_prefetch, _}}, _) when ?amqqueue_is_stream(Q) -> {protocol_error, not_implemented, - "automatic acknowledgement not supported by stream queues ~ts", + "automatic acknowledgement not supported by stream ~ts", [rabbit_misc:rs(amqqueue:get_name(Q))]}; consume(Q, #{limiter_active := true}, _State) when ?amqqueue_is_stream(Q) -> {error, global_qos_not_supported_for_queue_type}; -consume(Q, Spec, - #stream_client{filtering_supported = FilteringSupported} = QState0) when ?amqqueue_is_stream(Q) -> +consume(Q, Spec, #stream_client{} = QState0) + when ?amqqueue_is_stream(Q) -> %% Messages should include the offset as a custom header. - case check_queue_exists_in_local_node(Q) of - ok -> + case get_local_pid(QState0) of + {LocalPid, QState} when is_pid(LocalPid) -> #{no_ack := NoAck, channel_pid := ChPid, - prefetch_count := ConsumerPrefetchCount, + mode := Mode, consumer_tag := ConsumerTag, exclusive_consume := ExclusiveConsume, args := Args, ok_msg := OkMsg} = Spec, QName = amqqueue:get_name(Q), - case parse_offset_arg(rabbit_misc:table_lookup(Args, <<"x-stream-offset">>)) of + rabbit_log:debug("~s:~s Local pid resolved ~0p", + [?MODULE, ?FUNCTION_NAME, LocalPid]), + case parse_offset_arg( + rabbit_misc:table_lookup(Args, <<"x-stream-offset">>)) of {error, _} = Err -> Err; {ok, OffsetSpec} -> - _ = rabbit_stream_coordinator:register_local_member_listener(Q), - rabbit_core_metrics:consumer_created(ChPid, ConsumerTag, ExclusiveConsume, - not NoAck, QName, - ConsumerPrefetchCount, false, - up, Args), - %% FIXME: reply needs to be sent before the stream begins sending - %% really it should be sent by the stream queue process like classic queues - %% do + ConsumerPrefetchCount = case Mode of + {simple_prefetch, C} -> C; + _ -> 0 + end, + AckRequired = not NoAck, + rabbit_core_metrics:consumer_created( + ChPid, ConsumerTag, ExclusiveConsume, AckRequired, + QName, ConsumerPrefetchCount, false, up, Args), + %% reply needs to be sent before the stream + %% begins sending maybe_send_reply(ChPid, OkMsg), - FilterSpec = filter_spec(Args), - case {FilterSpec, FilteringSupported} of - {#{filter_spec := _}, false} -> - {protocol_error, precondition_failed, "Filtering is not supported", []}; - _ -> - begin_stream(QState0, ConsumerTag, OffsetSpec, - ConsumerPrefetchCount, FilterSpec) - end + _ = rabbit_stream_coordinator:register_local_member_listener(Q), + begin_stream(QState, ConsumerTag, 
OffsetSpec, Mode, AckRequired, filter_spec(Args)) end; - Err -> - Err + {undefined, _} -> + {protocol_error, precondition_failed, + "stream ~ts does not have a running replica on the local node", + [rabbit_misc:rs(amqqueue:get_name(Q))]} end. -spec parse_offset_arg(undefined | @@ -334,13 +396,20 @@ filter_spec(Args) -> get_local_pid(#stream_client{local_pid = Pid} = State) when is_pid(Pid) -> - {Pid, State}; + case erlang:is_process_alive(Pid) of + true -> + {Pid, State}; + false -> + query_local_pid(State) + end; get_local_pid(#stream_client{leader = Pid} = State) when is_pid(Pid) andalso node(Pid) == node() -> - {Pid, State#stream_client{local_pid = Pid}}; -get_local_pid(#stream_client{stream_id = StreamId, - local_pid = undefined} = State) -> + get_local_pid(State#stream_client{local_pid = Pid}); +get_local_pid(#stream_client{} = State) -> %% query local coordinator to get pid + query_local_pid(State). + +query_local_pid(#stream_client{stream_id = StreamId} = State) -> case rabbit_stream_coordinator:local_pid(StreamId) of {ok, Pid} -> {Pid, State#stream_client{local_pid = Pid}}; @@ -348,37 +417,43 @@ get_local_pid(#stream_client{stream_id = StreamId, {undefined, State} end. -begin_stream(#stream_client{name = QName, readers = Readers0} = State0, - Tag, Offset, Max, Options) -> - {LocalPid, State} = get_local_pid(State0), - case LocalPid of - undefined -> - {error, no_local_stream_replica_available}; - _ -> - CounterSpec = {{?MODULE, QName, Tag, self()}, []}, - {ok, Seg0} = osiris:init_reader(LocalPid, Offset, CounterSpec, Options), - NextOffset = osiris_log:next_offset(Seg0) - 1, - osiris:register_offset_listener(LocalPid, NextOffset), - %% TODO: avoid double calls to the same process - StartOffset = case Offset of - first -> NextOffset; - last -> NextOffset; - next -> NextOffset; - {timestamp, _} -> NextOffset; - _ -> Offset - end, - Str0 = #stream{credit = Max, - start_offset = StartOffset, - listening_offset = NextOffset, - log = Seg0, - max = Max, - reader_options = Options}, - {ok, State#stream_client{local_pid = LocalPid, - readers = Readers0#{Tag => Str0}}} - end. - -cancel(_Q, ConsumerTag, OkMsg, ActingUser, #stream_client{readers = Readers0, - name = QName} = State) -> +begin_stream(#stream_client{name = QName, + readers = Readers0, + local_pid = LocalPid} = State, + Tag, Offset, Mode, AckRequired, Options) + when is_pid(LocalPid) -> + CounterSpec = {{?MODULE, QName, Tag, self()}, []}, + {ok, Seg0} = osiris:init_reader(LocalPid, Offset, CounterSpec, Options), + NextOffset = osiris_log:next_offset(Seg0) - 1, + osiris:register_offset_listener(LocalPid, NextOffset), + StartOffset = case Offset of + first -> NextOffset; + last -> NextOffset; + next -> NextOffset; + {timestamp, _} -> NextOffset; + _ -> Offset + end, + {DeliveryCount, Credit} = case Mode of + {simple_prefetch, N} -> + {none, N}; + {credited, InitialDC} -> + {InitialDC, 0} + end, + Str0 = #stream{mode = Mode, + delivery_count = DeliveryCount, + credit = Credit, + ack = AckRequired, + start_offset = StartOffset, + listening_offset = NextOffset, + last_consumed_offset = StartOffset, + log = Seg0, + reader_options = Options}, + {ok, State#stream_client{readers = Readers0#{Tag => Str0}}}. 
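begin_stream/6 seeds the per-consumer state from the consume mode: simple_prefetch consumers start with their whole prefetch as credit and no delivery count, while credited consumers (AMQP 1.0 flow control) start at zero credit with the delivery count handed over by the client. A sketch of that mapping, with invented values in the comments:

    %% sketch: how the consume mode seeds {DeliveryCount, Credit}
    init_credit({simple_prefetch, Prefetch}) -> {none, Prefetch};
    init_credit({credited, InitialDC})       -> {InitialDC, 0}.
    %% init_credit({simple_prefetch, 100}) =:= {none, 100}
    %% init_credit({credited, 0})          =:= {0, 0}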
+ +cancel(_Q, #{consumer_tag := ConsumerTag, + user := ActingUser} = Spec, + #stream_client{readers = Readers0, + name = QName} = State) -> case maps:take(ConsumerTag, Readers0) of {#stream{log = Log}, Readers} -> ok = close_log(Log), @@ -388,45 +463,57 @@ cancel(_Q, ConsumerTag, OkMsg, ActingUser, #stream_client{readers = Readers0, {channel, self()}, {queue, QName}, {user_who_performed_action, ActingUser}]), - maybe_send_reply(self(), OkMsg), + maybe_send_reply(self(), maps:get(ok_msg, Spec, undefined)), {ok, State#stream_client{readers = Readers}}; error -> {ok, State} end. -credit(QName, CTag, Credit, Drain, #stream_client{readers = Readers0, - name = Name, - local_pid = LocalPid} = State) -> - {Readers1, Msgs} = case Readers0 of - #{CTag := #stream{credit = Credit0} = Str0} -> - Str1 = Str0#stream{credit = Credit0 + Credit}, - {Str, Msgs0} = stream_entries(QName, Name, LocalPid, Str1), - {Readers0#{CTag => Str}, Msgs0}; - _ -> - {Readers0, []} - end, - {Readers, Actions} = - case Drain of - true -> - case Readers1 of - #{CTag := #stream{credit = Credit1} = Str2} -> - {Readers0#{CTag => Str2#stream{credit = 0}}, [{send_drained, {CTag, Credit1}}]}; - _ -> - {Readers1, []} - end; - false -> - {Readers1, []} - end, - {State#stream_client{readers = Readers}, - [{send_credit_reply, length(Msgs)}, - {deliver, CTag, true, Msgs}] ++ Actions}. +-dialyzer({nowarn_function, credit_v1/5}). +credit_v1(_, _, _, _, _) -> + erlang:error(credit_v1_unsupported). + +credit(QName, CTag, DeliveryCountRcv, LinkCreditRcv, Drain, + #stream_client{readers = Readers, + name = Name, + local_pid = LocalPid} = State0) -> + case Readers of + #{CTag := Str0 = #stream{delivery_count = DeliveryCountSnd}} -> + LinkCreditSnd = amqp10_util:link_credit_snd( + DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd), + Str1 = Str0#stream{credit = LinkCreditSnd}, + {Str2 = #stream{delivery_count = DeliveryCount, + credit = Credit, + ack = Ack}, Msgs} = stream_entries(QName, Name, LocalPid, Str1), + Str = case Drain andalso Credit > 0 of + true -> + Str2#stream{delivery_count = serial_number:add(DeliveryCount, Credit), + credit = 0}; + false -> + Str2 + end, + State = State0#stream_client{readers = maps:update(CTag, Str, Readers)}, + Actions = deliver_actions(CTag, Ack, Msgs) ++ [{credit_reply, + CTag, + Str#stream.delivery_count, + Str#stream.credit, + available_messages(Str), + Drain}], + {State, Actions}; + _ -> + {State0, []} + end. + +%% Returns only an approximation. +available_messages(#stream{log = Log, + last_consumed_offset = LastConsumedOffset}) -> + max(0, osiris_log:committed_offset(Log) - LastConsumedOffset). 
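credit/6 above derives the sender-side link credit the way the AMQP 1.0 flow-control rules (spec section 2.6.7) describe; ignoring 32-bit serial-number wrap-around, the arithmetic behind amqp10_util:link_credit_snd/3 is:

    %% sketch of the flow-control arithmetic, wrap-around ignored
    link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd) ->
        DeliveryCountRcv + LinkCreditRcv - DeliveryCountSnd.
    %% e.g. the receiver last saw delivery-count 10 and grants 50 credits while
    %% the sender has meanwhile advanced to 15: 10 + 50 - 15 = 45 deliveries left.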
deliver(QSs, Msg, Options) -> lists:foldl( fun({Q, stateless}, {Qs, Actions}) -> LeaderPid = amqqueue:get_pid(Q), - ok = osiris:write(LeaderPid, - stream_message(Msg, filtering_supported())), + ok = osiris:write(LeaderPid, stream_message(Msg)), {Qs, Actions}; ({Q, S0}, {Qs, Actions0}) -> {S, Actions} = deliver0(maps:get(correlation, Options, undefined), @@ -441,15 +528,13 @@ deliver0(MsgId, Msg, next_seq = Seq, correlation = Correlation0, soft_limit = SftLmt, - slow = Slow0, - filtering_supported = FilteringSupported} = State, - Actions0) -> - ok = osiris:write(LeaderPid, WriterId, Seq, - stream_message(Msg, FilteringSupported)), + slow = Slow0} = State, + Actions0) -> + ok = osiris:write(LeaderPid, WriterId, Seq, stream_message(Msg)), Correlation = case MsgId of undefined -> Correlation0; - _ when is_number(MsgId) -> + _ -> Correlation0#{Seq => {MsgId, Msg}} end, {Slow, Actions} = case maps:size(Correlation) >= SftLmt of @@ -462,16 +547,15 @@ deliver0(MsgId, Msg, correlation = Correlation, slow = Slow}, Actions}. -stream_message(Msg, _FilteringSupported = true) -> - MsgData = msg_to_iodata(Msg), - case mc:x_header(<<"x-stream-filter-value">>, Msg) of +stream_message(Msg) -> + McAmqp = mc:convert(mc_amqp, Msg), + MsgData = mc:protocol_state(McAmqp), + case mc:x_header(<<"x-stream-filter-value">>, McAmqp) of undefined -> MsgData; {utf8, Value} -> {Value, MsgData} - end; -stream_message(Msg, _FilteringSupported = false) -> - msg_to_iodata(Msg). + end. -spec dequeue(_, _, _, _, client()) -> no_return(). dequeue(_, _, _, _, #stream_client{name = Name}) -> @@ -479,46 +563,50 @@ dequeue(_, _, _, _, #stream_client{name = Name}) -> [rabbit_misc:rs(Name)]}. handle_event(_QName, {osiris_written, From, _WriterId, Corrs}, - State = #stream_client{correlation = Correlation0, - soft_limit = SftLmt, - slow = Slow0, - name = Name}) -> + State0 = #stream_client{correlation = Correlation0, + soft_limit = SftLmt, + slow = Slow0, + name = Name}) -> MsgIds = lists:sort(maps:fold( fun (_Seq, {I, _M}, Acc) -> [I | Acc] end, [], maps:with(Corrs, Correlation0))), Correlation = maps:without(Corrs, Correlation0), - {Slow, Actions} = case maps:size(Correlation) < SftLmt of - true when Slow0 -> - {false, [{unblock, Name}]}; - _ -> - {Slow0, []} - end, - {ok, State#stream_client{correlation = Correlation, - slow = Slow}, [{settled, From, MsgIds} | Actions]}; + {Slow, Actions0} = case maps:size(Correlation) < SftLmt of + true when Slow0 -> + {false, [{unblock, Name}]}; + _ -> + {Slow0, []} + end, + Actions = case MsgIds of + [] -> Actions0; + [_|_] -> [{settled, From, MsgIds} | Actions0] + end, + State = State0#stream_client{correlation = Correlation, + slow = Slow}, + {ok, State, Actions}; handle_event(QName, {osiris_offset, _From, _Offs}, State = #stream_client{local_pid = LocalPid, readers = Readers0, name = Name}) -> %% offset isn't actually needed as we use the atomic to read the %% current committed - {Readers, TagMsgs} = maps:fold( - fun (Tag, Str0, {Acc, TM}) -> + {Readers, Actions} = maps:fold( + fun (Tag, Str0, {Rds, As}) -> {Str, Msgs} = stream_entries(QName, Name, LocalPid, Str0), - {Acc#{Tag => Str}, [{Tag, LocalPid, Msgs} | TM]} + {Rds#{Tag => Str}, deliver_actions(Tag, Str#stream.ack, Msgs) ++ As} end, {#{}, []}, Readers0), - Ack = true, - Deliveries = [{deliver, Tag, Ack, OffsetMsg} - || {Tag, _LeaderPid, OffsetMsg} <- TagMsgs], - {ok, State#stream_client{readers = Readers}, Deliveries}; + {ok, State#stream_client{readers = Readers}, Actions}; handle_event(_QName, {stream_leader_change, Pid}, State) -> 
{ok, update_leader_pid(Pid, State), []}; -handle_event(_QName, {stream_local_member_change, Pid}, #stream_client{local_pid = P} = State) +handle_event(_QName, {stream_local_member_change, Pid}, + #stream_client{local_pid = P} = State) when P == Pid -> {ok, State, []}; -handle_event(_QName, {stream_local_member_change, Pid}, State = #stream_client{name = QName, - readers = Readers0}) -> +handle_event(_QName, {stream_local_member_change, Pid}, + #stream_client{name = QName, + readers = Readers0} = State) -> rabbit_log:debug("Local member change event for ~tp", [QName]), Readers1 = maps:fold(fun(T, #stream{log = Log0, reader_options = Options} = S0, Acc) -> Offset = osiris_log:next_offset(Log0), @@ -553,23 +641,23 @@ recover(_VHost, Queues) -> {[Q | R0], F0} end, {[], []}, Queues). -settle(QName, complete, CTag, MsgIds, #stream_client{readers = Readers0, - local_pid = LocalPid, - name = Name} = State) -> - Credit = length(MsgIds), - {Readers, Msgs} = case Readers0 of - #{CTag := #stream{credit = Credit0} = Str0} -> - Str1 = Str0#stream{credit = Credit0 + Credit}, - {Str, Msgs0} = stream_entries(QName, Name, LocalPid, Str1), - {Readers0#{CTag => Str}, Msgs0}; - _ -> - {Readers0, []} - end, - {State#stream_client{readers = Readers}, [{deliver, CTag, true, Msgs}]}; -settle(_, _, _, _, #stream_client{name = Name}) -> - {protocol_error, not_implemented, - "basic.nack and basic.reject not supported by stream queues ~ts", - [rabbit_misc:rs(Name)]}. +settle(QName, _, CTag, MsgIds, #stream_client{readers = Readers0, + local_pid = LocalPid, + name = Name} = State) -> + case Readers0 of + #{CTag := #stream{mode = {simple_prefetch, _MaxCredit}, + ack = Ack, + credit = Credit0} = Str0} -> + %% all settle reasons will "give credit" to the stream queue + Credit = length(MsgIds), + Str1 = Str0#stream{credit = Credit0 + Credit}, + {Str, Msgs} = stream_entries(QName, Name, LocalPid, Str1), + Readers = maps:update(CTag, Str, Readers0), + {State#stream_client{readers = Readers}, + deliver_actions(CTag, Ack, Msgs)}; + _ -> + {State, []} + end. info(Q, all_keys) -> info(Q, ?INFO_KEYS); @@ -672,6 +760,14 @@ i(committed_offset, Q) -> Data -> maps:get(committed_offset, Data, '') end; +i(segments, Q) -> + Key = {osiris_writer, amqqueue:get_name(Q)}, + case osiris_counters:overview(Key) of + undefined -> + ''; + Data -> + maps:get(segments, Data, '') + end; i(policy, Q) -> case rabbit_policy:name(Q) of none -> ''; @@ -835,8 +931,7 @@ init(Q) when ?is_amqqueue(Q) -> name = amqqueue:get_name(Q), leader = Leader, writer_id = WriterId, - soft_limit = SoftLimit, - filtering_supported = filtering_supported()}}; + soft_limit = SoftLimit}}; {ok, stream_not_found, _} -> {error, stream_not_found}; {error, coordinator_unavailable} = E -> @@ -868,7 +963,7 @@ set_retention_policy(Name, VHost, Policy) -> {error, _} = E -> E; MaxAge -> - QName = rabbit_misc:r(VHost, queue, Name), + QName = queue_resource(VHost, Name), Fun = fun(Q) -> Conf = amqqueue:get_type_state(Q), amqqueue:set_type_state(Q, Conf#{max_age => MaxAge}) @@ -899,7 +994,7 @@ restart_stream(VHost, Queue, Options) add_replica(VHost, Name, Node) -> - QName = rabbit_misc:r(VHost, queue, Name), + QName = queue_resource(VHost, Name), case rabbit_amqqueue:lookup(QName) of {ok, Q} when ?amqqueue_is_classic(Q) -> {error, classic_queue_not_supported}; @@ -917,25 +1012,39 @@ add_replica(VHost, Name, Node) -> end. 
delete_replica(VHost, Name, Node) -> - QName = rabbit_misc:r(VHost, queue, Name), + QName = queue_resource(VHost, Name), case rabbit_amqqueue:lookup(QName) of {ok, Q} when ?amqqueue_is_classic(Q) -> {error, classic_queue_not_supported}; {ok, Q} when ?amqqueue_is_quorum(Q) -> {error, quorum_queue_not_supported}; {ok, Q} when ?amqqueue_is_stream(Q) -> - case lists:member(Node, rabbit_nodes:list_running()) of - false -> - {error, node_not_running}; - true -> - #{name := StreamId} = amqqueue:get_type_state(Q), - {ok, Reply, _} = rabbit_stream_coordinator:delete_replica(StreamId, Node), - Reply - end; + #{name := StreamId} = amqqueue:get_type_state(Q), + {ok, Reply, _} = rabbit_stream_coordinator:delete_replica(StreamId, Node), + Reply; E -> E end. +delete_all_replicas(Node) -> + rabbit_log:info("Asked to remove all stream replicas from node ~ts", [Node]), + Streams = rabbit_amqqueue:list_stream_queues_on(Node), + lists:map(fun(Q) -> + QName = amqqueue:get_name(Q), + rabbit_log:info("~ts: removing replica on node ~w", + [rabbit_misc:rs(QName), Node]), + #{name := StreamId} = amqqueue:get_type_state(Q), + {ok, Reply, _} = rabbit_stream_coordinator:delete_replica(StreamId, Node), + case Reply of + ok -> + {QName, ok}; + Err -> + rabbit_log:warning("~ts: failed to remove replica on node ~w, error: ~w", + [rabbit_misc:rs(QName), Node, Err]), + {QName, {error, Err}} + end + end, Streams). + make_stream_conf(Q) -> QName = amqqueue:get_name(Q), Name = stream_name(QName), @@ -993,86 +1102,167 @@ stream_name(#resource{virtual_host = VHost, name = Name}) -> recover(Q) -> {ok, Q}. -check_queue_exists_in_local_node(Q) -> - #{name := StreamId} = amqqueue:get_type_state(Q), - case rabbit_stream_coordinator:local_pid(StreamId) of - {ok, Pid} when is_pid(Pid) -> - ok; - _ -> - {protocol_error, precondition_failed, - "queue '~ts' does not have a replica on the local node", - [rabbit_misc:rs(amqqueue:get_name(Q))]} - end. - maybe_send_reply(_ChPid, undefined) -> ok; maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg). +stream_entries(QName, Name, LocalPid, + #stream{chunk_iterator = undefined, + credit = Credit} = Str0) -> + case Credit > 0 of + true -> + case chunk_iterator(Str0, LocalPid) of + {ok, Str} -> + stream_entries(QName, Name, LocalPid, Str); + {end_of_stream, Str} -> + {Str, []} + end; + false -> + {Str0, []} + end; +stream_entries(QName, Name, LocalPid, + #stream{delivery_count = DC, + credit = Credit, + buffer_msgs_rev = Buf0, + last_consumed_offset = LastOff} = Str0) + when Credit > 0 andalso Buf0 =/= [] -> + BufLen = length(Buf0), + case Credit =< BufLen of + true -> + %% Entire credit worth of messages can be served from the buffer. + {Buf, BufMsgsRev} = lists:split(BufLen - Credit, Buf0), + {Str0#stream{delivery_count = delivery_count_add(DC, Credit), + credit = 0, + buffer_msgs_rev = Buf, + last_consumed_offset = LastOff + Credit}, + lists:reverse(BufMsgsRev)}; + false -> + Str = Str0#stream{delivery_count = delivery_count_add(DC, BufLen), + credit = Credit - BufLen, + buffer_msgs_rev = [], + last_consumed_offset = LastOff + BufLen}, + stream_entries(QName, Name, LocalPid, Str, Buf0) + end; stream_entries(QName, Name, LocalPid, Str) -> stream_entries(QName, Name, LocalPid, Str, []). 
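Because buffer_msgs_rev holds the read-ahead sub-batch messages newest-first, serving part of the buffer is a split from the front followed by a reverse, as in the Credit =< BufLen branch above. A toy example with atoms in place of real messages:

    1> Buf0 = [m5, m4, m3, m2, m1], Credit = 2.
    2> {Buf, BufMsgsRev} = lists:split(length(Buf0) - Credit, Buf0).
    {[m5,m4,m3],[m2,m1]}
    3> lists:reverse(BufMsgsRev).
    [m1,m2]    %% the two oldest buffered messages are delivered; [m5,m4,m3] stays buffered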
+stream_entries(_, _, _, #stream{credit = Credit} = Str, Acc) + when Credit < 1 -> + {Str, lists:reverse(Acc)}; stream_entries(QName, Name, LocalPid, - #stream{credit = Credit, - start_offset = StartOffs, - listening_offset = LOffs, - log = Seg0} = Str0, MsgIn) - when Credit > 0 -> - case osiris_log:read_chunk_parsed(Seg0) of - {end_of_stream, Seg} -> - NextOffset = osiris_log:next_offset(Seg), - case NextOffset > LOffs of - true -> - osiris:register_offset_listener(LocalPid, NextOffset), - {Str0#stream{log = Seg, - listening_offset = NextOffset}, MsgIn}; - false -> - {Str0#stream{log = Seg}, MsgIn} + #stream{chunk_iterator = Iter0, + delivery_count = DC, + credit = Credit, + start_offset = StartOffset} = Str0, Acc0) -> + case osiris_log:iterator_next(Iter0) of + end_of_chunk -> + case chunk_iterator(Str0, LocalPid) of + {ok, Str} -> + stream_entries(QName, Name, LocalPid, Str, Acc0); + {end_of_stream, Str} -> + {Str, lists:reverse(Acc0)} end; + {{Offset, Entry}, Iter} -> + {Str, Acc} = case Entry of + {batch, _NumRecords, 0, _Len, BatchedEntries} -> + {MsgsRev, NumMsgs} = parse_uncompressed_subbatch( + BatchedEntries, Offset, StartOffset, + QName, Name, LocalPid, {[], 0}), + case Credit >= NumMsgs of + true -> + {Str0#stream{chunk_iterator = Iter, + delivery_count = delivery_count_add(DC, NumMsgs), + credit = Credit - NumMsgs, + last_consumed_offset = Offset + NumMsgs - 1}, + MsgsRev ++ Acc0}; + false -> + %% Consumer doesn't have sufficient credit. + %% Buffer the remaining messages. + [] = Str0#stream.buffer_msgs_rev, % assertion + {Buf, MsgsRev1} = lists:split(NumMsgs - Credit, MsgsRev), + {Str0#stream{chunk_iterator = Iter, + delivery_count = delivery_count_add(DC, Credit), + credit = 0, + buffer_msgs_rev = Buf, + last_consumed_offset = Offset + Credit - 1}, + MsgsRev1 ++ Acc0} + end; + {batch, _, _CompressionType, _, _} -> + %% Skip compressed sub batch. + %% It can only be consumed by Stream protocol clients. + {Str0#stream{chunk_iterator = Iter}, Acc0}; + _SimpleEntry -> + case Offset >= StartOffset of + true -> + Msg = entry_to_msg(Entry, Offset, QName, Name, LocalPid), + {Str0#stream{chunk_iterator = Iter, + delivery_count = delivery_count_add(DC, 1), + credit = Credit - 1, + last_consumed_offset = Offset}, + [Msg | Acc0]}; + false -> + {Str0#stream{chunk_iterator = Iter}, Acc0} + end + end, + stream_entries(QName, Name, LocalPid, Str, Acc) + end. 
+ +chunk_iterator(#stream{credit = Credit, + listening_offset = LOffs, + log = Log0} = Str0, LocalPid) -> + case osiris_log:chunk_iterator(Log0, Credit) of + {ok, _ChunkHeader, Iter, Log} -> + {ok, Str0#stream{chunk_iterator = Iter, + log = Log}}; + {end_of_stream, Log} -> + NextOffset = osiris_log:next_offset(Log), + Str = case NextOffset > LOffs of + true -> + osiris:register_offset_listener(LocalPid, NextOffset), + Str0#stream{log = Log, + listening_offset = NextOffset}; + false -> + Str0#stream{log = Log} + end, + {end_of_stream, Str}; {error, Err} -> - rabbit_log:debug("stream client: error reading chunk ~w", [Err]), - exit(Err); - {Records, Seg} -> - Msgs = [begin - Msg0 = binary_to_msg(QName, B), - Msg = mc:set_annotation(<<"x-stream-offset">>, O, Msg0), - {Name, LocalPid, O, false, Msg} - end || {O, B} <- Records, - O >= StartOffs], - - NumMsgs = length(Msgs), - - Str = Str0#stream{credit = Credit - NumMsgs, - log = Seg}, - case Str#stream.credit < 1 of - true -> - %% we are done here - {Str, MsgIn ++ Msgs}; - false -> - %% if there are fewer Msgs than Entries0 it means there were non-events - %% in the log and we should recurse and try again - stream_entries(QName, Name, LocalPid, Str, MsgIn ++ Msgs) - end - end; -stream_entries(_QName, _Name, _LocalPid, Str, Msgs) -> - {Str, Msgs}. + rabbit_log:info("stream client: failed to create chunk iterator ~p", [Err]), + exit(Err) + end. -binary_to_msg(#resource{kind = queue, - name = QName}, Data) -> - Mc0 = mc:init(mc_amqp, amqp10_framing:decode_bin(Data), #{}), - %% If exchange or routing_keys annotation isn't present the data most likely came +%% Deliver each record of an uncompressed sub batch individually. +parse_uncompressed_subbatch(<<>>, _Offset, _StartOffset, _QName, _Name, _LocalPid, Acc) -> + Acc; +parse_uncompressed_subbatch( + <<0:1, %% simple entry + Len:31/unsigned, + Entry:Len/binary, + Rem/binary>>, + Offset, StartOffset, QName, Name, LocalPid, Acc0 = {AccList, AccCount}) -> + Acc = case Offset >= StartOffset of + true -> + Msg = entry_to_msg(Entry, Offset, QName, Name, LocalPid), + {[Msg | AccList], AccCount + 1}; + false -> + Acc0 + end, + parse_uncompressed_subbatch(Rem, Offset + 1, StartOffset, QName, Name, LocalPid, Acc). + +entry_to_msg(Entry, Offset, #resource{kind = queue, + name = QName}, Name, LocalPid) -> + Mc0 = mc:init(mc_amqp, Entry, #{}), + %% If exchange or routing_keys annotation isn't present the entry most likely came %% from the rabbitmq-stream plugin so we'll choose defaults that simulate use %% of the direct exchange. - Mc = case mc:get_annotation(exchange, Mc0) of - undefined -> mc:set_annotation(exchange, <<>>, Mc0); - _ -> Mc0 - end, - case mc:get_annotation(routing_keys, Mc) of - undefined -> mc:set_annotation(routing_keys, [QName], Mc); - _ -> Mc - end. - -msg_to_iodata(Msg0) -> - Sections = mc:protocol_state(mc:convert(mc_amqp, Msg0)), - mc_amqp:serialize(Sections). + Mc1 = case mc:exchange(Mc0) of + undefined -> mc:set_annotation(?ANN_EXCHANGE, <<>>, Mc0); + _ -> Mc0 + end, + Mc2 = case mc:routing_keys(Mc1) of + [] -> mc:set_annotation(?ANN_ROUTING_KEYS, [QName], Mc1); + _ -> Mc1 + end, + Mc = mc:set_annotation(<<"x-stream-offset">>, Offset, Mc2), + {Name, LocalPid, Offset, false, Mc}. 
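Each record inside an uncompressed sub batch is a "simple entry": a zero flag bit, a 31-bit payload length and the payload itself, which is exactly what the binary pattern in parse_uncompressed_subbatch/7 matches. A round trip with a made-up payload:

    1> Payload = <<"hello">>.
    2> Bin = <<0:1, (byte_size(Payload)):31, Payload/binary>>.
    3> <<0:1, Len:31/unsigned, Entry:Len/binary, Rest/binary>> = Bin.
    4> {Len, Entry, Rest}.
    {5,<<"hello">>,<<>>}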
capabilities() -> #{unsupported_policies => [%% Classic policies @@ -1085,11 +1275,13 @@ capabilities() -> <<"ha-promote-on-shutdown">>, <<"ha-promote-on-failure">>, <<"queue-master-locator">>, %% Quorum policies - <<"dead-letter-strategy">>], + <<"dead-letter-strategy">>, <<"target-group-size">>], queue_arguments => [<<"x-max-length-bytes">>, <<"x-queue-type">>, <<"x-max-age">>, <<"x-stream-max-segment-size-bytes">>, <<"x-initial-cluster-size">>, <<"x-queue-leader-locator">>], - consumer_arguments => [<<"x-stream-offset">>, <<"x-credit">>], + consumer_arguments => [<<"x-stream-offset">>, + <<"x-stream-filter">>, + <<"x-stream-match-unfiltered">>], server_named => false}. notify_decorators(Q) when ?is_amqqueue(Q) -> @@ -1098,8 +1290,7 @@ notify_decorators(Q) when ?is_amqqueue(Q) -> resend_all(#stream_client{leader = LeaderPid, writer_id = WriterId, - correlation = Corrs, - filtering_supported = FilteringSupported} = State) -> + correlation = Corrs} = State) -> Msgs = lists:sort(maps:values(Corrs)), case Msgs of [] -> ok; @@ -1108,12 +1299,17 @@ resend_all(#stream_client{leader = LeaderPid, [Seq, maps:size(Corrs)]) end, [begin - ok = osiris:write(LeaderPid, WriterId, Seq, - stream_message(Msg, FilteringSupported)) + ok = osiris:write(LeaderPid, WriterId, Seq, stream_message(Msg)) end || {Seq, Msg} <- Msgs], State. +-spec set_leader_pid(Pid, QName) -> Ret when + Pid :: pid(), + QName :: rabbit_amqqueue:name(), + Ret :: ok | {error, timeout}. + set_leader_pid(Pid, QName) -> + %% TODO this should probably be a single khepri transaction for better performance. Fun = fun (Q) -> amqqueue:set_pid(Q, Pid) end, @@ -1143,5 +1339,20 @@ list_with_minimum_quorum() -> is_stateful() -> true. -filtering_supported() -> - rabbit_feature_flags:is_enabled(stream_filtering). +get_nodes(Q) when ?is_amqqueue(Q) -> + #{nodes := Nodes} = amqqueue:get_type_state(Q), + Nodes. + +is_minority(All, Up) -> + MinQuorum = length(All) div 2 + 1, + length(Up) < MinQuorum. + +deliver_actions(_, _, []) -> + []; +deliver_actions(CTag, Ack, Msgs) -> + [{deliver, CTag, Ack, Msgs}]. + +delivery_count_add(none, _) -> + none; +delivery_count_add(Count, N) -> + serial_number:add(Count, N). diff --git a/deps/rabbit/src/rabbit_stream_sac_coordinator.erl b/deps/rabbit/src/rabbit_stream_sac_coordinator.erl index f8540c09f7ac..9e46085ed9d1 100644 --- a/deps/rabbit/src/rabbit_stream_sac_coordinator.erl +++ b/deps/rabbit/src/rabbit_stream_sac_coordinator.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2021-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_sac_coordinator). @@ -629,45 +629,22 @@ handle_consumer_removal(Group0, Consumer, Stream, ConsumerName) -> end end. -message_type() -> - case has_unblock_group_support() of - true -> - map; - false -> - tuple - end. - notify_consumer_effect(Pid, SubId, Stream, Name, Active) -> notify_consumer_effect(Pid, SubId, Stream, Name, Active, false). -notify_consumer_effect(Pid, SubId, Stream, Name, Active, SteppingDown) -> - notify_consumer_effect(Pid, SubId, Stream, Name, Active, SteppingDown, message_type()). 
- -notify_consumer_effect(Pid, SubId, _Stream, _Name, Active, false = _SteppingDown, tuple) -> - mod_call_effect(Pid, - {sac, - {{subscription_id, SubId}, - {active, Active}, - {extra, []}}}); -notify_consumer_effect(Pid, SubId, _Stream, _Name, Active, true = _SteppingDown, tuple) -> - mod_call_effect(Pid, - {sac, - {{subscription_id, SubId}, - {active, Active}, - {extra, [{stepping_down, true}]}}}); -notify_consumer_effect(Pid, SubId, Stream, Name, Active, false = _SteppingDown, map) -> +notify_consumer_effect(Pid, SubId, Stream, Name, Active, false = _SteppingDown) -> mod_call_effect(Pid, {sac, #{subscription_id => SubId, stream => Stream, consumer_name => Name, active => Active}}); -notify_consumer_effect(Pid, SubId, Stream, Name, Active, true = _SteppingDown, map) -> +notify_consumer_effect(Pid, SubId, Stream, Name, Active, true = SteppingDown) -> mod_call_effect(Pid, {sac, #{subscription_id => SubId, stream => Stream, consumer_name => Name, active => Active, - stepping_down => true}}). + stepping_down => SteppingDown}}). maybe_create_group(VirtualHost, Stream, @@ -776,6 +753,3 @@ mod_call_effect(Pid, Msg) -> send_message(ConnectionPid, Msg) -> ConnectionPid ! Msg, ok. - -has_unblock_group_support() -> - rabbit_feature_flags:is_enabled(stream_sac_coordinator_unblock_group). diff --git a/deps/rabbit/src/rabbit_stream_sac_coordinator.hrl b/deps/rabbit/src/rabbit_stream_sac_coordinator.hrl index a0a2eacfb45f..756a6d7a688f 100644 --- a/deps/rabbit/src/rabbit_stream_sac_coordinator.hrl +++ b/deps/rabbit/src/rabbit_stream_sac_coordinator.hrl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -type vhost() :: binary(). diff --git a/deps/rabbit/src/rabbit_sup.erl b/deps/rabbit/src/rabbit_sup.erl index fb6c6f783b72..72127a3fea5c 100644 --- a/deps/rabbit/src/rabbit_sup.erl +++ b/deps/rabbit/src/rabbit_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_sup). diff --git a/deps/rabbit/src/rabbit_sysmon_handler.erl b/deps/rabbit/src/rabbit_sysmon_handler.erl index 9ee2de63e04d..b83d2e76f327 100644 --- a/deps/rabbit/src/rabbit_sysmon_handler.erl +++ b/deps/rabbit/src/rabbit_sysmon_handler.erl @@ -1,5 +1,5 @@ %% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved. -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file diff --git a/deps/rabbit/src/rabbit_sysmon_minder.erl b/deps/rabbit/src/rabbit_sysmon_minder.erl index b0ec1afa1087..e40da6a7e17d 100644 --- a/deps/rabbit/src/rabbit_sysmon_minder.erl +++ b/deps/rabbit/src/rabbit_sysmon_minder.erl @@ -1,6 +1,6 @@ %% ------------------------------------------------------------------- %% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file diff --git a/deps/rabbit/src/rabbit_table.erl b/deps/rabbit/src/rabbit_table.erl index e735df876582..22b39bb30c64 100644 --- a/deps/rabbit/src/rabbit_table.erl +++ b/deps/rabbit/src/rabbit_table.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_table). @@ -21,6 +21,10 @@ -include_lib("rabbit_common/include/rabbit.hrl"). +-ifdef(TEST). +-export([pre_khepri_definitions/0]). +-endif. + %%---------------------------------------------------------------------------- -type retry() :: boolean(). @@ -34,7 +38,7 @@ create() -> lists:foreach( fun ({Table, Def}) -> create(Table, Def) end, - definitions()), + mandatory_definitions()), ensure_secondary_indexes(), ok. @@ -53,8 +57,13 @@ create(TableName, TableDefinition) -> %% Sets up secondary indexes in a blank node database. ensure_secondary_indexes() -> - ensure_secondary_index(rabbit_queue, vhost), - ok. + case rabbit_khepri:is_enabled() of + true -> + ok; + false -> + ensure_secondary_index(rabbit_queue, vhost), + ok + end. ensure_secondary_index(Table, Field) -> case mnesia:add_table_index(Table, Field) of @@ -101,6 +110,14 @@ wait(TableNames, Retry) -> wait(TableNames, Timeout, Retries). wait(TableNames, Timeout, Retries) -> + %% Wait for tables must only wait for tables that have already been declared. + %% Otherwise, node boot returns a timeout when the Khepri ff is enabled from the start + ExistingTables = mnesia:system_info(tables), + MissingTables = TableNames -- ExistingTables, + TablesToMigrate = TableNames -- MissingTables, + wait1(TablesToMigrate, Timeout, Retries). + +wait1(TableNames, Timeout, Retries) -> %% We might be in ctl here for offline ops, in which case we can't %% get_env() for the rabbit app. rabbit_log:info("Waiting for Mnesia tables for ~tp ms, ~tp retries left", @@ -123,7 +140,7 @@ wait(TableNames, Timeout, Retries) -> throw(Error); {_, {error, Error}} -> rabbit_log:warning("Error while waiting for Mnesia tables: ~tp", [Error]), - wait(TableNames, Timeout, Retries - 1) + wait1(TableNames, Timeout, Retries - 1) end. retry_timeout(_Retry = false) -> @@ -157,8 +174,28 @@ is_empty() -> is_empty(names()). -spec needs_default_data() -> boolean(). -needs_default_data() -> is_empty([rabbit_user, rabbit_user_permission, - rabbit_vhost]). 
+needs_default_data() -> + case rabbit_khepri:is_enabled() of + true -> + needs_default_data_in_khepri(); + false -> + needs_default_data_in_mnesia() + end. + +needs_default_data_in_khepri() -> + Paths = [rabbit_db_vhost:khepri_vhosts_path(), + rabbit_db_user:khepri_users_path()], + lists:all( + fun(Path) -> + case rabbit_khepri:list(Path) of + {ok, List} when is_map(List) andalso List =:= #{} -> true; + _ -> false + end + end, Paths). + +needs_default_data_in_mnesia() -> + is_empty([rabbit_user, rabbit_user_permission, + rabbit_vhost]). is_empty(Names) -> lists:all(fun (Tab) -> mnesia:dirty_first(Tab) == '$end_of_table' end, @@ -195,9 +232,18 @@ clear_ram_only_tables() -> -spec maybe_clear_ram_only_tables() -> ok. maybe_clear_ram_only_tables() -> - case rabbit_mnesia:members() of - [N] when N=:= node() -> clear_ram_only_tables(); - _ -> ok + %% We use `rabbit_khepri:get_feature_state/0' because we don't want to + %% block here. Indeed, this function is executed as part of + %% `rabbit:stop/1'. + case rabbit_khepri:get_feature_state() of + enabled -> + ok; + _ -> + _ = case rabbit_mnesia:members() of + [N] when N=:= node() -> clear_ram_only_tables(); + _ -> ok + end, + ok end. %% The sequence in which we delete the schema and then the other @@ -305,6 +351,19 @@ definitions(ram) -> {Tab, TabDef} <- definitions()]. definitions() -> + %% Checks for feature flags enabled during node boot must be non_blocking + case rabbit_khepri:get_feature_state() of + enabled -> []; + _ -> mandatory_definitions() + end. + +mandatory_definitions() -> + pre_khepri_definitions() + ++ gm:table_definitions() + ++ mirrored_supervisor:table_definitions() + ++ rabbit_maintenance:table_definitions(). + +pre_khepri_definitions() -> [{rabbit_user, [{record_name, internal_user}, {attributes, internal_user:fields()}, @@ -317,6 +376,11 @@ definitions() -> {match, #user_permission{user_vhost = #user_vhost{_='_'}, permission = #permission{_='_'}, _='_'}}]}, + {rabbit_runtime_parameters, + [{record_name, runtime_parameters}, + {attributes, record_info(fields, runtime_parameters)}, + {disc_copies, [node()]}, + {match, #runtime_parameters{_='_'}}]}, {rabbit_topic_permission, [{record_name, topic_permission}, {attributes, record_info(fields, topic_permission)}, @@ -330,6 +394,28 @@ definitions() -> {attributes, vhost:fields()}, {disc_copies, [node()]}, {match, vhost:pattern_match_all()}]}, + {rabbit_durable_queue, + [{record_name, amqqueue}, + {attributes, amqqueue:fields()}, + {disc_copies, [node()]}, + {match, amqqueue:pattern_match_on_name(queue_name_match())}]}, + {rabbit_queue, + [{record_name, amqqueue}, + {attributes, amqqueue:fields()}, + {match, amqqueue:pattern_match_on_name(queue_name_match())}]}, + {rabbit_durable_exchange, + [{record_name, exchange}, + {attributes, record_info(fields, exchange)}, + {disc_copies, [node()]}, + {match, #exchange{name = exchange_name_match(), _='_'}}]}, + {rabbit_exchange, + [{record_name, exchange}, + {attributes, record_info(fields, exchange)}, + {match, #exchange{name = exchange_name_match(), _='_'}}]}, + {rabbit_exchange_serial, + [{record_name, exchange_serial}, + {attributes, record_info(fields, exchange_serial)}, + {match, #exchange_serial{name = exchange_name_match(), _='_'}}]}, {rabbit_durable_route, [{record_name, route}, {attributes, record_info(fields, route)}, @@ -374,37 +460,8 @@ definitions() -> {attributes, record_info(fields, topic_trie_binding)}, {type, ordered_set}, {match, #topic_trie_binding{trie_binding = trie_binding_match(), - _='_'}}]}, - 
{rabbit_durable_exchange, - [{record_name, exchange}, - {attributes, record_info(fields, exchange)}, - {disc_copies, [node()]}, - {match, #exchange{name = exchange_name_match(), _='_'}}]}, - {rabbit_exchange, - [{record_name, exchange}, - {attributes, record_info(fields, exchange)}, - {match, #exchange{name = exchange_name_match(), _='_'}}]}, - {rabbit_exchange_serial, - [{record_name, exchange_serial}, - {attributes, record_info(fields, exchange_serial)}, - {match, #exchange_serial{name = exchange_name_match(), _='_'}}]}, - {rabbit_runtime_parameters, - [{record_name, runtime_parameters}, - {attributes, record_info(fields, runtime_parameters)}, - {disc_copies, [node()]}, - {match, #runtime_parameters{_='_'}}]}, - {rabbit_durable_queue, - [{record_name, amqqueue}, - {attributes, amqqueue:fields()}, - {disc_copies, [node()]}, - {match, amqqueue:pattern_match_on_name(queue_name_match())}]}, - {rabbit_queue, - [{record_name, amqqueue}, - {attributes, amqqueue:fields()}, - {match, amqqueue:pattern_match_on_name(queue_name_match())}]} - ] - ++ gm:table_definitions() - ++ mirrored_supervisor:table_definitions(). + _='_'}}]} + ]. binding_match() -> #binding{source = exchange_name_match(), diff --git a/deps/rabbit/src/rabbit_time_travel_dbg.erl b/deps/rabbit/src/rabbit_time_travel_dbg.erl index 1db0a79ff16a..4ab6674514de 100644 --- a/deps/rabbit/src/rabbit_time_travel_dbg.erl +++ b/deps/rabbit/src/rabbit_time_travel_dbg.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module is a debugging utility mainly meant for debugging diff --git a/deps/rabbit/src/rabbit_trace.erl b/deps/rabbit/src/rabbit_trace.erl index dbfc0537090b..ff6e6ae15b75 100644 --- a/deps/rabbit/src/rabbit_trace.erl +++ b/deps/rabbit/src/rabbit_trace.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_trace). @@ -26,26 +26,24 @@ %%---------------------------------------------------------------------------- -spec init(rabbit_types:vhost()) -> state(). - init(VHost) when is_binary(VHost) -> case enabled(VHost) of - false -> none; - true -> {ok, X} = rabbit_exchange:lookup( - rabbit_misc:r(VHost, exchange, ?XNAME)), - X + false -> + none; + true -> + {ok, X} = rabbit_exchange:lookup(rabbit_misc:r(VHost, exchange, ?XNAME)), + X end. -spec enabled(rabbit_types:vhost() | state()) -> boolean(). - -enabled(VHost) - when is_binary(VHost) -> - {ok, VHosts} = application:get_env(rabbit, ?TRACE_VHOSTS), - lists:member(VHost, VHosts); enabled(none) -> false; enabled(#exchange{}) -> - true. + true; +enabled(VHost) + when is_binary(VHost) -> + lists:member(VHost, vhosts_with_tracing_enabled()). -spec tap_in(mc:state(), rabbit_exchange:route_return(), binary(), rabbit_types:username(), state()) -> 'ok'. 
@@ -55,13 +53,16 @@ tap_in(Msg, QNames, ConnName, Username, State) -> -spec tap_in(mc:state(), rabbit_exchange:route_return(), binary(), rabbit_channel:channel_number(), rabbit_types:username(), state()) -> 'ok'. -tap_in(_Msg, _QNames, _ConnName, _ChannelNum, _Username, none) -> ok; +tap_in(_Msg, _QNames, _ConnName, _ChannelNum, _Username, none) -> + ok; tap_in(Msg, QNames, ConnName, ChannelNum, Username, TraceX) -> - XName = mc:get_annotation(exchange, Msg), + XName = mc:exchange(Msg), #exchange{name = #resource{virtual_host = VHost}} = TraceX, RoutedQs = lists:map(fun(#resource{kind = queue, name = Name}) -> {longstr, Name}; ({#resource{kind = queue, name = Name}, _}) -> + {longstr, Name}; + ({virtual_reply_queue, Name}) -> {longstr, Name} end, QNames), trace(TraceX, Msg, <<"publish">>, XName, @@ -79,11 +80,15 @@ tap_out(Msg, ConnName, Username, State) -> -spec tap_out(rabbit_amqqueue:qmsg(), binary(), rabbit_channel:channel_number(), rabbit_types:username(), state()) -> 'ok'. -tap_out(_Msg, _ConnName, _ChannelNum, _Username, none) -> ok; +tap_out(_Msg, _ConnName, _ChannelNum, _Username, none) -> + ok; tap_out({#resource{name = QName, virtual_host = VHost}, _QPid, _QMsgId, Redelivered, Msg}, ConnName, ChannelNum, Username, TraceX) -> - RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, + RedeliveredNum = case Redelivered of + true -> 1; + false -> 0 + end, trace(TraceX, Msg, <<"deliver">>, QName, [{<<"redelivered">>, signedint, RedeliveredNum}, {<<"vhost">>, longstr, VHost}, @@ -94,28 +99,24 @@ tap_out({#resource{name = QName, virtual_host = VHost}, %%---------------------------------------------------------------------------- -spec start(rabbit_types:vhost()) -> 'ok'. - start(VHost) when is_binary(VHost) -> - case lists:member(VHost, vhosts_with_tracing_enabled()) of + case enabled(VHost) of true -> rabbit_log:info("Tracing is already enabled for vhost '~ts'", [VHost]), ok; false -> rabbit_log:info("Enabling tracing for vhost '~ts'", [VHost]), - update_config(fun (VHosts) -> - lists:usort([VHost | VHosts]) - end) + update_config(fun(VHosts) -> lists:usort([VHost | VHosts]) end) end. -spec stop(rabbit_types:vhost()) -> 'ok'. 
-
 stop(VHost) when is_binary(VHost) ->
-    case lists:member(VHost, vhosts_with_tracing_enabled()) of
+    case enabled(VHost) of
         true ->
             rabbit_log:info("Disabling tracing for vhost '~ts'", [VHost]),
-            update_config(fun (VHosts) -> VHosts -- [VHost] end);
+            update_config(fun(VHosts) -> VHosts -- [VHost] end);
         false ->
             rabbit_log:info("Tracing is already disabled for vhost '~ts'", [VHost]),
             ok
@@ -125,32 +126,33 @@ update_config(Fun) ->
     VHosts0 = vhosts_with_tracing_enabled(),
     VHosts = Fun(VHosts0),
     application:set_env(rabbit, ?TRACE_VHOSTS, VHosts),
+    Sessions = rabbit_amqp_session:list_local(),
     NonAmqpPids = rabbit_networking:local_non_amqp_connections(),
-    rabbit_log:debug("Will now refresh state of channels and of ~b non AMQP 0.9.1 "
-                     "connections after virtual host tracing changes",
-                     [length(NonAmqpPids)]),
-    lists:foreach(fun(Pid) -> gen_server:cast(Pid, refresh_config) end, NonAmqpPids),
-    {Time, _} = timer:tc(fun rabbit_channel:refresh_config_local/0),
-    rabbit_log:debug("Refreshed channel state in ~fs", [Time/1_000_000]),
+    rabbit_log:debug("Refreshing state of channels, ~b sessions and ~b non "
+                     "AMQP 0.9.1 connections after virtual host tracing changes...",
+                     [length(Sessions), length(NonAmqpPids)]),
+    Pids = Sessions ++ NonAmqpPids,
+    lists:foreach(fun(Pid) -> gen_server:cast(Pid, refresh_config) end, Pids),
+    {Time, ok} = timer:tc(fun rabbit_channel:refresh_config_local/0),
+    rabbit_log:debug("Refreshed channel states in ~fs", [Time / 1_000_000]),
     ok.
 
 vhosts_with_tracing_enabled() ->
-    application:get_env(rabbit, ?TRACE_VHOSTS, []).
+    {ok, Vhosts} = application:get_env(rabbit, ?TRACE_VHOSTS),
+    Vhosts.
 
 %%----------------------------------------------------------------------------
 
 trace(X, Msg0, RKPrefix, RKSuffix, Extra) ->
-    XName = mc:get_annotation(exchange, Msg0),
+    XName = mc:exchange(Msg0),
     case X of
         #exchange{name = #resource{name = XName}} ->
             ok;
         #exchange{name = SourceXName} ->
-            RoutingKeys = mc:get_annotation(routing_keys, Msg0),
+            RoutingKeys = mc:routing_keys(Msg0),
             %% for now convert into amqp legacy
             Msg = mc:prepare(read, mc:convert(mc_amqpl, Msg0)),
-            %% check exchange name in case it is same as target
-            #content{properties = Props} = Content0 =
-                mc:protocol_state(Msg),
+            #content{properties = Props} = Content0 = mc:protocol_state(Msg),
             Key = <<RKPrefix/binary, ".", RKSuffix/binary>>,
             Content = Content0#content{properties =
@@ -158,27 +160,24 @@ trace(X, Msg0, RKPrefix, RKSuffix, Extra) ->
                                        ++ Extra},
                                        properties_bin = none},
             TargetXName = SourceXName#resource{name = ?XNAME},
-            TraceMsg = mc_amqpl:message(TargetXName, Key, Content),
-            ok = rabbit_queue_type:publish_at_most_once(X, TraceMsg),
-            ok
+            {ok, TraceMsg} = mc_amqpl:message(TargetXName, Key, Content),
+            ok = rabbit_queue_type:publish_at_most_once(X, TraceMsg)
     end.
 
 msg_to_table(XName, RoutingKeys, Props) ->
     {PropsTable, _Ix} =
-        lists:foldl(fun (K, {L, Ix}) ->
+        lists:foldl(fun(K, {L, Ix}) ->
                             V = element(Ix, Props),
                             NewL = case V of
                                        undefined -> L;
-                                       _ -> [{a2b(K), type(V), V} | L]
+                                       _ -> [{atom_to_binary(K), type(V), V} | L]
                                    end,
                             {NewL, Ix + 1}
                     end, {[], 2}, record_info(fields, 'P_basic')),
     [{<<"exchange_name">>, longstr, XName},
      {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]},
      {<<"properties">>, table, PropsTable},
-     {<<"node">>, longstr, a2b(node())}].
-
-a2b(A) -> list_to_binary(atom_to_list(A)).
+     {<<"node">>, longstr, atom_to_binary(node())}].
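The start/1 and stop/1 hunks above now route the membership check through enabled/1, and vhosts_with_tracing_enabled/0 asserts {ok, Vhosts} instead of defaulting to [], so the change relies on the ?TRACE_VHOSTS key always being defined. A hedged shell sketch of the round trip (vhost name illustrative):

    %% Sketch only: toggling tracing for one vhost.
    ok    = rabbit_trace:start(<<"/">>),   %% adds the vhost to ?TRACE_VHOSTS, then refreshes channels,
                                           %% AMQP 1.0 sessions and non-0-9-1 connections
    true  = rabbit_trace:enabled(<<"/">>),
    ok    = rabbit_trace:stop(<<"/">>),    %% removes it again and triggers the same refresh
    false = rabbit_trace:enabled(<<"/">>).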
type(V) when is_list(V) -> table; type(V) when is_integer(V) -> signedint; diff --git a/deps/rabbit/src/rabbit_tracking.erl b/deps/rabbit/src/rabbit_tracking.erl index de5af6b5793e..942727e0fc1b 100644 --- a/deps/rabbit/src/rabbit_tracking.erl +++ b/deps/rabbit/src/rabbit_tracking.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracking). diff --git a/deps/rabbit/src/rabbit_tracking_store.erl b/deps/rabbit/src/rabbit_tracking_store.erl index ac58d7131376..fef4b7b00b4a 100644 --- a/deps/rabbit/src/rabbit_tracking_store.erl +++ b/deps/rabbit/src/rabbit_tracking_store.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracking_store). @@ -13,7 +13,7 @@ %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3, format_status/2]). + terminate/2, code_change/3]). -define(SERVER, ?MODULE). @@ -42,6 +42,3 @@ terminate(_Reason, _State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. - -format_status(_Opt, Status) -> - Status. diff --git a/deps/rabbit/src/rabbit_upgrade_preparation.erl b/deps/rabbit/src/rabbit_upgrade_preparation.erl index cac62505e9fa..3de38740b1da 100644 --- a/deps/rabbit/src/rabbit_upgrade_preparation.erl +++ b/deps/rabbit/src/rabbit_upgrade_preparation.erl @@ -2,16 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_upgrade_preparation). -export([await_online_quorum_plus_one/1, - await_online_synchronised_mirrors/1, list_with_minimum_quorum_for_cli/0]). --include_lib("rabbit_common/include/rabbit.hrl"). %% %% API %% @@ -22,12 +20,6 @@ await_online_quorum_plus_one(Timeout) -> Iterations = ceil(Timeout / ?SAMPLING_INTERVAL), do_await_safe_online_quorum(Iterations). - -await_online_synchronised_mirrors(Timeout) -> - Iterations = ceil(Timeout / ?SAMPLING_INTERVAL), - do_await_online_synchronised_mirrors(Iterations). - - %% %% Implementation %% @@ -69,31 +61,12 @@ do_await_safe_online_quorum(IterationsLeft) -> do_await_safe_online_quorum(IterationsLeft - 1) end. - -do_await_online_synchronised_mirrors(0) -> - false; -do_await_online_synchronised_mirrors(IterationsLeft) -> - case rabbit_amqqueue:list_local_mirrored_classic_without_synchronised_mirrors() of - [] -> true; - List when is_list(List) -> - timer:sleep(?SAMPLING_INTERVAL), - do_await_online_synchronised_mirrors(IterationsLeft - 1) - end. - -spec list_with_minimum_quorum_for_cli() -> [#{binary() => term()}]. 
list_with_minimum_quorum_for_cli() -> EndangeredQueues = lists:append( rabbit_quorum_queue:list_with_minimum_quorum(), rabbit_stream_queue:list_with_minimum_quorum()), - [begin - #resource{name = Name} = QName = amqqueue:get_name(Q), - #{ - <<"readable_name">> => rabbit_data_coercion:to_binary(rabbit_misc:rs(QName)), - <<"name">> => Name, - <<"virtual_host">> => amqqueue:get_vhost(Q), - <<"type">> => amqqueue:get_type(Q) - } - end || Q <- EndangeredQueues] ++ + [amqqueue:to_printable(Q) || Q <- EndangeredQueues] ++ [#{ <<"readable_name">> => C, <<"name">> => C, diff --git a/deps/rabbit/src/rabbit_uri.erl b/deps/rabbit/src/rabbit_uri.erl new file mode 100644 index 000000000000..f1e2d028753f --- /dev/null +++ b/deps/rabbit/src/rabbit_uri.erl @@ -0,0 +1,154 @@ +%% Copyright (c) 2016-2024, Loïc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +%% ------------------------------------------------------------------------- %% +%% This file is a partial copy of +%% https://github.com/ninenines/cowlib/blob/optimise-urldecode/src/cow_uri.erl +%% We use this copy because: +%% 1. uri_string:unquote/1 is lax: It doesn't validate that characters that are +%% required to be percent encoded are indeed percent encoded. In RabbitMQ, +%% we want to enforce that proper percent encoding is done by AMQP clients. +%% 2. uri_string:unquote/1 and cow_uri:urldecode/1 in cowlib v2.13.0 are both +%% slow because they allocate a new binary for the common case where no +%% character was percent encoded. +%% When a new cowlib version is released, we should make app rabbit depend on +%% app cowlib calling cow_uri:urldecode/1 and delete this file (rabbit_uri.erl). +%% ------------------------------------------------------------------------- %% + +-module(rabbit_uri). + +-export([urldecode/1]). + +-define(UNHEX(H, L), (?UNHEX(H) bsl 4 bor ?UNHEX(L))). + +-define(UNHEX(C), + case C of + $0 -> 0; + $1 -> 1; + $2 -> 2; + $3 -> 3; + $4 -> 4; + $5 -> 5; + $6 -> 6; + $7 -> 7; + $8 -> 8; + $9 -> 9; + $A -> 10; + $B -> 11; + $C -> 12; + $D -> 13; + $E -> 14; + $F -> 15; + $a -> 10; + $b -> 11; + $c -> 12; + $d -> 13; + $e -> 14; + $f -> 15 + end +). + +%% Decode a percent encoded string. (RFC3986 2.1) +%% +%% Inspiration for some of the optimisations done here come +%% from the new `json` module as it was in mid-2024. +%% +%% Possible input includes: +%% +%% * nothing encoded (no % character): +%% We want to return the binary as-is to avoid an allocation. +%% +%% * small number of encoded characters: +%% We can "skip" words of text. +%% +%% * mostly encoded characters (non-ascii languages) +%% We can decode characters in bulk. + +-define(IS_PLAIN(C), ( + (C =:= $!) 
orelse (C =:= $$) orelse (C =:= $&) orelse (C =:= $') orelse
+    (C =:= $() orelse (C =:= $)) orelse (C =:= $*) orelse (C =:= $+) orelse
+    (C =:= $,) orelse (C =:= $-) orelse (C =:= $.) orelse (C =:= $0) orelse
+    (C =:= $1) orelse (C =:= $2) orelse (C =:= $3) orelse (C =:= $4) orelse
+    (C =:= $5) orelse (C =:= $6) orelse (C =:= $7) orelse (C =:= $8) orelse
+    (C =:= $9) orelse (C =:= $:) orelse (C =:= $;) orelse (C =:= $=) orelse
+    (C =:= $@) orelse (C =:= $A) orelse (C =:= $B) orelse (C =:= $C) orelse
+    (C =:= $D) orelse (C =:= $E) orelse (C =:= $F) orelse (C =:= $G) orelse
+    (C =:= $H) orelse (C =:= $I) orelse (C =:= $J) orelse (C =:= $K) orelse
+    (C =:= $L) orelse (C =:= $M) orelse (C =:= $N) orelse (C =:= $O) orelse
+    (C =:= $P) orelse (C =:= $Q) orelse (C =:= $R) orelse (C =:= $S) orelse
+    (C =:= $T) orelse (C =:= $U) orelse (C =:= $V) orelse (C =:= $W) orelse
+    (C =:= $X) orelse (C =:= $Y) orelse (C =:= $Z) orelse (C =:= $_) orelse
+    (C =:= $a) orelse (C =:= $b) orelse (C =:= $c) orelse (C =:= $d) orelse
+    (C =:= $e) orelse (C =:= $f) orelse (C =:= $g) orelse (C =:= $h) orelse
+    (C =:= $i) orelse (C =:= $j) orelse (C =:= $k) orelse (C =:= $l) orelse
+    (C =:= $m) orelse (C =:= $n) orelse (C =:= $o) orelse (C =:= $p) orelse
+    (C =:= $q) orelse (C =:= $r) orelse (C =:= $s) orelse (C =:= $t) orelse
+    (C =:= $u) orelse (C =:= $v) orelse (C =:= $w) orelse (C =:= $x) orelse
+    (C =:= $y) orelse (C =:= $z) orelse (C =:= $~)
+)).
+
+urldecode(Binary) ->
+    skip_dec(Binary, Binary, 0).
+
+%% This function helps avoid a binary allocation when
+%% there is nothing to decode.
+skip_dec(Binary, Orig, Len) ->
+    case Binary of
+        <<C1, C2, C3, C4, Rest/bits>>
+                when ?IS_PLAIN(C1) andalso ?IS_PLAIN(C2)
+                     andalso ?IS_PLAIN(C3) andalso ?IS_PLAIN(C4) ->
+            skip_dec(Rest, Orig, Len + 4);
+        _ ->
+            dec(Binary, [], Orig, 0, Len)
+    end.
+
+-dialyzer({no_improper_lists, [dec/5]}).
+%% This clause helps speed up decoding of highly encoded values.
+dec(<<$%, H1, L1, $%, H2, L2, $%, H3, L3, $%, H4, L4, Rest/bits>>, Acc, Orig, Skip, Len) ->
+    C1 = ?UNHEX(H1, L1),
+    C2 = ?UNHEX(H2, L2),
+    C3 = ?UNHEX(H3, L3),
+    C4 = ?UNHEX(H4, L4),
+    case Len of
+        0 ->
+            dec(Rest, [Acc|<<C1, C2, C3, C4>>], Orig, Skip + 12, 0);
+        _ ->
+            Part = binary_part(Orig, Skip, Len),
+            dec(Rest, [Acc, Part|<<C1, C2, C3, C4>>], Orig, Skip + Len + 12, 0)
+    end;
+dec(<<$%, H, L, Rest/bits>>, Acc, Orig, Skip, Len) ->
+    C = ?UNHEX(H, L),
+    case Len of
+        0 ->
+            dec(Rest, [Acc|<<C>>], Orig, Skip + 3, 0);
+        _ ->
+            Part = binary_part(Orig, Skip, Len),
+            dec(Rest, [Acc, Part|<<C>>], Orig, Skip + Len + 3, 0)
+    end;
+%% This clause helps speed up decoding of barely encoded values.
+dec(<<C1, C2, C3, C4, Rest/bits>>, Acc, Orig, Skip, Len)
+        when ?IS_PLAIN(C1) andalso ?IS_PLAIN(C2)
+             andalso ?IS_PLAIN(C3) andalso ?IS_PLAIN(C4) ->
+    dec(Rest, Acc, Orig, Skip, Len + 4);
+dec(<<C, Rest/bits>>, Acc, Orig, Skip, Len) when ?IS_PLAIN(C) ->
+    dec(Rest, Acc, Orig, Skip, Len + 1);
+dec(<<>>, _, Orig, 0, _) ->
+    Orig;
+dec(<<>>, Acc, _, _, 0) ->
+    iolist_to_binary(Acc);
+dec(<<>>, Acc, Orig, Skip, Len) ->
+    Part = binary_part(Orig, Skip, Len),
+    iolist_to_binary([Acc|Part]);
+dec(_, _, Orig, Skip, Len) ->
+    error({invalid_byte, binary:at(Orig, Skip + Len)}).
diff --git a/deps/rabbit/src/rabbit_variable_queue.erl b/deps/rabbit/src/rabbit_variable_queue.erl
index 9b26f84a2948..b9d5bb8480ac 100644
--- a/deps/rabbit/src/rabbit_variable_queue.erl
+++ b/deps/rabbit/src/rabbit_variable_queue.erl
@@ -2,28 +2,27 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
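To make the expected behaviour of rabbit_uri:urldecode/1 concrete, here is a hedged shell sketch that follows directly from the clauses above: binaries with nothing encoded come back unchanged (no allocation), valid escapes are decoded, and any byte that is neither plain nor part of a %XX escape raises the invalid_byte error:

    %% Sketch only; results follow from the decoder clauses above.
    <<"queue-1">>    = rabbit_uri:urldecode(<<"queue-1">>),         %% nothing encoded: same binary back
    <<"my queue/a">> = rabbit_uri:urldecode(<<"my%20queue%2Fa">>),  %% %20 and %2F are decoded
    {'EXIT', {{invalid_byte, _}, _}} = (catch rabbit_uri:urldecode(<<"a b">>)). %% a raw space must be sent as %20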
%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_variable_queue). -export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1, purge/1, purge_acks/1, - publish/6, publish_delivered/5, - batch_publish/4, batch_publish_delivered/4, - discard/4, drain_confirmed/1, + publish/5, publish_delivered/4, + discard/3, drain_confirmed/1, dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3, len/1, is_empty/1, depth/1, - set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1, + update_rates/1, needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1, msg_rates/1, info/2, invoke/3, is_duplicate/2, set_queue_mode/2, set_queue_version/2, zip_msgs_and_acks/4]). -export([start/2, stop/1]). -%% Used during dirty recovery to resume conversion between versions. +%% This function is used by rabbit_classic_queue_index_v2 +%% to convert v1 queues to v2 after an upgrade to 4.0. -export([convert_from_v1_to_v2_loop/8]). --export([convert_from_v2_to_v1_loop/8]). %% exported for testing only -export([start_msg_store/3, stop_msg_store/1, init/5]). @@ -52,20 +51,13 @@ %% %% Messages are persisted using a queue index and a message store. %% A few different scenarios may play out depending on the message -%% size and the queue-version argument. +%% size: %% -%% - queue-version=1, size < qi_msgs_embed_below: both the message -%% metadata and content are stored in rabbit_queue_index -%% -%% - queue-version=1, size >= qi_msgs_embed_below: the metadata -%% is stored in rabbit_queue_index, while the content is stored -%% in the per-vhost shared rabbit_msg_store -%% -%% - queue-version=2, size < qi_msgs_embed_below: the metadata +%% - size < qi_msgs_embed_below: the metadata %% is stored in rabbit_classic_queue_index_v2, while the content %% is stored in the per-queue rabbit_classic_queue_store_v2 %% -%% - queue-version=2, size >= qi_msgs_embed_below: the metadata +%% - size >= qi_msgs_embed_below: the metadata %% is stored in rabbit_classic_queue_index_v2, while the content %% is stored in the per-vhost shared rabbit_msg_store %% @@ -179,7 +171,7 @@ ram_pending_ack, %% msgs still in RAM disk_pending_ack, %% msgs in store, paged out qi_pending_ack, %% Unused. - index_mod, + index_mod, %% Unused. index_state, store_state, msg_store_clients, @@ -222,7 +214,7 @@ %% default queue or lazy queue mode, %% Unused. - version = 1, + version = 2, %% Unused. %% Fast path for confirms handling. Instead of having %% index/store keep track of confirms separately and %% doing intersect/subtract/union we just put the messages @@ -275,7 +267,6 @@ -define(IN_MEMORY, memory). -include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbit_common/include/rabbit_framing.hrl"). -include("amqqueue.hrl"). %%---------------------------------------------------------------------------- @@ -311,7 +302,6 @@ ram_pending_ack :: map(), disk_pending_ack :: map(), qi_pending_ack :: undefined, - index_mod :: rabbit_queue_index | rabbit_classic_queue_index_v2, index_state :: any(), store_state :: any(), msg_store_clients :: 'undefined' | {{any(), binary()}, @@ -346,7 +336,7 @@ io_batch_size :: pos_integer(), mode :: 'default' | 'lazy', - version :: 1 | 2, + version :: 2, unconfirmed_simple :: sets:set()}. 
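The rewritten scenario comment above boils the placement rules down to one size check. As a worked example, assuming the commonly used 4096-byte embedding threshold (an assumption here, the actual value comes from qi_msgs_embed_below): a 1 kB body is written to the per-queue rabbit_classic_queue_store_v2, a 256 kB body to the per-vhost shared rabbit_msg_store, and in both cases the metadata goes to rabbit_classic_queue_index_v2. A sketch of the rule:

    %% Sketch of the size check above; Est approximates metadata size + body size,
    %% IndexMaxSize is the qi_msgs_embed_below setting (4096 in the example).
    persist_target(Est, IndexMaxSize) when Est >= IndexMaxSize -> msg_store;    %% shared, per vhost
    persist_target(_Est, _IndexMaxSize)                        -> queue_store.  %% per-queue v2 store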
-define(BLANK_DELTA, #delta { start_seq_id = undefined, @@ -360,12 +350,12 @@ -define(MICROS_PER_SECOND, 1000000.0). -%% We're sampling every 5s for RAM duration; a half life that is of +%% We're updating rates every 5s at most; a half life that is of %% the same order of magnitude is probably about right. -define(RATE_AVG_HALF_LIFE, 5.0). -%% We will recalculate the #rates{} every time we get asked for our -%% RAM duration, or every N messages published, whichever is +%% We will recalculate the #rates{} every 5 seconds, +%% or every N messages published, whichever is %% sooner. We do this since the priority calculations in %% rabbit_amqqueue_process need fairly fresh rates. -define(MSGS_PER_RATE_CALC, 100). @@ -431,14 +421,11 @@ init(Queue, Recover, Callback) -> init(Q, new, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) when ?is_amqqueue(Q) -> QueueName = amqqueue:get_name(Q), IsDurable = amqqueue:is_durable(Q), - %% We resolve the queue version immediately to avoid converting - %% between queue versions unnecessarily. - IndexMod = index_mod(Q), - IndexState = IndexMod:init(QueueName, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun), + IndexState = rabbit_classic_queue_index_v2:init(QueueName, + MsgIdxOnDiskFun, MsgAndIdxOnDiskFun), StoreState = rabbit_classic_queue_store_v2:init(QueueName), VHost = QueueName#resource.virtual_host, - init(queue_version(Q), - IsDurable, IndexMod, IndexState, StoreState, 0, 0, [], + init(IsDurable, IndexState, StoreState, 0, 0, [], case IsDurable of true -> msg_store_client_init(?PERSISTENT_MSG_STORE, MsgOnDiskFun, VHost); @@ -466,10 +453,8 @@ init(Q, Terms, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) when ?is_amqqu end, TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, VHost), - %% We MUST resolve the queue version immediately in order to recover. - IndexMod = index_mod(Q), {DeltaCount, DeltaBytes, IndexState} = - IndexMod:recover( + rabbit_classic_queue_index_v2:recover( QueueName, RecoveryTerms, rabbit_vhost_msg_store:successfully_recovered_state( VHost, @@ -477,8 +462,8 @@ init(Q, Terms, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) when ?is_amqqu ContainsCheckFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun, main), StoreState = rabbit_classic_queue_store_v2:init(QueueName), - init(queue_version(Q), - IsDurable, IndexMod, IndexState, StoreState, DeltaCount, DeltaBytes, RecoveryTerms, + init(IsDurable, IndexState, StoreState, + DeltaCount, DeltaBytes, RecoveryTerms, PersistentClient, TransientClient, VHost). process_recovery_terms(Terms=non_clean_shutdown) -> @@ -489,28 +474,12 @@ process_recovery_terms(Terms) -> PRef -> {PRef, Terms} end. -%% If queue-version is undefined, we assume v2 starting with RabbitMQ 3.13.0. -queue_version(Q) -> - Resolve = fun(_, ArgVal) -> ArgVal end, - case rabbit_queue_type_util:args_policy_lookup(<<"queue-version">>, Resolve, Q) of - undefined -> rabbit_misc:get_env(rabbit, classic_queue_default_version, 2); - Vsn when is_integer(Vsn) -> Vsn; - Vsn -> binary_to_integer(Vsn) - end. - -index_mod(Q) -> - case queue_version(Q) of - 1 -> rabbit_queue_index; - 2 -> rabbit_classic_queue_index_v2 - end. 
- terminate(_Reason, State) -> State1 = #vqstate { virtual_host = VHost, next_seq_id = NextSeqId, next_deliver_seq_id = NextDeliverSeqId, persistent_count = PCount, persistent_bytes = PBytes, - index_mod = IndexMod, index_state = IndexState, store_state = StoreState, msg_store_clients = {MSCStateP, MSCStateT} } = @@ -527,7 +496,7 @@ terminate(_Reason, State) -> {persistent_count, PCount}, {persistent_bytes, PBytes}], a(State1#vqstate { - index_state = IndexMod:terminate(VHost, Terms, IndexState), + index_state = rabbit_classic_queue_index_v2:terminate(VHost, Terms, IndexState), store_state = rabbit_classic_queue_store_v2:terminate(StoreState), msg_store_clients = undefined }). @@ -561,34 +530,21 @@ purge(State = #vqstate { len = Len }) -> purge_acks(State) -> a(purge_pending_ack(false, State)). -publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) -> +publish(Msg, MsgProps, IsDelivered, ChPid, State) -> State1 = - publish1(Msg, MsgProps, IsDelivered, ChPid, Flow, + publish1(Msg, MsgProps, IsDelivered, ChPid, fun maybe_write_to_disk/4, State), a(maybe_update_rates(State1)). -batch_publish(Publishes, ChPid, Flow, State) -> - {ChPid, Flow, State1} = - lists:foldl(fun batch_publish1/2, {ChPid, Flow, State}, Publishes), - State2 = ui(State1), - a(maybe_update_rates(State2)). - -publish_delivered(Msg, MsgProps, ChPid, Flow, State) -> +publish_delivered(Msg, MsgProps, ChPid, State) -> {SeqId, State1} = - publish_delivered1(Msg, MsgProps, ChPid, Flow, + publish_delivered1(Msg, MsgProps, ChPid, fun maybe_write_to_disk/4, State), {SeqId, a(maybe_update_rates(State1))}. -batch_publish_delivered(Publishes, ChPid, Flow, State) -> - {ChPid, Flow, SeqIds, State1} = - lists:foldl(fun batch_publish_delivered1/2, - {ChPid, Flow, [], State}, Publishes), - State2 = ui(State1), - {lists:reverse(SeqIds), a(maybe_update_rates(State2))}. - -discard(_MsgId, _ChPid, _Flow, State) -> State. +discard(_MsgId, _ChPid, State) -> State. drain_confirmed(State = #vqstate { confirmed = C }) -> case sets:is_empty(C) of @@ -650,8 +606,7 @@ ack([SeqId], State) -> end; ack(AckTags, State) -> {{IndexOnDiskSeqIds, MsgIdsByStore, SeqIdsInStore, AllMsgIds}, - State1 = #vqstate { index_mod = IndexMod, - index_state = IndexState, + State1 = #vqstate { index_state = IndexState, store_state = StoreState0, ack_out_counter = AckOutCount }} = lists:foldl( @@ -665,7 +620,7 @@ ack(AckTags, State) -> {accumulate_ack(MsgStatus, Acc), State3} end end, {accumulate_ack_init(), State}, AckTags), - {DeletedSegments, IndexState1} = IndexMod:ack(IndexOnDiskSeqIds, IndexState), + {DeletedSegments, IndexState1} = rabbit_classic_queue_index_v2:ack(IndexOnDiskSeqIds, IndexState), StoreState1 = rabbit_classic_queue_store_v2:delete_segments(DeletedSegments, StoreState0), StoreState = lists:foldl(fun rabbit_classic_queue_store_v2:remove/2, StoreState1, SeqIdsInStore), State2 = remove_vhost_msgs_by_id(MsgIdsByStore, State1), @@ -715,9 +670,6 @@ is_empty(State) -> 0 == len(State). depth(State) -> len(State) + count_pending_acks(State). -set_ram_duration_target(_DurationTarget, State) -> - State. - maybe_update_rates(State = #vqstate{ in_counter = InCount, out_counter = OutCount }) when InCount + OutCount > ?MSGS_PER_RATE_CALC -> @@ -757,42 +709,32 @@ update_rate(Now, TS, Count, Rate) -> Count / Time, Rate) end. -%% @todo Should be renamed since it's only used to update_rates. -%% Can do this after mirroring gets removed. -ram_duration(State) -> - State1 = update_rates(State), - {infinity, State1}. 
- -needs_timeout(#vqstate { index_mod = IndexMod, - index_state = IndexState, +needs_timeout(#vqstate { index_state = IndexState, unconfirmed_simple = UCS }) -> - case {IndexMod:needs_sync(IndexState), sets:is_empty(UCS)} of + case {rabbit_classic_queue_index_v2:needs_sync(IndexState), sets:is_empty(UCS)} of {false, false} -> timed; {confirms, _} -> timed; - {other, _} -> idle; {false, true} -> false end. -timeout(State = #vqstate { index_mod = IndexMod, - index_state = IndexState0, +timeout(State = #vqstate { index_state = IndexState0, store_state = StoreState0, unconfirmed_simple = UCS, confirmed = C }) -> - IndexState = IndexMod:sync(IndexState0), + IndexState = rabbit_classic_queue_index_v2:sync(IndexState0), StoreState = rabbit_classic_queue_store_v2:sync(StoreState0), State #vqstate { index_state = IndexState, store_state = StoreState, unconfirmed_simple = sets:new([{version,2}]), confirmed = sets:union(C, UCS) }. -handle_pre_hibernate(State = #vqstate { index_mod = IndexMod, - index_state = IndexState0, +handle_pre_hibernate(State = #vqstate { index_state = IndexState0, store_state = StoreState0, msg_store_clients = MSCState0, unconfirmed_simple = UCS, confirmed = C }) -> MSCState = msg_store_pre_hibernate(MSCState0), - IndexState = IndexMod:flush(IndexState0), + IndexState = rabbit_classic_queue_index_v2:flush(IndexState0), StoreState = rabbit_classic_queue_store_v2:sync(StoreState0), State #vqstate { index_state = IndexState, store_state = StoreState, @@ -833,6 +775,10 @@ info(head_message_timestamp, #vqstate{ q3 = Q3, ram_pending_ack = RPA}) -> head_message_timestamp(Q3, RPA); +info(oldest_message_received_timestamp, #vqstate{ + q3 = Q3, + ram_pending_ack = RPA}) -> + oldest_message_received_timestamp(Q3, RPA); info(disk_reads, #vqstate{disk_read_count = Count}) -> Count; info(disk_writes, #vqstate{disk_write_count = Count}) -> @@ -840,7 +786,6 @@ info(disk_writes, #vqstate{disk_write_count = Count}) -> info(backing_queue_status, #vqstate { delta = Delta, q3 = Q3, mode = Mode, - version = Version, len = Len, target_ram_count = TargetRamCount, next_seq_id = NextSeqId, @@ -849,7 +794,6 @@ info(backing_queue_status, #vqstate { disk_pending_ack = DPA, unconfirmed = UC, unconfirmed_simple = UCS, - index_mod = IndexMod, index_state = IndexState, store_state = StoreState, rates = #rates { in = AvgIngressRate, @@ -857,7 +801,7 @@ info(backing_queue_status, #vqstate { ack_in = AvgAckIngressRate, ack_out = AvgAckEgressRate }}) -> [ {mode , Mode}, - {version , Version}, + {version , 2}, {q1 , 0}, {q2 , 0}, {delta , Delta}, @@ -873,7 +817,7 @@ info(backing_queue_status, #vqstate { {avg_egress_rate , AvgEgressRate}, {avg_ack_ingress_rate, AvgAckIngressRate}, {avg_ack_egress_rate , AvgAckEgressRate} ] - ++ IndexMod:info(IndexState) + ++ rabbit_classic_queue_index_v2:info(IndexState) ++ rabbit_classic_queue_store_v2:info(StoreState); info(_, _) -> ''. @@ -893,94 +837,12 @@ zip_msgs_and_acks(Msgs, AckTags, Accumulator, _State) -> [{Id, AckTag} | Acc] end, Accumulator, lists:zip(Msgs, AckTags)). -%% No change. -set_queue_version(Version, State = #vqstate { version = Version }) -> - State; -%% v2 -> v1. -set_queue_version(1, State0 = #vqstate { version = 2 }) -> - %% We call timeout/1 so that we sync to disk and get the confirms - %% handled before we do the conversion. This is necessary because - %% v2 now has a simpler confirms code path. - State = timeout(State0), - convert_from_v2_to_v1(State #vqstate { version = 1 }); -%% v1 -> v2. 
-set_queue_version(2, State0 = #vqstate { version = 1 }) -> - %% We call timeout/1 so that we sync to disk and get the confirms - %% handled before we do the conversion. This is necessary because - %% v2 now has a simpler confirms code path. - State = timeout(State0), - convert_from_v1_to_v2(State #vqstate { version = 2 }). - --define(CONVERT_COUNT, 1). --define(CONVERT_BYTES, 2). %% Unused. --define(CONVERT_COUNTER_SIZE, 2). - -%% We move messages from the v1 index to the v2 index. The message payload -%% is moved to the v2 store if it was embedded, and left in the per-vhost -%% store otherwise. -convert_from_v1_to_v2(State0 = #vqstate{ index_mod = rabbit_queue_index, - index_state = V1Index, - store_state = V2Store0 }) -> - {QueueName, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun} = rabbit_queue_index:init_args(V1Index), - #resource{virtual_host = VHost, name = QName} = QueueName, - rabbit_log:info("Converting running queue ~ts in vhost ~ts from v1 to v2", [QName, VHost]), - State = convert_from_v1_to_v2_in_memory(State0), - V2Index0 = rabbit_classic_queue_index_v2:init_for_conversion(QueueName, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun), - %% We do not need to init the v2 per-queue store because we already did so in the queue init. - {LoSeqId, HiSeqId, _} = rabbit_queue_index:bounds(V1Index), - CountersRef = counters:new(?CONVERT_COUNTER_SIZE, []), - {V2Index, V2Store} = convert_from_v1_to_v2_loop(QueueName, V1Index, V2Index0, V2Store0, - {CountersRef, ?CONVERT_COUNT, ?CONVERT_BYTES}, - LoSeqId, HiSeqId, - %% Write all messages. - fun (_, FunState) -> {write, FunState} end), - %% We have already deleted segments files but not the journal. - rabbit_queue_index:delete_journal(V1Index), - rabbit_log:info("Queue ~ts in vhost ~ts converted ~b total messages from v1 to v2", - [QName, VHost, counters:get(CountersRef, ?CONVERT_COUNT)]), - State#vqstate{ index_mod = rabbit_classic_queue_index_v2, - index_state = V2Index, - store_state = V2Store }. - -convert_from_v1_to_v2_in_memory(State = #vqstate{ q1 = Q1b, - q2 = Q2b, - q3 = Q3b, - q4 = Q4b, - ram_pending_ack = RPAb, - disk_pending_ack = DPAb }) -> - Q1 = convert_from_v1_to_v2_queue(Q1b), - Q2 = convert_from_v1_to_v2_queue(Q2b), - Q3 = convert_from_v1_to_v2_queue(Q3b), - Q4 = convert_from_v1_to_v2_queue(Q4b), - %% We also must convert the #msg_status entries in the pending_ack fields. - RPA = convert_from_v1_to_v2_map(RPAb), - DPA = convert_from_v1_to_v2_map(DPAb), - State#vqstate{ q1 = Q1, - q2 = Q2, - q3 = Q3, - q4 = Q4, - ram_pending_ack = RPA, - disk_pending_ack = DPA }. - -%% We change where the message is expected to be persisted to. -%% We do not need to worry about the message location because -%% it will only be in memory or in the per-vhost store. -convert_from_v1_to_v2_queue(Q) -> - List0 = ?QUEUE:to_list(Q), - List = lists:map(fun (MsgStatus) -> convert_from_v1_to_v2_msg_status(MsgStatus) end, List0), - ?QUEUE:from_list(List). - -convert_from_v1_to_v2_map(T) -> - maps:map(fun (_, MsgStatus) -> convert_from_v1_to_v2_msg_status(MsgStatus) end, T). - -convert_from_v1_to_v2_msg_status(MsgStatus) -> - case MsgStatus of - #msg_status{ persist_to = queue_index } -> - MsgStatus#msg_status{ persist_to = queue_store }; - _ -> - MsgStatus - end. +%% Queue version now ignored; only v2 is available. +set_queue_version(_, State) -> + State. +%% This function is used by rabbit_classic_queue_index_v2 +%% to convert v1 queues to v2 after an upgrade to 4.0. 
convert_from_v1_to_v2_loop(_, _, V2Index, V2Store, _, HiSeqId, HiSeqId, _) -> {V2Index, V2Store}; convert_from_v1_to_v2_loop(QueueName, V1Index0, V2Index0, V2Store0, @@ -1033,155 +895,6 @@ convert_from_v1_to_v2_loop(QueueName, V1Index0, V2Index0, V2Store0, [Name, VHost, length(Messages)]), convert_from_v1_to_v2_loop(QueueName, V1Index, V2Index, V2Store, Counters, UpSeqId, HiSeqId, SkipFun). -%% We move messages from the v1 index to the v2 index. The message payload -%% is moved to the v2 store if it was embedded, and left in the per-vhost -%% store otherwise. -convert_from_v2_to_v1(State0 = #vqstate{ index_mod = rabbit_classic_queue_index_v2, - index_state = V2Index }) -> - {QueueName, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun} = rabbit_classic_queue_index_v2:init_args(V2Index), - #resource{virtual_host = VHost, name = QName} = QueueName, - rabbit_log:info("Converting running queue ~ts in vhost ~ts from v2 to v1", [QName, VHost]), - State = convert_from_v2_to_v1_in_memory(State0), - %% We may have read from the per-queue store state and opened FDs. - #vqstate{ store_state = V2Store0 } = State, - V1Index0 = rabbit_queue_index:init_for_conversion(QueueName, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun), - {LoSeqId, HiSeqId, _} = rabbit_classic_queue_index_v2:bounds(V2Index), - CountersRef = counters:new(?CONVERT_COUNTER_SIZE, []), - {V1Index, V2Store} = convert_from_v2_to_v1_loop(QueueName, V1Index0, V2Index, V2Store0, - {CountersRef, ?CONVERT_COUNT, ?CONVERT_BYTES}, - LoSeqId, HiSeqId, - %% Write all messages. - fun (_, FunState) -> {write, FunState} end), - rabbit_log:info("Queue ~ts in vhost ~ts converted ~b total messages from v2 to v1", - [QName, VHost, counters:get(CountersRef, ?CONVERT_COUNT)]), - %% We have already closed the v2 index/store FDs when deleting the files. - State#vqstate{ index_mod = rabbit_queue_index, - index_state = V1Index, - store_state = rabbit_classic_queue_store_v2:terminate(V2Store) }. - -convert_from_v2_to_v1_in_memory(State0 = #vqstate{ q1 = Q1b, - q2 = Q2b, - q3 = Q3b, - q4 = Q4b, - ram_pending_ack = RPAb, - disk_pending_ack = DPAb }) -> - {Q1, State1} = convert_from_v2_to_v1_queue(Q1b, State0), - {Q2, State2} = convert_from_v2_to_v1_queue(Q2b, State1), - {Q3, State3} = convert_from_v2_to_v1_queue(Q3b, State2), - {Q4, State4} = convert_from_v2_to_v1_queue(Q4b, State3), - %% We also must convert the #msg_status entries in the pending_ack fields. - %% We must separate entries in the queue index from other entries as - %% that is what is expected from the v1 index. - {RPA, State5} = convert_from_v2_to_v1_map(RPAb, State4), - {DPA, State6} = convert_from_v2_to_v1_map(DPAb, State5), - State6#vqstate{ q1 = Q1, - q2 = Q2, - q3 = Q3, - q4 = Q4, - ram_pending_ack = RPA, - disk_pending_ack = DPA }. - -%% We fetch the message from the per-queue store if necessary -%% and mark all messages as delivered to make the v1 index happy. -convert_from_v2_to_v1_queue(Q, State0) -> - List0 = ?QUEUE:to_list(Q), - {List, State} = lists:mapfoldl(fun (MsgStatus, State1) -> - convert_from_v2_to_v1_msg_status(MsgStatus, State1, true) - end, State0, List0), - {?QUEUE:from_list(List), State}. - -convert_from_v2_to_v1_map(T, State) -> - convert_from_v2_to_v1_map_loop(maps:iterator(T), #{}, State). - -convert_from_v2_to_v1_map_loop(Iterator0, Acc, State0) -> - case maps:next(Iterator0) of - none -> - {Acc, State0}; - {Key, Value0, Iterator} -> - {Value, State} = convert_from_v2_to_v1_msg_status(Value0, State0, false), - convert_from_v2_to_v1_map_loop(Iterator, maps:put(Key, Value, Acc), State) - end. 
- -convert_from_v2_to_v1_msg_status(MsgStatus0, State1 = #vqstate{ store_state = StoreState0, - ram_msg_count = RamMsgCount, - ram_bytes = RamBytes }, Ready) -> - case MsgStatus0 of - #msg_status{ seq_id = SeqId, - msg = undefined, - msg_location = MsgLocation = {rabbit_classic_queue_store_v2, _, _} } -> - {Msg, StoreState} = rabbit_classic_queue_store_v2:read(SeqId, MsgLocation, StoreState0), - MsgStatus = MsgStatus0#msg_status{ msg = Msg, - msg_location = memory, - is_delivered = true, - persist_to = queue_index }, - %% We have read the message into memory. We must also update the stats. - {MsgStatus, State1#vqstate{ store_state = StoreState, - ram_msg_count = RamMsgCount + one_if(Ready), - ram_bytes = RamBytes + msg_size(MsgStatus) }}; - #msg_status{ persist_to = queue_store } -> - {MsgStatus0#msg_status{ is_delivered = true, - persist_to = queue_index }, State1}; - _ -> - {MsgStatus0#msg_status{ is_delivered = true }, State1} - end. - -convert_from_v2_to_v1_loop(_, V1Index, _, V2Store, _, HiSeqId, HiSeqId, _) -> - {V1Index, V2Store}; -convert_from_v2_to_v1_loop(QueueName, V1Index0, V2Index0, V2Store0, - Counters = {CountersRef, CountIx, BytesIx}, - LoSeqId, HiSeqId, SkipFun) -> - UpSeqId = lists:min([rabbit_classic_queue_index_v2:next_segment_boundary(LoSeqId), - HiSeqId]), - {Messages, V2Index1} = rabbit_classic_queue_index_v2:read(LoSeqId, UpSeqId, V2Index0), - {V1Index3, V2Store3} = lists:foldl(fun - %% Read per-queue store messages before writing to the index. - ({_MsgId, SeqId, Location = {rabbit_classic_queue_store_v2, _, _}, Props, IsPersistent}, - {V1Index1, V2Store1}) -> - {Msg, V2Store2} = rabbit_classic_queue_store_v2:read(SeqId, Location, V2Store1), - %% When we are resuming the conversion the messages may have already been written to disk. - %% We do NOT want them written again: this is an error that leads to a corrupted index - %% (because it uses a journal it cannot know whether there's been a double write). - %% We therefore check first if the entry exists and if we need to write it. - V1Index2 = case SkipFun(SeqId, V1Index1) of - {skip, V1Index1a} -> - V1Index1a; - {write, V1Index1a} -> - counters:add(CountersRef, CountIx, 1), - counters:add(CountersRef, BytesIx, Props#message_properties.size), - V1Index1b = rabbit_queue_index:publish(Msg, SeqId, rabbit_queue_index, Props, IsPersistent, infinity, V1Index1a), - rabbit_queue_index:deliver([SeqId], V1Index1b) - end, - {V1Index2, V2Store2}; - %% Keep messages in the per-vhost store where they are. - ({MsgId, SeqId, rabbit_msg_store, Props, IsPersistent}, - {V1Index1, V2Store1}) -> - %% See comment in previous clause. - V1Index2 = case SkipFun(SeqId, V1Index1) of - {skip, V1Index1a} -> - V1Index1a; - {write, V1Index1a} -> - counters:add(CountersRef, CountIx, 1), - counters:add(CountersRef, BytesIx, Props#message_properties.size), - V1Index1b = rabbit_queue_index:publish(MsgId, SeqId, rabbit_msg_store, Props, IsPersistent, infinity, V1Index1a), - rabbit_queue_index:deliver([SeqId], V1Index1b) - end, - {V1Index2, V2Store1} - end, {V1Index0, V2Store0}, Messages), - %% Flush to disk to avoid keeping too much in memory between segments. - V1Index = rabbit_queue_index:flush(V1Index3), - %% We do a garbage collect because the old index may have created a lot of garbage. - garbage_collect(), - %% We have written everything to disk. We can delete the old segment file - %% to free up much needed space, to avoid doubling disk usage during the upgrade. 
- {DeletedSegments, V2Index} = rabbit_classic_queue_index_v2:delete_segment_file_for_seq_id(LoSeqId, V2Index1), - V2Store = rabbit_classic_queue_store_v2:delete_segments(DeletedSegments, V2Store3), - %% Log some progress to keep the user aware of what's going on, as moving - %% embedded messages can take quite some time. - #resource{virtual_host = VHost, name = Name} = QueueName, - rabbit_log:info("Queue ~ts in vhost ~ts converted ~b messages from v2 to v1", - [Name, VHost, length(Messages)]), - convert_from_v2_to_v1_loop(QueueName, V1Index, V2Index, V2Store, Counters, UpSeqId, HiSeqId, SkipFun). - %% Get the Timestamp property of the first msg, if present. This is %% the one with the oldest timestamp among the heads of the pending %% acks and unread queues. We can't check disk_pending_acks as these @@ -1190,7 +903,6 @@ convert_from_v2_to_v1_loop(QueueName, V1Index0, V2Index0, V2Store0, %% regarded as unprocessed until acked, this also prevents the result %% apparently oscillating during repeated rejects. %% -%% @todo OK I think we can do this differently head_message_timestamp(Q3, RPA) -> HeadMsgs = [ HeadMsgStatus#msg_status.msg || HeadMsgStatus <- @@ -1211,6 +923,26 @@ head_message_timestamp(Q3, RPA) -> false -> lists:min(Timestamps) end. +oldest_message_received_timestamp(Q3, RPA) -> + HeadMsgs = [ HeadMsgStatus#msg_status.msg || + HeadMsgStatus <- + [ get_q_head(Q3), + get_pa_head(RPA) ], + HeadMsgStatus /= undefined, + HeadMsgStatus#msg_status.msg /= undefined ], + + Timestamps = + [Timestamp + || HeadMsg <- HeadMsgs, + Timestamp <- [mc:get_annotation(?ANN_RECEIVED_AT_TIMESTAMP, HeadMsg)], + Timestamp /= undefined + ], + + case Timestamps == [] of + true -> ''; + false -> lists:min(Timestamps) + end. + get_q_head(Q) -> ?QUEUE:get(Q, undefined). @@ -1270,7 +1002,7 @@ one_if(false) -> 0. cons_if(true, E, L) -> [E | L]; cons_if(false, _E, L) -> L. -msg_status(Version, IsPersistent, IsDelivered, SeqId, +msg_status(IsPersistent, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize) -> MsgId = mc:get_annotation(id, Msg), #msg_status{seq_id = SeqId, @@ -1282,7 +1014,7 @@ msg_status(Version, IsPersistent, IsDelivered, SeqId, is_delivered = IsDelivered, msg_location = memory, index_on_disk = false, - persist_to = determine_persist_to(Version, Msg, MsgProps, IndexMaxSize), + persist_to = determine_persist_to(Msg, MsgProps, IndexMaxSize), msg_props = MsgProps}. 
beta_msg_status({MsgId, SeqId, MsgLocation, MsgProps, IsPersistent}) @@ -1438,9 +1170,9 @@ expand_delta(_SeqId, #delta { count = Count, %% Internal major helpers for Public API %%---------------------------------------------------------------------------- -init(QueueVsn, IsDurable, IndexMod, IndexState, StoreState, DeltaCount, DeltaBytes, Terms, +init(IsDurable, IndexState, StoreState, DeltaCount, DeltaBytes, Terms, PersistentClient, TransientClient, VHost) -> - {LowSeqId, HiSeqId, IndexState1} = IndexMod:bounds(IndexState), + {LowSeqId, HiSeqId, IndexState1} = rabbit_classic_queue_index_v2:bounds(IndexState), {NextSeqId, NextDeliverSeqId, DeltaCount1, DeltaBytes1} = case Terms of @@ -1478,7 +1210,6 @@ init(QueueVsn, IsDurable, IndexMod, IndexState, StoreState, DeltaCount, DeltaByt next_deliver_seq_id = NextDeliverSeqId, ram_pending_ack = #{}, disk_pending_ack = #{}, - index_mod = IndexMod, index_state = IndexState1, store_state = StoreState, msg_store_clients = {PersistentClient, TransientClient}, @@ -1514,7 +1245,6 @@ init(QueueVsn, IsDurable, IndexMod, IndexState, StoreState, DeltaCount, DeltaByt io_batch_size = IoBatchSize, mode = default, - version = QueueVsn, virtual_host = VHost}, a(maybe_deltas_to_betas(State)). @@ -1707,13 +1437,12 @@ remove_from_disk(#msg_status { is_persistent = IsPersistent, msg_location = MsgLocation, index_on_disk = IndexOnDisk }, - State = #vqstate {index_mod = IndexMod, - index_state = IndexState1, + State = #vqstate {index_state = IndexState1, store_state = StoreState0, msg_store_clients = MSCState}) -> {DeletedSegments, IndexState2} = case IndexOnDisk of - true -> IndexMod:ack([SeqId], IndexState1); + true -> rabbit_classic_queue_index_v2:ack([SeqId], IndexState1); false -> {[], IndexState1} end, {StoreState1, State1} = case MsgLocation of @@ -1864,11 +1593,10 @@ purge_and_index_reset(State) -> purge1(AfterFun, State) -> a(purge_betas_and_deltas(AfterFun, State)). -reset_qi_state(State = #vqstate{ index_mod = IndexMod, - index_state = IndexState0, +reset_qi_state(State = #vqstate{ index_state = IndexState0, store_state = StoreState0 }) -> StoreState = rabbit_classic_queue_store_v2:terminate(StoreState0), - IndexState = IndexMod:reset_state(IndexState0), + IndexState = rabbit_classic_queue_index_v2:reset_state(IndexState0), State#vqstate{ index_state = IndexState, store_state = StoreState }. @@ -1920,12 +1648,9 @@ remove_queue_entries1( process_delivers_and_acks_fun(deliver_and_ack) -> %% @todo Make a clause for empty Acks list? - fun (NextDeliverSeqId, Acks, State = #vqstate { index_mod = IndexMod, - index_state = IndexState, + fun (NextDeliverSeqId, Acks, State = #vqstate { index_state = IndexState, store_state = StoreState0}) -> - %% We do not send delivers to the v1 index because - %% we've already done so when publishing. 
- {DeletedSegments, IndexState1} = IndexMod:ack(Acks, IndexState), + {DeletedSegments, IndexState1} = rabbit_classic_queue_index_v2:ack(Acks, IndexState), StoreState = rabbit_classic_queue_store_v2:delete_segments(DeletedSegments, StoreState0), @@ -1947,10 +1672,9 @@ process_delivers_and_acks_fun(_) -> publish1(Msg, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, - IsDelivered, _ChPid, _Flow, PersistFun, + IsDelivered, _ChPid, PersistFun, State = #vqstate { q3 = Q3, delta = Delta = #delta { count = DeltaCount }, len = Len, - version = Version, qi_embed_msgs_below = IndexMaxSize, next_seq_id = SeqId, next_deliver_seq_id = NextDeliverSeqId, @@ -1962,7 +1686,7 @@ publish1(Msg, MsgId = mc:get_annotation(id, Msg), IsPersistent = mc:is_persistent(Msg), IsPersistent1 = IsDurable andalso IsPersistent, - MsgStatus = msg_status(Version, IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize), + MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize), %% We allow from 1 to 2048 messages in memory depending on the consume rate. The lower %% limit is at 1 because the queue process will need to access this message to know %% expiration information. @@ -1980,7 +1704,7 @@ publish1(Msg, stats_published_disk(MsgStatus1, State2) end, {UC1, UCS1} = maybe_needs_confirming(NeedsConfirming, persist_to(MsgStatus), - Version, MsgId, UC, UCS), + MsgId, UC, UCS), State3#vqstate{ next_seq_id = SeqId + 1, next_deliver_seq_id = maybe_next_deliver_seq_id(SeqId, NextDeliverSeqId, IsDelivered), in_counter = InCount + 1, @@ -1993,16 +1717,11 @@ maybe_next_deliver_seq_id(SeqId, NextDeliverSeqId, true) -> maybe_next_deliver_seq_id(_, NextDeliverSeqId, false) -> NextDeliverSeqId. -batch_publish1({Msg, MsgProps, IsDelivered}, {ChPid, Flow, State}) -> - {ChPid, Flow, publish1(Msg, MsgProps, IsDelivered, ChPid, Flow, - fun maybe_prepare_write_to_disk/4, State)}. - publish_delivered1(Msg, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, - _ChPid, _Flow, PersistFun, - State = #vqstate { version = Version, - qi_embed_msgs_below = IndexMaxSize, + _ChPid, PersistFun, + State = #vqstate { qi_embed_msgs_below = IndexMaxSize, next_seq_id = SeqId, next_deliver_seq_id = NextDeliverSeqId, in_counter = InCount, @@ -2013,11 +1732,11 @@ publish_delivered1(Msg, MsgId = mc:get_annotation(id, Msg), IsPersistent = mc:is_persistent(Msg), IsPersistent1 = IsDurable andalso IsPersistent, - MsgStatus = msg_status(Version, IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize), + MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize), {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State), State2 = record_pending_ack(m(MsgStatus1), State1), {UC1, UCS1} = maybe_needs_confirming(NeedsConfirming, persist_to(MsgStatus), - Version, MsgId, UC, UCS), + MsgId, UC, UCS), {SeqId, stats_published_pending_acks(MsgStatus1, State2#vqstate{ next_seq_id = SeqId + 1, @@ -2027,23 +1746,16 @@ publish_delivered1(Msg, unconfirmed = UC1, unconfirmed_simple = UCS1 })}. -maybe_needs_confirming(false, _, _, _, UC, UCS) -> +maybe_needs_confirming(false, _, _, UC, UCS) -> {UC, UCS}; %% When storing to the v2 queue store we take the simple confirms %% path because we don't need to track index and store separately. -maybe_needs_confirming(true, queue_store, 2, MsgId, UC, UCS) -> +maybe_needs_confirming(true, queue_store, MsgId, UC, UCS) -> {UC, sets:add_element(MsgId, UCS)}; %% Otherwise we keep tracking as it used to be. 
-maybe_needs_confirming(true, _, _, MsgId, UC, UCS) -> +maybe_needs_confirming(true, _, MsgId, UC, UCS) -> {sets:add_element(MsgId, UC), UCS}. -batch_publish_delivered1({Msg, MsgProps}, {ChPid, Flow, SeqIds, State}) -> - {SeqId, State1} = - publish_delivered1(Msg, MsgProps, ChPid, Flow, - fun maybe_prepare_write_to_disk/4, - State), - {ChPid, Flow, [SeqId | SeqIds], State1}. - maybe_write_msg_to_disk(Force, MsgStatus = #msg_status { seq_id = SeqId, msg = Msg, msg_id = MsgId, @@ -2089,7 +1801,6 @@ maybe_batch_write_index_to_disk(Force, State = #vqstate { target_ram_count = TargetRamCount, disk_write_count = DiskWriteCount, - index_mod = IndexMod, index_state = IndexState}) when Force orelse IsPersistent -> {MsgOrId, DiskWriteCount1} = @@ -2098,19 +1809,9 @@ maybe_batch_write_index_to_disk(Force, queue_store -> {MsgId, DiskWriteCount}; queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1} end, - IndexState1 = case IndexMod of - %% The old index needs IsDelivered to apply some of its optimisations. - %% But because the deliver tracking is now in the queue we always pass 'true'. - %% It also does not need the location so it is not given here. - rabbit_queue_index -> - IndexMod:pre_publish( - MsgOrId, SeqId, MsgProps, IsPersistent, true, - TargetRamCount, IndexState); - _ -> - IndexMod:pre_publish( - MsgOrId, SeqId, MsgLocation, MsgProps, IsPersistent, - TargetRamCount, IndexState) - end, + IndexState1 = rabbit_classic_queue_index_v2:pre_publish( + MsgOrId, SeqId, MsgLocation, MsgProps, + IsPersistent, TargetRamCount, IndexState), {MsgStatus#msg_status{index_on_disk = true}, State#vqstate{index_state = IndexState1, disk_write_count = DiskWriteCount1}}; @@ -2129,7 +1830,6 @@ maybe_write_index_to_disk(Force, MsgStatus = #msg_status { msg_props = MsgProps}, State = #vqstate{target_ram_count = TargetRamCount, disk_write_count = DiskWriteCount, - index_mod = IndexMod, index_state = IndexState}) when Force orelse IsPersistent -> {MsgOrId, DiskWriteCount1} = @@ -2138,21 +1838,12 @@ maybe_write_index_to_disk(Force, MsgStatus = #msg_status { queue_store -> {MsgId, DiskWriteCount}; queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1} end, - IndexState2 = IndexMod:publish( + IndexState2 = rabbit_classic_queue_index_v2:publish( MsgOrId, SeqId, MsgLocation, MsgProps, IsPersistent, persist_to(MsgStatus) =:= msg_store, TargetRamCount, IndexState), - %% We always deliver messages when the old index is used. - %% We are actually tracking message deliveries per-queue - %% but the old index expects delivers to be handled - %% per-message. Always delivering on publish prevents - %% issues related to delivers. - IndexState3 = case IndexMod of - rabbit_queue_index -> IndexMod:deliver([SeqId], IndexState2); - _ -> IndexState2 - end, {MsgStatus#msg_status{index_on_disk = true}, - State#vqstate{index_state = IndexState3, + State#vqstate{index_state = IndexState2, disk_write_count = DiskWriteCount1}}; maybe_write_index_to_disk(_Force, MsgStatus, State) -> @@ -2162,20 +1853,19 @@ maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) -> {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State), maybe_write_index_to_disk(ForceIndex, MsgStatus1, State1). 
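With the queue version argument dropped, maybe_needs_confirming/5 now keys only on where the message is persisted: payloads going to the per-queue v2 store use the simple confirm set, everything else keeps the split index/message-store tracking. A compact restatement of that decision, as a sketch rather than the module's actual shape:

    %% Sketch: which confirm-tracking set a published message lands in.
    confirm_set(false, _PersistTo)     -> none;        %% no confirm requested by the publisher
    confirm_set(true,  queue_store)    -> simple_set;  %% v2 store: index and payload confirm together
    confirm_set(true,  _OtherLocation) -> full_set.    %% msg_store and index confirm separately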
-maybe_prepare_write_to_disk(ForceMsg, ForceIndex0, MsgStatus, State = #vqstate{ version = Version }) -> +maybe_prepare_write_to_disk(ForceMsg, ForceIndex0, MsgStatus, State) -> {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State), %% We want messages written to the v2 per-queue store to also %% be written to the index for proper accounting. The situation %% where a message can be in the store but not in the index can %% only occur when going through this function (not via maybe_write_to_disk). - ForceIndex = case {Version, persist_to(MsgStatus)} of - {2, queue_store} -> true; + ForceIndex = case persist_to(MsgStatus) of + queue_store -> true; _ -> ForceIndex0 end, maybe_batch_write_index_to_disk(ForceIndex, MsgStatus1, State1). -determine_persist_to(Version, - Msg, +determine_persist_to(Msg, #message_properties{size = BodySize}, IndexMaxSize) -> %% The >= is so that you can set the env to 0 and never persist @@ -2198,9 +1888,8 @@ determine_persist_to(Version, false -> Est = MetaSize + BodySize, case Est >= IndexMaxSize of - true -> msg_store; - false when Version =:= 1 -> queue_index; - false when Version =:= 2 -> queue_store + true -> msg_store; + false -> queue_store end end. @@ -2257,14 +1946,13 @@ remove_pending_ack(false, SeqId, State = #vqstate{ram_pending_ack = RPA, end. purge_pending_ack(KeepPersistent, - State = #vqstate { index_mod = IndexMod, - index_state = IndexState, + State = #vqstate { index_state = IndexState, store_state = StoreState0 }) -> {IndexOnDiskSeqIds, MsgIdsByStore, SeqIdsInStore, State1} = purge_pending_ack1(State), case KeepPersistent of true -> remove_transient_msgs_by_id(MsgIdsByStore, State1); false -> {DeletedSegments, IndexState1} = - IndexMod:ack(IndexOnDiskSeqIds, IndexState), + rabbit_classic_queue_index_v2:ack(IndexOnDiskSeqIds, IndexState), StoreState1 = lists:foldl(fun rabbit_classic_queue_store_v2:remove/2, StoreState0, SeqIdsInStore), StoreState = rabbit_classic_queue_store_v2:delete_segments(DeletedSegments, StoreState1), State2 = remove_vhost_msgs_by_id(MsgIdsByStore, State1), @@ -2273,12 +1961,11 @@ purge_pending_ack(KeepPersistent, end. purge_pending_ack_delete_and_terminate( - State = #vqstate { index_mod = IndexMod, - index_state = IndexState, + State = #vqstate { index_state = IndexState, store_state = StoreState }) -> {_, MsgIdsByStore, _SeqIdsInStore, State1} = purge_pending_ack1(State), StoreState1 = rabbit_classic_queue_store_v2:terminate(StoreState), - IndexState1 = IndexMod:delete_and_terminate(IndexState), + IndexState1 = rabbit_classic_queue_index_v2:delete_and_terminate(IndexState), State2 = remove_vhost_msgs_by_id(MsgIdsByStore, State1), State2 #vqstate { index_state = IndexState1, store_state = StoreState1 }. @@ -2484,8 +2171,8 @@ next({delta, #delta{start_seq_id = SeqId, end_seq_id = SeqId}, State}, IndexState) -> next(istate(delta, State), IndexState); next({delta, #delta{start_seq_id = SeqId, - end_seq_id = SeqIdEnd} = Delta, State = #vqstate{index_mod = IndexMod}}, IndexState) -> - SeqIdB = IndexMod:next_segment_boundary(SeqId), + end_seq_id = SeqIdEnd} = Delta, State}, IndexState) -> + SeqIdB = rabbit_classic_queue_index_v2:next_segment_boundary(SeqId), %% It may make sense to limit this based on rate. But this %% is not called outside of CMQs so I will leave it alone %% for the time being. @@ -2494,15 +2181,9 @@ next({delta, #delta{start_seq_id = SeqId, %% otherwise the queue will attempt to read up to segment_entry_count() %% messages from the index each time. The value %% chosen here is arbitrary. 
- %% @todo We have a problem where reduce_memory_usage puts messages back to 0, - %% and then this or the maybe_deltas_to_betas function is called and it - %% fetches 2048 messages again. This is not good. Maybe the reduce_memory_usage - %% function should reduce the number of messages we fetch at once at the - %% same time (start at 2048, divide by 2 every time we reduce, or something). - %% Maybe expiration does that? SeqId + 2048, SeqIdEnd]), - {List, IndexState1} = IndexMod:read(SeqId, SeqId1, IndexState), + {List, IndexState1} = rabbit_classic_queue_index_v2:read(SeqId, SeqId1, IndexState), next({delta, Delta#delta{start_seq_id = SeqId1}, List, State}, IndexState1); next({delta, Delta, [], State}, IndexState) -> next({delta, Delta, State}, IndexState); @@ -2584,7 +2265,6 @@ maybe_deltas_to_betas(DelsAndAcksFun, State = #vqstate { delta = Delta, q3 = Q3, - index_mod = IndexMod, index_state = IndexState, store_state = StoreState, msg_store_clients = {MCStateP, MCStateT}, @@ -2592,30 +2272,20 @@ maybe_deltas_to_betas(DelsAndAcksFun, ram_bytes = RamBytes, disk_read_count = DiskReadCount, delta_transient_bytes = DeltaTransientBytes, - transient_threshold = TransientThreshold, - version = Version }, + transient_threshold = TransientThreshold }, MemoryLimit, WhatToRead) -> #delta { start_seq_id = DeltaSeqId, count = DeltaCount, transient = Transient, end_seq_id = DeltaSeqIdEnd } = Delta, - %% For v1 we always want to read messages up to the next segment boundary. - %% This is because v1 is not optimised for multiple reads from the same - %% segment: every time we read messages from a segment it has to read - %% and parse the entire segment from disk, filtering the messages we - %% requested afterwards. - %% %% For v2 we want to limit the number of messages read at once to lower %% the memory footprint. We use the consume rate to determine how many %% messages we read. - DeltaSeqLimit = case Version of - 1 -> DeltaSeqIdEnd; - 2 -> DeltaSeqId + MemoryLimit - end, + DeltaSeqLimit = DeltaSeqId + MemoryLimit, DeltaSeqId1 = - lists:min([IndexMod:next_segment_boundary(DeltaSeqId), + lists:min([rabbit_classic_queue_index_v2:next_segment_boundary(DeltaSeqId), DeltaSeqLimit, DeltaSeqIdEnd]), - {List0, IndexState1} = IndexMod:read(DeltaSeqId, DeltaSeqId1, IndexState), + {List0, IndexState1} = rabbit_classic_queue_index_v2:read(DeltaSeqId, DeltaSeqId1, IndexState), {List, StoreState3, MCStateP3, MCStateT3} = case WhatToRead of messages -> %% We try to read messages from disk all at once instead of @@ -2743,10 +2413,9 @@ merge_sh_read_msgs(MTail, _Reads) -> MTail. %% Flushes queue index batch caches and updates queue index state. -ui(#vqstate{index_mod = IndexMod, - index_state = IndexState, +ui(#vqstate{index_state = IndexState, target_ram_count = TargetRamCount} = State) -> - IndexState1 = IndexMod:flush_pre_publish_cache( + IndexState1 = rabbit_classic_queue_index_v2:flush_pre_publish_cache( TargetRamCount, IndexState), State#vqstate{index_state = IndexState1}. diff --git a/deps/rabbit/src/rabbit_version.erl b/deps/rabbit/src/rabbit_version.erl index 51bc3948b30c..0dcbde42c497 100644 --- a/deps/rabbit/src/rabbit_version.erl +++ b/deps/rabbit/src/rabbit_version.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -module(rabbit_version). diff --git a/deps/rabbit/src/rabbit_vhost.erl b/deps/rabbit/src/rabbit_vhost.erl index b7ad5f2bbb80..0f3da8fdd14c 100644 --- a/deps/rabbit/src/rabbit_vhost.erl +++ b/deps/rabbit/src/rabbit_vhost.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_vhost). --include_lib("kernel/include/logger.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include("vhost.hrl"). @@ -19,11 +18,11 @@ -export([update_metadata/3]). -export([lookup/1, default_name/0]). -export([info/1, info/2, info_all/0, info_all/1, info_all/2, info_all/3]). --export([dir/1, msg_store_dir_path/1, msg_store_dir_wildcard/0, config_file_path/1, ensure_config_file/1]). +-export([dir/1, msg_store_dir_path/1, msg_store_dir_wildcard/0, msg_store_dir_base/0, config_file_path/1, ensure_config_file/1]). -export([delete_storage/1]). -export([vhost_down/1]). --export([put_vhost/5, - put_vhost/6]). +-export([put_vhost/6]). +-export([default_queue_type/1, default_queue_type/2]). %% %% API @@ -37,8 +36,6 @@ recover() -> %% faster than other nodes handled DOWN messages from us. rabbit_amqqueue:on_node_down(node()), - rabbit_amqqueue:warn_file_limit(), - %% Prepare rabbit_semi_durable_route table {Time, _} = timer:tc(fun() -> rabbit_binding:recover() @@ -69,8 +66,6 @@ recover(VHost) -> rabbit_log:debug("rabbit_binding:recover/2 for vhost ~ts completed in ~fs", [VHost, Time/1000000]), ok = rabbit_amqqueue:start(Recovered), - %% Start queue mirrors. - ok = rabbit_mirror_queue_misc:on_vhost_up(VHost), ok. 
ensure_config_file(VHost) -> @@ -171,6 +166,7 @@ do_add(Name, Metadata, ActingUser) -> case Metadata of #{default_queue_type := DQT} -> %% check that the queue type is known + rabbit_log:debug("Default queue type of virtual host '~ts' is ~tp", [Name, DQT]), try rabbit_queue_type:discover(DQT) of _ -> case rabbit_queue_type:feature_flag_name(DQT) of @@ -182,7 +178,7 @@ do_add(Name, Metadata, ActingUser) -> end end catch _:_ -> - throw({error, invalid_queue_type}) + throw({error, invalid_queue_type, DQT}) end; _ -> ok @@ -200,38 +196,62 @@ do_add(Name, Metadata, ActingUser) -> {NewOrNot, VHost} = rabbit_db_vhost:create_or_get(Name, DefaultLimits, Metadata), case NewOrNot of new -> - rabbit_log:info("Inserted a virtual host record ~tp", [VHost]); + rabbit_log:debug("Inserted a virtual host record ~tp", [VHost]); existing -> ok end, rabbit_db_vhost_defaults:apply(Name, ActingUser), - _ = [begin - Resource = rabbit_misc:r(Name, exchange, ExchangeName), - rabbit_log:debug("Will declare an exchange ~tp", [Resource]), - _ = rabbit_exchange:declare(Resource, Type, true, false, Internal, [], ActingUser) - end || {ExchangeName, Type, Internal} <- - [{<<"">>, direct, false}, - {<<"amq.direct">>, direct, false}, - {<<"amq.topic">>, topic, false}, - %% per 0-9-1 pdf - {<<"amq.match">>, headers, false}, - %% per 0-9-1 xml - {<<"amq.headers">>, headers, false}, - {<<"amq.fanout">>, fanout, false}, - {<<"amq.rabbitmq.trace">>, topic, true}]], - case rabbit_vhost_sup_sup:start_on_all_nodes(Name) of + case declare_default_exchanges(Name, ActingUser) of ok -> - rabbit_event:notify(vhost_created, info(VHost) - ++ [{user_who_performed_action, ActingUser}, - {description, Description}, - {tags, Tags}]), - ok; - {error, Reason} -> - Msg = rabbit_misc:format("failed to set up vhost '~ts': ~tp", - [Name, Reason]), + case rabbit_vhost_sup_sup:start_on_all_nodes(Name) of + ok -> + rabbit_event:notify(vhost_created, info(VHost) + ++ [{user_who_performed_action, ActingUser}, + {description, Description}, + {tags, Tags}]), + ok; + {error, Reason} -> + Msg = rabbit_misc:format("failed to set up vhost '~ts': ~tp", + [Name, Reason]), + {error, Msg} + end; + {error, timeout} -> + Msg = rabbit_misc:format( + "failed to set up vhost '~ts' because a timeout occurred " + "while adding default exchanges", + [Name]), {error, Msg} end. +-spec declare_default_exchanges(VHostName, ActingUser) -> Ret when + VHostName :: vhost:name(), + ActingUser :: rabbit_types:username(), + Ret :: ok | {error, timeout}. + +declare_default_exchanges(VHostName, ActingUser) -> + DefaultExchanges = [{<<"">>, direct, false}, + {<<"amq.direct">>, direct, false}, + {<<"amq.topic">>, topic, false}, + %% per 0-9-1 pdf + {<<"amq.match">>, headers, false}, + %% per 0-9-1 xml + {<<"amq.headers">>, headers, false}, + {<<"amq.fanout">>, fanout, false}, + {<<"amq.rabbitmq.trace">>, topic, true}], + rabbit_misc:for_each_while_ok( + fun({ExchangeName, Type, Internal}) -> + Resource = rabbit_misc:r(VHostName, exchange, ExchangeName), + rabbit_log:debug("Will declare an exchange ~tp", [Resource]), + case rabbit_exchange:declare( + Resource, Type, true, false, Internal, [], + ActingUser) of + {ok, _} -> + ok; + {error, timeout} = Err -> + Err + end + end, DefaultExchanges). + -spec update_metadata(vhost:name(), vhost:metadata(), rabbit_types:username()) -> rabbit_types:ok_or_error(any()). 
update_metadata(Name, Metadata0, ActingUser) -> Metadata = maps:with([description, tags, default_queue_type], Metadata0), @@ -254,11 +274,10 @@ update_metadata(Name, Metadata0, ActingUser) -> -spec update(vhost:name(), binary(), [atom()], rabbit_queue_type:queue_type() | 'undefined', rabbit_types:username()) -> rabbit_types:ok_or_error(any()). update(Name, Description, Tags, DefaultQueueType, ActingUser) -> - Metadata = #{description => Description, tags => Tags, default_queue_type => DefaultQueueType}, + Metadata = vhost:new_metadata(Description, Tags, DefaultQueueType), update_metadata(Name, Metadata, ActingUser). -spec delete(vhost:name(), rabbit_types:username()) -> rabbit_types:ok_or_error(any()). - delete(VHost, ActingUser) -> %% FIXME: We are forced to delete the queues and exchanges outside %% the TX below. Queue deletion involves sending messages to the queue @@ -271,45 +290,40 @@ delete(VHost, ActingUser) -> %% modules, like `rabbit_amqqueue:delete_all_for_vhost(VHost)'. These new %% calls would be responsible for the atomicity, not this code. %% Clear the permissions first to prohibit new incoming connections when deleting a vhost - _ = rabbit_auth_backend_internal:clear_permissions_for_vhost(VHost, ActingUser), - _ = rabbit_auth_backend_internal:clear_topic_permissions_for_vhost(VHost, ActingUser), + rabbit_log:info("Clearing permissions in vhost '~ts' because it's being deleted", [VHost]), + ok = rabbit_auth_backend_internal:clear_all_permissions_for_vhost(VHost, ActingUser), + rabbit_log:info("Deleting queues in vhost '~ts' because it's being deleted", [VHost]), QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end, [begin Name = amqqueue:get_name(Q), assert_benign(rabbit_amqqueue:with(Name, QDelFun), ActingUser) end || Q <- rabbit_amqqueue:list(VHost)], - [assert_benign(rabbit_exchange:delete(Name, false, ActingUser), ActingUser) || + rabbit_log:info("Deleting exchanges in vhost '~ts' because it's being deleted", [VHost]), + [ok = rabbit_exchange:ensure_deleted(Name, false, ActingUser) || #exchange{name = Name} <- rabbit_exchange:list(VHost)], + rabbit_log:info("Clearing policies and runtime parameters in vhost '~ts' because it's being deleted", [VHost]), _ = rabbit_runtime_parameters:clear_vhost(VHost, ActingUser), - _ = [rabbit_policy:delete(VHost, proplists:get_value(name, Info), ActingUser) - || Info <- rabbit_policy:list(VHost)], - case rabbit_db_vhost:delete(VHost) of - true -> - ok = rabbit_event:notify( - vhost_deleted, - [{name, VHost}, - {user_who_performed_action, ActingUser}]); - false -> - ok - end, + rabbit_log:debug("Removing vhost '~ts' from the metadata storage because it's being deleted", [VHost]), + Ret = case rabbit_db_vhost:delete(VHost) of + true -> + ok = rabbit_event:notify( + vhost_deleted, + [{name, VHost}, + {user_who_performed_action, ActingUser}]); + false -> + {error, {no_such_vhost, VHost}}; + {error, _} = Err -> + Err + end, %% After vhost was deleted from the database, we try to stop vhost %% supervisors on all the nodes. rabbit_vhost_sup_sup:delete_on_all_nodes(VHost), - ok. - --spec put_vhost(vhost:name(), - binary(), - vhost:tags(), - boolean(), - rabbit_types:username()) -> - 'ok' | {'error', any()} | {'EXIT', any()}. -put_vhost(Name, Description, Tags0, Trace, Username) -> - put_vhost(Name, Description, Tags0, undefined, Trace, Username). + Ret. 
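Editor's note on the declare_default_exchanges/2 refactor above: it relies on rabbit_misc:for_each_while_ok/2 to stop at the first {error, timeout} returned by rabbit_exchange:declare/7 instead of iterating unconditionally. That helper is not part of this hunk; the sketch below only illustrates the assumed short-circuiting semantics and is not the actual rabbit_misc implementation.

%% Illustrative sketch only; assumed semantics of rabbit_misc:for_each_while_ok/2:
%% apply Fun to every element in order, returning the first error tuple,
%% or 'ok' if every call returned 'ok'.
for_each_while_ok(_Fun, []) ->
    ok;
for_each_while_ok(Fun, [Elem | Rest]) ->
    case Fun(Elem) of
        ok               -> for_each_while_ok(Fun, Rest);
        {error, _} = Err -> Err
    end.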
-spec put_vhost(vhost:name(), binary(), vhost:unparsed_tags() | vhost:tags(), - rabbit_queue_type:queue_type() | 'undefined', + rabbit_queue_type:queue_type() | 'undefined' | binary(), boolean(), rabbit_types:username()) -> 'ok' | {'error', any()} | {'EXIT', any()}. @@ -322,21 +336,13 @@ put_vhost(Name, Description, Tags0, DefaultQueueType, Trace, Username) -> Other -> Other end, ParsedTags = parse_tags(Tags), - rabbit_log:debug("Parsed tags ~tp to ~tp", [Tags, ParsedTags]), + rabbit_log:debug("Parsed virtual host tags ~tp to ~tp", [Tags, ParsedTags]), Result = case exists(Name) of true -> update(Name, Description, ParsedTags, DefaultQueueType, Username); false -> - Metadata0 = #{description => Description, - tags => ParsedTags}, - Metadata = case DefaultQueueType of - undefined -> - Metadata0; - _ -> - Metadata0#{default_queue_type => - DefaultQueueType} - end, - case add(Name, Metadata, Username) of + Metadata = vhost:new_metadata(Description, ParsedTags, DefaultQueueType), + case catch do_add(Name, Metadata, Username) of ok -> %% wait for up to 45 seconds for the vhost to initialise %% on all nodes @@ -374,6 +380,7 @@ is_over_vhost_limit(Name, Limit) when is_integer(Limit) -> ErrorMsg = rabbit_misc:format("cannot create vhost '~ts': " "vhost limit of ~tp is reached", [Name, Limit]), + rabbit_log:error(ErrorMsg), exit({vhost_limit_exceeded, ErrorMsg}) end. @@ -432,6 +439,7 @@ vhost_cluster_state(VHost) -> Nodes). vhost_down(VHost) -> + rabbit_log:info("Virtual host '~ts' is stopping", [VHost]), ok = rabbit_event:notify(vhost_down, [{name, VHost}, {node, node()}, @@ -499,6 +507,22 @@ default_name() -> undefined -> <<"/">> end. +-spec default_queue_type(VirtualHost :: vhost:name()) -> rabbit_queue_type:queue_type(). +default_queue_type(VirtualHost) -> + default_queue_type(VirtualHost, rabbit_queue_type:fallback()). +-spec default_queue_type(VirtualHost :: vhost:name(), Fallback :: rabbit_queue_type:queue_type()) -> rabbit_queue_type:queue_type(). +default_queue_type(VirtualHost, FallbackQueueType) -> + case exists(VirtualHost) of + false -> FallbackQueueType; + true -> + Record = lookup(VirtualHost), + case vhost:get_default_queue_type(Record) of + undefined -> FallbackQueueType; + <<"undefined">> -> FallbackQueueType; + Type -> Type + end +end. + -spec lookup(vhost:name()) -> vhost:vhost() | rabbit_types:ok_or_error(any()). lookup(VHostName) -> case rabbit_db_vhost:get(VHostName) of diff --git a/deps/rabbit/src/rabbit_vhost_limit.erl b/deps/rabbit/src/rabbit_vhost_limit.erl index e28accf6a21c..9429de2f9018 100644 --- a/deps/rabbit/src/rabbit_vhost_limit.erl +++ b/deps/rabbit/src/rabbit_vhost_limit.erl @@ -2,15 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_vhost_limit). -behaviour(rabbit_runtime_parameter). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([register/0]). -export([parse_set/3, set/3, clear/2]). -export([list/0, list/1]). 
@@ -39,12 +37,14 @@ validate(_VHost, <<"vhost-limits">>, Name, Term, _User) -> notify(VHost, <<"vhost-limits">>, <<"limits">>, Limits, ActingUser) -> rabbit_event:notify(vhost_limits_set, [{name, <<"limits">>}, + {vhost, VHost}, {user_who_performed_action, ActingUser} | Limits]), update_vhost(VHost, Limits). notify_clear(VHost, <<"vhost-limits">>, <<"limits">>, ActingUser) -> rabbit_event:notify(vhost_limits_cleared, [{name, <<"limits">>}, + {vhost, VHost}, {user_who_performed_action, ActingUser}]), %% If the function is called as a part of vhost deletion, the vhost can %% be already deleted. @@ -84,7 +84,7 @@ list(VHost) -> -spec is_over_connection_limit(vhost:name()) -> {true, non_neg_integer()} | false. is_over_connection_limit(VirtualHost) -> - case rabbit_vhost_limit:connection_limit(VirtualHost) of + case connection_limit(VirtualHost) of %% no limit configured undefined -> false; %% with limit = 0, no connections are allowed diff --git a/deps/rabbit/src/rabbit_vhost_msg_store.erl b/deps/rabbit/src/rabbit_vhost_msg_store.erl index 0cc0b4c56b6b..d56743d4802a 100644 --- a/deps/rabbit/src/rabbit_vhost_msg_store.erl +++ b/deps/rabbit/src/rabbit_vhost_msg_store.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_vhost_msg_store). diff --git a/deps/rabbit/src/rabbit_vhost_process.erl b/deps/rabbit/src/rabbit_vhost_process.erl index 9e6419491d08..730425259b88 100644 --- a/deps/rabbit/src/rabbit_vhost_process.erl +++ b/deps/rabbit/src/rabbit_vhost_process.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module implements a vhost identity process. @@ -21,8 +21,6 @@ -module(rabbit_vhost_process). --include_lib("rabbit_common/include/rabbit.hrl"). - -define(VHOST_CHECK_INTERVAL, 5000). -behaviour(gen_server2). diff --git a/deps/rabbit/src/rabbit_vhost_sup.erl b/deps/rabbit/src/rabbit_vhost_sup.erl index a9843aea14e2..9fcf79bffae3 100644 --- a/deps/rabbit/src/rabbit_vhost_sup.erl +++ b/deps/rabbit/src/rabbit_vhost_sup.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_vhost_sup). --include_lib("rabbit_common/include/rabbit.hrl"). - %% Each vhost gets an instance of this supervisor that supervises %% message stores and queues (via rabbit_amqqueue_sup_sup). -behaviour(supervisor). diff --git a/deps/rabbit/src/rabbit_vhost_sup_sup.erl b/deps/rabbit/src/rabbit_vhost_sup_sup.erl index 706004efc824..2b9bf8e7be67 100644 --- a/deps/rabbit/src/rabbit_vhost_sup_sup.erl +++ b/deps/rabbit/src/rabbit_vhost_sup_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_vhost_sup_sup). @@ -22,7 +22,7 @@ save_vhost_recovery_terms/2, lookup_vhost_sup_record/1, lookup_vhost_recovery_terms/1]). --export([delete_on_all_nodes/1, start_on_all_nodes/1]). +-export([delete_on_all_nodes/1, start_on_all_nodes/1, start_on_all_nodes/2]). -export([is_vhost_alive/1]). -export([check/0]). @@ -54,16 +54,19 @@ init([]) -> [rabbit_vhost_sup_wrapper, rabbit_vhost_sup]}]}}. start_on_all_nodes(VHost) -> - %% Do not try to start a vhost on booting peer nodes - AllBooted = [Node || Node <- rabbit_nodes:list_running()], + %% By default select only fully booted peers + AllBooted = rabbit_nodes:list_running(), Nodes = [node() | AllBooted], + start_on_all_nodes(VHost, Nodes). + +start_on_all_nodes(VHost, Nodes) -> Results = [{Node, start_vhost(VHost, Node)} || Node <- Nodes], Failures = lists:filter(fun - ({_, {ok, _}}) -> false; - ({_, {error, {already_started, _}}}) -> false; - (_) -> true - end, - Results), + ({_, {ok, _}}) -> false; + ({_, {error, {already_started, _}}}) -> false; + (_) -> true + end, + Results), case Failures of [] -> ok; Errors -> {error, {failed_to_start_vhost_on_nodes, Errors}} @@ -75,11 +78,17 @@ delete_on_all_nodes(VHost) -> stop_and_delete_vhost(VHost) -> StopResult = case lookup_vhost_sup_record(VHost) of - not_found -> ok; + not_found -> + rabbit_log:warning("Supervisor for vhost '~ts' not found during deletion procedure", + [VHost]), + ok; #vhost_sup{wrapper_pid = WrapperPid, vhost_sup_pid = VHostSupPid} -> case is_process_alive(WrapperPid) of - false -> ok; + false -> + rabbit_log:info("Supervisor ~tp for vhost '~ts' already stopped", + [VHostSupPid, VHost]), + ok; true -> rabbit_log:info("Stopping vhost supervisor ~tp" " for vhost '~ts'", @@ -278,7 +287,7 @@ check() -> VHosts = rabbit_vhost:list_names(), lists:filter( fun(V) -> - case rabbit_vhost_sup_sup:get_vhost_sup(V) of + case get_vhost_sup(V) of {ok, Sup} -> MsgStores = [Pid || {Name, Pid, _, _} <- supervisor:which_children(Sup), lists:member(Name, [msg_store_persistent, diff --git a/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl b/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl index e35ea6813d16..6248dd677802 100644 --- a/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl +++ b/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module is a wrapper around vhost supervisor to diff --git a/deps/rabbit/src/rabbit_vhosts.erl b/deps/rabbit/src/rabbit_vhosts.erl new file mode 100644 index 000000000000..641522d0fc95 --- /dev/null +++ b/deps/rabbit/src/rabbit_vhosts.erl @@ -0,0 +1,166 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
+%%
+
+%% This module exists to avoid circular module dependencies between
+%% several other virtual host-related modules.
+-module(rabbit_vhosts).
+
+-define(PERSISTENT_TERM_COUNTER_KEY, rabbit_vhosts_reconciliation_run_counter).
+
+%% API
+
+-export([
+    list_names/0,
+    exists/1,
+    boot/0,
+    reconcile/0,
+    reconcile_once/0,
+    is_reconciliation_enabled/0,
+    disable_reconciliation/0,
+    enable_reconciliation/0,
+    start_processes_for_all/0,
+    start_on_all_nodes/2,
+    on_node_up/1
+]).
+
+%% Same as rabbit_vhost:exists/1.
+-spec exists(vhost:name()) -> boolean().
+exists(VirtualHost) ->
+    rabbit_db_vhost:exists(VirtualHost).
+
+%% Same as rabbit_vhost:list_names/0.
+-spec list_names() -> [vhost:name()].
+list_names() -> rabbit_db_vhost:list().
+
+-spec boot() -> 'ok'.
+boot() ->
+    _ = start_processes_for_all(),
+    _ = increment_run_counter(),
+    _ = case is_reconciliation_enabled() of
+            false -> ok;
+            true -> maybe_start_timer(reconcile)
+        end,
+    ok.
+
+%% Performs a round of virtual host process reconciliation and sets up a timer to
+%% re-run this operation unless it has been run 10 or more times since cluster boot.
+%% See start_processes_for_all/1.
+-spec reconcile() -> 'ok'.
+reconcile() ->
+    case is_reconciliation_enabled() of
+        false -> ok;
+        true ->
+            _ = reconcile_once(),
+            _ = maybe_start_timer(?FUNCTION_NAME),
+            ok
+    end.
+
+%% Performs a round of virtual host process reconciliation but does not schedule any future runs.
+%% See start_processes_for_all/1.
+-spec reconcile_once() -> 'ok'.
+reconcile_once() ->
+    rabbit_log:debug("Will reconcile virtual host processes on all cluster members..."),
+    _ = start_processes_for_all(),
+    _ = increment_run_counter(),
+    N = get_run_counter(),
+    rabbit_log:debug("Done with virtual host process reconciliation (run ~tp)", [N]),
+    ok.
+
+-spec on_node_up(Node :: node()) -> 'ok'.
+on_node_up(_Node) ->
+    case is_reconciliation_enabled() of
+        false -> ok;
+        true ->
+            DelayInSeconds = 10,
+            Delay = DelayInSeconds * 1000,
+            rabbit_log:debug("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]),
+            _ = timer:apply_after(Delay, ?MODULE, reconcile_once, []),
+            ok
+    end.
+
+-spec is_reconciliation_enabled() -> boolean().
+is_reconciliation_enabled() ->
+    application:get_env(rabbit, vhost_process_reconciliation_enabled, true).
+
+-spec enable_reconciliation() -> 'ok'.
+enable_reconciliation() ->
+    %% reset the auto-stop counter
+    persistent_term:put(?PERSISTENT_TERM_COUNTER_KEY, 0),
+    application:set_env(rabbit, vhost_process_reconciliation_enabled, true).
+
+-spec disable_reconciliation() -> 'ok'.
+disable_reconciliation() ->
+    application:set_env(rabbit, vhost_process_reconciliation_enabled, false).
+
+-spec reconciliation_interval() -> non_neg_integer().
+reconciliation_interval() ->
+    application:get_env(rabbit, vhost_process_reconciliation_run_interval, 30).
+
+%% Starts a virtual host process on each of the specified nodes.
+%% Only exists to allow for "virtual host process repair"
+%% in clusters where nodes are booted in parallel and seeded
+%% (e.g. using definitions) at the same time.
+%%
+%% In that case, during virtual host insertion into the schema database,
+%% some processes predictably won't be started on the yet-to-be-discovered nodes.
+-spec start_processes_for_all([node()]) -> 'ok'.
+start_processes_for_all(Nodes) ->
+    Names = list_names(),
+    N = length(Names),
+    rabbit_log:debug("Will make sure that processes of ~p virtual hosts are running on all reachable cluster nodes", [N]),
+    [begin
+         try
+             start_on_all_nodes(VH, Nodes)
+         catch
+             _:Err:_Stacktrace ->
+                 rabbit_log:error("Could not reconcile virtual host ~ts: ~tp", [VH, Err])
+         end
+     end || VH <- Names],
+    ok.
+
+-spec start_processes_for_all() -> 'ok'.
+start_processes_for_all() ->
+    start_processes_for_all(rabbit_nodes:list_reachable()).
+
+%% Same as rabbit_vhost_sup_sup:start_on_all_nodes/2.
+-spec start_on_all_nodes(vhost:name(), [node()]) -> 'ok'.
+start_on_all_nodes(VirtualHost, Nodes) ->
+    _ = rabbit_vhost_sup_sup:start_on_all_nodes(VirtualHost, Nodes),
+    ok.
+
+%%
+%% Implementation
+%%
+
+-spec get_run_counter() -> non_neg_integer().
+get_run_counter() ->
+    persistent_term:get(?PERSISTENT_TERM_COUNTER_KEY, 0).
+
+-spec increment_run_counter() -> non_neg_integer().
+increment_run_counter() ->
+    N = get_run_counter(),
+    persistent_term:put(?PERSISTENT_TERM_COUNTER_KEY, N + 1),
+    N.
+
+-spec maybe_start_timer(atom()) -> ok | {ok, timer:tref()} | {error, any()}.
+maybe_start_timer(FunName) ->
+    N = get_run_counter(),
+    DelayInSeconds = reconciliation_interval(),
+    case N >= 10 of
+        true ->
+            %% Stop after ten runs
+            rabbit_log:debug("Will stop virtual host process reconciliation after ~tp runs", [N]),
+            ok;
+        false ->
+            case is_reconciliation_enabled() of
+                false -> ok;
+                true ->
+                    Delay = DelayInSeconds * 1000,
+                    rabbit_log:debug("Will reschedule virtual host process reconciliation after ~b seconds", [DelayInSeconds]),
+                    timer:apply_after(Delay, ?MODULE, FunName, [])
+            end
+    end.
diff --git a/deps/rabbit/src/rabbit_vm.erl b/deps/rabbit/src/rabbit_vm.erl
index eb7857f2a5f5..8e4288aa2d0b 100644
--- a/deps/rabbit/src/rabbit_vm.erl
+++ b/deps/rabbit/src/rabbit_vm.erl
@@ -2,7 +2,7 @@
 %% License, v. 2.0. If a copy of the MPL was not distributed with this
 %% file, You can obtain one at https://mozilla.org/MPL/2.0/.
 %%
-%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 %%
 -module(rabbit_vm).
@@ -20,7 +20,7 @@ memory() -> {Sums, _Other} = sum_processes( lists:append(All), distinguishers(), [memory]), - [Qs, QsSlave, Qqs, DlxWorkers, Ssqs, Srqs, SCoor, ConnsReader, ConnsWriter, ConnsChannel, + [Qs, Qqs, DlxWorkers, Ssqs, Srqs, SCoor, ConnsReader, ConnsWriter, ConnsChannel, ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] = [aggregate(Names, Sums, memory, fun (X) -> X end) || Names <- distinguished_interesting_sups()], @@ -36,7 +36,15 @@ memory() -> error:badarg -> 0 end, + MetadataStoreProc = try + [{_, MS}] = process_info(whereis(rabbit_khepri:get_ra_cluster_name()), [memory]), + MS + catch + error:badarg -> + 0 + end, MgmtDbETS = ets_memory([rabbit_mgmt_storage]), + MetadataStoreETS = ets_memory([rabbitmq_metadata]), [{total, ErlangTotal}, {processes, Processes}, {ets, ETS}, @@ -55,9 +63,8 @@ memory() -> OtherProc = Processes - ConnsReader - ConnsWriter - ConnsChannel - ConnsOther - - Qs - QsSlave - Qqs - DlxWorkers - Ssqs - Srqs - SCoor - MsgIndexProc - Plugins - - MgmtDbProc - MetricsProc, - + - Qs - Qqs - DlxWorkers - Ssqs - Srqs - SCoor - MsgIndexProc - Plugins + - MgmtDbProc - MetricsProc - MetadataStoreProc, [ %% Connections {connection_readers, ConnsReader}, @@ -67,7 +74,6 @@ memory() -> %% Queues {queue_procs, Qs}, - {queue_slave_procs, QsSlave}, {quorum_queue_procs, Qqs}, {quorum_queue_dlx_procs, DlxWorkers}, {stream_queue_procs, Ssqs}, @@ -76,6 +82,7 @@ memory() -> %% Processes {plugins, Plugins}, + {metadata_store, MetadataStoreProc}, {other_proc, lists:max([0, OtherProc])}, %% [1] %% Metrics @@ -85,7 +92,8 @@ memory() -> %% ETS {mnesia, MnesiaETS}, {quorum_ets, QuorumETS}, - {other_ets, ETS - MnesiaETS - MetricsETS - MgmtDbETS - MsgIndexETS - QuorumETS}, + {metadata_store_ets, MetadataStoreETS}, + {other_ets, ETS - MnesiaETS - MetricsETS - MgmtDbETS - MsgIndexETS - QuorumETS - MetadataStoreETS}, %% Messages (mostly, some binaries are not messages) {binary, Bin}, @@ -119,25 +127,34 @@ binary() -> sets:add_element({Ptr, Sz}, Acc0) end, Acc, Info) end, distinguishers(), [{binary, sets:new()}]), - [Other, Qs, QsSlave, Qqs, DlxWorkers, Ssqs, Srqs, Scoor, ConnsReader, ConnsWriter, + [Other, Qs, Qqs, DlxWorkers, Ssqs, Srqs, Scoor, ConnsReader, ConnsWriter, ConnsChannel, ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] = [aggregate(Names, [{other, Rest} | Sums], binary, fun sum_binary/1) || Names <- [[other] | distinguished_interesting_sups()]], + MetadataStoreProc = try + [{_, B}] = process_info(whereis(rabbit_khepri:get_ra_cluster_name()), [binary]), + lists:foldl(fun({_, Sz, _}, Acc) -> + Sz + Acc + end, 0, B) + catch + error:badarg -> + 0 + end, [{connection_readers, ConnsReader}, {connection_writers, ConnsWriter}, {connection_channels, ConnsChannel}, {connection_other, ConnsOther}, {queue_procs, Qs}, - {queue_slave_procs, QsSlave}, {quorum_queue_procs, Qqs}, {quorum_queue_dlx_procs, DlxWorkers}, {stream_queue_procs, Ssqs}, {stream_queue_replica_reader_procs, Srqs}, {stream_queue_coordinator_procs, Scoor}, + {metadata_store, MetadataStoreProc}, {plugins, Plugins}, {mgmt_db, MgmtDbProc}, {msg_index, MsgIndexProc}, - {other, Other}]. + {other, Other - MetadataStoreProc}]. %%---------------------------------------------------------------------------- @@ -235,13 +252,11 @@ ranch_server_sups() -> with(Sups, With) -> [{Sup, With} || Sup <- Sups]. -distinguishers() -> with(queue_sups(), fun queue_type/1) ++ - with(conn_sups(), fun conn_type/1). +distinguishers() -> with(conn_sups(), fun conn_type/1). 
distinguished_interesting_sups() -> [ - with(queue_sups(), master), - with(queue_sups(), slave), + queue_sups(), quorum_sups(), dlx_sups(), stream_server_sups(), @@ -289,12 +304,6 @@ extract(Name, Sums, Key, Fun) -> sum_binary(Set) -> sets:fold(fun({_Pt, Sz}, Acc) -> Acc + Sz end, 0, Set). -queue_type(PDict) -> - case keyfind(process_name, PDict) of - {value, {rabbit_mirror_queue_slave, _}} -> slave; - _ -> master - end. - conn_type(PDict) -> case keyfind(process_name, PDict) of {value, {rabbit_reader, _}} -> reader; diff --git a/deps/rabbit/src/supervised_lifecycle.erl b/deps/rabbit/src/supervised_lifecycle.erl index 1edfb4efe8a6..95387bc5b8b0 100644 --- a/deps/rabbit/src/supervised_lifecycle.erl +++ b/deps/rabbit/src/supervised_lifecycle.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Invoke callbacks on startup and termination. diff --git a/deps/rabbit/src/tcp_listener.erl b/deps/rabbit/src/tcp_listener.erl index 2cd0da032e53..91f102c99062 100644 --- a/deps/rabbit/src/tcp_listener.erl +++ b/deps/rabbit/src/tcp_listener.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(tcp_listener). @@ -103,4 +103,4 @@ obfuscate_state(#state{on_shutdown = OnShutdown} = State) -> on_shutdown = fun(IPAddress, Port) -> apply(M, F, A ++ [IPAddress, Port]) end - }. \ No newline at end of file + }. diff --git a/deps/rabbit/src/tcp_listener_sup.erl b/deps/rabbit/src/tcp_listener_sup.erl index fbb1efa1f264..fc8379419a00 100644 --- a/deps/rabbit/src/tcp_listener_sup.erl +++ b/deps/rabbit/src/tcp_listener_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(tcp_listener_sup). diff --git a/deps/rabbit/src/term_to_binary_compat.erl b/deps/rabbit/src/term_to_binary_compat.erl index 4686658a8e3e..6b844e74f19a 100644 --- a/deps/rabbit/src/term_to_binary_compat.erl +++ b/deps/rabbit/src/term_to_binary_compat.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(term_to_binary_compat). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([term_to_binary_1/1]). 
term_to_binary_1(Term) -> diff --git a/deps/rabbit/src/vhost.erl b/deps/rabbit/src/vhost.erl index 635e0d14e4f0..bece0b49c957 100644 --- a/deps/rabbit/src/vhost.erl +++ b/deps/rabbit/src/vhost.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(vhost). --include_lib("rabbit_common/include/rabbit.hrl"). -include("vhost.hrl"). -export([ @@ -20,6 +19,7 @@ upgrade/1, upgrade_to/2, pattern_match_all/0, + pattern_match_names/0, get_name/1, get_limits/1, get_metadata/1, @@ -29,6 +29,7 @@ set_limits/2, set_metadata/2, merge_metadata/2, + new_metadata/3, is_tagged_with/2 ]). @@ -53,7 +54,7 @@ -record(vhost, { %% name as a binary - virtual_host :: name() | '_', + virtual_host :: name() | '_' | '$1', %% proplist of limits configured, if any limits :: limits() | '_', metadata :: metadata() | '_' @@ -67,7 +68,7 @@ -type vhost_pattern() :: vhost_v2_pattern(). -type vhost_v2_pattern() :: #vhost{ - virtual_host :: name() | '_', + virtual_host :: name() | '_' | '$1', limits :: '_', metadata :: '_' }. @@ -128,6 +129,10 @@ info_keys() -> pattern_match_all() -> #vhost{_ = '_'}. +-spec pattern_match_names() -> vhost_pattern(). +pattern_match_names() -> + #vhost{virtual_host = '$1', _ = '_'}. + -spec get_name(vhost()) -> name(). get_name(#vhost{virtual_host = Value}) -> Value. @@ -159,11 +164,35 @@ set_metadata(VHost, Value) -> VHost#vhost{metadata = Value}. -spec merge_metadata(vhost(), metadata()) -> vhost(). -merge_metadata(VHost, Value) -> - Meta0 = get_metadata(VHost), - NewMeta = maps:merge(Meta0, Value), - VHost#vhost{metadata = NewMeta}. - --spec is_tagged_with(vhost:vhost(), tag()) -> boolean(). +merge_metadata(VHost, NewVHostMeta) -> + CurrentVHostMeta = get_metadata(VHost), + FinalMeta = maps:merge_with( + fun metadata_merger/3, CurrentVHostMeta, NewVHostMeta), + VHost#vhost{metadata = FinalMeta}. + +%% This is the case where the existing VHost metadata has a default queue type +%% value and the proposed value is `undefined`. We do not want the proposed +%% value to overwrite the current value +metadata_merger(default_queue_type, CurrentDefaultQueueType, undefined) -> + CurrentDefaultQueueType; +%% This is the case where the existing VHost metadata has any default queue +%% type value, and the proposed value is NOT `undefined`. It is OK for any +%% proposed value to be used. +metadata_merger(default_queue_type, _, NewVHostDefaultQueueType) -> + NewVHostDefaultQueueType; +%% This is the case for all other VHost metadata keys. +metadata_merger(_, _, NewMetadataValue) -> + NewMetadataValue. + +-spec new_metadata(binary(), [atom()], rabbit_queue_type:queue_type() | 'undefined') -> metadata(). +new_metadata(Description, Tags, undefined) -> + #{description => Description, + tags => Tags}; +new_metadata(Description, Tags, DefaultQueueType) -> + #{description => Description, + tags => Tags, + default_queue_type => DefaultQueueType}. + +-spec is_tagged_with(vhost(), tag()) -> boolean(). is_tagged_with(VHost, Tag) -> lists:member(Tag, get_tags(VHost)). 
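Editor's note on the merge_metadata/2 change above: the point of metadata_merger/3 is that a proposed default_queue_type of 'undefined' must not erase a value already stored for the virtual host, while any other proposed value wins. A minimal sketch of the same merge rule, expressed directly with maps:merge_with/3 and hypothetical values:

%% Hypothetical values; only illustrates the merge rule implemented above.
Current  = #{description => <<"old">>, default_queue_type => <<"quorum">>},
Proposed = #{description => <<"new">>, default_queue_type => undefined},
Merged   = maps:merge_with(
             fun (default_queue_type, CurrentDQT, undefined) -> CurrentDQT;
                 (_Key, _CurrentVal, NewVal) -> NewVal
             end, Current, Proposed),
%% Merged =:= #{description => <<"new">>, default_queue_type => <<"quorum">>}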
diff --git a/deps/rabbit/test/amqp_address_SUITE.erl b/deps/rabbit/test/amqp_address_SUITE.erl new file mode 100644 index 000000000000..eaa0ffaf0b3d --- /dev/null +++ b/deps/rabbit/test/amqp_address_SUITE.erl @@ -0,0 +1,644 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 VMware, Inc. or its affiliates. All rights reserved. + +-module(amqp_address_SUITE). + +-compile([export_all, + nowarn_export_all]). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp10_common/include/amqp10_framing.hrl"). +-include_lib("rabbitmq_amqp_client/include/rabbitmq_amqp_client.hrl"). + +-import(rabbit_ct_broker_helpers, + [rpc/4]). +-import(rabbit_ct_helpers, + [eventually/1]). + +all() -> + [ + {group, v1_permitted}, + {group, v1_denied} + ]. + +groups() -> + [ + {v1_permitted, [shuffle], + common_tests() + }, + {v1_denied, [shuffle], + [ + target_queue_absent, + source_queue_absent, + target_bad_v2_address, + source_bad_v2_address + ] ++ common_tests() + } + ]. + +common_tests() -> + [ + target_exchange_routing_key, + target_exchange_routing_key_with_slash, + target_exchange_routing_key_empty, + target_exchange, + target_exchange_absent, + queue, + queue_with_slash, + target_per_message_exchange_routing_key, + target_per_message_exchange, + target_per_message_queue, + target_per_message_unset_to_address, + target_per_message_bad_to_address, + target_per_message_exchange_absent, + target_bad_address, + source_bad_address + ]. + +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + Config. + +end_per_suite(Config) -> + Config. + +init_per_group(Group, Config0) -> + PermitV1 = case Group of + v1_permitted -> true; + v1_denied -> false + end, + Config = rabbit_ct_helpers:merge_app_env( + Config0, + {rabbit, + [{permit_deprecated_features, + #{amqp_address_v1 => PermitV1} + }] + }), + rabbit_ct_helpers:run_setup_steps( + Config, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_Group, Config) -> + rabbit_ct_helpers:run_teardown_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% Test v2 target address +%% /exchanges/:exchange/:routing-key +target_exchange_routing_key(Config) -> + XName = <<"👉"/utf8>>, + RKey = <<"🗝️"/utf8>>, + target_exchange_routing_key0(XName, RKey, Config). + +%% Test v2 target address +%% /exchanges/:exchange/:routing-key +%% where both :exchange and :routing-key contains a "/" character. +target_exchange_routing_key_with_slash(Config) -> + XName = <<"my/exchange">>, + RKey = <<"my/key">>, + target_exchange_routing_key0(XName, RKey, Config). 
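Editor's note for orientation while reading this new suite: the rabbitmq_amqp_address helpers build the v2 addresses under test ("/exchanges/:exchange/:routing-key" and "/queues/:queue"). The expected shapes below are an assumption inferred from the *_with_slash test cases (reserved characters are presumably percent-encoded); they are not output copied from the client library.

%% Assumed address shapes, illustrative only:
rabbitmq_amqp_address:exchange(<<"amq.direct">>, <<"my-key">>),
%%   presumably <<"/exchanges/amq.direct/my-key">>
rabbitmq_amqp_address:queue(<<"my/queue">>).
%%   presumably <<"/queues/my%2Fqueue">>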
+ +target_exchange_routing_key0(XName, RKey, Config) -> + TargetAddr = rabbitmq_amqp_address:exchange(XName, RKey), + QName = atom_to_binary(?FUNCTION_NAME), + + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + ok = rabbitmq_amqp_client:declare_exchange(LinkPair, XName, #{}), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, XName, RKey, #{}), + SrcAddr = rabbitmq_amqp_address:queue(QName), + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, SrcAddr), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, TargetAddr), + ok = wait_for_credit(Sender), + Body = <<"body">>, + Msg0 = amqp10_msg:new(<<"tag">>, Body, true), + %% Although mc_amqp:essential_properties/1 parses these annotations, they should be ignored. + Msg1 = amqp10_msg:set_message_annotations( + #{<<"x-exchange">> => <<"ignored">>, + <<"x-routing-key">> => <<"ignored">>}, + Msg0), + ok = amqp10_client:send_msg(Sender, Msg1), + + {ok, Msg} = amqp10_client:get_msg(Receiver), + ?assertEqual([Body], amqp10_msg:body(Msg)), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:delete_exchange(LinkPair, XName), + ok = cleanup(Init). + +%% Test v2 target address +%% /exchanges/:exchange/ +%% Routing key is empty. +target_exchange_routing_key_empty(Config) -> + XName = <<"amq.fanout">>, + TargetAddr = rabbitmq_amqp_address:exchange(XName, <<>>), + QName = atom_to_binary(?FUNCTION_NAME), + + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, XName, <<"ignored">>, #{}), + SrcAddr = rabbitmq_amqp_address:queue(QName), + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, SrcAddr), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, TargetAddr), + ok = wait_for_credit(Sender), + Body = <<"body">>, + Msg0 = amqp10_msg:new(<<"tag">>, Body, true), + ok = amqp10_client:send_msg(Sender, Msg0), + + {ok, Msg} = amqp10_client:get_msg(Receiver), + ?assertEqual([Body], amqp10_msg:body(Msg)), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). + +%% Test v2 target address +%% /exchanges/:exchange +%% Routing key is empty. +target_exchange(Config) -> + XName = <<"amq.fanout">>, + TargetAddr = rabbitmq_amqp_address:exchange(XName), + QName = atom_to_binary(?FUNCTION_NAME), + + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, XName, <<"ignored">>, #{}), + SrcAddr = rabbitmq_amqp_address:queue(QName), + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, SrcAddr), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, TargetAddr), + ok = wait_for_credit(Sender), + Body = <<"body">>, + Msg0 = amqp10_msg:new(<<"tag">>, Body, true), + ok = amqp10_client:send_msg(Sender, Msg0), + + {ok, Msg} = amqp10_client:get_msg(Receiver), + ?assertEqual([Body], amqp10_msg:body(Msg)), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). + +%% Test v2 target address +%% /exchanges/:exchange +%% where the target exchange does not exist. 
+target_exchange_absent(Config) -> + XName = <<"🎈"/utf8>>, + TargetAddr = rabbitmq_amqp_address:exchange(XName), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + + {ok, _Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, TargetAddr), + receive + {amqp10_event, + {session, Session, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_NOT_FOUND, + description = {utf8, <<"no exchange '", XName:(byte_size(XName))/binary, + "' in vhost '/'">>}}}}} -> ok + after 5000 -> + Reason = {missing_event, ?LINE}, + flush(Reason), + ct:fail(Reason) + end, + ok = amqp10_client:close_connection(Connection). + +%% Test v2 target and source address +%% /queues/:queue +queue(Config) -> + QName = <<"🎈"/utf8>>, + queue0(QName, Config). + +%% Test v2 target and source address +%% /queues/:queue +%% where :queue contains a "/" character. +queue_with_slash(Config) -> + QName = <<"my/queue">>, + queue0(QName, Config). + +queue0(QName, Config) -> + Addr = rabbitmq_amqp_address:queue(QName), + + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Addr), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Addr), + ok = wait_for_credit(Sender), + Body = <<"body">>, + Msg0 = amqp10_msg:new(<<"tag">>, Body, true), + ok = amqp10_client:send_msg(Sender, Msg0), + + {ok, Msg} = amqp10_client:get_msg(Receiver), + ?assertEqual([Body], amqp10_msg:body(Msg)), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). + +%% Test v2 target address +%% /queues/:queue +%% where the target queue does not exist. +target_queue_absent(Config) -> + QName = <<"🎈"/utf8>>, + TargetAddr = rabbitmq_amqp_address:queue(QName), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + + {ok, _Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, TargetAddr), + receive + {amqp10_event, + {session, Session, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_NOT_FOUND, + description = {utf8, <<"no queue '", QName:(byte_size(QName))/binary, + "' in vhost '/'">>}}}}} -> ok + after 5000 -> + Reason = {missing_event, ?LINE}, + flush(Reason), + ct:fail(Reason) + end, + ok = amqp10_client:close_connection(Connection). + +%% Test v2 target address 'null' and 'to' +%% /exchanges/:exchange/:routing-key +%% with varying routing keys. +target_per_message_exchange_routing_key(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + DirectX = <<"amq.direct">>, + RKey1 = <<"🗝️1"/utf8>>, + RKey2 = <<"🗝️2"/utf8>>, + To1 = rabbitmq_amqp_address:exchange(DirectX, RKey1), + To2 = rabbitmq_amqp_address:exchange(DirectX, RKey2), + + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, DirectX, RKey1, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, DirectX, RKey2, #{}), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, null), + ok = wait_for_credit(Sender), + + Tag1 = Body1 = <<1>>, + Tag2 = Body2 = <<2>>, + + %% Although mc_amqp:essential_properties/1 parses these annotations, they should be ignored. 
+ Msg1 = amqp10_msg:set_message_annotations( + #{<<"x-exchange">> => <<"ignored">>, + <<"x-routing-key">> => <<"ignored">>}, + amqp10_msg:set_properties(#{to => To1}, amqp10_msg:new(Tag1, Body1))), + Msg2 = amqp10_msg:set_properties(#{to => To2}, amqp10_msg:new(Tag2, Body2)), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = amqp10_client:send_msg(Sender, Msg2), + ok = wait_for_settled(accepted, Tag1), + ok = wait_for_settled(accepted, Tag2), + + {ok, #{message_count := 2}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). + +%% Test v2 target address 'null' and 'to' +%% /exchanges/:exchange +%% with varying exchanges. +target_per_message_exchange(Config) -> + XFanout = <<"amq.fanout">>, + XHeaders = <<"amq.headers">>, + To1 = rabbitmq_amqp_address:exchange(XFanout), + To2 = rabbitmq_amqp_address:exchange(XHeaders), + QName = atom_to_binary(?FUNCTION_NAME), + + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, XFanout, <<>>, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, XHeaders, <<>>, + #{<<"my key">> => true, + <<"x-match">> => {utf8, <<"any">>}}), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, null), + ok = wait_for_credit(Sender), + + Tag1 = Body1 = <<1>>, + Tag2 = Body2 = <<2>>, + Msg1 = amqp10_msg:set_properties(#{to => To1}, amqp10_msg:new(Tag1, Body1)), + Msg2 = amqp10_msg:set_application_properties( + #{<<"my key">> => true}, + amqp10_msg:set_properties(#{to => To2}, amqp10_msg:new(Tag2, Body2))), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = amqp10_client:send_msg(Sender, Msg2), + ok = wait_for_settled(accepted, Tag1), + ok = wait_for_settled(accepted, Tag2), + + {ok, #{message_count := 2}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). + +%% Test v2 target address 'null' and 'to' +%% /queues/:queue +target_per_message_queue(Config) -> + Q1 = <<"q1">>, + Q2 = <<"q2">>, + Q3 = <<"q3">>, + To1 = rabbitmq_amqp_address:queue(Q1), + To2 = rabbitmq_amqp_address:queue(Q2), + To3 = rabbitmq_amqp_address:queue(Q3), + + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, Q1, #{}), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, Q2, #{}), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, null), + ok = wait_for_credit(Sender), + + Tag1 = Body1 = <<1>>, + Tag2 = Body2 = <<2>>, + Tag3 = Body3 = <<3>>, + Msg1 = amqp10_msg:set_properties(#{to => To1}, amqp10_msg:new(Tag1, Body1)), + Msg2 = amqp10_msg:set_properties(#{to => To2}, amqp10_msg:new(Tag2, Body2)), + Msg3 = amqp10_msg:set_properties(#{to => To3}, amqp10_msg:new(Tag3, Body3)), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = amqp10_client:send_msg(Sender, Msg2), + ok = amqp10_client:send_msg(Sender, Msg3), + ok = wait_for_settled(accepted, Tag1), + ok = wait_for_settled(accepted, Tag2), + ok = wait_for_settled(released, Tag3), + + {ok, #{message_count := 1}} = rabbitmq_amqp_client:delete_queue(LinkPair, Q1), + {ok, #{message_count := 1}} = rabbitmq_amqp_client:delete_queue(LinkPair, Q2), + ok = cleanup(Init). + +%% Test v2 target address 'null', but 'to' not set. 
+target_per_message_unset_to_address(Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, null), + ok = wait_for_credit(Sender), + + %% Send message with 'to' unset. + DTag = <<1>>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag, <<0>>)), + ok = wait_for_settled(released, DTag), + receive {amqp10_event, + {link, Sender, + {detached, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + description = {utf8, <<"anonymous terminus requires 'to' address to be set">>}}}}} -> ok + after 5000 -> ct:fail("server did not close our outgoing link") + end, + + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +bad_v2_addresses() -> + [ + %% valid v1, but bad v2 target addresses + <<"/topic/mytopic">>, + <<"/amq/queue/myqueue">>, + <<"myqueue">>, + <<"/queue">>, + %% bad v2 target addresses + <<>>, + <<0>>, + <<"/">>, + <<"//">>, + <<"/queues">>, + <<"/queues/">>, + <<"/queue/">>, + <<"/exchanges">>, + %% default exchange in v2 target address is disallowed + <<"/exchanges/">>, + <<"/exchanges//">>, + <<"/exchanges//mykey">>, + <<"/exchanges/amq.default">>, + <<"/exchanges/amq.default/">>, + <<"/exchanges/amq.default/mykey">>, + <<"/ex/✋"/utf8>>, + <<"/exchange">>, + <<"/exchange/">>, + <<"/exchange/amq.default">>, + <<"/exchange//key/">>, + <<"/exchange//key/mykey">>, + <<"/exchange/amq.default/key/">>, + <<"/exchange/amq.default/key/mykey">>, + %% The following addresses should be percent encoded, but aren't. + <<"/queues/missing%encoding">>, + <<"/queues/missing/encoding">>, + <<"/queues/✋"/utf8>>, + <<"/exchanges/missing%encoding">>, + <<"/exchanges/missing/encoding/routingkey">>, + <<"/exchanges/exchange/missing%encoding">>, + <<"/exchanges/✋"/utf8>> + ]. + +%% Test v2 target address 'null' with an invalid 'to' addresses. +target_per_message_bad_to_address(Config) -> + lists:foreach(fun(Addr) -> + ok = target_per_message_bad_to_address0(Addr, Config) + end, bad_v2_addresses()). + +target_per_message_bad_to_address0(Address, Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, null), + ok = wait_for_credit(Sender), + + DTag = <<255>>, + Msg = amqp10_msg:set_properties(#{to => Address}, amqp10_msg:new(DTag, <<0>>)), + ok = amqp10_client:send_msg(Sender, Msg), + ok = wait_for_settled(released, DTag), + receive {amqp10_event, + {link, Sender, + {detached, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + description = {utf8, <<"bad 'to' address", _Rest/binary>>}}}}} -> ok + after 5000 -> ct:fail("server did not close our outgoing link") + end, + + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). 
+ +target_per_message_exchange_absent(Config) -> + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + XName = <<"🎈"/utf8>>, + Address = rabbitmq_amqp_address:exchange(XName), + ok = rabbitmq_amqp_client:declare_exchange(LinkPair, XName, #{}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, null), + ok = wait_for_credit(Sender), + + DTag1 = <<1>>, + Msg1 = amqp10_msg:set_properties(#{to => Address}, amqp10_msg:new(DTag1, <<"m1">>)), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = wait_for_settled(released, DTag1), + + ok = rabbitmq_amqp_client:delete_exchange(LinkPair, XName), + + DTag2 = <<2>>, + Msg2 = amqp10_msg:set_properties(#{to => Address}, amqp10_msg:new(DTag2, <<"m2">>)), + ok = amqp10_client:send_msg(Sender, Msg2), + ok = wait_for_settled(released, DTag2), + receive {amqp10_event, {link, Sender, {detached, Error}}} -> + ?assertEqual( + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_NOT_FOUND, + description = {utf8, <<"no exchange '", XName/binary, "' in vhost '/'">>}}, + Error) + after 5000 -> ct:fail("server did not close our outgoing link") + end, + + ok = cleanup(Init). + +target_bad_address(Config) -> + %% bad v1 and bad v2 target address + TargetAddr = <<"/qqq/🎈"/utf8>>, + target_bad_address0(TargetAddr, Config). + +target_bad_v2_address(Config) -> + lists:foreach(fun(Addr) -> + ok = target_bad_address0(Addr, Config) + end, bad_v2_addresses()). + +target_bad_address0(TargetAddress, Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + + {ok, _Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, TargetAddress), + receive + {amqp10_event, + {session, Session, + {ended, + #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_INVALID_FIELD}}}} -> ok + after 5000 -> + Reason = {missing_event, ?LINE, TargetAddress}, + flush(Reason), + ct:fail(Reason) + end, + ok = amqp10_client:close_connection(Connection). + +%% Test v2 source address +%% /queues/:queue +%% where the source queue does not exist. +source_queue_absent(Config) -> + QName = <<"🎈"/utf8>>, + SourceAddr = rabbitmq_amqp_address:queue(QName), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + + {ok, _Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, SourceAddr), + receive + {amqp10_event, + {session, Session, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_NOT_FOUND, + description = {utf8, <<"no queue '", QName:(byte_size(QName))/binary, + "' in vhost '/'">>}}}}} -> ok + after 5000 -> + Reason = {missing_event, ?LINE}, + flush(Reason), + ct:fail(Reason) + end, + ok = amqp10_client:close_connection(Connection). + +source_bad_address(Config) -> + %% bad v1 and bad v2 source address + SourceAddr = <<"/qqq/🎈"/utf8>>, + source_bad_address0(SourceAddr, Config). + +source_bad_v2_address(Config) -> + %% valid v1, but bad v2 source addresses + SourceAddresses = [<<"/exchange/myroutingkey">>, + <<"/topic/mytopic">>, + <<"/amq/queue/myqueue">>, + <<"myqueue">>], + lists:foreach(fun(Addr) -> + ok = source_bad_address0(Addr, Config) + end, SourceAddresses). 
+ +source_bad_address0(SourceAddress, Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + + {ok, _Receiver} = amqp10_client:attach_receiver_link(Session, <<"sender">>, SourceAddress), + receive + {amqp10_event, + {session, Session, + {ended, + #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_INVALID_FIELD}}}} -> ok + after 5000 -> + Reason = {missing_event, ?LINE}, + flush(Reason), + ct:fail(Reason) + end, + ok = amqp10_client:close_connection(Connection). + +init(Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"mgmt link pair">>), + {Connection, LinkPair}. + +cleanup({Connection, LinkPair = #link_pair{session = Session}}) -> + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +connection_config(Config) -> + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, <<"guest">>, <<"guest">>}}. + +% before we can send messages we have to wait for credit from the server +wait_for_credit(Sender) -> + receive + {amqp10_event, {link, Sender, credited}} -> + flush(?FUNCTION_NAME), + ok + after 5000 -> + flush(?FUNCTION_NAME), + ct:fail(?FUNCTION_NAME) + end. + +wait_for_settled(State, Tag) -> + receive + {amqp10_disposition, {State, Tag}} -> + ok + after 5000 -> + Reason = {?FUNCTION_NAME, State, Tag}, + flush(Reason), + ct:fail(Reason) + end. + +flush(Prefix) -> + receive Msg -> + ct:pal("~tp flushed: ~p~n", [Prefix, Msg]), + flush(Prefix) + after 1 -> + ok + end. diff --git a/deps/rabbit/test/amqp_auth_SUITE.erl b/deps/rabbit/test/amqp_auth_SUITE.erl new file mode 100644 index 000000000000..c717cd886d60 --- /dev/null +++ b/deps/rabbit/test/amqp_auth_SUITE.erl @@ -0,0 +1,1110 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. + +-module(amqp_auth_SUITE). + +-compile([export_all, + nowarn_export_all]). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("amqp10_common/include/amqp10_framing.hrl"). + +-import(rabbit_ct_broker_helpers, + [rpc/4]). +-import(rabbit_ct_helpers, + [eventually/1]). +-import(event_recorder, + [assert_event_type/2, + assert_event_prop/2]). + +all() -> + [ + {group, address_v1}, + {group, address_v2} + ]. 
+ +groups() -> + [ + {address_v1, [shuffle], + [ + %% authz + v1_attach_target_queue, + v1_attach_source_exchange, + v1_send_to_topic, + v1_send_to_topic_using_subject, + v1_attach_source_topic, + v1_attach_target_internal_exchange, + + %% limits + v1_vhost_queue_limit + ] + }, + {address_v2, [shuffle], + [ + %% authz + attach_source_queue, + attach_target_exchange, + attach_target_topic_exchange, + attach_target_queue, + target_per_message_exchange, + target_per_message_internal_exchange, + target_per_message_topic, + + %% authn + authn_failure_event, + sasl_anonymous_success, + sasl_plain_success, + sasl_anonymous_failure, + sasl_plain_failure, + sasl_none_failure, + vhost_absent, + + %% limits + vhost_connection_limit, + user_connection_limit, + + %% AMQP Management operations against HTTP API v2 + declare_exchange, + delete_exchange, + declare_queue, + declare_queue_dlx_queue, + declare_queue_dlx_exchange, + declare_queue_vhost_queue_limit, + delete_queue, + purge_queue, + bind_queue_source, + bind_queue_destination, + bind_exchange_source, + bind_exchange_destination, + bind_to_topic_exchange, + unbind_queue_source, + unbind_queue_target, + unbind_from_topic_exchange + ] + } + ]. + +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + Config. + +end_per_suite(Config) -> + Config. + +init_per_group(Group, Config0) -> + PermitV1 = case Group of + address_v1 -> true; + address_v2 -> false + end, + Config1 = rabbit_ct_helpers:merge_app_env( + Config0, {rabbit, + [{permit_deprecated_features, + #{amqp_address_v1 => PermitV1}}]}), + Config = rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + Vhost = <<"test vhost">>, + User = <<"test user">>, + ok = rabbit_ct_broker_helpers:add_vhost(Config, Vhost), + ok = rabbit_ct_broker_helpers:add_user(Config, User), + [{test_vhost, Vhost}, + {test_user, User}] ++ Config. + +end_per_group(_Group, Config) -> + ok = rabbit_ct_broker_helpers:delete_user(Config, ?config(test_user, Config)), + ok = rabbit_ct_broker_helpers:delete_vhost(Config, ?config(test_vhost, Config)), + rabbit_ct_helpers:run_teardown_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + ok = set_permissions(Config, <<>>, <<>>, <<"^some vhost permission">>), + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + delete_all_queues(Config), + ok = clear_permissions(Config), + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +v1_attach_target_queue(Config) -> + QName = <<"test queue">>, + %% This target address means RabbitMQ will create a queue + %% requiring configure access on the queue. + %% We will also need write access to the default exchange to send to this queue. 
+ TargetAddress = <<"/queue/", QName/binary>>, + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session1} = amqp10_client:begin_session_sync(Connection), + {ok, _Sender1} = amqp10_client:attach_sender_link( + Session1, <<"test-sender-1">>, TargetAddress), + ExpectedErr1 = error_unauthorized( + <<"configure access to queue 'test queue' in vhost " + "'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session1, {ended, ExpectedErr1}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + %% Give the user configure permissions on the queue. + ok = set_permissions(Config, QName, <<>>, <<>>), + {ok, Session2} = amqp10_client:begin_session_sync(Connection), + {ok, _Sender2} = amqp10_client:attach_sender_link( + Session2, <<"test-sender-2">>, TargetAddress), + ExpectedErr2 = error_unauthorized( + <<"write access to exchange 'amq.default' in vhost " + "'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session2, {ended, ExpectedErr2}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + %% Give the user configure permissions on the queue and + %% write access to the default exchange. + ok = set_permissions(Config, QName, <<"amq\.default">>, <<>>), + {ok, Session3} = amqp10_client:begin_session_sync(Connection), + {ok, Sender3} = amqp10_client:attach_sender_link( + Session3, <<"test-sender-3">>, TargetAddress), + receive {amqp10_event, {link, Sender3, attached}} -> ok + after 5000 -> flush(missing_attached), + ct:fail("missing ATTACH from server") + end, + + ok = close_connection_sync(Connection). + +v1_attach_source_exchange(Config) -> + %% This source address means RabbitMQ will create a queue with a generated name + %% prefixed with amq.gen requiring configure access on the queue. + %% The queue is bound to the fanout exchange requiring write access on the queue + %% and read access on the fanout exchange. + %% To consume from the queue, we will also need read access on the queue. + SourceAddress = <<"/exchange/amq.fanout/ignored">>, + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session1} = amqp10_client:begin_session_sync(Connection), + {ok, _Recv1} = amqp10_client:attach_receiver_link( + Session1, <<"receiver-1">>, SourceAddress), + receive + {amqp10_event, + {session, Session1, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + description = {utf8, <<"configure access to queue 'amq.gen", _/binary>>}}}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + %% Give the user configure permissions on the queue. + ok = set_permissions(Config, <<"^amq\.gen">>, <<>>, <<>>), + {ok, Session2} = amqp10_client:begin_session_sync(Connection), + {ok, _Recv2} = amqp10_client:attach_receiver_link( + Session2, <<"receiver-2">>, SourceAddress), + receive + {amqp10_event, + {session, Session2, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + description = {utf8, <<"write access to queue 'amq.gen", _/binary>>}}}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + %% Give the user configure and write permissions on the queue. 
+ ok = set_permissions(Config, <<"^amq\.gen">>, <<"^amq\.gen">>, <<>>), + {ok, Session3} = amqp10_client:begin_session_sync(Connection), + {ok, _Recv3} = amqp10_client:attach_receiver_link( + Session3, <<"receiver-3">>, SourceAddress), + ExpectedErr1 = error_unauthorized( + <<"read access to exchange 'amq.fanout' in vhost " + "'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session3, {ended, ExpectedErr1}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + %% Give the user configure and write permissions on the queue, and read access on the exchange. + ok = set_permissions(Config, <<"^amq\.gen">>, <<"^amq\.gen">>, <<"amq\.fanout">>), + {ok, Session4} = amqp10_client:begin_session_sync(Connection), + {ok, _Recv4} = amqp10_client:attach_receiver_link( + Session4, <<"receiver-4">>, SourceAddress), + receive + {amqp10_event, + {session, Session4, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + description = {utf8, <<"read access to queue 'amq.gen", _/binary>>}}}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + %% Give the user configure, write, and read permissions on the queue, + %% and read access on the exchange. + ok = set_permissions(Config, <<"^amq\.gen">>, <<"^amq\.gen">>, <<"^(amq\.gen|amq\.fanout)">>), + {ok, Session5} = amqp10_client:begin_session_sync(Connection), + {ok, Recv5} = amqp10_client:attach_receiver_link( + Session5, <<"receiver-5">>, SourceAddress), + receive {amqp10_event, {link, Recv5, attached}} -> ok + after 5000 -> flush(missing_attached), + ct:fail("missing ATTACH from server") + end, + + ok = close_connection_sync(Connection). + +v1_send_to_topic(Config) -> + TargetAddresses = [<<"/topic/test vhost.test user.a.b">>, + <<"/exchange/amq.topic/test vhost.test user.a.b">>], + lists:foreach(fun(Address) -> + ok = send_to_topic(Address, Config) + end, TargetAddresses). + +send_to_topic(TargetAddress, Config) -> + User = ?config(test_user, Config), + Vhost = ?config(test_vhost, Config), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, Vhost), + ok = set_topic_permissions(Config, <<"amq.topic">>, <<"^$">>, <<"^$">>), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session1} = amqp10_client:begin_session_sync(Connection), + {ok, Sender1} = amqp10_client:attach_sender_link_sync( + Session1, <<"sender-1">>, TargetAddress), + ok = wait_for_credit(Sender1), + Msg1 = amqp10_msg:new(<<255>>, <<1>>, true), + ok = amqp10_client:send_msg(Sender1, Msg1), + + ExpectedErr = error_unauthorized( + <<"write access to topic 'test vhost.test user.a.b' in exchange " + "'amq.topic' in vhost 'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session1, {ended, ExpectedErr}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + ok = set_topic_permissions(Config, <<"amq.topic">>, <<"^{vhost}\.{username}\.a\.b$">>, <<"^$">>), + {ok, Session2} = amqp10_client:begin_session_sync(Connection), + {ok, Sender2} = amqp10_client:attach_sender_link_sync( + Session2, <<"sender-2">>, TargetAddress), + ok = wait_for_credit(Sender2), + Dtag = <<0, 0>>, + Msg2 = amqp10_msg:new(Dtag, <<2>>, false), + ok = amqp10_client:send_msg(Sender2, Msg2), + %% We expect RELEASED since no queue is bound. 
+ receive {amqp10_disposition, {released, Dtag}} -> ok + after 5000 -> ct:fail(released_timeout) + end, + + ok = amqp10_client:detach_link(Sender2), + ok = close_connection_sync(Connection). + +v1_send_to_topic_using_subject(Config) -> + TargetAddress = <<"/exchange/amq.topic">>, + User = ?config(test_user, Config), + Vhost = ?config(test_vhost, Config), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, Vhost), + ok = set_topic_permissions(Config, <<"amq.topic">>, <<"^\.a$">>, <<"^$">>), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Sender} = amqp10_client:attach_sender_link_sync( + Session, <<"sender">>, TargetAddress), + ok = wait_for_credit(Sender), + + Dtag1 = <<"dtag 1">>, + Msg1a = amqp10_msg:new(Dtag1, <<"m1">>, false), + Msg1b = amqp10_msg:set_properties(#{subject => <<".a">>}, Msg1a), + ok = amqp10_client:send_msg(Sender, Msg1b), + %% We have sufficient authorization, but expect RELEASED since no queue is bound. + receive {amqp10_disposition, {released, Dtag1}} -> ok + after 5000 -> ct:fail(released_timeout) + end, + + Dtag2 = <<"dtag 2">>, + Msg2a = amqp10_msg:new(Dtag2, <<"m2">>, false), + %% We don't have sufficient authorization. + Msg2b = amqp10_msg:set_properties(#{subject => <<".a.b">>}, Msg2a), + ok = amqp10_client:send_msg(Sender, Msg2b), + ExpectedErr = error_unauthorized( + <<"write access to topic '.a.b' in exchange 'amq.topic' in " + "vhost 'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session, {ended, ExpectedErr}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + ok = close_connection_sync(Connection). + +v1_attach_source_topic(Config) -> + %% These source addresses mean RabbitMQ will bind a queue to the default topic + %% exchange with binding key 'test vhost.test user.a.b'. + %% Therefore, we need read access to that topic. + %% We also test variable expansion in topic permission patterns. + SourceAddresses = [<<"/topic/test vhost.test user.a.b">>, + <<"/exchange/amq.topic/test vhost.test user.a.b">>], + lists:foreach(fun(Address) -> + ok = attach_source_topic0(Address, Config) + end, SourceAddresses). 
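+
+%% A minimal sketch of the {vhost}/{username} expansion exercised above: RabbitMQ
+%% substitutes these variables in topic permission patterns before matching the routing
+%% key. The helper below is hypothetical and only spells out what the pattern used in
+%% this suite effectively expands to for the test fixtures.
+expanded_topic_pattern_example() ->
+    Vhost = <<"test vhost">>,
+    User = <<"test user">>,
+    %% "^{vhost}\.{username}\.a\.b$" with both variables substituted:
+    <<"^", Vhost/binary, "\\.", User/binary, "\\.a\\.b$">>.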
+ +attach_source_topic0(SourceAddress, Config) -> + User = ?config(test_user, Config), + Vhost = ?config(test_vhost, Config), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, Vhost), + ok = set_topic_permissions(Config, <<"amq.topic">>, <<"^$">>, <<"^$">>), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session1} = amqp10_client:begin_session_sync(Connection), + {ok, _Recv1} = amqp10_client:attach_receiver_link( + Session1, <<"receiver-1">>, SourceAddress), + ExpectedErr = error_unauthorized( + <<"read access to topic 'test vhost.test user.a.b' in exchange " + "'amq.topic' in vhost 'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session1, {ended, ExpectedErr}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + ok = set_topic_permissions(Config, <<"amq.topic">>, <<"^$">>, <<"^{vhost}\.{username}\.a\.b$">>), + {ok, Session2} = amqp10_client:begin_session_sync(Connection), + {ok, Recv2} = amqp10_client:attach_receiver_link( + Session2, <<"receiver-2">>, SourceAddress), + receive {amqp10_event, {link, Recv2, attached}} -> ok + after 5000 -> flush(missing_attached), + ct:fail("missing ATTACH from server") + end, + + ok = close_connection_sync(Connection). + +v1_attach_target_internal_exchange(Config) -> + XName = <<"test exchange">>, + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{internal = true, + exchange = XName}), + + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl := anon}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = <<"/exchange/", XName/binary, "/some-routing-key">>, + {ok, _} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ExpectedErr = error_unauthorized( + <<"forbidden to publish to internal exchange 'test exchange' in vhost '/'">>), + receive {amqp10_event, {session, Session, {ended, ExpectedErr}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + + ok = amqp10_client:close_connection(Connection), + #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = XName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +attach_source_queue(Config) -> + {Conn, Session, LinkPair} = init_pair(Config), + QName = <<"🍿"/utf8>>, + Address = rabbitmq_amqp_address:queue(QName), + + %% missing read permission to queue + ok = set_permissions(Config, QName, <<>>, <<>>), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + + {ok, _Recv} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address), + ExpectedErr = error_unauthorized( + <<"read access to queue '", QName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + receive {amqp10_event, + {session, Session, + {ended, ExpectedErr}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive AMQP_ERROR_UNAUTHORIZED_ACCESS") + end, + ok = close_connection_sync(Conn). 
+ +attach_target_exchange(Config) -> + XName = <<"amq.fanout">>, + Address1 = rabbitmq_amqp_address:exchange(XName), + Address2 = rabbitmq_amqp_address:exchange(XName, <<"some-key">>), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + + {ok, Session1} = amqp10_client:begin_session_sync(Connection), + {ok, _} = amqp10_client:attach_sender_link(Session1, <<"test-sender">>, Address1), + ExpectedErr = error_unauthorized( + <<"write access to exchange '", XName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session1, {ended, ExpectedErr}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + {ok, Session2} = amqp10_client:begin_session_sync(Connection), + {ok, _} = amqp10_client:attach_sender_link(Session2, <<"test-sender">>, Address2), + receive {amqp10_event, {session, Session2, {ended, ExpectedErr}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + ok = amqp10_client:close_connection(Connection). + +attach_target_topic_exchange(Config) -> + TargetAddress = rabbitmq_amqp_address:exchange( + <<"amq.topic">>, <<"test vhost.test user.a.b">>), + ok = send_to_topic(TargetAddress, Config). + +attach_target_queue(Config) -> + {Conn, Session, LinkPair} = init_pair(Config), + QName = <<"🍿"/utf8>>, + Address = rabbitmq_amqp_address:queue(QName), + + %% missing write permission to default exchange + ok = set_permissions(Config, QName, <<>>, <<>>), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + + {ok, _} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ExpectedErr = error_unauthorized( + <<"write access to exchange 'amq.default' ", + "in vhost 'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session, {ended, ExpectedErr}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + ok = amqp10_client:close_connection(Conn). + +target_per_message_exchange(Config) -> + TargetAddress = null, + To1 = rabbitmq_amqp_address:exchange(<<"amq.fanout">>), + To2 = rabbitmq_amqp_address:queue(<<"q1">>), + %% missing write permission to default exchange + ok = set_permissions(Config, <<>>, <<"amq.fanout">>, <<>>), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Sender} = amqp10_client:attach_sender_link_sync(Session, <<"sender">>, TargetAddress), + ok = wait_for_credit(Sender), + + %% We have sufficient authorization, but expect RELEASED since no queue is bound. + Tag1 = <<"dtag 1">>, + Msg1 = amqp10_msg:set_properties(#{to => To1}, amqp10_msg:new(Tag1, <<"m1">>)), + ok = amqp10_client:send_msg(Sender, Msg1), + receive {amqp10_disposition, {released, Tag1}} -> ok + after 5000 -> ct:fail(released_timeout) + end, + + %% We don't have sufficient authorization. + Tag2 = <<"dtag 2">>, + Msg2 = amqp10_msg:set_properties(#{to => To2}, amqp10_msg:new(Tag2, <<"m2">>)), + ok = amqp10_client:send_msg(Sender, Msg2), + ExpectedErr = error_unauthorized( + <<"write access to exchange 'amq.default' in " + "vhost 'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session, {ended, ExpectedErr}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + ok = close_connection_sync(Connection). 
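+
+%% A minimal sketch (assuming an open Session, not a test case) of the anonymous-terminus
+%% pattern used above: attaching a sender with a null target and routing each message via
+%% its 'to' property, so authorization is checked per message rather than at attach time.
+send_via_anonymous_terminus_example(Session) ->
+    {ok, Sender} = amqp10_client:attach_sender_link_sync(Session, <<"anon sender">>, null),
+    ok = wait_for_credit(Sender),
+    To1 = rabbitmq_amqp_address:exchange(<<"amq.fanout">>),
+    To2 = rabbitmq_amqp_address:queue(<<"some queue">>),
+    Msg1 = amqp10_msg:set_properties(#{to => To1}, amqp10_msg:new(<<"t1">>, <<"m1">>)),
+    Msg2 = amqp10_msg:set_properties(#{to => To2}, amqp10_msg:new(<<"t2">>, <<"m2">>)),
+    ok = amqp10_client:send_msg(Sender, Msg1),
+    ok = amqp10_client:send_msg(Sender, Msg2),
+    ok = amqp10_client:detach_link(Sender).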
+ +target_per_message_internal_exchange(Config) -> + XName = <<"my internal exchange">>, + XProps = #{internal => true}, + TargetAddress = null, + To = rabbitmq_amqp_address:exchange(XName), + + ok = set_permissions(Config, XName, XName, <<>>), + {Conn1, Session1, LinkPair1} = init_pair(Config), + ok = rabbitmq_amqp_client:declare_exchange(LinkPair1, XName, XProps), + {ok, Sender} = amqp10_client:attach_sender_link_sync(Session1, <<"sender">>, TargetAddress), + ok = wait_for_credit(Sender), + + Tag = <<"tag">>, + Msg = amqp10_msg:set_properties(#{to => To}, amqp10_msg:new(Tag, <<"msg">>, true)), + ok = amqp10_client:send_msg(Sender, Msg), + ExpectedErr = error_unauthorized( + <<"forbidden to publish to internal exchange '", XName/binary, "' in vhost 'test vhost'">>), + receive {amqp10_event, {session, Session1, {ended, ExpectedErr}}} -> ok + after 5000 -> flush(aaa), + ct:fail({missing_event, ?LINE}) + end, + ok = close_connection_sync(Conn1), + + Init = {_, _, LinkPair2} = init_pair(Config), + ok = rabbitmq_amqp_client:delete_exchange(LinkPair2, XName), + ok = cleanup_pair(Init). + +target_per_message_topic(Config) -> + TargetAddress = null, + To1 = rabbitmq_amqp_address:exchange(<<"amq.topic">>, <<".a">>), + To2 = rabbitmq_amqp_address:exchange(<<"amq.topic">>, <<".a.b">>), + User = ?config(test_user, Config), + Vhost = ?config(test_vhost, Config), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, Vhost), + ok = set_topic_permissions(Config, <<"amq.topic">>, <<"^\.a$">>, <<"^$">>), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Sender} = amqp10_client:attach_sender_link_sync(Session, <<"sender">>, TargetAddress), + ok = wait_for_credit(Sender), + + %% We have sufficient authorization, but expect RELEASED since no queue is bound. + Tag1 = <<"dtag 1">>, + Msg1 = amqp10_msg:set_properties(#{to => To1}, amqp10_msg:new(Tag1, <<"m1">>)), + ok = amqp10_client:send_msg(Sender, Msg1), + receive {amqp10_disposition, {released, Tag1}} -> ok + after 5000 -> ct:fail(released_timeout) + end, + + %% We don't have sufficient authorization. + Tag2 = <<"dtag 2">>, + Msg2 = amqp10_msg:set_properties(#{to => To2}, amqp10_msg:new(Tag2, <<"m2">>)), + ok = amqp10_client:send_msg(Sender, Msg2), + ExpectedErr = error_unauthorized( + <<"write access to topic '.a.b' in exchange 'amq.topic' in " + "vhost 'test vhost' refused for user 'test user'">>), + receive {amqp10_event, {session, Session, {ended, ExpectedErr}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + ok = close_connection_sync(Connection). 
+ +authn_failure_event(Config) -> + ok = event_recorder:start(Config), + + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + Vhost = ?config(test_vhost, Config), + User = ?config(test_user, Config), + OpnConf = #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, User, <<"wrong password">>}, + hostname => <<"vhost:", Vhost/binary>>}, + + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, sasl_auth_failure}}} -> ok + after 5000 -> flush(missing_closed), + ct:fail("did not receive sasl_auth_failure") + end, + + [E | _] = event_recorder:get_events(Config), + ok = event_recorder:stop(Config), + + assert_event_type(user_authentication_failure, E), + assert_event_prop([{name, <<"test user">>}, + {auth_mechanism, <<"PLAIN">>}, + {ssl, false}, + {protocol, {1, 0}}], + E). + +sasl_anonymous_success(Config) -> + Mechanism = anon, + ok = sasl_success(Mechanism, Config). + +sasl_plain_success(Config) -> + Mechanism = {plain, <<"guest">>, <<"guest">>}, + ok = sasl_success(Mechanism, Config). + +sasl_success(Mechanism, Config) -> + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl := Mechanism}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail(missing_opened) + end, + ok = amqp10_client:close_connection(Connection). + +sasl_anonymous_failure(Config) -> + App = rabbit, + Par = anonymous_login_user, + {ok, Default} = rpc(Config, application, get_env, [App, Par]), + %% Prohibit anonymous login. + ok = rpc(Config, application, set_env, [App, Par, none]), + + Mechanism = anon, + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl := Mechanism}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, Reason}}} -> + ?assertEqual({sasl_not_supported, Mechanism}, Reason) + after 5000 -> ct:fail(missing_closed) + end, + + ok = rpc(Config, application, set_env, [App, Par, Default]). + +sasl_plain_failure(Config) -> + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl := {plain, <<"guest">>, <<"wrong password">>}}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, Reason}}} -> + ?assertEqual(sasl_auth_failure, Reason) + after 5000 -> ct:fail(missing_closed) + end. + +%% Skipping SASL is disallowed in RabbitMQ. +sasl_none_failure(Config) -> + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl := none}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, _Reason}}} -> ok + after 5000 -> ct:fail(missing_closed) + end. + +vhost_absent(Config) -> + OpnConf = connection_config(Config, <<"this vhost does not exist">>), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, _}}} -> ok + after 5000 -> ct:fail(missing_closed) + end. 
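+
+%% A minimal sketch of the connection options these authn tests vary: the SASL mechanism
+%% and the vhost, which is selected via the AMQP open.hostname field using the "vhost:"
+%% prefix. The container_id value is illustrative; host and port come from the CT broker
+%% helpers as in connection_config/2 below.
+anonymous_connection_config_example(Config) ->
+    Host = ?config(rmq_hostname, Config),
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    #{address => Host,
+      port => Port,
+      container_id => <<"example container">>,
+      sasl => anon,
+      hostname => <<"vhost:/">>}.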
+ +vhost_connection_limit(Config) -> + Vhost = proplists:get_value(test_vhost, Config), + ok = rabbit_ct_broker_helpers:set_vhost_limit(Config, 0, Vhost, max_connections, 1), + + OpnConf = connection_config(Config), + {ok, C1} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, C1, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + {ok, C2} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, C2, {closed, _}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf1 = OpnConf0#{sasl := anon}, + {ok, C3} = amqp10_client:open_connection(OpnConf1), + receive {amqp10_event, {connection, C3, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + {ok, C4} = amqp10_client:open_connection(OpnConf1), + receive {amqp10_event, {connection, C4, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + [ok = close_connection_sync(C) || C <- [C1, C3, C4]], + ok = rabbit_ct_broker_helpers:clear_vhost_limit(Config, 0, Vhost). + +user_connection_limit(Config) -> + DefaultUser = <<"guest">>, + Limit = max_connections, + ok = rabbit_ct_broker_helpers:set_user_limits(Config, DefaultUser, #{Limit => 0}), + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl := anon}, + {ok, C1} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, C1, {closed, _}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + {ok, C2} = amqp10_client:open_connection(connection_config(Config)), + receive {amqp10_event, {connection, C2, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + ok = close_connection_sync(C2), + ok = rabbit_ct_broker_helpers:clear_user_limits(Config, DefaultUser, Limit). + +v1_vhost_queue_limit(Config) -> + Vhost = proplists:get_value(test_vhost, Config), + ok = rabbit_ct_broker_helpers:set_vhost_limit(Config, 0, Vhost, max_queues, 0), + QName = <<"q1">>, + ok = set_permissions(Config, QName, <<>>, <<>>), + + OpnConf1 = connection_config(Config), + {ok, C1} = amqp10_client:open_connection(OpnConf1), + {ok, Session1} = amqp10_client:begin_session_sync(C1), + TargetAddress = <<"/queue/", QName/binary>>, + {ok, _Sender1} = amqp10_client:attach_sender_link( + Session1, <<"test-sender-1">>, TargetAddress), + ExpectedErr = amqp_error( + ?V_1_0_AMQP_ERROR_RESOURCE_LIMIT_EXCEEDED, + <<"cannot declare queue 'q1': queue limit in vhost 'test vhost' (0) is reached">>), + receive {amqp10_event, {session, Session1, {ended, ExpectedErr}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive expected error") + end, + + OpnConf2 = connection_config(Config, <<"/">>), + OpnConf3 = OpnConf2#{sasl := anon}, + {ok, C2} = amqp10_client:open_connection(OpnConf3), + {ok, Session2} = amqp10_client:begin_session_sync(C2), + {ok, Sender2} = amqp10_client:attach_sender_link( + Session2, <<"test-sender-2">>, TargetAddress), + receive {amqp10_event, {link, Sender2, attached}} -> ok + after 5000 -> flush(missing_attached), + ct:fail("missing ATTACH from server") + end, + + ok = close_connection_sync(C1), + ok = close_connection_sync(C2), + ok = rabbit_ct_broker_helpers:clear_vhost_limit(Config, 0, Vhost). 
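+
+%% A minimal sketch of the limit plumbing the tests above rely on: vhost limits and
+%% per-user limits are set and cleared through the broker helpers. The values are
+%% illustrative only.
+limits_example(Config, Vhost, User) ->
+    ok = rabbit_ct_broker_helpers:set_vhost_limit(Config, 0, Vhost, max_connections, 1),
+    ok = rabbit_ct_broker_helpers:set_user_limits(Config, User, #{max_connections => 0}),
+    %% ... open connections and assert on the outcome here ...
+    ok = rabbit_ct_broker_helpers:clear_user_limits(Config, User, max_connections),
+    ok = rabbit_ct_broker_helpers:clear_vhost_limit(Config, 0, Vhost).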
+ +declare_exchange(Config) -> + {Conn, _Session, LinkPair} = init_pair(Config), + XName = <<"📮"/utf8>>, + ExpectedErr = error_unauthorized( + <<"configure access to exchange '", XName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:declare_exchange(LinkPair, XName, #{})), + ok = close_connection_sync(Conn). + +delete_exchange(Config) -> + {Conn, Session1, LinkPair1} = init_pair(Config), + XName = <<"📮"/utf8>>, + ok = set_permissions(Config, XName, <<>>, <<>>), + ok = rabbitmq_amqp_client:declare_exchange(LinkPair1, XName, #{}), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair1), + ok = amqp10_client:end_session(Session1), + + ok = clear_permissions(Config), + + {ok, Session2} = amqp10_client:begin_session_sync(Conn), + {ok, LinkPair2} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session2, <<"pair 2">>), + ExpectedErr = error_unauthorized( + <<"configure access to exchange '", XName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:delete_exchange(LinkPair2, XName)), + ok = close_connection_sync(Conn), + + ok = set_permissions(Config, XName, <<>>, <<>>), + Init = {_, _, LinkPair3} = init_pair(Config), + ok = rabbitmq_amqp_client:delete_exchange(LinkPair3, XName), + ok = cleanup_pair(Init). + +declare_queue(Config) -> + {Conn, _, LinkPair} = init_pair(Config), + QName = <<"🍿"/utf8>>, + ExpectedErr = error_unauthorized( + <<"configure access to queue '", QName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{})), + ok = close_connection_sync(Conn). + +declare_queue_dlx_queue(Config) -> + {Conn, _, LinkPair} = init_pair(Config), + QName = <<"🍿"/utf8>>, + DlxName = <<"📥"/utf8>>, + QProps = #{arguments => #{<<"x-dead-letter-exchange">> => {utf8, DlxName}}}, + %% missing read permission to queue + ok = set_permissions(Config, QName, DlxName, <<>>), + ExpectedErr = error_unauthorized( + <<"read access to queue '", QName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps)), + ok = close_connection_sync(Conn). + +declare_queue_dlx_exchange(Config) -> + {Conn, _, LinkPair} = init_pair(Config), + QName = <<"🍿"/utf8>>, + DlxName = <<"📥"/utf8>>, + QProps = #{arguments => #{<<"x-dead-letter-exchange">> => {utf8, DlxName}}}, + %% missing write permission to dead letter exchange + ok = set_permissions(Config, QName, <<>>, QName), + ExpectedErr = error_unauthorized( + <<"write access to exchange '", DlxName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps)), + ok = close_connection_sync(Conn). 
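+
+%% A minimal sketch of the permission set that the two dead-letter tests above imply is
+%% sufficient for declaring a queue with an x-dead-letter-exchange argument: configure
+%% and read on the queue plus write on the dead letter exchange. Queue and exchange
+%% names here are illustrative.
+declare_queue_with_dlx_example(LinkPair, Config) ->
+    QName = <<"dlx source queue">>,
+    DlxName = <<"dlx target exchange">>,
+    ok = set_permissions(Config, QName, DlxName, QName),
+    QProps = #{arguments => #{<<"x-dead-letter-exchange">> => {utf8, DlxName}}},
+    {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps),
+    ok.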
+ +declare_queue_vhost_queue_limit(Config) -> + QName = <<"🍿"/utf8>>, + ok = set_permissions(Config, QName, <<>>, <<>>), + Vhost = proplists:get_value(test_vhost, Config), + ok = rabbit_ct_broker_helpers:set_vhost_limit(Config, 0, Vhost, max_queues, 0), + + Init = {_, _, LinkPair} = init_pair(Config), + {error, Resp} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ?assertMatch(#{subject := <<"403">>}, amqp10_msg:properties(Resp)), + ?assertEqual( + #'v1_0.amqp_value'{ + content = {utf8, <<"cannot declare queue '", QName/binary, "': queue limit in vhost 'test vhost' (0) is reached">>}}, + amqp10_msg:body(Resp)), + + ok = cleanup_pair(Init), + ok = rabbit_ct_broker_helpers:clear_vhost_limit(Config, 0, Vhost). + +delete_queue(Config) -> + {Conn, Session1, LinkPair1} = init_pair(Config), + QName = <<"🍿"/utf8>>, + ok = set_permissions(Config, QName, <<>>, <<>>), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair1, QName, #{}), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair1), + ok = amqp10_client:end_session(Session1), + + ok = clear_permissions(Config), + + {ok, Session2} = amqp10_client:begin_session_sync(Conn), + {ok, LinkPair2} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session2, <<"pair 2">>), + ExpectedErr = error_unauthorized( + <<"configure access to queue '", QName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:delete_queue(LinkPair2, QName)), + ok = close_connection_sync(Conn). + +purge_queue(Config) -> + {Conn, _, LinkPair} = init_pair(Config), + QName = <<"🍿"/utf8>>, + %% missing read permission to queue + ok = set_permissions(Config, QName, <<>>, <<>>), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ExpectedErr = error_unauthorized( + <<"read access to queue '", QName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:purge_queue(LinkPair, QName)), + ok = close_connection_sync(Conn). + +bind_queue_source(Config) -> + {Conn, _, LinkPair} = init_pair(Config), + QName = atom_to_binary(?FUNCTION_NAME), + %% missing read permission to source exchange + ok = set_permissions(Config, QName, QName, QName), + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + + XName = <<"amq.direct">>, + ExpectedErr = error_unauthorized( + <<"read access to exchange '", XName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:bind_queue(LinkPair, QName, XName, <<"key">>, #{})), + ok = close_connection_sync(Conn). + +bind_queue_destination(Config) -> + {Conn, _, LinkPair} = init_pair(Config), + QName = <<"my 🐇"/utf8>>, + XName = <<"amq.direct">>, + %% missing write permission to destination queue + ok = set_permissions(Config, QName, <<>>, XName), + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + + ExpectedErr = error_unauthorized( + <<"write access to queue '", QName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:bind_queue(LinkPair, QName, XName, <<"key">>, #{})), + ok = close_connection_sync(Conn). 
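+
+%% A minimal sketch of a grant under which binding should succeed according to the two
+%% tests above: read on the source exchange, write on the destination queue, plus
+%% configure on the queue so it can be declared first. The queue name is illustrative.
+bind_queue_granted_example(LinkPair, Config) ->
+    QName = <<"binding example queue">>,
+    XName = <<"amq.direct">>,
+    ok = set_permissions(Config, QName, QName, XName),
+    {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}),
+    ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, XName, <<"key">>, #{}).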
+ +bind_exchange_source(Config) -> + {Conn, _, LinkPair} = init_pair(Config), + SrcXName = <<"amq.fanout">>, + DstXName = <<"amq.direct">>, + %% missing read permission to source exchange + ok = set_permissions(Config, <<>>, DstXName, <<>>), + + ExpectedErr = error_unauthorized( + <<"read access to exchange '", SrcXName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:bind_exchange(LinkPair, DstXName, SrcXName, <<"key">>, #{})), + ok = close_connection_sync(Conn). + +bind_exchange_destination(Config) -> + {Conn, _, LinkPair} = init_pair(Config), + SrcXName = <<"amq.fanout">>, + DstXName = <<"amq.direct">>, + %% missing write permission to destination exchange + ok = set_permissions(Config, <<>>, <<>>, SrcXName), + + ExpectedErr = error_unauthorized( + <<"write access to exchange '", DstXName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:bind_exchange(LinkPair, DstXName, SrcXName, <<"key">>, #{})), + ok = close_connection_sync(Conn). + +bind_to_topic_exchange(Config) -> + {Conn, _, LinkPair} = init_pair(Config), + SrcXName = <<"amq.topic">>, + DstXName = <<"amq.direct">>, + Topic = <<"a.b.🐇"/utf8>>, + + User = ?config(test_user, Config), + Vhost = ?config(test_vhost, Config), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, Vhost), + %% missing read permission to Topic + ok = set_topic_permissions(Config, SrcXName, <<".*">>, <<"wrong.topic">>), + + ExpectedErr = error_unauthorized( + <<"read access to topic '", Topic/binary, + "' in exchange 'amq.topic' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:bind_exchange(LinkPair, DstXName, SrcXName, Topic, #{})), + ok = close_connection_sync(Conn). + +unbind_queue_source(Config) -> + {Conn, Session1, LinkPair1} = init_pair(Config), + QName = BindingKey = atom_to_binary(?FUNCTION_NAME), + XName = <<"amq.direct">>, + ok = set_permissions(Config, QName, QName, XName), + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair1, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair1, QName, XName, BindingKey, #{}), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair1), + ok = amqp10_client:end_session(Session1), + + %% remove read permission to source exchange + ok = set_permissions(Config, QName, QName, <<"^$">>), + + {ok, Session2} = amqp10_client:begin_session_sync(Conn), + {ok, LinkPair2} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session2, <<"pair 2">>), + ExpectedErr = error_unauthorized( + <<"read access to exchange '", XName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:unbind_queue(LinkPair2, QName, XName, BindingKey, #{})), + ok = close_connection_sync(Conn). 
+ +unbind_queue_target(Config) -> + {Conn, Session1, LinkPair1} = init_pair(Config), + QName = BindingKey = atom_to_binary(?FUNCTION_NAME), + XName = <<"amq.direct">>, + ok = set_permissions(Config, QName, QName, XName), + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair1, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair1, QName, XName, BindingKey, #{}), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair1), + ok = amqp10_client:end_session(Session1), + + %% remove write permission to destination queue + ok = set_permissions(Config, QName, <<"^$">>, XName), + + {ok, Session2} = amqp10_client:begin_session_sync(Conn), + {ok, LinkPair2} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session2, <<"pair 2">>), + ExpectedErr = error_unauthorized( + <<"write access to queue '", QName/binary, + "' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:unbind_queue(LinkPair2, QName, XName, BindingKey, #{})), + ok = close_connection_sync(Conn). + +unbind_from_topic_exchange(Config) -> + Init = {_, _, LinkPair1} = init_pair(Config), + SrcXName = <<"amq.topic">>, + DstXName = <<"amq.direct">>, + Topic = <<"a.b.🐇"/utf8>>, + + User = ?config(test_user, Config), + Vhost = ?config(test_vhost, Config), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, Vhost), + ok = set_topic_permissions(Config, SrcXName, <<"^$">>, Topic), + ok = rabbitmq_amqp_client:bind_exchange(LinkPair1, DstXName, SrcXName, Topic, #{}), + + %% remove Topic read permission + ok = set_topic_permissions(Config, SrcXName, <<"^$">>, <<"^$">>), + %% Start a new connection since topic permissions are cached by the AMQP session process. + ok = cleanup_pair(Init), + {Conn, _, LinkPair2} = init_pair(Config), + + ExpectedErr = error_unauthorized( + <<"read access to topic '", Topic/binary, + "' in exchange 'amq.topic' in vhost 'test vhost' refused for user 'test user'">>), + ?assertEqual({error, {session_ended, ExpectedErr}}, + rabbitmq_amqp_client:unbind_exchange(LinkPair2, DstXName, SrcXName, Topic, #{})), + + ok = close_connection_sync(Conn). + +init_pair(Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"mgmt link pair">>), + {Connection, Session, LinkPair}. + +cleanup_pair({Connection, Session, LinkPair}) -> + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +connection_config(Config) -> + Vhost = ?config(test_vhost, Config), + connection_config(Config, Vhost). + +connection_config(Config, Vhost) -> + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + User = Password = ?config(test_user, Config), + #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, User, Password}, + hostname => <<"vhost:", Vhost/binary>>}. + +set_permissions(Config, ConfigurePerm, WritePerm, ReadPerm) -> + ok = rabbit_ct_broker_helpers:set_permissions(Config, + ?config(test_user, Config), + ?config(test_vhost, Config), + ConfigurePerm, + WritePerm, + ReadPerm). 
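+
+%% A minimal sketch of how this wrapper relates to the full grant used elsewhere in the
+%% suite: RabbitMQ permissions are configure/write/read regular expressions, so <<".*">>
+%% in every slot is effectively what rabbit_ct_broker_helpers:set_full_permissions/3
+%% grants, while <<"^$">> (or an empty pattern) denies. This equivalence is an assumption
+%% stated for illustration, not something asserted by the tests.
+set_full_permissions_example(Config) ->
+    ok = set_permissions(Config, <<".*">>, <<".*">>, <<".*">>).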
+ +set_topic_permissions(Config, Exchange, WritePat, ReadPat) -> + ok = rpc(Config, + rabbit_auth_backend_internal, + set_topic_permissions, + [?config(test_user, Config), + ?config(test_vhost, Config), + Exchange, + WritePat, + ReadPat, + <<"acting-user">>]). + +clear_permissions(Config) -> + User = ?config(test_user, Config), + Vhost = ?config(test_vhost, Config), + ok = rabbit_ct_broker_helpers:clear_permissions(Config, User, Vhost), + ok = rpc(Config, + rabbit_auth_backend_internal, + clear_topic_permissions, + [User, Vhost, <<"acting-user">>]). + +error_unauthorized(Description) -> + amqp_error(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, Description). + +amqp_error(Condition, Description) + when is_binary(Description) -> + #'v1_0.error'{ + condition = Condition, + description = {utf8, Description}}. + +% before we can send messages we have to wait for credit from the server +wait_for_credit(Sender) -> + receive + {amqp10_event, {link, Sender, credited}} -> + flush(?FUNCTION_NAME), + ok + after 5000 -> + flush("wait_for_credit timed out"), + ct:fail(credited_timeout) + end. + +flush(Prefix) -> + receive Msg -> + ct:pal("~ts flushed: ~p~n", [Prefix, Msg]), + flush(Prefix) + after 1 -> + ok + end. + +delete_all_queues(Config) -> + Qs = rpc(Config, rabbit_amqqueue, list, []), + [{ok, _QLen} = rpc(Config, rabbit_amqqueue, delete, [Q, false, false, <<"fake-user">>]) + || Q <- Qs]. + +close_connection_sync(Connection) + when is_pid(Connection) -> + ok = amqp10_client:close_connection(Connection), + receive {amqp10_event, {connection, Connection, {closed, normal}}} -> ok + after 5000 -> flush(missing_closed), + ct:fail("missing CLOSE from server") + end. diff --git a/deps/rabbit/test/amqp_client_SUITE.erl b/deps/rabbit/test/amqp_client_SUITE.erl new file mode 100644 index 000000000000..7400227bb5ce --- /dev/null +++ b/deps/rabbit/test/amqp_client_SUITE.erl @@ -0,0 +1,5952 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(amqp_client_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("amqp10_common/include/amqp10_framing.hrl"). + +-compile([nowarn_export_all, + export_all]). + +-import(rabbit_ct_broker_helpers, + [get_node_config/3, + rpc/4, + rpc/5, + drain_node/2, + revive_node/2 + ]). +-import(rabbit_ct_helpers, + [eventually/1, eventually/3]). +-import(event_recorder, + [assert_event_type/2, + assert_event_prop/2]). + +all() -> + [ + {group, cluster_size_1}, + {group, cluster_size_3}, + {group, metrics} + ]. 
+ +groups() -> + [ + {cluster_size_1, [shuffle], + [ + reliable_send_receive_with_outcomes_classic_queue, + reliable_send_receive_with_outcomes_quorum_queue, + sender_settle_mode_unsettled, + sender_settle_mode_unsettled_fanout, + sender_settle_mode_mixed, + quorum_queue_rejects, + receiver_settle_mode_first, + publishing_to_non_existing_queue_should_settle_with_released, + open_link_to_non_existing_destination_should_end_session, + roundtrip_with_drain_classic_queue, + roundtrip_with_drain_quorum_queue, + roundtrip_with_drain_stream, + drain_many_classic_queue, + drain_many_quorum_queue, + drain_many_stream, + amqp_stream_amqpl, + amqp_quorum_queue_amqpl, + message_headers_conversion, + multiple_sessions, + server_closes_link_classic_queue, + server_closes_link_quorum_queue, + server_closes_link_stream, + server_closes_link_exchange, + link_target_classic_queue_deleted, + link_target_quorum_queue_deleted, + target_queues_deleted_accepted, + events, + sync_get_unsettled_classic_queue, + sync_get_unsettled_quorum_queue, + sync_get_unsettled_stream, + sync_get_unsettled_2_classic_queue, + sync_get_unsettled_2_quorum_queue, + sync_get_unsettled_2_stream, + sync_get_settled_classic_queue, + sync_get_settled_quorum_queue, + sync_get_settled_stream, + timed_get_classic_queue, + timed_get_quorum_queue, + timed_get_stream, + stop_classic_queue, + stop_quorum_queue, + stop_stream, + priority_classic_queue, + priority_quorum_queue, + consumer_priority_classic_queue, + consumer_priority_quorum_queue, + single_active_consumer_classic_queue, + single_active_consumer_quorum_queue, + single_active_consumer_priority_quorum_queue, + single_active_consumer_drain_classic_queue, + single_active_consumer_drain_quorum_queue, + detach_requeues_one_session_classic_queue, + detach_requeues_one_session_quorum_queue, + detach_requeues_drop_head_classic_queue, + resource_alarm_before_session_begin, + resource_alarm_after_session_begin, + max_message_size_client_to_server, + max_message_size_server_to_client, + global_counters, + stream_filtering, + available_messages_classic_queue, + available_messages_quorum_queue, + available_messages_stream, + incoming_message_interceptors, + trace, + user_id, + message_ttl, + plugin, + idle_time_out_on_server, + idle_time_out_on_client, + idle_time_out_too_short, + rabbit_status_connection_count, + handshake_timeout, + credential_expires, + attach_to_exclusive_queue, + modified_classic_queue, + modified_quorum_queue, + modified_dead_letter_headers_exchange, + dead_letter_headers_exchange, + dead_letter_reject, + dead_letter_reject_message_order_classic_queue, + dead_letter_reject_message_order_quorum_queue, + dead_letter_reject_many_message_order_classic_queue, + dead_letter_reject_many_message_order_quorum_queue, + accept_multiple_message_order_classic_queue, + accept_multiple_message_order_quorum_queue, + release_multiple_message_order_classic_queue, + release_multiple_message_order_quorum_queue, + immutable_bare_message, + receive_many_made_available_over_time_classic_queue, + receive_many_made_available_over_time_quorum_queue, + receive_many_made_available_over_time_stream, + receive_many_auto_flow_classic_queue, + receive_many_auto_flow_quorum_queue, + receive_many_auto_flow_stream, + incoming_window_closed_transfer_flow_order, + incoming_window_closed_stop_link, + incoming_window_closed_close_link, + incoming_window_closed_rabbitmq_internal_flow_classic_queue, + incoming_window_closed_rabbitmq_internal_flow_quorum_queue, + 
tcp_back_pressure_rabbitmq_internal_flow_classic_queue, + tcp_back_pressure_rabbitmq_internal_flow_quorum_queue + ]}, + + {cluster_size_3, [shuffle], + [ + dead_letter_into_stream, + last_queue_confirms, + target_queue_deleted, + target_classic_queue_down, + async_notify_settled_classic_queue, + async_notify_settled_quorum_queue, + async_notify_settled_stream, + async_notify_unsettled_classic_queue, + async_notify_unsettled_quorum_queue, + async_notify_unsettled_stream, + link_flow_control, + classic_queue_on_old_node, + classic_queue_on_new_node, + quorum_queue_on_old_node, + quorum_queue_on_new_node, + maintenance, + leader_transfer_quorum_queue_credit_single, + leader_transfer_quorum_queue_credit_batches, + leader_transfer_stream_credit_single, + leader_transfer_stream_credit_batches, + list_connections, + detach_requeues_two_connections_classic_queue, + detach_requeues_two_connections_quorum_queue + ]}, + + {metrics, [shuffle], + [ + auth_attempt_metrics + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:merge_app_env( + Config, {rabbit, [{quorum_tick_interval, 1000}, + {stream_tick_interval, 1000} + ]}). + +end_per_suite(Config) -> + Config. + +init_per_group(Group, Config) -> + Nodes = case Group of + cluster_size_3 -> 3; + _ -> 1 + end, + Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), + Config1 = rabbit_ct_helpers:set_config( + Config, [{rmq_nodes_count, Nodes}, + {rmq_nodename_suffix, Suffix}]), + rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_, Config) -> + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(T, Config) + when T =:= message_headers_conversion orelse + T =:= roundtrip_with_drain_quorum_queue orelse + T =:= drain_many_quorum_queue orelse + T =:= timed_get_quorum_queue orelse + T =:= available_messages_quorum_queue -> + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "Receiving with drain from quorum queues in credit API v1 have a known " + "bug that they reply with send_drained before delivering the message."} + end; +init_per_testcase(single_active_consumer_drain_quorum_queue = T, Config) -> + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "Draining a SAC inactive quorum queue consumer with credit API v1 " + "is known to be unsupported."} + end; +init_per_testcase(T, Config) + when T =:= incoming_window_closed_close_link orelse + T =:= incoming_window_closed_rabbitmq_internal_flow_classic_queue orelse + T =:= incoming_window_closed_rabbitmq_internal_flow_quorum_queue orelse + T =:= tcp_back_pressure_rabbitmq_internal_flow_classic_queue orelse + T =:= tcp_back_pressure_rabbitmq_internal_flow_quorum_queue -> + %% The new RabbitMQ internal flow control + %% writer proc <- session proc <- queue proc + %% is only available with credit API v2. 
+ case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "Feature flag rabbitmq_4.0.0 is disabled"} + end; +init_per_testcase(T, Config) + when T =:= modified_quorum_queue orelse + T =:= modified_dead_letter_headers_exchange -> + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "Feature flag rabbitmq_4.0.0 is disabled, but needed for " + "the new #modify{} command being sent to quorum queues."} + end; +init_per_testcase(T, Config) + when T =:= detach_requeues_one_session_classic_queue orelse + T =:= detach_requeues_drop_head_classic_queue orelse + T =:= detach_requeues_two_connections_classic_queue orelse + T =:= single_active_consumer_classic_queue -> + %% Cancel API v2 reuses feature flag rabbitmq_4.0.0. + %% In 3.13, with cancel API v1, when a receiver detaches with unacked messages, these messages + %% will remain unacked and unacked message state will be left behind in the server session + %% process state. + %% In contrast, cancel API v2 in 4.x will requeue any unacked messages if the receiver detaches. + %% We skip the single active consumer tests because these test cases assume that detaching a + %% receiver link will requeue unacked messages. + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "Cancel API v2 is disabled due to feature flag rabbitmq_4.0.0 being disabled."} + end; +init_per_testcase(T, Config) + when T =:= detach_requeues_one_session_quorum_queue orelse + T =:= single_active_consumer_quorum_queue orelse + T =:= detach_requeues_two_connections_quorum_queue -> + case rabbit_ct_broker_helpers:enable_feature_flag(Config, 'rabbitmq_4.0.0') of + ok -> + rabbit_ct_helpers:testcase_started(Config, T); + {skip, _} -> + {skip, "Feature flag rabbitmq_4.0.0 enables the consumer removal API"} + end; +init_per_testcase(T = immutable_bare_message, Config) -> + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "RabbitMQ is known to wrongfully modify the bare message with feature " + "flag rabbitmq_4.0.0 disabled"} + end; +init_per_testcase(T = dead_letter_into_stream, Config) -> + case rpc(Config, rabbit_feature_flags, is_enabled, [message_containers_deaths_v2]) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "This test is known to fail with feature flag message_containers_deaths_v2 disabled " + "due to missing feature https://github.com/rabbitmq/rabbitmq-server/issues/11173"} + end; +init_per_testcase(T = dead_letter_reject, Config) -> + case rpc(Config, rabbit_feature_flags, is_enabled, [message_containers_deaths_v2]) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "This test is known to fail with feature flag message_containers_deaths_v2 disabled " + "due bug https://github.com/rabbitmq/rabbitmq-server/issues/11159"} + end; +init_per_testcase(T, Config) + when T =:= leader_transfer_quorum_queue_credit_single orelse + T =:= leader_transfer_quorum_queue_credit_batches orelse + T =:= leader_transfer_stream_credit_single orelse + T =:= leader_transfer_stream_credit_batches -> + case rpc(Config, rabbit_feature_flags, is_supported, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + 
        false ->
+            {skip, "This test requires the AMQP management extension of RabbitMQ 4.0"}
+    end;
+init_per_testcase(T, Config)
+  when T =:= classic_queue_on_new_node orelse
+       T =:= quorum_queue_on_new_node ->
+    %% If node 1 runs 4.x, this is the new no-op plugin.
+    %% If node 1 runs 3.x, this is the old real plugin.
+    ok = rabbit_ct_broker_helpers:enable_plugin(Config, 1, rabbitmq_amqp1_0),
+    rabbit_ct_helpers:testcase_started(Config, T);
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    %% Assert that every testcase cleaned up.
+    eventually(?_assertEqual([], rpc(Config, rabbit_amqqueue, list, []))),
+    %% Wait for sessions to terminate before starting the next test case.
+    eventually(?_assertEqual([], rpc(Config, rabbit_amqp_session, list_local, []))),
+    %% Assert that global counters count correctly.
+    eventually(?_assertMatch(#{publishers := 0,
+                               consumers := 0},
+                             get_global_counters(Config))),
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+reliable_send_receive_with_outcomes_classic_queue(Config) ->
+    reliable_send_receive_with_outcomes(<<"classic">>, Config).
+
+reliable_send_receive_with_outcomes_quorum_queue(Config) ->
+    reliable_send_receive_with_outcomes(<<"quorum">>, Config).
+
+reliable_send_receive_with_outcomes(QType, Config) ->
+    Outcomes = [
+                accepted,
+                modified,
+                {modified, true, false, #{<<"fruit">> => <<"banana">>}},
+                {modified, false, true, #{}},
+                rejected,
+                released
+               ],
+    [ok = reliable_send_receive(QType, Outcome, Config) || Outcome <- Outcomes].
+
+reliable_send_receive(QType, Outcome, Config) ->
+    OutcomeBin = if is_atom(Outcome) ->
+                        atom_to_binary(Outcome);
+                    is_tuple(Outcome) ->
+                        O1 = atom_to_binary(element(1, Outcome)),
+                        O2 = atom_to_binary(element(2, Outcome)),
+                        <<O1/binary, "_", O2/binary>>
+                 end,
+    ct:pal("~s testing ~s", [?FUNCTION_NAME, OutcomeBin]),
+
+    OpnConf = connection_config(Config),
+    {ok, Connection} = amqp10_client:open_connection(OpnConf),
+    {ok, Session} = amqp10_client:begin_session_sync(Connection),
+    {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"pair">>),
+    QName = <<QType/binary, OutcomeBin/binary>>,
+    QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}},
+    {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps),
+    ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair),
+
+    %% reliable send and consume
+    Address = rabbitmq_amqp_address:queue(QName),
+    {ok, Sender} = amqp10_client:attach_sender_link(
+                     Session, <<"test-sender">>, Address),
+    ok = wait_for_credit(Sender),
+    flush(credited),
+    DTag1 = <<"dtag-1">>,
+    %% create an unsettled message,
+    %% link will be in "mixed" mode by default
+    Body = <<"body-1">>,
+    Msg1 = amqp10_msg:new(DTag1, Body, false),
+
+    %% Use the 2 byte AMQP boolean encoding, see AMQP §1.6.2
+    True = {boolean, true},
+    Msg2 = amqp10_msg:set_headers(#{durable => True}, Msg1),
+    ok = amqp10_client:send_msg(Sender, Msg2),
+    ok = wait_for_accepted(DTag1),
+
+    ok = amqp10_client:detach_link(Sender),
+    ok = end_session_sync(Session),
+    ok = amqp10_client:close_connection(Connection),
+    flush("post sender close"),
+
+    {ok, Connection2} = amqp10_client:open_connection(OpnConf),
+    {ok, Session2} = amqp10_client:begin_session_sync(Connection2),
+    {ok, Receiver} = amqp10_client:attach_receiver_link(
+                       Session2, <<"test-receiver">>, Address, unsettled),
+    {ok, Msg} = amqp10_client:get_msg(Receiver),
+    ?assertEqual([Body], amqp10_msg:body(Msg)),
+    ?assertEqual(true, amqp10_msg:header(durable, Msg)),
+
+    ok =
amqp10_client:settle_msg(Receiver, Msg, Outcome), + flush("post accept"), + + ok = amqp10_client:detach_link(Receiver), + ok = delete_queue(Session2, QName), + ok = end_session_sync(Session2), + ok = amqp10_client:close_connection(Connection2). + +%% We test the modified outcome with classic queues. +%% We expect that classic queues implement field undeliverable-here incorrectly +%% by discarding (if true) or requeueing (if false). +%% Fields delivery-failed and message-annotations are not implemented. +modified_classic_queue(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + {ok, #{type := <<"classic">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, QName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"classic">>}}}), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + Msg1 = amqp10_msg:new(<<"tag1">>, <<"m1">>, true), + Msg2 = amqp10_msg:new(<<"tag2">>, <<"m2">>, true), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = amqp10_client:send_msg(Sender, Msg2), + ok = amqp10_client:detach_link(Sender), + + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address, unsettled), + + {ok, M1} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m1">>], amqp10_msg:body(M1)), + ok = amqp10_client:settle_msg(Receiver, M1, {modified, false, true, #{}}), + + {ok, M2a} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2a)), + ok = amqp10_client:settle_msg(Receiver, M2a, + {modified, false, false, #{}}), + + {ok, M2b} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2b)), + ok = amqp10_client:settle_msg(Receiver, M2b, + {modified, true, false, #{<<"x-opt-key">> => <<"val">>}}), + + {ok, M2c} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2c)), + ok = amqp10_client:settle_msg(Receiver, M2c, modified), + + ok = amqp10_client:detach_link(Receiver), + ?assertMatch({ok, #{message_count := 1}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% We test the modified outcome with quorum queues. 
+%% We expect that quorum queues implement field +%% * delivery-failed correctly +%% * undeliverable-here incorrectly by discarding (if true) or requeueing (if false) +%% * message-annotations correctly +modified_quorum_queue(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, QName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + Msg1 = amqp10_msg:new(<<"tag1">>, <<"m1">>, true), + Msg2 = amqp10_msg:new(<<"tag2">>, <<"m2">>, true), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = amqp10_client:send_msg(Sender, Msg2), + ok = amqp10_client:detach_link(Sender), + + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address, unsettled), + + {ok, M1} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m1">>], amqp10_msg:body(M1)), + ?assertMatch(#{delivery_count := 0, + first_acquirer := true}, + amqp10_msg:headers(M1)), + ok = amqp10_client:settle_msg(Receiver, M1, {modified, false, true, #{}}), + + {ok, M2a} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2a)), + ?assertMatch(#{delivery_count := 0, + first_acquirer := true}, + amqp10_msg:headers(M2a)), + ok = amqp10_client:settle_msg(Receiver, M2a, {modified, false, false, #{}}), + + {ok, M2b} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2b)), + ?assertMatch(#{delivery_count := 0, + first_acquirer := false}, + amqp10_msg:headers(M2b)), + ok = amqp10_client:settle_msg(Receiver, M2b, {modified, true, false, #{}}), + + {ok, M2c} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2c)), + ?assertMatch(#{delivery_count := 1, + first_acquirer := false}, + amqp10_msg:headers(M2c)), + ok = amqp10_client:settle_msg(Receiver, M2c, + {modified, true, false, + #{<<"x-opt-key">> => <<"val 1">>}}), + + {ok, M2d} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2d)), + ?assertMatch(#{delivery_count := 2, + first_acquirer := false}, + amqp10_msg:headers(M2d)), + ?assertMatch(#{<<"x-opt-key">> := <<"val 1">>}, amqp10_msg:message_annotations(M2d)), + ok = amqp10_client:settle_msg(Receiver, M2d, + {modified, false, false, + #{<<"x-opt-key">> => <<"val 2">>, + <<"x-other">> => 99}}), + + {ok, M2e} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2e)), + ?assertMatch(#{delivery_count := 2, + first_acquirer := false}, + amqp10_msg:headers(M2e)), + ?assertMatch(#{<<"x-opt-key">> := <<"val 2">>, + <<"x-other">> := 99}, amqp10_msg:message_annotations(M2e)), + ok = amqp10_client:settle_msg(Receiver, M2e, modified), + + ok = amqp10_client:detach_link(Receiver), + ?assertMatch({ok, #{message_count := 1}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% Test that a message can be routed based on the message-annotations +%% provided in the modified outcome. 
+modified_dead_letter_headers_exchange(Config) -> + {Connection, Session, LinkPair} = init(Config), + SourceQName = <<"source quorum queue">>, + AppleQName = <<"dead letter classic queue receiving apples">>, + BananaQName = <<"dead letter quorum queue receiving bananas">>, + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + SourceQName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}, + <<"x-overflow">> => {utf8, <<"reject-publish">>}, + <<"x-dead-letter-strategy">> => {utf8, <<"at-least-once">>}, + <<"x-dead-letter-exchange">> => {utf8, <<"amq.headers">>}}}), + {ok, #{type := <<"classic">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + AppleQName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"classic">>}}}), + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + BananaQName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}), + ok = rabbitmq_amqp_client:bind_queue( + LinkPair, AppleQName, <<"amq.headers">>, <<>>, + #{<<"x-fruit">> => {utf8, <<"apple">>}, + <<"x-match">> => {utf8, <<"any-with-x">>}}), + ok = rabbitmq_amqp_client:bind_queue( + LinkPair, BananaQName, <<"amq.headers">>, <<>>, + #{<<"x-fruit">> => {utf8, <<"banana">>}, + <<"x-match">> => {utf8, <<"any-with-x">>}}), + + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, rabbitmq_amqp_address:queue(SourceQName)), + wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"receiver">>, rabbitmq_amqp_address:queue(SourceQName), unsettled), + {ok, ReceiverApple} = amqp10_client:attach_receiver_link( + Session, <<"receiver apple">>, rabbitmq_amqp_address:queue(AppleQName), unsettled), + {ok, ReceiverBanana} = amqp10_client:attach_receiver_link( + Session, <<"receiver banana">>, rabbitmq_amqp_address:queue(BananaQName), unsettled), + + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t1">>, <<"m1">>)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t2">>, <<"m2">>)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:set_message_annotations( + #{"x-fruit" => <<"apple">>}, + amqp10_msg:new(<<"t3">>, <<"m3">>))), + ok = amqp10_client:send_msg(Sender, amqp10_msg:set_message_annotations( + #{"x-fruit" => <<"apple">>}, + amqp10_msg:new(<<"t4">>, <<"m4">>))), + ok = wait_for_accepts(3), + + {ok, Msg1} = amqp10_client:get_msg(Receiver), + ?assertMatch(#{delivery_count := 0, + first_acquirer := true}, + amqp10_msg:headers(Msg1)), + ok = amqp10_client:settle_msg(Receiver, Msg1, {modified, true, true, #{<<"x-fruit">> => <<"banana">>}}), + {ok, MsgBanana1} = amqp10_client:get_msg(ReceiverBanana), + ?assertEqual([<<"m1">>], amqp10_msg:body(MsgBanana1)), + ?assertMatch(#{delivery_count := 1, + first_acquirer := false}, + amqp10_msg:headers(MsgBanana1)), + ok = amqp10_client:accept_msg(ReceiverBanana, MsgBanana1), + + {ok, Msg2} = amqp10_client:get_msg(Receiver), + ok = amqp10_client:settle_msg(Receiver, Msg2, {modified, true, true, #{<<"x-fruit">> => <<"apple">>}}), + {ok, MsgApple1} = amqp10_client:get_msg(ReceiverApple), + ?assertEqual([<<"m2">>], amqp10_msg:body(MsgApple1)), + ?assertMatch(#{delivery_count := 1, + first_acquirer := false}, + amqp10_msg:headers(MsgApple1)), + ok = amqp10_client:accept_msg(ReceiverApple, MsgApple1), + + {ok, Msg3} = amqp10_client:get_msg(Receiver), + ok = amqp10_client:settle_msg(Receiver, Msg3, {modified, false, true, #{}}), + {ok, MsgApple2} = amqp10_client:get_msg(ReceiverApple), + ?assertEqual([<<"m3">>], amqp10_msg:body(MsgApple2)), 
+ ?assertMatch(#{delivery_count := 0, + first_acquirer := false}, + amqp10_msg:headers(MsgApple2)), + ok = amqp10_client:accept_msg(ReceiverApple, MsgApple2), + + {ok, Msg4} = amqp10_client:get_msg(Receiver), + ok = amqp10_client:settle_msg(Receiver, Msg4, {modified, false, true, #{<<"x-fruit">> => <<"banana">>}}), + {ok, MsgBanana2} = amqp10_client:get_msg(ReceiverBanana), + ?assertEqual([<<"m4">>], amqp10_msg:body(MsgBanana2)), + ?assertMatch(#{delivery_count := 0, + first_acquirer := false}, + amqp10_msg:headers(MsgBanana2)), + ok = amqp10_client:accept_msg(ReceiverBanana, MsgBanana2), + + ok = detach_link_sync(Sender), + ok = detach_link_sync(Receiver), + ok = detach_link_sync(ReceiverApple), + ok = detach_link_sync(ReceiverBanana), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, SourceQName), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, AppleQName), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, BananaQName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% Tests that confirmations are returned correctly +%% when sending many messages async to a quorum queue. +sender_settle_mode_unsettled(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}, + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address, unsettled), + ok = wait_for_credit(Sender), + + %% Send many messages aync. + NumMsgs = 30, + DTags = [begin + DTag = integer_to_binary(N), + Msg = amqp10_msg:new(DTag, <<"body">>, false), + ok = amqp10_client:send_msg(Sender, Msg), + DTag + end || N <- lists:seq(1, NumMsgs)], + + %% Wait for confirms. + [receive {amqp10_disposition, {accepted, DTag}} -> ok + after 5000 -> ct:fail({missing_accepted, DTag}) + end || DTag <- DTags], + + ok = amqp10_client:detach_link(Sender), + ?assertMatch({ok, #{message_count := NumMsgs}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +sender_settle_mode_unsettled_fanout(Config) -> + {Connection, Session, LinkPair} = init(Config), + QNames = [<<"q1">>, <<"q2">>, <<"q3">>], + [begin + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, <<"amq.fanout">>, <<>>, #{}) + end || QName <- QNames], + + Address = rabbitmq_amqp_address:exchange(<<"amq.fanout">>), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address, unsettled), + ok = wait_for_credit(Sender), + + %% Send many messages aync. + NumMsgs = 20, + DTags = [begin + DTag = integer_to_binary(N), + Msg = amqp10_msg:new(DTag, <<"body">>, false), + ok = amqp10_client:send_msg(Sender, Msg), + DTag + end || N <- lists:seq(1, NumMsgs)], + + %% Wait for confirms. 
+ [receive {amqp10_disposition, {accepted, DTag}} -> ok + after 5000 -> ct:fail({missing_accepted, DTag}) + end || DTag <- DTags], + + ok = amqp10_client:detach_link(Sender), + [?assertMatch({ok, #{message_count := NumMsgs}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)) + || QName <- QNames], + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% Tests that confirmations are returned correctly +%% when sending many messages async to a quorum queue where +%% every 3rd message is settled by the sender. +sender_settle_mode_mixed(Config) -> + {Connection, Session, LinkPair} = init(Config), + QName = atom_to_binary(?FUNCTION_NAME), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}, + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address, mixed), + ok = wait_for_credit(Sender), + + %% Send many messages async. + %% The last message (31) will be sent unsettled. + NumMsgs = 31, + DTags = lists:filtermap( + fun(N) -> + DTag = integer_to_binary(N), + {Settled, Ret} = case N rem 3 of + 0 -> {true, false}; + _ -> {false, {true, DTag}} + end, + Msg = amqp10_msg:new(DTag, <<"body">>, Settled), + ok = amqp10_client:send_msg(Sender, Msg), + Ret + end, lists:seq(1, NumMsgs)), + 21 = length(DTags), + + %% Wait for confirms. + [receive {amqp10_disposition, {accepted, DTag}} -> ok + after 5000 -> ct:fail({missing_accepted, DTag}) + end || DTag <- DTags], + + ok = amqp10_client:detach_link(Sender), + ?assertMatch({ok, #{message_count := NumMsgs}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +quorum_queue_rejects(Config) -> + {Connection, Session, LinkPair} = init(Config), + QName = atom_to_binary(?FUNCTION_NAME), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}, + <<"x-max-length">> => {ulong, 1}, + <<"x-overflow">> => {utf8, <<"reject-publish">>}}}, + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address, mixed), + ok = wait_for_credit(Sender), + + %% Quorum queue's x-max-length limit is known to be off by 1. + %% Therefore, we expect the first 2 messages to be accepted. + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag a">>, <<>>, false)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag b">>, <<>>, false)), + [receive {amqp10_disposition, {accepted, DTag}} -> ok + after 5000 -> ct:fail({missing_accepted, DTag}) + end || DTag <- [<<"tag a">>, <<"tag b">>]], + + %% From now on the quorum queue should reject our publishes. + %% Send many messages aync. + NumMsgs = 20, + DTags = [begin + DTag = integer_to_binary(N), + Msg = amqp10_msg:new(DTag, <<"body">>, false), + ok = amqp10_client:send_msg(Sender, Msg), + DTag + end || N <- lists:seq(1, NumMsgs)], + %% Since our sender settle mode is mixed, let's also test sending one as settled. 
+ ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag c">>, <<>>, true)), + %% and the final one as unsettled again + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag d">>, <<>>, false)), + + [receive {amqp10_disposition, {rejected, DTag}} -> ok + after 5000 -> ct:fail({missing_rejected, DTag}) + end || DTag <- DTags ++ [<<"tag d">>]], + + ok = amqp10_client:detach_link(Sender), + ?assertMatch({ok, #{message_count := 2}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +receiver_settle_mode_first(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + Address = rabbitmq_amqp_address:queue(QName), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address, settled), + ok = wait_for_credit(Sender), + + %% Send 10 messages. + [begin + DTag = Body = integer_to_binary(N), + Msg = amqp10_msg:new(DTag, Body, true), + ok = amqp10_client:send_msg(Sender, Msg) + end || N <- lists:seq(1, 10)], + ok = amqp10_client:detach_link(Sender), + flush("post sender close"), + + %% Receive the first 9 messages. + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, unsettled), + ok = amqp10_client:flow_link_credit(Receiver, 9, never), + Msgs_1_to_9 = receive_messages(Receiver, 9), + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + assert_messages(QName, 10, 9, Config), + + %% What follows is white box testing: We want to hit a few different branches in the + %% server code. Although this test is a bit artificial, the AMQP spec does not prohibit + %% clients to ack in such ranges. + + %% 1. Ack a range smaller than the number of unacked messages where some delivery IDs + %% are outside the [min, max] range of unacked messages. + {Msgs_1_to_7, [Msg8, Msg9]} = lists:split(7, Msgs_1_to_9), + DeliveryIdMsg8 = amqp10_msg:delivery_id(Msg8), + DeliveryIdMsg9 = amqp10_msg:delivery_id(Msg9), + ?assertEqual(DeliveryIdMsg9, serial_number_increment(DeliveryIdMsg8)), + Last1 = serial_number_increment(serial_number_increment(DeliveryIdMsg9)), + ok = amqp10_client_session:disposition( + Receiver, DeliveryIdMsg8, Last1, true, accepted), + assert_messages(QName, 8, 7, Config), + + %% 2. Ack a range smaller than the number of unacked messages where all delivery IDs + %% are inside the [min, max] range of unacked messages. + [Msg1, Msg2, _Msg3, Msg4, _Msg5, Msg6, Msg7] = Msgs_1_to_7, + DeliveryIdMsg4 = amqp10_msg:delivery_id(Msg4), + DeliveryIdMsg6 = amqp10_msg:delivery_id(Msg6), + ok = amqp10_client_session:disposition( + Receiver, DeliveryIdMsg4, DeliveryIdMsg6, true, accepted), + assert_messages(QName, 5, 4, Config), + + %% 3. Ack a range larger than the number of unacked messages where all delivery IDs + %% are inside the [min, max] range of unacked messages. + DeliveryIdMsg2 = amqp10_msg:delivery_id(Msg2), + DeliveryIdMsg7 = amqp10_msg:delivery_id(Msg7), + ok = amqp10_client_session:disposition( + Receiver, DeliveryIdMsg2, DeliveryIdMsg7, true, accepted), + assert_messages(QName, 2, 1, Config), + + %% Consume the last message. 
+ ok = amqp10_client:flow_link_credit(Receiver, 1, never), + [Msg10] = receive_messages(Receiver, 1), + ?assertEqual([<<"10">>], amqp10_msg:body(Msg10)), + + %% 4. Ack a range larger than the number of unacked messages where some delivery IDs + %% are outside the [min, max] range of unacked messages. + DeliveryIdMsg1 = amqp10_msg:delivery_id(Msg1), + DeliveryIdMsg10 = amqp10_msg:delivery_id(Msg10), + Last2 = serial_number_increment(DeliveryIdMsg10), + ok = amqp10_client_session:disposition( + Receiver, DeliveryIdMsg1, Last2, true, accepted), + assert_messages(QName, 0, 0, Config), + + %% 5. Ack single delivery ID when there are no unacked messages. + ok = amqp10_client_session:disposition( + Receiver, DeliveryIdMsg1, DeliveryIdMsg1, true, accepted), + + %% 6. Ack multiple delivery IDs when there are no unacked messages. + ok = amqp10_client_session:disposition( + Receiver, DeliveryIdMsg1, DeliveryIdMsg6, true, accepted), + assert_messages(QName, 0, 0, Config), + + ok = amqp10_client:detach_link(Receiver), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +publishing_to_non_existing_queue_should_settle_with_released(Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + QName = <<"queue does not exist">>, + Address = rabbitmq_amqp_address:exchange(<<"amq.direct">>, QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + DTag1 = <<"dtag-1">>, + %% create an unsettled message, + %% link will be in "mixed" mode by default + Msg1 = amqp10_msg:new(DTag1, <<"body-1">>, false), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = wait_for_settlement(DTag1, released), + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:close_connection(Connection), + ok = flush("post sender close"). + +open_link_to_non_existing_destination_should_end_session(Config) -> + OpnConf = connection_config(Config), + Name = atom_to_binary(?FUNCTION_NAME), + Addresses = [rabbitmq_amqp_address:exchange(Name, <<"bar">>), + rabbitmq_amqp_address:queue(Name)], + SenderLinkName = <<"test-sender">>, + [begin + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + ct:pal("Address ~s", [Address]), + {ok, _} = amqp10_client:attach_sender_link( + Session, SenderLinkName, Address), + wait_for_session_end(Session), + ok = amqp10_client:close_connection(Connection), + flush("post sender close") + end || Address <- Addresses], + ok. + +roundtrip_with_drain_classic_queue(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + roundtrip_with_drain(Config, <<"classic">>, QName). + +roundtrip_with_drain_quorum_queue(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + roundtrip_with_drain(Config, <<"quorum">>, QName). + +roundtrip_with_drain_stream(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + roundtrip_with_drain(Config, <<"stream">>, QName). 
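For reference, the drain behaviour exercised by the roundtrip_with_drain and drain_many tests that follow boils down to a single client call; a minimal sketch of the pattern, assuming the amqp10_client API used throughout this suite (drain_once is an illustrative name, not part of the suite):

    %% Grant Credit and set the drain flag: the queue must either deliver
    %% messages or advance its delivery-count until the credit is used up,
    %% after which the client emits a credit_exhausted event.
    drain_once(Receiver, Credit) ->
        ok = amqp10_client:flow_link_credit(Receiver, Credit, never, true),
        receive
            {amqp10_event, {link, Receiver, credit_exhausted}} -> ok
        after 5000 ->
            {error, drain_timeout}
        end.

Any messages that were available are delivered before the credit_exhausted event arrives, which is exactly what the tests below assert.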
+ +roundtrip_with_drain(Config, QueueType, QName) + when is_binary(QueueType) -> + Address = rabbitmq_amqp_address:queue(QName), + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QueueType}}}, + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + wait_for_credit(Sender), + + OutMsg = amqp10_msg:new(<<"tag-1">>, <<"my-body">>, false), + ok = amqp10_client:send_msg(Sender, OutMsg), + ok = wait_for_accepts(1), + + flush("pre-receive"), + % create a receiver link + TerminusDurability = none, + Filter = consume_from_first(QueueType), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, unsettled, + TerminusDurability, Filter), + + % grant credit and drain + ok = amqp10_client:flow_link_credit(Receiver, 1, never, true), + + % wait for a delivery + receive {amqp10_msg, Receiver, InMsg} -> + ok = amqp10_client:accept_msg(Receiver, InMsg) + after 2000 -> + Reason = delivery_timeout, + flush(Reason), + ct:fail(Reason) + end, + OutMsg2 = amqp10_msg:new(<<"tag-2">>, <<"my-body2">>, false), + ok = amqp10_client:send_msg(Sender, OutMsg2), + ok = wait_for_accepted(<<"tag-2">>), + + %% no delivery should be made at this point + receive {amqp10_msg, _, _} -> ct:fail(unexpected_delivery) + after 500 -> ok + end, + + flush("final"), + ok = amqp10_client:detach_link(Sender), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:close_connection(Connection). + +drain_many_classic_queue(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + drain_many(Config, <<"classic">>, QName). + +drain_many_quorum_queue(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + drain_many(Config, <<"quorum">>, QName). + +drain_many_stream(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + drain_many(Config, <<"stream">>, QName). + +drain_many(Config, QueueType, QName) + when is_binary(QueueType) -> + Address = rabbitmq_amqp_address:queue(QName), + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QueueType}}}, + {ok, #{type := QueueType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), + wait_for_credit(Sender), + + Num = 5000, + ok = send_messages(Sender, Num, false), + ok = wait_for_accepts(Num), + + TerminusDurability = none, + Filter = consume_from_first(QueueType), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, settled, + TerminusDurability, Filter), + + ok = amqp10_client:flow_link_credit(Receiver, Num - 1, never, true), + ?assertEqual(Num - 1, count_received_messages(Receiver)), + flush("drained 1"), + + ok = amqp10_client:flow_link_credit(Receiver, Num, never, true), + receive_messages(Receiver, 1), + flush("drained 2"), + + ok = send_messages(Sender, Num, false), + ok = wait_for_accepts(Num), + receive {amqp10_msg, _, _} = Unexpected1 -> ct:fail({unexpected, Unexpected1}) + after 10 -> ok + end, + + %% Let's send a couple of FLOW frames in sequence. 
+ ok = amqp10_client:flow_link_credit(Receiver, 0, never, false), + ok = amqp10_client:flow_link_credit(Receiver, 1, never, false), + ok = amqp10_client:flow_link_credit(Receiver, Num div 2, never, false), + ok = amqp10_client:flow_link_credit(Receiver, Num, never, false), + ok = amqp10_client:flow_link_credit(Receiver, Num, never, true), + %% Eventually, we should receive all messages. + receive_messages(Receiver, Num), + flush("drained 3"), + + ok = send_messages(Sender, 1, false), + ok = wait_for_accepts(1), + %% Our receiver shouldn't have any credit left to consume this message. + receive {amqp10_msg, _, _} = Unexpected2 -> ct:fail({unexpected, Unexpected2}) + after 30 -> ok + end, + + %% Grant a huge amount of credits. + ok = amqp10_client:flow_link_credit(Receiver, 2_000_000_000, never, true), + %% We expect the server to send us the last message and + %% to advance the delivery-count promptly. + receive {amqp10_msg, _, _} -> ok + after 2000 -> ct:fail({missing_delivery, ?LINE}) + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 300 -> ct:fail("expected credit_exhausted") + end, + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:close_connection(Connection). + +amqp_stream_amqpl(Config) -> + amqp_amqpl(<<"stream">>, Config). + +amqp_quorum_queue_amqpl(Config) -> + amqp_amqpl(<<"quorum">>, Config). + +%% Send messages with different body sections to a queue and consume via AMQP 0.9.1. +amqp_amqpl(QType, Config) -> + {Connection, Session, LinkPair} = init(Config), + QName = atom_to_binary(?FUNCTION_NAME), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), + wait_for_credit(Sender), + + %% single amqp-value section + Body1 = #'v1_0.amqp_value'{content = {binary, <<0, 255>>}}, + Body2 = #'v1_0.amqp_value'{content = false}, + %% single amqp-sequene section + Body3 = [#'v1_0.amqp_sequence'{content = [{binary, <<0, 255>>}]}], + %% multiple amqp-sequene sections + Body4 = [#'v1_0.amqp_sequence'{content = [{long, -1}]}, + #'v1_0.amqp_sequence'{content = [true, {utf8, <<"🐇"/utf8>>}]}], + %% single data section + Body5 = [#'v1_0.data'{content = <<0, 255>>}], + %% multiple data sections + Body6 = [#'v1_0.data'{content = <<0, 1>>}, + #'v1_0.data'{content = <<2, 3>>}], + + %% Send only body sections + [ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<>>, Body, true)) || + Body <- [Body1, Body2, Body3, Body4, Body5, Body6]], + %% Send with application-properties + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_application_properties( + #{"my int" => -2}, + amqp10_msg:new(<<>>, Body1, true))), + %% Send with properties + CorrelationID = <<"my correlation ID">>, + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{correlation_id => CorrelationID}, + amqp10_msg:new(<<>>, Body1, true))), + %% Send with both properties and application-properties + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{correlation_id => CorrelationID}, + amqp10_msg:set_application_properties( + #{"my int" => -2}, + amqp10_msg:new(<<>>, Body1, true)))), + %% Send with footer + Footer = #'v1_0.footer'{content = 
[{{symbol, <<"my footer">>}, {ubyte, 255}}]}, + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:from_amqp_records( + [#'v1_0.transfer'{delivery_tag = {binary, <<>>}, + settled = true, + message_format = {uint, 0}}, + Body1, + Footer])), + + ok = amqp10_client:detach_link(Sender), + flush(detached), + + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'basic.qos_ok'{} = amqp_channel:call(Ch, #'basic.qos'{global = false, + prefetch_count = 100}), + CTag = <<"my-tag">>, + Args = case QType of + <<"stream">> -> [{<<"x-stream-offset">>, longstr, <<"first">>}]; + <<"quorum">> -> [] + end, + #'basic.consume_ok'{} = amqp_channel:subscribe( + Ch, + #'basic.consume'{ + queue = QName, + consumer_tag = CTag, + arguments = Args}, + self()), + + receive {#'basic.deliver'{consumer_tag = CTag, + redelivered = false}, + #amqp_msg{payload = Payload1, + props = #'P_basic'{type = <<"amqp-1.0">>}}} -> + ?assertEqual([Body1], amqp10_framing:decode_bin(Payload1)) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + receive {_, #amqp_msg{payload = Payload2, + props = #'P_basic'{type = <<"amqp-1.0">>}}} -> + ?assertEqual([Body2], amqp10_framing:decode_bin(Payload2)) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + receive {_, #amqp_msg{payload = Payload3, + props = #'P_basic'{type = <<"amqp-1.0">>}}} -> + ?assertEqual(Body3, amqp10_framing:decode_bin(Payload3)) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + receive {_, #amqp_msg{payload = Payload4, + props = #'P_basic'{type = <<"amqp-1.0">>}}} -> + ?assertEqual(Body4, amqp10_framing:decode_bin(Payload4)) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + receive {_, #amqp_msg{payload = Payload5, + props = #'P_basic'{type = undefined}}} -> + ?assertEqual(<<0, 255>>, Payload5) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + receive {_, #amqp_msg{payload = Payload6, + props = #'P_basic'{type = undefined}}} -> + %% We expect that RabbitMQ concatenates the binaries of multiple data sections. + ?assertEqual(<<0, 1, 2, 3>>, Payload6) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + receive {_, #amqp_msg{payload = Payload7, + props = #'P_basic'{headers = Headers7}}} -> + ?assertEqual([Body1], amqp10_framing:decode_bin(Payload7)), + ?assertEqual({signedint, -2}, rabbit_misc:table_lookup(Headers7, <<"my int">>)) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + receive {_, #amqp_msg{payload = Payload8, + props = #'P_basic'{correlation_id = Corr8}}} -> + ?assertEqual([Body1], amqp10_framing:decode_bin(Payload8)), + ?assertEqual(CorrelationID, Corr8) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + receive {_, #amqp_msg{payload = Payload9, + props = #'P_basic'{headers = Headers9, + correlation_id = Corr9}}} -> + ?assertEqual([Body1], amqp10_framing:decode_bin(Payload9)), + ?assertEqual(CorrelationID, Corr9), + ?assertEqual({signedint, -2}, rabbit_misc:table_lookup(Headers9, <<"my int">>)) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + receive {_, #amqp_msg{payload = Payload10}} -> + %% RabbitMQ converts the entire AMQP encoded body including the footer + %% to AMQP legacy payload. + ?assertEqual([Body1, Footer], amqp10_framing:decode_bin(Payload10)) + after 5000 -> ct:fail({missing_deliver, ?LINE}) + end, + + ok = rabbit_ct_client_helpers:close_channel(Ch), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:close_connection(Connection). 
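Based on the conversions asserted above, a 0.9.1 consumer could tell re-encoded AMQP 1.0 bodies apart from plain data sections roughly as follows; this is a sketch only (decode_amqp10_payload is an illustrative helper, not part of the suite, and it relies on the records included by this suite):

    %% amqp-value and amqp-sequence bodies keep their AMQP 1.0 encoding and
    %% are marked with basic property type <<"amqp-1.0">>; data sections
    %% arrive as the raw (possibly concatenated) bytes.
    decode_amqp10_payload(#amqp_msg{props = #'P_basic'{type = <<"amqp-1.0">>},
                                    payload = Payload}) ->
        amqp10_framing:decode_bin(Payload);
    decode_amqp10_payload(#amqp_msg{payload = Payload}) ->
        Payload.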
+ +message_headers_conversion(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + %% declare a quorum queue + Ch = rabbit_ct_client_helpers:open_channel(Config), + amqp_channel:call(Ch, #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}), + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session(Connection), + + amqp10_to_amqp091_header_conversion(Session, Ch, QName, Address), + amqp091_to_amqp10_header_conversion(Session, Ch, QName, Address), + + ok = rabbit_ct_client_helpers:close_channel(Ch), + ok = delete_queue(Session, QName), + ok = amqp10_client:close_connection(Connection). + +amqp10_to_amqp091_header_conversion(Session,Ch, QName, Address) -> + {ok, Sender} = create_amqp10_sender(Session, Address), + + OutMsg1 = amqp10_msg:new(<<"my-tag">>, <<"my-body">>, false), + OutMsg2 = amqp10_msg:set_application_properties( + #{"string" => "string-val", + "int" => 2, + "bool" => false}, + OutMsg1), + OutMsg3 = amqp10_msg:set_message_annotations( + #{"x-string" => "string-value", + "x-int" => 3, + "x-bool" => true}, + OutMsg2), + OutMsg = amqp10_msg:set_headers( + #{durable => true, + priority => 7, + ttl => 88000}, + OutMsg3), + ok = amqp10_client:send_msg(Sender, OutMsg), + ok = wait_for_accepts(1), + + {#'basic.get_ok'{}, + #amqp_msg{props = #'P_basic'{headers = Headers, + delivery_mode = DeliveryMode, + priority = Priority, + expiration = Expiration}} + } = amqp_channel:call(Ch, #'basic.get'{queue = QName, no_ack = true}), + + %% assert application properties + ?assertEqual({longstr, <<"string-val">>}, rabbit_misc:table_lookup(Headers, <<"string">>)), + ?assertEqual({unsignedint, 2}, rabbit_misc:table_lookup(Headers, <<"int">>)), + ?assertEqual({bool, false}, rabbit_misc:table_lookup(Headers, <<"bool">>)), + %% assert message annotations + ?assertEqual({longstr, <<"string-value">>}, rabbit_misc:table_lookup(Headers, <<"x-string">>)), + ?assertEqual({unsignedint, 3}, rabbit_misc:table_lookup(Headers, <<"x-int">>)), + ?assertEqual({bool, true}, rabbit_misc:table_lookup(Headers, <<"x-bool">>)), + %% assert headers + ?assertEqual(2, DeliveryMode), + ?assertEqual(7, Priority), + ?assertEqual(<<"88000">>, Expiration). + +amqp091_to_amqp10_header_conversion(Session, Ch, QName, Address) -> + Amqp091Headers = [{<<"x-forwarding">>, array, + [{table, [{<<"uri">>, longstr, + <<"amqp://localhost/%2F/upstream">>}]}]}, + {<<"x-string">>, longstr, "my-string"}, + {<<"x-int">>, long, 92}, + {<<"x-bool">>, bool, true}, + {<<"string">>, longstr, "my-str"}, + {<<"int">>, long, 101}, + {<<"bool">>, bool, false}], + + amqp_channel:cast( + Ch, + #'basic.publish'{routing_key = QName}, + #amqp_msg{props = #'P_basic'{headers = Amqp091Headers}, + payload = <<"foobar">>}), + + {ok, [Msg]} = drain_queue(Session, Address, 1), + Amqp10MA = amqp10_msg:message_annotations(Msg), + ?assertEqual(<<"my-string">>, maps:get(<<"x-string">>, Amqp10MA, undefined)), + ?assertEqual(92, maps:get(<<"x-int">>, Amqp10MA, undefined)), + ?assertEqual(true, maps:get(<<"x-bool">>, Amqp10MA, undefined)), + + Amqp10Props = amqp10_msg:application_properties(Msg), + ?assertEqual(<<"my-str">>, maps:get(<<"string">>, Amqp10Props, undefined)), + ?assertEqual(101, maps:get(<<"int">>, Amqp10Props, undefined)), + ?assertEqual(false, maps:get(<<"bool">>, Amqp10Props, undefined)). 
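The 1.0 to 0.9.1 header mapping asserted above can be summarised in a small sketch; expected_amqp091_props is illustrative and mirrors only the durable, priority and ttl cases tested here:

    %% durable maps to delivery_mode 2, priority is carried over, and the
    %% ttl in milliseconds becomes the 0.9.1 expiration string.
    expected_amqp091_props(#{durable := true, priority := Priority, ttl := TtlMillis}) ->
        #'P_basic'{delivery_mode = 2,
                   priority = Priority,
                   expiration = integer_to_binary(TtlMillis)}.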
+ +%% Test sending and receiving concurrently on multiple sessions of the same connection. +multiple_sessions(Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + %% Create 2 sessions on the same connection. + {ok, Session1} = amqp10_client:begin_session(Connection), + {ok, Session2} = amqp10_client:begin_session(Connection), + + %% Receive on each session. + Q1 = <<"q1">>, + Q2 = <<"q2">>, + Qs = [Q1, Q2], + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session1, <<"receiver link 1">>, Q1, settled, configuration), + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session2, <<"receiver link 2">>, Q2, settled, configuration), + receive {amqp10_event, {link, Receiver1, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + receive {amqp10_event, {link, Receiver2, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + NMsgsPerSender = 20, + NMsgsPerReceiver = NMsgsPerSender * 2, % due to fanout + ok = amqp10_client:flow_link_credit(Receiver1, NMsgsPerReceiver, never), + ok = amqp10_client:flow_link_credit(Receiver2, NMsgsPerReceiver, never), + flush("receiver attached"), + + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + [#'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = QName, + exchange = <<"amq.fanout">>}) + || QName <- Qs], + ok = rabbit_ct_client_helpers:close_channel(Ch), + + %% Send on each session. + TargetAddr = rabbitmq_amqp_address:exchange(<<"amq.fanout">>), + {ok, Sender1} = amqp10_client:attach_sender_link_sync( + Session1, <<"sender link 1">>, TargetAddr, settled, configuration), + ok = wait_for_credit(Sender1), + {ok, Sender2} = amqp10_client:attach_sender_link_sync( + Session2, <<"sender link 2">>, TargetAddr, settled, configuration), + ok = wait_for_credit(Sender2), + + %% Send concurrently. + Group1 = <<"group 1">>, + Group2 = <<"group 2">>, + spawn_link(?MODULE, send_messages_with_group_id, [Sender1, NMsgsPerSender, Group1]), + spawn_link(?MODULE, send_messages_with_group_id, [Sender2, NMsgsPerSender, Group2]), + + Q1Msgs = receive_messages(Receiver1, NMsgsPerReceiver), + Q2Msgs = receive_messages(Receiver2, NMsgsPerReceiver), + ExpectedBodies = [integer_to_binary(I) || I <- lists:seq(1, NMsgsPerSender)], + [begin + {G1Msgs, G2Msgs} = lists:partition( + fun(Msg) -> + #{group_id := GroupId} = amqp10_msg:properties(Msg), + case GroupId of + Group1 -> true; + Group2 -> false + end + end, Msgs), + [begin + Bodies = [begin + [Bin] = amqp10_msg:body(M), + Bin + end || M <- GMsgs], + ?assertEqual(ExpectedBodies, Bodies) + end || GMsgs <- [G1Msgs, G2Msgs]] + end || Msgs <- [Q1Msgs, Q2Msgs]], + + %% Clean up. + [ok = amqp10_client:detach_link(Link) || Link <- [Receiver1, Receiver2, Sender1, Sender2]], + [ok = delete_queue(Session1, Q) || Q <- Qs], + ok = end_session_sync(Session1), + ok = end_session_sync(Session2), + ok = amqp10_client:close_connection(Connection). + +server_closes_link_classic_queue(Config) -> + server_closes_link(<<"classic">>, Config). + +server_closes_link_quorum_queue(Config) -> + server_closes_link(<<"quorum">>, Config). + +server_closes_link_stream(Config) -> + server_closes_link(<<"stream">>, Config). 
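send_messages_with_group_id/3, spawned in multiple_sessions above, is defined elsewhere in this suite; inferred from the assertions (bodies <<"1">>..<<"N">>, one group-id per sender), a sketch of what it must do could look like this:

    %% Sketch only; not the suite's actual implementation.
    send_messages_with_group_id(Sender, N, GroupId) ->
        [begin
             Bin = integer_to_binary(I),
             Msg0 = amqp10_msg:new(Bin, Bin, true),
             Msg = amqp10_msg:set_properties(#{group_id => GroupId}, Msg0),
             ok = amqp10_client:send_msg(Sender, Msg)
         end || I <- lists:seq(1, N)],
        ok.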
+ +server_closes_link(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QType}]}), + ok = rabbit_ct_client_helpers:close_channel(Ch), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:queue(QName), + + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, unsettled), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail("missing ATTACH frame from server") + end, + ok = amqp10_client:flow_link_credit(Receiver, 5, never), + + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + flush(credited), + DTag = <<0>>, + Body = <<"body">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag, Body, false)), + ok = wait_for_accepted(DTag), + + receive {amqp10_msg, Receiver, Msg} -> + ?assertEqual([Body], amqp10_msg:body(Msg)) + after 5000 -> ct:fail("missing msg") + end, + + [SessionPid] = rpc(Config, rabbit_amqp_session, list_local, []), + %% Received delivery is unsettled. + eventually(?_assertEqual( + 1, + begin + #{outgoing_unsettled_map := UnsettledMap} = formatted_state(SessionPid), + maps:size(UnsettledMap) + end)), + + %% Server closes the link endpoint due to some AMQP 1.0 external condition: + %% In this test, the external condition is that an AMQP 0.9.1 client deletes the queue. + ok = delete_queue(Session, QName), + + %% We expect that the server closes the link endpoints, + %% i.e. the server sends us DETACH frames. + ExpectedError = #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_RESOURCE_DELETED}, + receive {amqp10_event, {link, Sender, {detached, ExpectedError}}} -> ok + after 5000 -> ct:fail("server did not close our outgoing link") + end, + + receive {amqp10_event, {link, Receiver, {detached, ExpectedError}}} -> ok + after 5000 -> ct:fail("server did not close our incoming link") + end, + + %% Our client has not and will not settle the delivery since the source queue got deleted and + %% the link detached with an error condition. Nevertheless the server session should clean up its + %% session state by removing the unsettled delivery from its session state. + eventually(?_assertEqual( + 0, + begin + #{outgoing_unsettled_map := UnsettledMap} = formatted_state(SessionPid), + maps:size(UnsettledMap) + end)), + + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). 
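The two receive blocks above follow a common pattern: wait for a server initiated DETACH that carries a specific error condition, such as ?V_1_0_AMQP_ERROR_RESOURCE_DELETED. A hedged sketch of a reusable helper (await_detached is illustrative, not part of the suite):

    await_detached(Link, Condition) ->
        Expected = #'v1_0.error'{condition = Condition},
        receive
            {amqp10_event, {link, Link, {detached, Expected}}} -> ok
        after 5000 ->
            {error, missing_detach}
        end.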
+ +server_closes_link_exchange(Config) -> + XName = atom_to_binary(?FUNCTION_NAME), + QName = <<"my queue">>, + RoutingKey = <<"my routing key">>, + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = XName}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = QName, + exchange = XName, + routing_key = RoutingKey}), + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:exchange(XName, RoutingKey), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + ?assertMatch(#{publishers := 1}, get_global_counters(Config)), + + DTag1 = <<1>>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag1, <<"m1">>, false)), + ok = wait_for_accepted(DTag1), + + %% Server closes the link endpoint due to some AMQP 1.0 external condition: + %% In this test, the external condition is that an AMQP 0.9.1 client deletes the exchange. + #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = XName}), + + %% When we publish the next message, we expect: + %% 1. that the message is released because the exchange doesn't exist anymore, and + DTag2 = <<255>>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag2, <<"m2">>, false)), + ok = wait_for_settlement(DTag2, released), + %% 2. that the server closes the link, i.e. sends us a DETACH frame. + receive {amqp10_event, + {link, Sender, + {detached, #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_NOT_FOUND}}}} -> ok + after 5000 -> ct:fail("server did not close our outgoing link") + end, + ?assertMatch(#{publishers := 0}, get_global_counters(Config)), + + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +link_target_classic_queue_deleted(Config) -> + link_target_queue_deleted(<<"classic">>, Config). + +link_target_quorum_queue_deleted(Config) -> + link_target_queue_deleted(<<"quorum">>, Config). + +link_target_queue_deleted(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QType}]}), + ok = rabbit_ct_client_helpers:close_channel(Ch), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:queue(QName), + + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + flush(credited), + DTag1 = <<1>>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag1, <<"m1">>, false)), + ok = wait_for_accepted(DTag1), + + %% Mock delivery to the target queue to do nothing. + rabbit_ct_broker_helpers:setup_meck(Config, [?MODULE]), + Mod = rabbit_queue_type, + ok = rpc(Config, meck, new, [Mod, [no_link, passthrough]]), + ok = rpc(Config, meck, expect, [Mod, deliver, fun ?MODULE:rabbit_queue_type_deliver_noop/4]), + + %% Send 2nd message. 
+ DTag2 = <<2>>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag2, <<"m2">>, false)), + receive {amqp10_disposition, Unexpected} -> ct:fail({unexpected_disposition, Unexpected}) + after 200 -> ok + end, + + %% Now, the server AMQP session contains a delivery that did not get confirmed by the target queue. + %% If we now delete that target queue, RabbitMQ must not reply to us with ACCEPTED. + %% Instead, we expect RabbitMQ to reply with RELEASED since no queue ever received our 2nd message. + ok = delete_queue(Session, QName), + ok = wait_for_settlement(DTag2, released), + + %% After the 2nd message got released, we additionally expect RabbitMQ to close the link given + %% that the target link endpoint - the queue - got deleted. + ExpectedError = #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_RESOURCE_DELETED}, + receive {amqp10_event, {link, Sender, {detached, ExpectedError}}} -> ok + after 5000 -> ct:fail("server did not close our outgoing link") + end, + + ?assert(rpc(Config, meck, validate, [Mod])), + ok = rpc(Config, meck, unload, [Mod]), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +rabbit_queue_type_deliver_noop(_TargetQs, _Msg, _Opts, QTypeState) -> + Actions = [], + {ok, QTypeState, Actions}. + +target_queues_deleted_accepted(Config) -> + Q1 = <<"q1">>, + Q2 = <<"q2">>, + Q3 = <<"q3">>, + QNames = [Q1, Q2, Q3], + Ch = rabbit_ct_client_helpers:open_channel(Config), + [begin + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = QName, + exchange = <<"amq.fanout">>}) + end || QName <- QNames], + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:exchange(<<"amq.fanout">>, <<"ignored">>), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address, unsettled), + ok = wait_for_credit(Sender), + flush(credited), + + DTag1 = <<1>>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag1, <<"m1">>, false)), + ok = wait_for_accepted(DTag1), + + %% Mock to deliver only to q1. + rabbit_ct_broker_helpers:setup_meck(Config, [?MODULE]), + Mod = rabbit_queue_type, + ok = rpc(Config, meck, new, [Mod, [no_link, passthrough]]), + ok = rpc(Config, meck, expect, [Mod, deliver, fun ?MODULE:rabbit_queue_type_deliver_to_q1/4]), + + %% Send 2nd message. + DTag2 = <<2>>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag2, <<"m2">>, false)), + receive {amqp10_disposition, Disp1} -> ct:fail({unexpected_disposition, Disp1}) + after 200 -> ok + end, + + %% Now, the server AMQP session contains a delivery that got confirmed by only q1. + %% If we delete q2, we should still receive no DISPOSITION since q3 hasn't confirmed. + ?assertEqual(#'queue.delete_ok'{message_count = 1}, + amqp_channel:call(Ch, #'queue.delete'{queue = Q2})), + receive {amqp10_disposition, Disp2} -> ct:fail({unexpected_disposition, Disp2}) + after 100 -> ok + end, + %% If we delete q3, RabbitMQ should reply with ACCEPTED since at least one target queue (q1) confirmed. 
+ ?assertEqual(#'queue.delete_ok'{message_count = 1}, + amqp_channel:call(Ch, #'queue.delete'{queue = Q3})), + receive {amqp10_disposition, {accepted, DTag2}} -> ok + after 5000 -> ct:fail(accepted_timeout) + end, + + ?assertEqual(#'queue.delete_ok'{message_count = 2}, + amqp_channel:call(Ch, #'queue.delete'{queue = Q1})), + ok = rabbit_ct_client_helpers:close_channel(Ch), + ?assert(rpc(Config, meck, validate, [Mod])), + ok = rpc(Config, meck, unload, [Mod]), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +rabbit_queue_type_deliver_to_q1(Qs, Msg, Opts, QTypeState) -> + %% Drop q2 and q3. + 3 = length(Qs), + Q1 = lists:filter(fun({Q, _RouteInos}) -> + amqqueue:get_name(Q) =:= rabbit_misc:r(<<"/">>, queue, <<"q1">>) + end, Qs), + 1 = length(Q1), + meck:passthrough([Q1, Msg, Opts, QTypeState]). + +events(Config) -> + ok = event_recorder:start(Config), + + OpnConf0 = connection_config(Config), + OpnConf = OpnConf0#{properties => #{<<"ignore-maintenance">> => {boolean, true}}}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail(opened_timeout) + end, + ok = close_connection_sync(Connection), + + Events = event_recorder:get_events(Config), + ok = event_recorder:stop(Config), + ct:pal("Recorded events: ~p", [Events]), + + Protocol = {protocol, {1, 0}}, + AuthProps = [{name, <<"guest">>}, + {auth_mechanism, <<"PLAIN">>}, + {ssl, false}, + Protocol], + ?assertMatch( + {value, _}, + find_event(user_authentication_success, AuthProps, Events)), + + Node = get_node_config(Config, 0, nodename), + ConnectionCreatedProps = [Protocol, + {node, Node}, + {vhost, <<"/">>}, + {user, <<"guest">>}, + {type, network}], + {value, ConnectionCreatedEvent} = find_event( + connection_created, + ConnectionCreatedProps, Events), + Props = ConnectionCreatedEvent#event.props, + Name = proplists:lookup(name, Props), + Pid = proplists:lookup(pid, Props), + ClientProperties = {client_properties, List} = proplists:lookup(client_properties, Props), + ?assert(lists:member( + {<<"product">>, longstr, <<"AMQP 1.0 client">>}, + List)), + ?assert(lists:member( + {<<"ignore-maintenance">>, bool, true}, + List)), + + ConnectionClosedProps = [{node, Node}, + Name, + Pid, + ClientProperties], + ?assertMatch( + {value, _}, + find_event(connection_closed, ConnectionClosedProps, Events)), + ok. + +sync_get_unsettled_classic_queue(Config) -> + sync_get_unsettled(<<"classic">>, Config). + +sync_get_unsettled_quorum_queue(Config) -> + sync_get_unsettled(<<"quorum">>, Config). + +sync_get_unsettled_stream(Config) -> + sync_get_unsettled(<<"stream">>, Config). + +%% Test synchronous get, figure 2.43 with sender settle mode unsettled. +sync_get_unsettled(QType, Config) -> + SenderSettleMode = unsettled, + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QType}]}), + + %% Attach 1 sender and 1 receiver to the queue. 
+ OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, SenderSettleMode), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + flush(receiver_attached), + + %% Grant 1 credit to the sending queue. + ok = amqp10_client:flow_link_credit(Receiver, 1, never), + + %% Since the queue has no messages yet, we shouldn't receive any message. + receive {amqp10_msg, _, _} = Unexp1 -> ct:fail("received unexpected message ~p", [Unexp1]) + after 10 -> ok + end, + + %% Let's send 4 messages to the queue. + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag1">>, <<"m1">>, true)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag2">>, <<"m2">>, true)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag3">>, <<"m3">>, true)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag4">>, <<"m4">>, true)), + + %% Since we previously granted only 1 credit, we should get only the 1st message. + M1 = receive {amqp10_msg, Receiver, Msg1} -> + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)), + Msg1 + after 5000 -> ct:fail("missing m1") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive {amqp10_msg, _, _} = Unexp2 -> ct:fail("received unexpected message ~p", [Unexp2]) + after 10 -> ok + end, + + %% Synchronously get the 2nd message. + ok = amqp10_client:flow_link_credit(Receiver, 1, never), + M2 = receive {amqp10_msg, Receiver, Msg2} -> + ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2)), + Msg2 + after 5000 -> ct:fail("missing m2") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive {amqp10_msg, _, _} = Unexp3 -> ct:fail("received unexpected message ~p", [Unexp3]) + after 10 -> ok + end, + + %% Accept the first 2 messages. + ok = amqp10_client:accept_msg(Receiver, M1), + ok = amqp10_client:accept_msg(Receiver, M2), + %% Settlements should not top up credit. We are still out of credits. + receive {amqp10_msg, _, _} = Unexp4 -> ct:fail("received unexpected message ~p", [Unexp4]) + after 10 -> ok + end, + + %% Synchronously get the 3rd message. + ok = amqp10_client:flow_link_credit(Receiver, 1, never), + receive {amqp10_msg, Receiver, Msg3} -> + ?assertEqual([<<"m3">>], amqp10_msg:body(Msg3)) + after 5000 -> ct:fail("missing m3") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive {amqp10_msg, _, _} = Unexp5 -> ct:fail("received unexpected message ~p", [Unexp5]) + after 10 -> ok + end, + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +sync_get_unsettled_2_classic_queue(Config) -> + sync_get_unsettled_2(<<"classic">>, Config). + +sync_get_unsettled_2_quorum_queue(Config) -> + sync_get_unsettled_2(<<"quorum">>, Config). 
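The synchronous get tests above and below all follow the pattern of spec figure 2.43: grant a single credit, then wait for at most one transfer. A minimal sketch, assuming the amqp10_client API used throughout this suite (sync_get is an illustrative name):

    sync_get(Receiver, Timeout) ->
        ok = amqp10_client:flow_link_credit(Receiver, 1, never),
        receive
            {amqp10_msg, Receiver, Msg} -> {ok, Msg}
        after Timeout ->
            {error, timeout}
        end.

Because credit is never renewed automatically (RenewWhenBelow is never), each call fetches at most one message, which is what the assertions on credit_exhausted rely on.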
+ +sync_get_unsettled_2_stream(Config) -> + sync_get_unsettled_2(<<"stream">>, Config). + +%% Synchronously get 2 messages from queue. +sync_get_unsettled_2(QType, Config) -> + SenderSettleMode = unsettled, + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QType}]}), + + %% Attach a sender and a receiver to the queue. + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver">>, + Address, + SenderSettleMode), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + flush(receiver_attached), + + %% Grant 2 credits to the sending queue. + ok = amqp10_client:flow_link_credit(Receiver, 2, never), + + %% Let's send 5 messages to the queue. + [ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Bin, Bin, true)) || + Bin <- [<<"m1">>, <<"m2">>, <<"m3">>, <<"m4">>, <<"m5">>]], + + %% We should receive exactly 2 messages. + receive {amqp10_msg, Receiver, Msg1} -> ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)) + after 5000 -> ct:fail("missing m1") + end, + receive {amqp10_msg, Receiver, Msg2} -> ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2)) + after 5000 -> ct:fail("missing m2") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive {amqp10_msg, _, _} = Unexp1 -> ct:fail("received unexpected message ~p", [Unexp1]) + after 50 -> ok + end, + + %% Grant 2 more credits to the sending queue. + ok = amqp10_client:flow_link_credit(Receiver, 2, never), + %% Again, we should receive exactly 2 messages. + receive {amqp10_msg, Receiver, Msg3} -> ?assertEqual([<<"m3">>], amqp10_msg:body(Msg3)) + after 5000 -> ct:fail("missing m3") + end, + receive {amqp10_msg, Receiver, Msg4} -> ?assertEqual([<<"m4">>], amqp10_msg:body(Msg4)) + after 5000 -> ct:fail("missing m4") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive {amqp10_msg, _, _} = Unexp2 -> ct:fail("received unexpected message ~p", [Unexp2]) + after 50 -> ok + end, + + %% Grant 2 more credits to the sending queue. + ok = amqp10_client:flow_link_credit(Receiver, 2, never), + + %% We should receive the last (5th) message. + receive {amqp10_msg, Receiver, Msg5} -> ?assertEqual([<<"m5">>], amqp10_msg:body(Msg5)) + after 5000 -> ct:fail("missing m5") + end, + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +sync_get_settled_classic_queue(Config) -> + sync_get_settled(<<"classic">>, Config). + +sync_get_settled_quorum_queue(Config) -> + sync_get_settled(<<"quorum">>, Config). + +sync_get_settled_stream(Config) -> + sync_get_settled(<<"stream">>, Config). + +%% Test synchronous get, figure 2.43 with sender settle mode settled. 
+sync_get_settled(QType, Config) -> + SenderSettleMode = settled, + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QType}]}), + + %% Attach 1 sender and 1 receivers to the queue. + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"my receiver">>, Address, SenderSettleMode), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + flush(receiver_attached), + + %% Grant 1 credit to the sending queue. + ok = amqp10_client:flow_link_credit(Receiver, 1, never), + + %% Since the queue has no messages yet, we shouldn't receive any message. + receive {amqp10_msg, _, _} = Unexp1 -> ct:fail("received unexpected message ~p", [Unexp1]) + after 10 -> ok + end, + + %% Let's send 3 messages to the queue. + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag1">>, <<"m1">>, true)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag2">>, <<"m2">>, true)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag3">>, <<"m3">>, true)), + + %% Since we previously granted only 1 credit, we should get only the 1st message. + receive {amqp10_msg, Receiver, Msg1} -> + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)) + after 5000 -> ct:fail("missing m1") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive {amqp10_msg, _, _} = Unexp2 -> ct:fail("received unexpected message ~p", [Unexp2]) + after 10 -> ok + end, + + %% Synchronously get the 2nd message. + ok = amqp10_client:flow_link_credit(Receiver, 1, never), + receive {amqp10_msg, Receiver, Msg2} -> + ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2)) + after 5000 -> ct:fail("missing m2") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive {amqp10_msg, _, _} = Unexp3 -> ct:fail("received unexpected message ~p", [Unexp3]) + after 10 -> ok + end, + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +timed_get_classic_queue(Config) -> + timed_get(<<"classic">>, Config). + +timed_get_quorum_queue(Config) -> + timed_get(<<"quorum">>, Config). + +timed_get_stream(Config) -> + timed_get(<<"stream">>, Config). + +%% Synchronous get with a timeout, figure 2.44. +timed_get(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QType}]}), + + %% Attach a sender and a receiver to the queue. 
+ OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver">>, + Address, + unsettled), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + flush(receiver_attached), + + ok = amqp10_client:flow_link_credit(Receiver, 1, never, false), + + Timeout = 10, + receive Unexpected0 -> ct:fail("received unexpected ~p", [Unexpected0]) + after Timeout -> ok + end, + + ok = amqp10_client:flow_link_credit(Receiver, 1, never, true), + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"my tag">>, <<"my msg">>, true)), + + %% Since our consumer didn't grant any new credit, we shouldn't receive the message we + %% just sent. + receive Unexpected1 -> ct:fail("received unexpected ~p", [Unexpected1]) + after 50 -> ok + end, + + ok = amqp10_client:flow_link_credit(Receiver, 1, never, true), + receive {amqp10_msg, Receiver, Msg1} -> ?assertEqual([<<"my msg">>], amqp10_msg:body(Msg1)) + after 5000 -> ct:fail("missing 'my msg'") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + + ok = amqp10_client:detach_link(Receiver), + ok = amqp10_client:detach_link(Sender), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +stop_classic_queue(Config) -> + stop(<<"classic">>, Config). + +stop_quorum_queue(Config) -> + stop(<<"quorum">>, Config). + +stop_stream(Config) -> + stop(<<"stream">>, Config). + +%% Test stopping a link, figure 2.46. +stop(QType, Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + QName = atom_to_binary(?FUNCTION_NAME), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QType}]}), + %% Attach 1 sender and 1 receiver to the queue. + OpnConf0 = connection_config(Config), + NumSent = 300, + %% Allow in flight messages to be received after stopping the link. + OpnConf = OpnConf0#{transfer_limit_margin => -NumSent}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, settled), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + flush(receiver_attached), + + ok = amqp10_client:flow_link_credit(Receiver, 10, 5), + ok = send_messages(Sender, NumSent, true), + + %% Let's await the first 20 messages. + NumReceived = 20, + Msgs = receive_messages(Receiver, NumReceived), + + %% Stop the link. 
+ %% "Stopping the transfers on a given link is accomplished by updating + %% the link-credit to be zero and sending the updated flow state." [2.6.10] + ok = amqp10_client:stop_receiver_link(Receiver), + %% "It is possible that some transfers could be in flight at the time the flow + %% state is sent, so incoming transfers could still arrive on the link." [2.6.10] + NumInFlight = count_received_messages(Receiver), + + ct:pal("After receiving the first ~b messages and stopping the link, " + "we received ~b more in flight messages", [NumReceived, NumInFlight]), + ?assert(NumInFlight > 0, + "expected some in flight messages, but there were actually none"), + ?assert(NumInFlight < NumSent - NumReceived, + "expected the link to stop, but actually received all messages"), + + %% Check that contents of the first 20 messages are correct. + FirstMsg = hd(Msgs), + LastMsg = lists:last(Msgs), + ?assertEqual([integer_to_binary(NumSent)], amqp10_msg:body(FirstMsg)), + ?assertEqual([integer_to_binary(NumSent - NumReceived + 1)], amqp10_msg:body(LastMsg)), + + %% Let's resume the link. + ok = amqp10_client:flow_link_credit(Receiver, 50, 40), + + %% We expect to receive all remaining messages. + NumRemaining = NumSent - NumReceived - NumInFlight, + ct:pal("Waiting for the remaining ~b messages", [NumRemaining]), + Msgs1 = receive_messages(Receiver, NumRemaining), + ?assertEqual([<<"1">>], amqp10_msg:body(lists:last(Msgs1))), + + ok = amqp10_client:detach_link(Receiver), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +consumer_priority_classic_queue(Config) -> + consumer_priority(<<"classic">>, Config). + +consumer_priority_quorum_queue(Config) -> + consumer_priority(<<"quorum">>, Config). + +consumer_priority(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + %% We test what our RabbitMQ docs state: + %% "Consumers which do not specify a value have priority 0. + %% Larger numbers indicate higher priority, and both positive and negative numbers can be used." 
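+ %% Below, the priority is conveyed as the `rabbitmq:priority` entry (an AMQP int, i.e. a
+ %% signed 32-bit integer) in the link properties passed when attaching the receiver.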
+ {ok, ReceiverDefaultPrio} = amqp10_client:attach_receiver_link( + Session, + <<"default prio consumer">>, + Address, + unsettled), + {ok, ReceiverHighPrio} = amqp10_client:attach_receiver_link( + Session, + <<"high prio consumer">>, + Address, + unsettled, + none, + #{}, + #{<<"rabbitmq:priority">> => {int, 2_000_000_000}}), + {ok, ReceiverLowPrio} = amqp10_client:attach_receiver_link( + Session, + <<"low prio consumer">>, + Address, + unsettled, + none, + #{}, + #{<<"rabbitmq:priority">> => {int, -2_000_000_000}}), + ok = amqp10_client:flow_link_credit(ReceiverDefaultPrio, 1, never), + ok = amqp10_client:flow_link_credit(ReceiverHighPrio, 2, never), + ok = amqp10_client:flow_link_credit(ReceiverLowPrio, 1, never), + + NumMsgs = 5, + [begin + Bin = integer_to_binary(N), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Bin, Bin)) + end || N <- lists:seq(1, NumMsgs)], + ok = wait_for_accepts(NumMsgs), + + receive {amqp10_msg, Rec1, Msg1} -> + ?assertEqual(<<"1">>, amqp10_msg:body_bin(Msg1)), + ?assertEqual(ReceiverHighPrio, Rec1), + ok = amqp10_client:accept_msg(Rec1, Msg1) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Rec2, Msg2} -> + ?assertEqual(<<"2">>, amqp10_msg:body_bin(Msg2)), + ?assertEqual(ReceiverHighPrio, Rec2), + ok = amqp10_client:accept_msg(Rec2, Msg2) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Rec3, Msg3} -> + ?assertEqual(<<"3">>, amqp10_msg:body_bin(Msg3)), + ?assertEqual(ReceiverDefaultPrio, Rec3), + ok = amqp10_client:accept_msg(Rec3, Msg3) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Rec4, Msg4} -> + ?assertEqual(<<"4">>, amqp10_msg:body_bin(Msg4)), + ?assertEqual(ReceiverLowPrio, Rec4), + ok = amqp10_client:accept_msg(Rec4, Msg4) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, _, _} = Unexpected -> + ct:fail({unexpected_msg, Unexpected, ?LINE}) + after 5 -> ok + end, + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(ReceiverDefaultPrio), + ok = amqp10_client:detach_link(ReceiverHighPrio), + ok = amqp10_client:detach_link(ReceiverLowPrio), + {ok, #{message_count := 1}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +single_active_consumer_priority_quorum_queue(Config) -> + QType = <<"quorum">>, + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session1, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-single-active-consumer">> => true}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + %% Send 6 messages. + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session1, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + NumMsgs = 6, + [begin + Bin = integer_to_binary(N), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Bin, Bin, true)) + end || N <- lists:seq(1, NumMsgs)], + ok = amqp10_client:detach_link(Sender), + + %% The 1st consumer (with default prio 0) will become active. 
+ {ok, Recv1} = amqp10_client:attach_receiver_link( + Session1, <<"receiver 1">>, Address, unsettled), + receive {amqp10_event, {link, Recv1, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + {ok, Msg1} = amqp10_client:get_msg(Recv1), + ?assertEqual([<<"1">>], amqp10_msg:body(Msg1)), + + %% The 2nd consumer should take over thanks to higher prio. + {ok, Recv2} = amqp10_client:attach_receiver_link( + Session1, <<"receiver 2">>, Address, unsettled, none, #{}, + #{<<"rabbitmq:priority">> => {int, 1}}), + receive {amqp10_event, {link, Recv2, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + flush("attched receiver 2"), + + %% To ensure in-order processing and to avoid interrupting the 1st consumer during + %% its long running task processing, neither of the 2 consumers should receive more + %% messages until the 1st consumer settles all outstanding messages. + ?assertEqual({error, timeout}, amqp10_client:get_msg(Recv1, 5)), + ?assertEqual({error, timeout}, amqp10_client:get_msg(Recv2, 5)), + ok = amqp10_client:accept_msg(Recv1, Msg1), + receive {amqp10_msg, R1, Msg2} -> + ?assertEqual([<<"2">>], amqp10_msg:body(Msg2)), + ?assertEqual(Recv2, R1), + ok = amqp10_client:accept_msg(Recv2, Msg2) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + + %% Attaching with same prio should not take over. + {ok, Session2} = amqp10_client:begin_session_sync(Connection), + {ok, Recv3} = amqp10_client:attach_receiver_link( + Session2, <<"receiver 3">>, Address, unsettled, none, #{}, + #{<<"rabbitmq:priority">> => {int, 1}}), + receive {amqp10_event, {link, Recv3, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + ?assertEqual({error, timeout}, amqp10_client:get_msg(Recv3, 5)), + ok = end_session_sync(Session2), + + {ok, Recv4} = amqp10_client:attach_receiver_link( + Session1, <<"receiver 4">>, Address, unsettled, none, #{}, + #{<<"rabbitmq:priority">> => {int, 1}}), + receive {amqp10_event, {link, Recv4, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + {ok, Recv5} = amqp10_client:attach_receiver_link( + Session1, <<"receiver 5">>, Address, unsettled, none, #{}, + #{<<"rabbitmq:priority">> => {int, 1}}), + receive {amqp10_event, {link, Recv5, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + flush("attched receivers 4 and 5"), + + ok = amqp10_client:flow_link_credit(Recv4, 1, never), + ok = amqp10_client:flow_link_credit(Recv5, 2, never), + + %% Stop the active consumer. + ok = amqp10_client:detach_link(Recv2), + receive {amqp10_event, {link, Recv2, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% The 5th consumer should become the active one because it is up, + %% has highest prio (1), and most credits (2). + receive {amqp10_msg, R2, Msg3} -> + ?assertEqual([<<"3">>], amqp10_msg:body(Msg3)), + ?assertEqual(Recv5, R2), + ok = amqp10_client:accept_msg(Recv5, Msg3) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, R3, Msg4} -> + ?assertEqual([<<"4">>], amqp10_msg:body(Msg4)), + ?assertEqual(Recv5, R3), + ok = amqp10_client:accept_msg(Recv5, Msg4) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + + %% Stop the active consumer. + ok = amqp10_client:detach_link(Recv5), + receive {amqp10_event, {link, Recv5, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% The 4th consumer should become the active one because it is up, + %% has highest prio (1), and most credits (1). 
+ receive {amqp10_msg, R4, Msg5} -> + ?assertEqual([<<"5">>], amqp10_msg:body(Msg5)), + ?assertEqual(Recv4, R4), + ok = amqp10_client:accept_msg(Recv4, Msg5) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + + %% Stop the active consumer. + ok = amqp10_client:detach_link(Recv4), + receive {amqp10_event, {link, Recv4, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% The only up consumer left is the 1st one (prio 0) which still has 1 credit. + receive {amqp10_msg, R5, Msg6} -> + ?assertEqual([<<"6">>], amqp10_msg:body(Msg6)), + ?assertEqual(Recv1, R5), + ok = amqp10_client:accept_msg(Recv1, Msg6) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + + ok = amqp10_client:detach_link(Recv1), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session1), + ok = amqp10_client:close_connection(Connection). + +single_active_consumer_classic_queue(Config) -> + single_active_consumer(<<"classic">>, Config). + +single_active_consumer_quorum_queue(Config) -> + single_active_consumer(<<"quorum">>, Config). + +single_active_consumer(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-single-active-consumer">> => true}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + %% Attach 1 sender and 2 receivers to the queue. + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + flush(sender_attached), + + %% The 1st consumer will become active. + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver-1">>, + Address, + unsettled), + receive {amqp10_event, {link, Receiver1, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + ok = amqp10_client:flow_link_credit(Receiver1, 3, never), + + %% The 2nd consumer will become inactive. + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver-2">>, + Address, + unsettled), + receive {amqp10_event, {link, Receiver2, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + ok = amqp10_client:flow_link_credit(Receiver2, 3, never), + + NumMsgs = 5, + [begin + Bin = integer_to_binary(N), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Bin, Bin, true)) + end || N <- lists:seq(1, NumMsgs)], + + %% Only the active consumer should receive messages. + M1 = receive {amqp10_msg, Receiver1, Msg1} -> ?assertEqual([<<"1">>], amqp10_msg:body(Msg1)), + Msg1 + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver1, Msg2} -> ?assertEqual([<<"2">>], amqp10_msg:body(Msg2)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver1, Msg3} -> ?assertEqual([<<"3">>], amqp10_msg:body(Msg3)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_event, {link, Receiver1, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive Unexpected0 -> ct:fail("received unexpected ~p", [Unexpected0]) + after 10 -> ok + end, + + %% Accept only msg 1 + ok = amqp10_client:accept_msg(Receiver1, M1), + + %% Cancelling the active consumer should cause the inactive to become active. 
+ ok = amqp10_client:detach_link(Receiver1), + receive {amqp10_event, {link, Receiver1, {detached, normal}}} -> ok + after 5000 -> ct:fail("missing detached") + end, + + %% Since Receiver 1 didn't settle msg 2 and msg 3 but detached the link, + %% both messages should have been requeued. + %% With single-active-consumer, we expect the original message order to be retained. + M2b = receive {amqp10_msg, Receiver2, Msg2b} -> ?assertEqual([<<"2">>], amqp10_msg:body(Msg2b)), + Msg2b + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver2, Msg3b} -> ?assertEqual([<<"3">>], amqp10_msg:body(Msg3b)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + M4 = receive {amqp10_msg, Receiver2, Msg4} -> ?assertEqual([<<"4">>], amqp10_msg:body(Msg4)), + Msg4 + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_event, {link, Receiver2, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive Unexpected1 -> ct:fail("received unexpected ~p", [Unexpected1]) + after 10 -> ok + end, + + %% Receiver2 accepts all 3 messages it received. + ok = amqp10_client_session:disposition( + Receiver2, + amqp10_msg:delivery_id(M2b), + amqp10_msg:delivery_id(M4), + true, accepted), + %% This should leave us with Msg5 in the queue. + assert_messages(QName, 1, 0, Config), + + ok = amqp10_client:detach_link(Receiver2), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +single_active_consumer_drain_classic_queue(Config) -> + single_active_consumer_drain(<<"classic">>, Config). + +single_active_consumer_drain_quorum_queue(Config) -> + single_active_consumer_drain(<<"quorum">>, Config). + +single_active_consumer_drain(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-single-active-consumer">> => true}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + %% Attach 1 sender and 2 receivers to the queue. + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + + %% The 1st consumer will become active. + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver-1">>, + Address, + unsettled), + receive {amqp10_event, {link, Receiver1, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + %% The 2nd consumer will become inactive. + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver-2">>, + Address, + unsettled), + receive {amqp10_event, {link, Receiver2, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + flush(attached), + + %% Drain both active and inactive consumer for the 1st time. + ok = amqp10_client:flow_link_credit(Receiver1, 100, never, true), + ok = amqp10_client:flow_link_credit(Receiver2, 100, never, true), + receive {amqp10_event, {link, Receiver1, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {amqp10_event, {link, Receiver2, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Send 2 messages. 
+ ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"dtag1">>, <<"m1">>)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"dtag2">>, <<"m2">>)), + ok = wait_for_accepts(2), + + %% No consumer should receive a message since both should have 0 credits. + receive Unexpected0 -> ct:fail("received unexpected ~p", [Unexpected0]) + after 10 -> ok + end, + + %% Drain both active and inactive consumer for the 2nd time. + ok = amqp10_client:flow_link_credit(Receiver1, 200, never, true), + ok = amqp10_client:flow_link_credit(Receiver2, 200, never, true), + + %% Only the active consumer should receive messages. + receive {amqp10_msg, Receiver1, Msg1} -> + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)), + ok = amqp10_client:accept_msg(Receiver1, Msg1) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver1, Msg2} -> + ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2)), + ok = amqp10_client:accept_msg(Receiver1, Msg2) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_event, {link, Receiver1, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {amqp10_event, {link, Receiver2, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Cancelling the active consumer should cause the inactive to become active. + ok = amqp10_client:detach_link(Receiver1), + receive {amqp10_event, {link, Receiver1, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Send 1 more message. + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"dtag3">>, <<"m3">>)), + ok = wait_for_accepted(<<"dtag3">>), + + %% Our 2nd (now active) consumer should have 0 credits. + receive Unexpected1 -> ct:fail("received unexpected ~p", [Unexpected1]) + after 10 -> ok + end, + + %% Drain for the 3rd time. + ok = amqp10_client:flow_link_credit(Receiver2, 300, never, true), + + receive {amqp10_msg, Receiver2, Msg3} -> + ?assertEqual([<<"m3">>], amqp10_msg:body(Msg3)), + ok = amqp10_client:accept_msg(Receiver2, Msg3) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_event, {link, Receiver2, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + ok = amqp10_client:detach_link(Receiver2), + receive {amqp10_event, {link, Receiver2, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + ?assertMatch({ok, #{message_count := 0}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% "A session endpoint can choose to unmap its output handle for a link. In this case, the endpoint MUST +%% send a detach frame to inform the remote peer that the handle is no longer attached to the link endpoint. +%% If both endpoints do this, the link MAY return to a fully detached state. Note that in this case the +%% link endpoints MAY still indirectly communicate via the session, as there could still be active deliveries +%% on the link referenced via delivery-id." [2.6.4] +%% +%% "The disposition performative MAY refer to deliveries on links that are no longer attached. As long as +%% the links have not been closed or detached with an error then the deliveries are still "live" and the +%% updated state MUST be applied." 
[2.7.6]
+%%
+%% Although the spec allows settling delivery IDs on detached links, RabbitMQ does not respect the 'closed'
+%% field of the DETACH frame and therefore handles every DETACH frame as closed. Since the link is closed,
+%% we expect every outstanding delivery to be requeued.
+%%
+%% In addition to consumer cancellation, detaching a link therefore causes in-flight deliveries to be requeued.
+%% That's okay given that AMQP receivers can stop a link (figure 2.46) before detaching.
+%%
+%% Note that this behaviour is different from mere consumer cancellation in
+%% AMQP legacy:
+%% "After a consumer is cancelled there will be no future deliveries dispatched to it.
+%% Note that there can still be "in flight" deliveries dispatched previously.
+%% Cancelling a consumer will neither discard nor requeue them."
+%% [https://www.rabbitmq.com/consumers.html#unsubscribing]
+detach_requeues_one_session_classic_queue(Config) ->
+ detach_requeue_one_session(<<"classic">>, Config).
+
+detach_requeues_one_session_quorum_queue(Config) ->
+ detach_requeue_one_session(<<"quorum">>, Config).
+
+detach_requeue_one_session(QType, Config) ->
+ QName = atom_to_binary(?FUNCTION_NAME),
+ Ch = rabbit_ct_client_helpers:open_channel(Config),
+ #'queue.declare_ok'{} = amqp_channel:call(
+ Ch, #'queue.declare'{
+ queue = QName,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, QType}]}),
+
+ %% Attach 1 sender and 2 receivers to the queue.
+ OpnConf = connection_config(Config),
+ {ok, Connection} = amqp10_client:open_connection(OpnConf),
+ {ok, Session} = amqp10_client:begin_session_sync(Connection),
+ Address = rabbitmq_amqp_address:queue(QName),
+ {ok, Sender} = amqp10_client:attach_sender_link(
+ Session, <<"test-sender">>, Address, settled),
+ ok = wait_for_credit(Sender),
+ {ok, Receiver1} = amqp10_client:attach_receiver_link(
+ Session, <<"recv 1">>, Address, unsettled),
+ receive {amqp10_event, {link, Receiver1, attached}} -> ok
+ after 5000 -> ct:fail("missing attached")
+ end,
+ {ok, Receiver2} = amqp10_client:attach_receiver_link(
+ Session, <<"recv 2">>, Address, unsettled),
+ receive {amqp10_event, {link, Receiver2, attached}} -> ok
+ after 5000 -> ct:fail("missing attached")
+ end,
+ flush(attached),
+
+ ok = amqp10_client:flow_link_credit(Receiver1, 50, never),
+ ok = amqp10_client:flow_link_credit(Receiver2, 50, never),
+
+ %% Let's send 4 messages to the queue.
+ ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag1">>, <<"m1">>, true)),
+ ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag2">>, <<"m2">>, true)),
+ ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag3">>, <<"m3">>, true)),
+ ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag4">>, <<"m4">>, true)),
+ ok = amqp10_client:detach_link(Sender),
+
+ %% The queue should serve round robin.
+ [Msg1, Msg3] = receive_messages(Receiver1, 2),
+ [Msg2, Msg4] = receive_messages(Receiver2, 2),
+ ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)),
+ ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2)),
+ ?assertEqual([<<"m3">>], amqp10_msg:body(Msg3)),
+ ?assertEqual([<<"m4">>], amqp10_msg:body(Msg4)),
+
+ %% Let's detach the 1st receiver.
+ ok = amqp10_client:detach_link(Receiver1),
+ receive {amqp10_event, {link, Receiver1, {detached, normal}}} -> ok
+ after 5000 -> ct:fail("missing detached")
+ end,
+
+ %% Since Receiver1 hasn't settled its 2 deliveries,
+ %% we expect them to be re-queued and re-delivered to Receiver2.
+ [Msg1b, Msg3b] = receive_messages(Receiver2, 2), + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1b)), + ?assertEqual([<<"m3">>], amqp10_msg:body(Msg3b)), + + %% Receiver2 accepts all 4 messages. + ok = amqp10_client_session:disposition( + Receiver2, + amqp10_msg:delivery_id(Msg2), + amqp10_msg:delivery_id(Msg3b), + true, accepted), + assert_messages(QName, 0, 0, Config), + + %% Double check that there are no in flight deliveries in the server session. + [SessionPid] = rpc(Config, rabbit_amqp_session, list_local, []), + eventually(?_assertEqual( + 0, + begin + #{outgoing_unsettled_map := UnsettledMap} = formatted_state(SessionPid), + maps:size(UnsettledMap) + end)), + + ok = amqp10_client:detach_link(Receiver2), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{message_count = 0} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +detach_requeues_drop_head_classic_queue(Config) -> + QName1 = <<"q1">>, + QName2 = <<"q2">>, + Addr1 = rabbitmq_amqp_address:queue(QName1), + Addr2 = rabbitmq_amqp_address:queue(QName2), + {Connection, Session, LinkPair} = init(Config), + {ok, #{}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + QName1, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"classic">>}, + <<"x-max-length">> => {ulong, 1}, + <<"x-overflow">> => {utf8, <<"drop-head">>}, + <<"x-dead-letter-exchange">> => {utf8, <<>>}, + <<"x-dead-letter-routing-key">> => {utf8, QName2} + }}), + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName2, #{}), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Addr1, settled), + ok = wait_for_credit(Sender), + {ok, Receiver1} = amqp10_client:attach_receiver_link(Session, <<"recv 1">>, Addr1, unsettled), + receive {amqp10_event, {link, Receiver1, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + {ok, Receiver2} = amqp10_client:attach_receiver_link(Session, <<"recv 2">>, Addr2, unsettled), + receive {amqp10_event, {link, Receiver2, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + flush(attached), + + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag1">>, <<"m1">>, true)), + {ok, Msg1} = amqp10_client:get_msg(Receiver1), + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)), + + %% x-max-length in classic queues takes only ready but not unacked messages into account. + %% Since there are 0 ready messages, m2 will be queued. + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag2">>, <<"m2">>, true)), + %% Now, we have 2 messages in total: m1 is unacked and m2 is ready. + assert_messages(QName1, 2, 1, Config), + + ok = amqp10_client:detach_link(Sender), + + %% Detaching the link should requeue m1. + %% Since x-max-length is now exceeded, m1 should be dead-lettered to q2. 
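+ %% (After the requeue there would be 2 ready messages, exceeding x-max-length = 1, so
+ %% drop-head discards the oldest ready message, m1, which is then dead-lettered to q2.)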
+ ok = amqp10_client:detach_link(Receiver1), + receive {amqp10_event, {link, Receiver1, {detached, normal}}} -> ok + after 5000 -> ct:fail("missing detached") + end, + assert_messages(QName1, 1, 0, Config), %% m2 + assert_messages(QName2, 1, 0, Config), %% m1 + + {ok, Msg1DeadLettered} = amqp10_client:get_msg(Receiver2), + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1DeadLettered)), + ok = amqp10_client:accept_msg(Receiver2, Msg1DeadLettered), + assert_messages(QName2, 0, 0, Config), + + ok = amqp10_client:detach_link(Receiver2), + {ok, #{message_count := 1}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName1), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName2), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +detach_requeues_two_connections_classic_queue(Config) -> + detach_requeues_two_connections(<<"classic">>, Config). + +detach_requeues_two_connections_quorum_queue(Config) -> + detach_requeues_two_connections(<<"quorum">>, Config). + +detach_requeues_two_connections(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = <<"/queue/", QName/binary>>, + + %% Connect to new node. + OpnConf0 = connection_config(0, Config), + {ok, Connection0} = amqp10_client:open_connection(OpnConf0), + {ok, Session0} = amqp10_client:begin_session_sync(Connection0), + %% Connect to old node. + OpnConf1 = connection_config(1, Config), + {ok, Connection1} = amqp10_client:open_connection(OpnConf1), + {ok, Session1} = amqp10_client:begin_session_sync(Connection1), + + %% Declare queue on old node. + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session1, <<"my link pair">>), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + flush(link_pair_attached), + + %% Attach 1 sender and 2 receivers. + {ok, Sender} = amqp10_client:attach_sender_link(Session0, <<"sender">>, Address, settled), + ok = wait_for_credit(Sender), + + {ok, Receiver0} = amqp10_client:attach_receiver_link(Session0, <<"receiver 0">>, Address, unsettled), + receive {amqp10_event, {link, Receiver0, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + ok = gen_statem:cast(Session0, {flow_session, #'v1_0.flow'{incoming_window = {uint, 1}}}), + ok = amqp10_client:flow_link_credit(Receiver0, 50, never), + %% Wait for credit being applied to the queue. + timer:sleep(10), + + {ok, Receiver1} = amqp10_client:attach_receiver_link(Session1, <<"receiver 1">>, Address, unsettled), + receive {amqp10_event, {link, Receiver1, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + ok = amqp10_client:flow_link_credit(Receiver1, 40, never), + %% Wait for credit being applied to the queue. + timer:sleep(10), + + NumMsgs = 6, + [begin + Bin = integer_to_binary(N), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Bin, Bin, true)) + end || N <- lists:seq(1, NumMsgs)], + ok = amqp10_client:detach_link(Sender), + + %% The queue should serve round robin. Msg3 and Msg5 are in the server session's + %% outgoing-pending queue since we previously set Receiver0's incoming-window to 1. 
+ [Msg1] = receive_messages(Receiver0, 1), + [Msg2, Msg4, Msg6] = receive_messages(Receiver1, 3), + ?assertEqual([<<"1">>], amqp10_msg:body(Msg1)), + ?assertEqual([<<"2">>], amqp10_msg:body(Msg2)), + ?assertEqual([<<"4">>], amqp10_msg:body(Msg4)), + ?assertEqual([<<"6">>], amqp10_msg:body(Msg6)), + %% no delivery should be made at this point + receive {amqp10_msg, _, _} -> ct:fail(unexpected_delivery) + after 50 -> ok + end, + + %% Let's detach the receiver on the new node. (Internally on the server, + %% this sends a consumer removal message from the new node to the old node). + ok = amqp10_client:detach_link(Receiver0), + receive {amqp10_event, {link, Receiver0, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Since Receiver0 hasn't settled any deliveries, + %% we expect all 3 messages to be re-queued and re-delivered to Receiver1. + [Msg1b, Msg3, Msg5] = receive_messages(Receiver1, 3), + ?assertEqual([<<"1">>], amqp10_msg:body(Msg1b)), + ?assertEqual([<<"3">>], amqp10_msg:body(Msg3)), + ?assertEqual([<<"5">>], amqp10_msg:body(Msg5)), + + %% Receiver1 accepts all 6 messages. + ok = amqp10_client_session:disposition( + Receiver1, + amqp10_msg:delivery_id(Msg2), + amqp10_msg:delivery_id(Msg5), + true, accepted), + assert_messages(QName, 0, 0, Config), + + %% Double check that there are no in flight deliveries in the server session. + [SessionPid] = rpc(Config, rabbit_amqp_session, list_local, []), + eventually(?_assertEqual( + {0, 0}, + begin + #{outgoing_unsettled_map := UnsettledMap, + outgoing_pending := QueueLen} = formatted_state(SessionPid), + {maps:size(UnsettledMap), QueueLen} + end)), + + ok = amqp10_client:detach_link(Receiver1), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session0), + ok = end_session_sync(Session1), + ok = amqp10_client:close_connection(Connection0), + ok = amqp10_client:close_connection(Connection1). + +resource_alarm_before_session_begin(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName}), + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + + %% Set memory alarm before beginning the session. + DefaultWatermark = rpc(Config, vm_memory_monitor, get_vm_memory_high_watermark, []), + ok = rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0]), + timer:sleep(100), + + {ok, Session1} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session1, <<"test-sender">>, Address, unsettled), + %% We should still receive link credit since the target queue is fine. + ok = wait_for_credit(Sender), + %% However, RabbitMQ's incoming window shouldn't allow our client to send any TRANSFER. + %% In other words, the client is limited by session flow control, but not by link flow control. + Tag1 = <<"tag1">>, + Msg1 = amqp10_msg:new(Tag1, <<"m1">>, false), + ?assertEqual({error, remote_incoming_window_exceeded}, + amqp10_client:send_msg(Sender, Msg1)), + + %% Set additionally disk alarm. 
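+ %% Raising the disk free limit far above the actually available disk space makes the
+ %% disk monitor report insufficient free space, so a disk alarm is raised in addition
+ %% to the memory alarm.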
+ DefaultDiskFreeLimit = rpc(Config, rabbit_disk_monitor, get_disk_free_limit, []), + ok = rpc(Config, rabbit_disk_monitor, set_disk_free_limit, [999_000_000_000_000]), % 999 TB + timer:sleep(100), + + ?assertEqual({error, remote_incoming_window_exceeded}, + amqp10_client:send_msg(Sender, Msg1)), + + %% Clear memory alarm. + ok = rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [DefaultWatermark]), + timer:sleep(100), + + ?assertEqual({error, remote_incoming_window_exceeded}, + amqp10_client:send_msg(Sender, Msg1)), + + %% Clear disk alarm. + ok = rpc(Config, rabbit_disk_monitor, set_disk_free_limit, [DefaultDiskFreeLimit]), + timer:sleep(100), + + %% All alarms are cleared now. + %% Hence, RabbitMQ should open its incoming window allowing our client to send TRANSFERs. + ?assertEqual(ok, + amqp10_client:send_msg(Sender, Msg1)), + ok = wait_for_accepted(Tag1), + + ok = amqp10_client:detach_link(Sender), + ok = end_session_sync(Session1), + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +resource_alarm_after_session_begin(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName}), + Address = rabbitmq_amqp_address:queue(QName), + OpnConf = connection_config(Config), + + {ok, Connection1} = amqp10_client:open_connection(OpnConf), + {ok, Session1} = amqp10_client:begin_session_sync(Connection1), + {ok, Sender} = amqp10_client:attach_sender_link(Session1, <<"sender">>, Address, unsettled), + ok = wait_for_credit(Sender), + {ok, Receiver1} = amqp10_client:attach_receiver_link(Session1, <<"receiver 1">>, Address, unsettled), + + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t1">>, <<"m1">>, false)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t2">>, <<"m2">>, false)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t3">>, <<"m3">>, false)), + ok = wait_for_accepts(3), + + %% Set memory alarm. + DefaultWatermark = rpc(Config, vm_memory_monitor, get_vm_memory_high_watermark, []), + ok = rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0]), + timer:sleep(100), + + %% Our existing receiver should still be able to receive. + {ok, Msg1} = amqp10_client:get_msg(Receiver1), + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)), + ok = amqp10_client:accept_msg(Receiver1, Msg1), + + %% Attaching a new receiver to the same session and receiving should also work. + {ok, Receiver2} = amqp10_client:attach_receiver_link(Session1, <<"receiver 2">>, Address, unsettled), + {ok, Msg2} = amqp10_client:get_msg(Receiver2), + ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2)), + ok = amqp10_client:accept_msg(Receiver2, Msg2), + + %% Even creating a new connection and receiving should work. + {ok, Connection2} = amqp10_client:open_connection(OpnConf#{container_id => <<"my container 2">>}), + {ok, Session2} = amqp10_client:begin_session_sync(Connection2), + {ok, Receiver3} = amqp10_client:attach_receiver_link(Session2, <<"receiver 3">>, Address, unsettled), + {ok, Msg3} = amqp10_client:get_msg(Receiver3), + ?assertEqual([<<"m3">>], amqp10_msg:body(Msg3)), + ok = amqp10_client:accept_msg(Receiver3, Msg3), + + %% However, we shouldn't be able to send any TRANSFER. 
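+ %% (While an alarm is in effect the server keeps its session incoming-window closed, so
+ %% the client library refuses to send and returns remote_incoming_window_exceeded, as
+ %% asserted below.)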
+ Msg4 = amqp10_msg:new(<<"t4">>, <<"m4">>, false), + ?assertEqual({error, remote_incoming_window_exceeded}, + amqp10_client:send_msg(Sender, Msg4)), + + %% Clear memory alarm. + ok = rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [DefaultWatermark]), + timer:sleep(100), + + %% Now, we should be able to send again. + ?assertEqual(ok, + amqp10_client:send_msg(Sender, Msg4)), + ok = wait_for_accepted(<<"t4">>), + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver1), + ok = amqp10_client:detach_link(Receiver2), + ok = amqp10_client:detach_link(Receiver3), + ok = end_session_sync(Session1), + ok = end_session_sync(Session2), + ok = amqp10_client:close_connection(Connection1), + ok = amqp10_client:close_connection(Connection2), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +auth_attempt_metrics(Config) -> + open_and_close_connection(Config), + [Attempt1] = rpc(Config, rabbit_core_metrics, get_auth_attempts, []), + ?assertEqual(false, proplists:is_defined(remote_address, Attempt1)), + ?assertEqual(false, proplists:is_defined(username, Attempt1)), + ?assertEqual(<<"amqp10">>, proplists:get_value(protocol, Attempt1)), + ?assertEqual(1, proplists:get_value(auth_attempts, Attempt1)), + ?assertEqual(0, proplists:get_value(auth_attempts_failed, Attempt1)), + ?assertEqual(1, proplists:get_value(auth_attempts_succeeded, Attempt1)), + + rpc(Config, rabbit_core_metrics, reset_auth_attempt_metrics, []), + ok = rpc(Config, application, set_env, [rabbit, track_auth_attempt_source, true]), + open_and_close_connection(Config), + Attempts = rpc(Config, rabbit_core_metrics, get_auth_attempts_by_source, []), + [Attempt2] = lists:filter(fun(Props) -> + proplists:is_defined(remote_address, Props) + end, Attempts), + ?assertEqual(<<>>, proplists:get_value(remote_address, Attempt2)), + ?assertEqual(<<"guest">>, proplists:get_value(username, Attempt2)), + ?assertEqual(<<"amqp10">>, proplists:get_value(protocol, Attempt2)), + ?assertEqual(1, proplists:get_value(auth_attempts, Attempt2)), + ?assertEqual(0, proplists:get_value(auth_attempts_failed, Attempt2)), + ?assertEqual(1, proplists:get_value(auth_attempts_succeeded, Attempt2)). + +max_message_size_client_to_server(Config) -> + DefaultMaxMessageSize = rpc(Config, persistent_term, get, [max_message_size]), + %% Limit the server to only accept messages up to 2KB. 
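+ %% This overrides the `max_message_size` setting that operators normally configure in
+ %% rabbitmq.conf; the server caches the value in persistent_term, so the test swaps it
+ %% in place and restores it at the end.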
+ MaxMessageSize = 2_000, + ok = rpc(Config, persistent_term, put, [max_message_size, MaxMessageSize]), + + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName}), + Address = rabbitmq_amqp_address:queue(QName), + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address, mixed), + ok = wait_for_credit(Sender), + + PayloadSmallEnough = binary:copy(<<0>>, MaxMessageSize - 10), + ?assertEqual(ok, + amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t1">>, PayloadSmallEnough, false))), + ok = wait_for_accepted(<<"t1">>), + + PayloadTooLarge = binary:copy(<<0>>, MaxMessageSize + 1), + ?assertEqual({error, message_size_exceeded}, + amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t2">>, PayloadTooLarge, false))), + ?assertEqual({error, message_size_exceeded}, + amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t3">>, PayloadTooLarge, true))), + + ok = amqp10_client:detach_link(Sender), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch), + ok = rpc(Config, persistent_term, put, [max_message_size, DefaultMaxMessageSize]). + +max_message_size_server_to_client(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName}), + Address = rabbitmq_amqp_address:queue(QName), + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address, unsettled), + ok = wait_for_credit(Sender), + + MaxMessageSize = 2000, + %% Leave a bit of headroom for additional sections sent from RabbitMQ to us, + %% e.g. message annotations with routing key and exchange name. + PayloadSmallEnough = binary:copy(<<0>>, MaxMessageSize - 200), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t1">>, PayloadSmallEnough, false)), + ok = wait_for_accepted(<<"t1">>), + + AttachArgs = #{max_message_size => MaxMessageSize, + name => <<"test-receiver">>, + role => {receiver, #{address => Address, + durable => configuration}, self()}, + snd_settle_mode => unsettled, + rcv_settle_mode => first, + filter => #{}}, + {ok, Receiver} = amqp10_client:attach_link(Session, AttachArgs), + {ok, Msg} = amqp10_client:get_msg(Receiver), + ?assertEqual([PayloadSmallEnough], amqp10_msg:body(Msg)), + + PayloadTooLarge = binary:copy(<<0>>, MaxMessageSize + 1), + %% The sending link has no maximum message size set. + %% Hence, sending this large message from client to server should work. + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t2">>, PayloadTooLarge, false)), + ok = wait_for_accepted(<<"t2">>), + + %% However, the receiving link has a maximum message size set. + %% Hence, when the server attempts to deliver this large message, + %% it should throw link error message-size-exceeded. 
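+ %% (message-size-exceeded is defined as a link error, but as the assertion below shows,
+ %% the client observes the session ending with that error condition.)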
+ ok = amqp10_client:flow_link_credit(Receiver, 1, never), + receive + {amqp10_event, + {session, Session, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_LINK_ERROR_MESSAGE_SIZE_EXCEEDED}}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive expected error") + end, + + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +last_queue_confirms(Config) -> + ClassicQ = <<"my classic queue">>, + QuorumQ = <<"my quorum queue">>, + Qs = [ClassicQ, QuorumQ], + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = ClassicQ}), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QuorumQ, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 3} + ]}), + [#'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = QName, + exchange = <<"amq.fanout">>}) + || QName <- Qs], + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + + AddressFanout = rabbitmq_amqp_address:exchange(<<"amq.fanout">>), + {ok, SenderFanout} = amqp10_client:attach_sender_link( + Session, <<"sender-1">>, AddressFanout, unsettled), + ok = wait_for_credit(SenderFanout), + + AddressClassicQ = rabbitmq_amqp_address:queue(ClassicQ), + {ok, SenderClassicQ} = amqp10_client:attach_sender_link( + Session, <<"sender-2">>, AddressClassicQ, unsettled), + ok = wait_for_credit(SenderClassicQ), + + DTag1 = <<"t1">>, + ok = amqp10_client:send_msg(SenderFanout, amqp10_msg:new(DTag1, <<"m1">>, false)), + receive {amqp10_disposition, {accepted, DTag1}} -> ok + after 5000 -> ct:fail({missing_accepted, DTag1}) + end, + + %% Make quorum queue unavailable. + ok = rabbit_ct_broker_helpers:stop_node(Config, 2), + ok = rabbit_ct_broker_helpers:stop_node(Config, 1), + + DTag2 = <<"t2">>, + DTag3 = <<"t3">>, + ok = amqp10_client:send_msg(SenderFanout, amqp10_msg:new(DTag2, <<"m2">>, false)), + ok = amqp10_client:send_msg(SenderClassicQ, amqp10_msg:new(DTag3, <<"m3">>, false)), + + %% Since quorum queue is down, we should only get a confirmation for m3. + receive {amqp10_disposition, {accepted, DTag3}} -> ok + after 5000 -> ct:fail({missing_accepted, DTag3}) + end, + receive {amqp10_disposition, Unexpected} -> ct:fail({unexpected_disposition, Unexpected}) + after 200 -> ok + end, + + ok = rabbit_ct_broker_helpers:start_node(Config, 1), + ok = rabbit_ct_broker_helpers:start_node(Config, 2), + %% Since the quorum queue has become available, we should now get a confirmation for m2. + receive {amqp10_disposition, {accepted, DTag2}} -> ok + after 10_000 -> ct:fail({missing_accepted, DTag2}) + end, + + ok = amqp10_client:detach_link(SenderClassicQ), + ok = amqp10_client:detach_link(SenderFanout), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + ?assertEqual(#'queue.delete_ok'{message_count = 3}, + amqp_channel:call(Ch, #'queue.delete'{queue = ClassicQ})), + ?assertEqual(#'queue.delete_ok'{message_count = 2}, + amqp_channel:call(Ch, #'queue.delete'{queue = QuorumQ})), + ok = rabbit_ct_client_helpers:close_channel(Ch). 
+ +target_queue_deleted(Config) -> + ClassicQ = <<"my classic queue">>, + QuorumQ = <<"my quorum queue">>, + Qs = [ClassicQ, QuorumQ], + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = ClassicQ}), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QuorumQ, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + %% Use 2 replica quorum queue, such that we can stop 1 node + %% later to make quorum queue unavailable, but still have + %% 2 out of 3 nodes running for Khepri being available. + {<<"x-quorum-initial-group-size">>, long, 2} + ]}), + [#'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = QName, + exchange = <<"amq.fanout">>}) + || QName <- Qs], + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + + Address = rabbitmq_amqp_address:exchange(<<"amq.fanout">>), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"sender">>, Address, unsettled), + ok = wait_for_credit(Sender), + + DTag1 = <<"t1">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag1, <<"m1">>, false)), + receive {amqp10_disposition, {accepted, DTag1}} -> ok + after 5000 -> ct:fail({missing_accepted, DTag1}) + end, + + N0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + RaName = ra_name(QuorumQ), + ServerId0 = {RaName, N0}, + {ok, Members, _Leader} = ra:members(ServerId0), + ?assertEqual(2, length(Members)), + [{RaName, ReplicaNode}] = Members -- [ServerId0], + ct:pal("Stopping node ~s to make quorum queue unavailable...", [ReplicaNode]), + ok = rabbit_ct_broker_helpers:stop_node(Config, ReplicaNode), + flush("quorum queue is down"), + + DTag2 = <<"t2">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag2, <<"m2">>, false)), + %% Target classic queue should receive m2. + assert_messages(ClassicQ, 2, 0, Config), + %% Delete target classic queue. (Khepri is still available.) + ?assertEqual(#'queue.delete_ok'{message_count = 2}, + amqp_channel:call(Ch, #'queue.delete'{queue = ClassicQ})), + + %% Since quorum queue is down, we should still receive no DISPOSITION. + receive {amqp10_disposition, Unexpected} -> ct:fail({unexpected_disposition, Unexpected}) + after 100 -> ok + end, + + ok = rabbit_ct_broker_helpers:start_node(Config, ReplicaNode), + %% Since the quorum queue has become available, we should now get a confirmation for m2. + receive {amqp10_disposition, {accepted, DTag2}} -> ok + after 10_000 -> ct:fail({missing_accepted, DTag2}) + end, + + ok = amqp10_client:detach_link(Sender), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + ?assertEqual(#'queue.delete_ok'{message_count = 2}, + amqp_channel:call(Ch, #'queue.delete'{queue = QuorumQ})), + ok = rabbit_ct_client_helpers:close_channel(Ch). 
+ +target_classic_queue_down(Config) -> + ClassicQueueNode = 2, + Ch = rabbit_ct_client_helpers:open_channel(Config, ClassicQueueNode), + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"classic">>}]}), + ok = rabbit_ct_client_helpers:close_channels_and_connection(Config, ClassicQueueNode), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Receiver1} = amqp10_client:attach_receiver_link(Session, <<"receiver 1">>, Address), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address, unsettled), + ok = wait_for_credit(Sender), + + DTag1 = <<"t1">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag1, <<"m1">>, false)), + ok = wait_for_accepted(DTag1), + + {ok, Msg1} = amqp10_client:get_msg(Receiver1), + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)), + + %% Make classic queue down. + flush("stopping node"), + ok = rabbit_ct_broker_helpers:stop_node(Config, ClassicQueueNode), + + %% We expect that the server closes links that receive from classic queues that are down. + ExpectedError = #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_ILLEGAL_STATE}, + receive {amqp10_event, {link, Receiver1, {detached, ExpectedError}}} -> ok + after 10_000 -> ct:fail({missing_event, ?LINE}) + end, + %% However the server should not close links that send to classic queues that are down. + receive Unexpected -> ct:fail({unexpected, Unexpected}) + after 20 -> ok + end, + %% Instead, the server should reject messages that are sent to classic queues that are down. + DTag2 = <<"t2">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag2, <<"m2">>, false)), + ok = wait_for_settlement(DTag2, rejected), + + ok = rabbit_ct_broker_helpers:start_node(Config, ClassicQueueNode), + %% Now that the classic queue is up again, we should be able to attach a new receiver + %% and be able to send to and receive from the classic queue. + {ok, Receiver2} = amqp10_client:attach_receiver_link(Session, <<"receiver 2">>, Address), + receive {amqp10_event, {link, Receiver2, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + DTag3 = <<"t3">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag3, <<"m3">>, false)), + ok = wait_for_accepted(DTag3), + {ok, Msg3} = amqp10_client:get_msg(Receiver2), + ?assertEqual([<<"m3">>], amqp10_msg:body(Msg3)), + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver2), + ok = delete_queue(Session, QName), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +async_notify_settled_classic_queue(Config) -> + async_notify(settled, <<"classic">>, Config). + +async_notify_settled_quorum_queue(Config) -> + async_notify(settled, <<"quorum">>, Config). + +async_notify_settled_stream(Config) -> + async_notify(settled, <<"stream">>, Config). + +async_notify_unsettled_classic_queue(Config) -> + case rabbit_ct_broker_helpers:enable_feature_flag(Config, 'rabbitmq_4.0.0') of + ok -> + async_notify(unsettled, <<"classic">>, Config); + {skip, _} -> + {skip, "Skipping as this test will flake. Link flow control in classic " + "queues with credit API v1 is known to be broken: " + "https://github.com/rabbitmq/rabbitmq-server/issues/2597"} + end. 
+
+async_notify_unsettled_quorum_queue(Config) ->
+    async_notify(unsettled, <<"quorum">>, Config).
+
+async_notify_unsettled_stream(Config) ->
+    async_notify(unsettled, <<"stream">>, Config).
+
+%% Test asynchronous notification, figure 2.45.
+async_notify(SenderSettleMode, QType, Config) ->
+    %% Place queue leader on the old node.
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 1),
+    QName = atom_to_binary(?FUNCTION_NAME),
+    #'queue.declare_ok'{} = amqp_channel:call(
+                              Ch, #'queue.declare'{
+                                     queue = QName,
+                                     durable = true,
+                                     arguments = [{<<"x-queue-type">>, longstr, QType}]}),
+    %% Connect AMQP client to the new node causing queue client to run the new code.
+    OpnConf = connection_config(Config),
+    {ok, Connection} = amqp10_client:open_connection(OpnConf),
+    {ok, Session} = amqp10_client:begin_session_sync(Connection),
+
+    %% Send 30 messages to the queue.
+    Address = rabbitmq_amqp_address:queue(QName),
+    {ok, Sender} = amqp10_client:attach_sender_link(
+                     Session, <<"test-sender">>, Address),
+    ok = wait_for_credit(Sender),
+    NumMsgs = 30,
+    [begin
+         Bin = integer_to_binary(N),
+         ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Bin, Bin, false))
+     end || N <- lists:seq(1, NumMsgs)],
+    %% Wait for last message to be confirmed.
+    ok = wait_for_accepted(integer_to_binary(NumMsgs)),
+    flush(settled),
+    ok = detach_link_sync(Sender),
+
+    case QType of
+        <<"stream">> ->
+            %% If it is a stream, we need to wait until there is a local member
+            %% on the node we want to subscribe from before proceeding.
+            rabbit_ct_helpers:await_condition(
+              fun() -> rpc(Config, 0, ?MODULE, has_local_member,
+                           [rabbit_misc:r(<<"/">>, queue, QName)])
+              end, 30_000);
+        _ ->
+            ok
+    end,
+    Filter = consume_from_first(QType),
+    {ok, Receiver} = amqp10_client:attach_receiver_link(
+                       Session, <<"test-receiver">>, Address,
+                       SenderSettleMode, configuration, Filter),
+    receive {amqp10_event, {link, Receiver, attached}} -> ok
+    after 5000 -> ct:fail("missing attached")
+    end,
+
+    %% Initially, grant 10 credits to the sending queue.
+    %% Whenever the sum of credits and number of unsettled messages drops below 5, renew back to 10.
+    ok = amqp10_client:flow_link_credit(Receiver, 10, 5),
+
+    %% We should receive all messages.
+    Accept = case SenderSettleMode of
+                 settled -> false;
+                 unsettled -> true
+             end,
+    Msgs = receive_all_messages(Receiver, Accept),
+    FirstMsg = hd(Msgs),
+    LastMsg = lists:last(Msgs),
+    ?assertEqual([<<"1">>], amqp10_msg:body(FirstMsg)),
+    ?assertEqual([integer_to_binary(NumMsgs)], amqp10_msg:body(LastMsg)),
+
+    %% No further messages should be delivered.
+    receive Unexpected -> ct:fail({received_unexpected_message, Unexpected})
+    after 50 -> ok
+    end,
+
+    #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}),
+    ok = rabbit_ct_client_helpers:close_channel(Ch),
+    ok = end_session_sync(Session),
+    ok = close_connection_sync(Connection).
+
+%% For TRANSFERS from AMQP client to RabbitMQ, this test asserts that a single slow link receiver
+%% (slow queue) does not impact other link receivers (fast queues) on the **same** session.
+%% (This is unlike AMQP legacy where a single slow queue will block the entire connection.)
+link_flow_control(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config),
+    CQ = <<"cq">>,
+    QQ = <<"qq">>,
+    #'queue.declare_ok'{} = amqp_channel:call(
+                              Ch, #'queue.declare'{
+                                     queue = CQ,
+                                     durable = true,
+                                     arguments = [{<<"x-queue-type">>, longstr, <<"classic">>}]}),
+    #'queue.declare_ok'{} = amqp_channel:call(
+                              Ch, #'queue.declare'{
+                                     queue = QQ,
+                                     durable = true,
+                                     arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}),
+    OpnConf = connection_config(Config),
+    {ok, Connection} = amqp10_client:open_connection(OpnConf),
+    {ok, Session} = amqp10_client:begin_session_sync(Connection),
+
+    AddressCQ = rabbitmq_amqp_address:queue(CQ),
+    AddressQQ = rabbitmq_amqp_address:queue(QQ),
+    {ok, ReceiverCQ} = amqp10_client:attach_receiver_link(Session, <<"cq-receiver">>, AddressCQ, settled),
+    {ok, ReceiverQQ} = amqp10_client:attach_receiver_link(Session, <<"qq-receiver">>, AddressQQ, settled),
+    {ok, SenderCQ} = amqp10_client:attach_sender_link(Session, <<"cq-sender">>, AddressCQ, settled),
+    {ok, SenderQQ} = amqp10_client:attach_sender_link(Session, <<"qq-sender">>, AddressQQ, settled),
+    ok = wait_for_credit(SenderCQ),
+    ok = wait_for_credit(SenderQQ),
+    flush(attached),
+
+    %% Send and receive a single message on both queues.
+    ok = amqp10_client:send_msg(SenderCQ, amqp10_msg:new(<<0>>, <<0>>, true)),
+    ok = amqp10_client:send_msg(SenderQQ, amqp10_msg:new(<<1>>, <<1>>, true)),
+    {ok, Msg0} = amqp10_client:get_msg(ReceiverCQ),
+    ?assertEqual([<<0>>], amqp10_msg:body(Msg0)),
+    {ok, Msg1} = amqp10_client:get_msg(ReceiverQQ),
+    ?assertEqual([<<1>>], amqp10_msg:body(Msg1)),
+
+    %% Make the quorum queue unavailable.
+    ok = rabbit_ct_broker_helpers:stop_node(Config, 2),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, 1),
+
+    NumMsgs = 1000,
+    %% Since the quorum queue is unavailable, we expect our quorum queue sender to run
+    %% out of credits and RabbitMQ should not grant our quorum queue sender any new credits.
+    ok = assert_link_credit_runs_out(SenderQQ, NumMsgs),
+    %% Despite the quorum queue being unavailable, the classic queue can still receive messages.
+    %% So, we expect that on the same AMQP session, link credit will be renewed for our classic queue sender.
+    ok = send_messages(SenderCQ, NumMsgs, true),
+
+    %% Check that all 1k messages can be received from the classic queue.
+    ok = amqp10_client:flow_link_credit(ReceiverCQ, NumMsgs, never),
+    ReceivedCQ = receive_messages(ReceiverCQ, NumMsgs),
+    FirstMsg = hd(ReceivedCQ),
+    LastMsg = lists:last(ReceivedCQ),
+    ?assertEqual([integer_to_binary(NumMsgs)], amqp10_msg:body(FirstMsg)),
+    ?assertEqual([<<"1">>], amqp10_msg:body(LastMsg)),
+
+    %% We still expect that RabbitMQ won't grant our quorum queue sender any new credits.
+    receive {amqp10_event, {link, SenderQQ, credited}} ->
+                ct:fail({unexpected_credited, ?LINE})
+    after 5 -> ok
+    end,
+
+    %% Make the quorum queue available again.
+    ok = rabbit_ct_broker_helpers:start_node(Config, 1),
+    ok = rabbit_ct_broker_helpers:start_node(Config, 2),
+
+    %% Now, we expect that the messages sent earlier actually make it into the quorum queue.
+    %% Therefore, RabbitMQ should grant our quorum queue sender more credits.
+ receive {amqp10_event, {link, SenderQQ, credited}} -> + ct:pal("quorum queue sender got credited") + after 30_000 -> ct:fail({credited_timeout, ?LINE}) + end, + + [ok = amqp10_client:detach_link(Link) || Link <- [ReceiverCQ, ReceiverQQ, SenderCQ, SenderQQ]], + ok = delete_queue(Session, QQ), + ok = delete_queue(Session, CQ), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +classic_queue_on_old_node(Config) -> + queue_and_client_different_nodes(1, 0, <<"classic">>, Config). + +classic_queue_on_new_node(Config) -> + queue_and_client_different_nodes(0, 1, <<"classic">>, Config). + +quorum_queue_on_old_node(Config) -> + queue_and_client_different_nodes(1, 0, <<"quorum">>, Config). + +quorum_queue_on_new_node(Config) -> + Versions = rabbit_ct_broker_helpers:rpc_all(Config, rabbit_fifo, version, []), + case lists:usort(Versions) of + [_] -> + %% all are one version, go ahead with the test + queue_and_client_different_nodes(0, 1, <<"quorum">>, Config); + _ -> + {skip, "this test cannot pass with mixed QQ machine versions"} + end. + +%% In mixed version tests, run the queue leader with old code +%% and queue client with new code, or vice versa. +queue_and_client_different_nodes(QueueLeaderNode, ClientNode, QueueType, Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, QueueLeaderNode), + QName = atom_to_binary(?FUNCTION_NAME), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QueueType}]}), + %% Connect AMQP client to the new (or old) node causing queue client to run the new (or old) code. + OpnConf = connection_config(ClientNode, Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = <<"/amq/queue/", QName/binary>>, + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver">>, + Address, + unsettled), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + flush(receiver_attached), + + %% Let's test with many messages to make sure we're not + %% impacted by RabbitMQ internal credit based flow control. + NumMsgs = 1100, + ok = send_messages(Sender, NumMsgs, true), + + %% Grant credits to the sending queue. + ok = amqp10_client:flow_link_credit(Receiver, NumMsgs, never), + + %% We should receive all messages. + Msgs = receive_messages(Receiver, NumMsgs), + FirstMsg = hd(Msgs), + LastMsg = lists:last(Msgs), + ?assertEqual([integer_to_binary(NumMsgs)], amqp10_msg:body(FirstMsg)), + ?assertEqual([<<"1">>], amqp10_msg:body(LastMsg)), + + ok = amqp10_client_session:disposition( + Receiver, + amqp10_msg:delivery_id(FirstMsg), + amqp10_msg:delivery_id(LastMsg), + true, + accepted), + + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of + true -> + %% Send another message and drain. 
+ Tag = <<"tag">>, + Body = <<"body">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Tag, Body, false)), + ok = wait_for_accepted(Tag), + ok = amqp10_client:flow_link_credit(Receiver, 999, never, true), + [Msg] = receive_messages(Receiver, 1), + ?assertEqual([Body], amqp10_msg:body(Msg)), + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + ok = amqp10_client:accept_msg(Receiver, Msg); + false -> + ct:pal("Both quorum queues and classic queues in credit API v1 + have a known bug that they reply with send_drained + before delivering the message.") + end, + + ExpectedReadyMsgs = 0, + ?assertEqual(#'queue.delete_ok'{message_count = ExpectedReadyMsgs}, + amqp_channel:call(Ch, #'queue.delete'{queue = QName})), + ok = rabbit_ct_client_helpers:close_channel(Ch), + ok = amqp10_client:close_connection(Connection). + +maintenance(Config) -> + {ok, C0} = amqp10_client:open_connection(connection_config(0, Config)), + {ok, C2} = amqp10_client:open_connection(connection_config(2, Config)), + receive {amqp10_event, {connection, C0, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {amqp10_event, {connection, C2, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + ok = drain_node(Config, 2), + receive + {amqp10_event, + {connection, C2, + {closed, + {internal_error, <<"Connection forced: \"Node was put into maintenance mode\"">>}}}} -> + ok + after 5000 -> + flush(?LINE), + ct:fail({missing_event, ?LINE}) + end, + ok = revive_node(Config, 2), + + ok = close_connection_sync(C0). + +%% https://github.com/rabbitmq/rabbitmq-server/issues/11841 +leader_transfer_quorum_queue_credit_single(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer(QName, <<"quorum">>, 1, Config). + +leader_transfer_quorum_queue_credit_batches(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer(QName, <<"quorum">>, 3, Config). + +leader_transfer_stream_credit_single(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer(QName, <<"stream">>, 1, Config). + +leader_transfer_stream_credit_batches(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer(QName, <<"stream">>, 3, Config). + +leader_transfer(QName, QType, Credit, Config) -> + %% Create queue with leader on node 1. + {Connection1, Session1, LinkPair1} = init(1, Config), + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue( + LinkPair1, + QName, + #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-queue-leader-locator">> => {utf8, <<"client-local">>}}}), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair1), + ok = end_session_sync(Session1), + ok = close_connection_sync(Connection1), + + %% Consume from a follower. 
+ OpnConf = connection_config(0, Config), + {ok, Connection0} = amqp10_client:open_connection(OpnConf), + {ok, Session0} = amqp10_client:begin_session_sync(Connection0), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session0, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + + NumMsgs = 30, + ok = send_messages(Sender, NumMsgs, false), + ok = wait_for_accepts(NumMsgs), + ok = detach_link_sync(Sender), + + %% Wait a bit to avoid the following error when attaching: + %% "stream queue does not have a running replica on the local node" + timer:sleep(50), + + Filter = consume_from_first(QType), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session0, <<"receiver">>, Address, + settled, configuration, Filter), + flush(receiver_attached), + %% Top up credits very often during the leader change. + ok = amqp10_client:flow_link_credit(Receiver, Credit, Credit), + + %% After receiving the 1st message, let's move the leader away from node 1. + receive_messages(Receiver, 1), + ok = drain_node(Config, 1), + %% We expect to receive all remaining messages. + receive_messages(Receiver, NumMsgs - 1), + + ok = revive_node(Config, 1), + ok = amqp10_client:detach_link(Receiver), + ok = delete_queue(Session0, QName), + ok = end_session_sync(Session0), + ok = amqp10_client:close_connection(Connection0). + +%% rabbitmqctl list_connections +%% should list both AMQP 1.0 and AMQP 0.9.1 connections. +list_connections(Config) -> + %% Close any open AMQP 0.9.1 connections from previous test cases. + [ok = rabbit_ct_client_helpers:close_channels_and_connection(Config, Node) || Node <- [0, 1, 2]], + + Connection091 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), + {ok, C0} = amqp10_client:open_connection(connection_config(0, Config)), + {ok, C2} = amqp10_client:open_connection(connection_config(2, Config)), + receive {amqp10_event, {connection, C0, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {amqp10_event, {connection, C2, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + {ok, StdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["list_connections", "--silent", "protocol"]), + Protocols0 = re:split(StdOut, <<"\n">>, [trim]), + %% Remove any whitespaces. + Protocols1 = [binary:replace(Subject, <<" ">>, <<>>, [global]) || Subject <- Protocols0], + Protocols = lists:sort(Protocols1), + ?assertEqual([<<"{0,9,1}">>, + <<"{1,0}">>, + <<"{1,0}">>], + Protocols), + + ok = rabbit_ct_client_helpers:close_connection(Connection091), + ok = close_connection_sync(C0), + ok = close_connection_sync(C2). 
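Aside (illustrative only, not part of the patch): both async_notify/3 and leader_transfer/4 above rely on the client library's automatic credit top-up, `amqp10_client:flow_link_credit(Receiver, Credit, RenewWhenBelow)`: credit is renewed back to Credit whenever the sum of remaining credit and unsettled deliveries drops below RenewWhenBelow, so the receiver keeps getting messages across queue leader changes without issuing FLOW frames by hand. A minimal consumer loop using that mode, assuming an unsettled receiver link and hypothetical helper names, could look like this:

%% Drain N messages relying on automatic credit renewal (10, renew below 5).
consume_n(Receiver, N) ->
    ok = amqp10_client:flow_link_credit(Receiver, 10, 5),
    consume_loop(Receiver, N, []).

consume_loop(_Receiver, 0, Acc) ->
    lists:reverse(Acc);
consume_loop(Receiver, N, Acc) ->
    receive
        {amqp10_msg, Receiver, Msg} ->
            ok = amqp10_client:accept_msg(Receiver, Msg),
            consume_loop(Receiver, N - 1, [Msg | Acc])
    after 30_000 ->
            exit({missing_deliveries, N})
    end.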
+ +global_counters(Config) -> + #{publishers := 0, + consumers := 0, + messages_received_total := Received0, + messages_received_confirm_total := ReceivedConfirm0, + messages_confirmed_total := Confirmed0, + messages_routed_total := Routed0, + messages_unroutable_dropped_total := UnroutableDropped0, + messages_unroutable_returned_total := UnroutableReturned0} = get_global_counters(Config), + + #{messages_delivered_total := CQDelivered0, + messages_redelivered_total := CQRedelivered0, + messages_acknowledged_total := CQAcknowledged0} = get_global_counters(Config, rabbit_classic_queue), + + #{messages_delivered_total := QQDelivered0, + messages_redelivered_total := QQRedelivered0, + messages_acknowledged_total := QQAcknowledged0} = get_global_counters(Config, rabbit_quorum_queue), + + Ch = rabbit_ct_client_helpers:open_channel(Config), + CQ = <<"my classic queue">>, + QQ = <<"my quorum queue">>, + CQAddress = rabbitmq_amqp_address:queue(CQ), + QQAddress = rabbitmq_amqp_address:queue(QQ), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = CQ, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"classic">>}]}), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QQ, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, CQSender} = amqp10_client:attach_sender_link(Session, <<"test-sender-cq">>, CQAddress), + {ok, QQSender} = amqp10_client:attach_sender_link(Session, <<"test-sender-qq">>, QQAddress), + ok = wait_for_credit(CQSender), + ok = wait_for_credit(QQSender), + {ok, CQReceiver} = amqp10_client:attach_receiver_link(Session, <<"test-receiver-cq">>, CQAddress, settled), + {ok, QQReceiver} = amqp10_client:attach_receiver_link(Session, <<"test-receiver-qq">>, QQAddress, unsettled), + ok = amqp10_client:send_msg(CQSender, amqp10_msg:new(<<0>>, <<"m0">>, true)), + ok = amqp10_client:send_msg(QQSender, amqp10_msg:new(<<1>>, <<"m1">>, false)), + ok = wait_for_accepted(<<1>>), + + {ok, Msg0} = amqp10_client:get_msg(CQReceiver), + ?assertEqual([<<"m0">>], amqp10_msg:body(Msg0)), + + {ok, Msg1} = amqp10_client:get_msg(QQReceiver), + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)), + ok = amqp10_client:accept_msg(QQReceiver, Msg1), + + #{publishers := 2, + consumers := 2, + messages_received_total := Received1, + messages_received_confirm_total := ReceivedConfirm1, + messages_confirmed_total := Confirmed1, + messages_routed_total := Routed1, + messages_unroutable_dropped_total := UnroutableDropped1, + messages_unroutable_returned_total := UnroutableReturned1} = get_global_counters(Config), + ?assertEqual(Received0 + 2, Received1), + ?assertEqual(ReceivedConfirm0 + 1, ReceivedConfirm1), + ?assertEqual(Confirmed0 + 1, Confirmed1), + ?assertEqual(Routed0 + 2, Routed1), + ?assertEqual(UnroutableDropped0, UnroutableDropped1), + ?assertEqual(UnroutableReturned0, UnroutableReturned1), + + #{messages_delivered_total := CQDelivered1, + messages_redelivered_total := CQRedelivered1, + messages_acknowledged_total := CQAcknowledged1} = get_global_counters(Config, rabbit_classic_queue), + ?assertEqual(CQDelivered0 + 1, CQDelivered1), + ?assertEqual(CQRedelivered0, CQRedelivered1), + ?assertEqual(CQAcknowledged0, CQAcknowledged1), + + #{messages_delivered_total := QQDelivered1, + messages_redelivered_total := QQRedelivered1, + 
messages_acknowledged_total := QQAcknowledged1} = get_global_counters(Config, rabbit_quorum_queue), + ?assertEqual(QQDelivered0 + 1, QQDelivered1), + ?assertEqual(QQRedelivered0, QQRedelivered1), + ?assertEqual(QQAcknowledged0 + 1, QQAcknowledged1), + + %% Test re-delivery. + ok = amqp10_client:send_msg(QQSender, amqp10_msg:new(<<2>>, <<"m2">>, false)), + ok = wait_for_accepted(<<2>>), + {ok, Msg2a} = amqp10_client:get_msg(QQReceiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2a)), + %% Releasing causes the message to be requeued. + ok = amqp10_client:settle_msg(QQReceiver, Msg2a, released), + %% The message should be re-delivered. + {ok, Msg2b} = amqp10_client:get_msg(QQReceiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2b)), + #{messages_delivered_total := QQDelivered2, + messages_redelivered_total := QQRedelivered2, + messages_acknowledged_total := QQAcknowledged2} = get_global_counters(Config, rabbit_quorum_queue), + %% m2 was delivered 2 times + ?assertEqual(QQDelivered1 + 2, QQDelivered2), + %% m2 was re-delivered 1 time + ?assertEqual(QQRedelivered1 + 1, QQRedelivered2), + %% Releasing a message shouldn't count as acknowledged. + ?assertEqual(QQAcknowledged1, QQAcknowledged2), + ok = amqp10_client:accept_msg(QQReceiver, Msg2b), + + %% Server closes the link endpoint due to some AMQP 1.0 external condition: + %% In this test, the external condition is that an AMQP 0.9.1 client deletes the queue. + %% Gauges for publishers and consumers should be decremented. + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QQ}), + ExpectedError = #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_RESOURCE_DELETED}, + receive {amqp10_event, {link, QQSender, {detached, ExpectedError}}} -> ok + after 5000 -> ct:fail("server did not close our sending link") + end, + receive {amqp10_event, {link, QQReceiver, {detached, ExpectedError}}} -> ok + after 5000 -> ct:fail("server did not close our receiving link") + end, + ?assertMatch(#{publishers := 1, + consumers := 1}, + get_global_counters(Config)), + + %% Gauges for publishers and consumers should also be decremented for normal link detachments. + ok = detach_link_sync(CQSender), + ok = detach_link_sync(CQReceiver), + ?assertMatch(#{publishers := 0, + consumers := 0}, + get_global_counters(Config)), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = CQ}), + + flush("testing unroutable..."), + %% Send 2 messages to the fanout exchange that has no bound queues. + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender-fanout">>, + rabbitmq_amqp_address:exchange(<<"amq.fanout">>, <<"ignored">>)), + ok = wait_for_credit(Sender), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<3>>, <<"m3">>, true)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<4>>, <<"m4">>, false)), + ok = wait_for_settlement(<<4>>, released), + #{messages_unroutable_dropped_total := UnroutableDropped2, + messages_unroutable_returned_total := UnroutableReturned2} = get_global_counters(Config), + %% m3 was dropped + ?assertEqual(UnroutableDropped1 + 1, UnroutableDropped2), + %% m4 was returned + ?assertEqual(UnroutableReturned1 + 1, UnroutableReturned2), + + ok = rabbit_ct_client_helpers:close_channel(Ch), + ok = amqp10_client:detach_link(Sender), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). 
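Aside (illustrative only, not part of the patch): global_counters above compares absolute counter values snapshot by snapshot. A small helper that asserts per-key increments between two snapshots returned by the suite's get_global_counters helpers keeps such comparisons shorter; this is plain map arithmetic with no broker API involved, and the helper name is hypothetical.

%% Assert that After = Before + ExpectedDelta for every key in ExpectedDeltas.
assert_counter_deltas(Before, After, ExpectedDeltas) ->
    maps:foreach(
      fun(Key, Delta) ->
              ?assertEqual(maps:get(Key, Before) + Delta, maps:get(Key, After))
      end, ExpectedDeltas).

For example, the first block of assertions above could be expressed as
assert_counter_deltas(Counters0, Counters1, #{messages_received_total => 2, messages_received_confirm_total => 1, messages_confirmed_total => 1, messages_routed_total => 2, messages_unroutable_dropped_total => 0, messages_unroutable_returned_total => 0}).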
+
+stream_filtering(Config) ->
+    Stream = atom_to_binary(?FUNCTION_NAME),
+    Address = rabbitmq_amqp_address:queue(Stream),
+    Ch = rabbit_ct_client_helpers:open_channel(Config),
+    amqp_channel:call(Ch, #'queue.declare'{
+                             queue = Stream,
+                             durable = true,
+                             arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]}),
+    ok = rabbit_ct_client_helpers:close_channel(Ch),
+
+    OpnConf = connection_config(Config),
+    {ok, Connection} = amqp10_client:open_connection(OpnConf),
+    {ok, Session} = amqp10_client:begin_session(Connection),
+    SenderLinkName = <<"test-sender">>,
+    {ok, Sender} = amqp10_client:attach_sender_link(Session,
+                                                    SenderLinkName,
+                                                    Address),
+    wait_for_credit(Sender),
+
+    %% We are going to publish several waves of messages with and without filter values.
+    %% We will then create subscriptions with various filter options
+    %% and make sure we receive only what we asked for and not all the messages.
+    WaveCount = 1000,
+    %% Logic to publish a wave of messages with or without a filter value.
+    Publish = fun(FilterValue) ->
+                      lists:foreach(fun(Seq) ->
+                                            {AppProps, Anns} =
+                                            case FilterValue of
+                                                undefined ->
+                                                    {#{}, #{}};
+                                                _ ->
+                                                    {#{<<"filter">> => FilterValue},
+                                                     #{<<"x-stream-filter-value">> => FilterValue}}
+                                            end,
+                                            FilterBin = rabbit_data_coercion:to_binary(FilterValue),
+                                            SeqBin = integer_to_binary(Seq),
+                                            DTag = <<FilterBin/binary, SeqBin/binary>>,
+                                            Msg0 = amqp10_msg:new(DTag, <<"my-body">>, false),
+                                            Msg1 = amqp10_msg:set_application_properties(AppProps, Msg0),
+                                            Msg2 = amqp10_msg:set_message_annotations(Anns, Msg1),
+                                            ok = amqp10_client:send_msg(Sender, Msg2),
+                                            ok = wait_for_accepted(DTag)
+                                    end, lists:seq(1, WaveCount))
+              end,
+
+    %% Publish messages with the "apple" filter value.
+    Publish(<<"apple">>),
+    %% Publish messages with no filter value.
+    Publish(undefined),
+    %% Publish messages with the "orange" filter value.
+ Publish(<<"orange">>), + ok = amqp10_client:detach_link(Sender), + + % filtering on "apple" + TerminusDurability = none, + {ok, AppleReceiver} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver-1">>, + Address, + unsettled, + TerminusDurability, + #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + <<"rabbitmq:stream-filter">> => <<"apple">>}), + ok = amqp10_client:flow_link_credit(AppleReceiver, 100, 10), + AppleMessages = receive_all_messages(AppleReceiver, true), + %% we should get less than all the waves combined + ?assert(length(AppleMessages) < WaveCount * 3), + %% client-side filtering + AppleFilteredMessages = lists:filter(fun(Msg) -> + AP = amqp10_msg:application_properties(Msg), + maps:get(<<"filter">>, AP) =:= <<"apple">> + end, AppleMessages), + ?assertEqual(WaveCount, length(AppleFilteredMessages)), + ok = amqp10_client:detach_link(AppleReceiver), + + %% filtering on "apple" and "orange" + {ok, AppleOrangeReceiver} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver-2">>, + Address, + unsettled, + TerminusDurability, + #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + <<"rabbitmq:stream-filter">> => [<<"apple">>, <<"orange">>]}), + ok = amqp10_client:flow_link_credit(AppleOrangeReceiver, 100, 10), + AppleOrangeMessages = receive_all_messages(AppleOrangeReceiver, true), + %% we should get less than all the waves combined + ?assert(length(AppleOrangeMessages) < WaveCount * 3), + %% client-side filtering + AppleOrangeFilteredMessages = lists:filter(fun(Msg) -> + AP = amqp10_msg:application_properties(Msg), + Filter = maps:get(<<"filter">>, AP), + Filter =:= <<"apple">> orelse Filter =:= <<"orange">> + end, AppleOrangeMessages), + ?assertEqual(WaveCount * 2, length(AppleOrangeFilteredMessages)), + ok = amqp10_client:detach_link(AppleOrangeReceiver), + + %% filtering on "apple" and messages without a filter value + {ok, AppleUnfilteredReceiver} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver-3">>, + Address, + unsettled, + TerminusDurability, + #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + <<"rabbitmq:stream-filter">> => <<"apple">>, + <<"rabbitmq:stream-match-unfiltered">> => {boolean, true}}), + ok = amqp10_client:flow_link_credit(AppleUnfilteredReceiver, 100, 10), + + AppleUnfilteredMessages = receive_all_messages(AppleUnfilteredReceiver, true), + %% we should get less than all the waves combined + ?assert(length(AppleUnfilteredMessages) < WaveCount * 3), + %% client-side filtering + AppleUnfilteredFilteredMessages = lists:filter(fun(Msg) -> + AP = amqp10_msg:application_properties(Msg), + not maps:is_key(<<"filter">>, AP) orelse + maps:get(<<"filter">>, AP) =:= <<"apple">> + end, AppleUnfilteredMessages), + ?assertEqual(WaveCount * 2, length(AppleUnfilteredFilteredMessages)), + ok = amqp10_client:detach_link(AppleUnfilteredReceiver), + + ok = delete_queue(Session, Stream), + ok = amqp10_client:close_connection(Connection). + +available_messages_classic_queue(Config) -> + available_messages(<<"classic">>, Config). + +available_messages_quorum_queue(Config) -> + available_messages(<<"quorum">>, Config). + +available_messages_stream(Config) -> + available_messages(<<"stream">>, Config). 
+
+available_messages(QType, Config) ->
+    QName = atom_to_binary(?FUNCTION_NAME),
+    Ch = rabbit_ct_client_helpers:open_channel(Config),
+    #'queue.declare_ok'{} = amqp_channel:call(
+                              Ch, #'queue.declare'{
+                                     queue = QName,
+                                     durable = true,
+                                     arguments = [{<<"x-queue-type">>, longstr, QType}]}),
+
+    %% Attach 1 sender and 1 receiver to the queue.
+    OpnConf = connection_config(Config),
+    {ok, Connection} = amqp10_client:open_connection(OpnConf),
+    {ok, Session} = amqp10_client:begin_session_sync(Connection),
+    Address = rabbitmq_amqp_address:queue(QName),
+    {ok, Sender} = amqp10_client:attach_sender_link(
+                     Session, <<"test-sender">>, Address),
+    ok = wait_for_credit(Sender),
+    {ok, Receiver} = amqp10_client:attach_receiver_link(
+                       Session, <<"test-receiver">>, Address),
+    receive {amqp10_event, {link, Receiver, attached}} -> ok
+    after 5000 -> ct:fail("missing attached")
+    end,
+    flush(receiver_attached),
+
+    ?assertEqual(0, get_available_messages(Receiver)),
+
+    ok = send_messages(Sender, 3, false),
+    %% We know that Streams only return an approximation for available messages.
+    %% The committed Stream offset is queried by chunk ID.
+    %% So, we wait here a bit such that the 4th message goes into its own new chunk.
+    timer:sleep(50),
+    ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"4">>, <<"4">>, false)),
+    ok = wait_for_accepts(4),
+
+    OutputHandle = element(4, Receiver),
+    Flow0 = #'v1_0.flow'{
+               %% Grant 1 credit to the sending queue.
+               link_credit = {uint, 1},
+               %% Request sending queue to send us a FLOW including available messages.
+               echo = true},
+    ok = amqp10_client_session:flow(Session, OutputHandle, Flow0, never),
+    receive_messages(Receiver, 1),
+    receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok
+    after 5000 -> ct:fail({missing_event, ?LINE})
+    end,
+    eventually(?_assertEqual(3, get_available_messages(Receiver))),
+
+    %% Send a FLOW with echo=false and drain=false from client to server.
+    %% Even if the server doesn't reply with a FLOW, our client lib should
+    %% maintain the 'available' variable correctly.
+    ok = amqp10_client:flow_link_credit(Receiver, 1, never, false),
+    receive_messages(Receiver, 1),
+    receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok
+    after 5000 -> ct:fail({missing_event, ?LINE})
+    end,
+    ?assertEqual(2, get_available_messages(Receiver)),
+
+    %% We force the queue to send us a FLOW including available messages
+    %% by granting more credit than there are messages available and drain=true.
+    ok = amqp10_client:flow_link_credit(Receiver, 99, never, true),
+    receive_messages(Receiver, 2),
+    receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok
+    after 5000 -> ct:fail({missing_event, ?LINE})
+    end,
+    ?assertEqual(0, get_available_messages(Receiver)),
+
+    ok = send_messages(Sender, 5000, false),
+    %% We know that Streams only return an approximation for available messages.
+    %% The committed Stream offset is queried by chunk ID.
+    %% So, we wait here a bit such that the last message goes into its own new chunk.
+    timer:sleep(50),
+    ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"last dtag">>, <<"last msg">>, false)),
+    ok = wait_for_accepts(5001),
+
+    Flow1 = #'v1_0.flow'{
+               link_credit = {uint, 0},
+               echo = false},
+    Flow2 = #'v1_0.flow'{
+               link_credit = {uint, 1},
+               echo = true},
+    %% Send both FLOW frames in sequence.
+ ok = amqp10_client_session:flow(Session, OutputHandle, Flow1, never), + ok = amqp10_client_session:flow(Session, OutputHandle, Flow2, never), + receive_messages(Receiver, 1), + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + eventually(?_assertEqual(5000, get_available_messages(Receiver))), + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +incoming_message_interceptors(Config) -> + Key = ?FUNCTION_NAME, + ok = rpc(Config, persistent_term, put, [Key, [{set_header_routing_node, false}, + {set_header_timestamp, false}]]), + Stream = <<"my stream">>, + QQName = <<"my quorum queue">>, + {Connection, Session, LinkPair} = init(Config), + {ok, #{type := <<"stream">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + Stream, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}), + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + QQName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, Stream, <<"amq.fanout">>, <<"ignored">>, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QQName, <<"amq.fanout">>, <<"ignored">>, #{}), + {ok, StreamReceiver} = amqp10_client:attach_receiver_link( + Session, <<"stream receiver">>, rabbitmq_amqp_address:queue(Stream)), + {ok, QQReceiver} = amqp10_client:attach_receiver_link( + Session, <<"qq receiver">>, rabbitmq_amqp_address:queue(QQName)), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"sender">>, rabbitmq_amqp_address:exchange(<<"amq.fanout">>)), + ok = wait_for_credit(Sender), + + NowMillis = os:system_time(millisecond), + Tag = <<"tag">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Tag, <<"body">>)), + ok = wait_for_accepted(Tag), + + {ok, Msg1} = amqp10_client:get_msg(StreamReceiver), + {ok, Msg2} = amqp10_client:get_msg(QQReceiver), + ?assertEqual([<<"body">>], amqp10_msg:body(Msg1)), + ?assertEqual([<<"body">>], amqp10_msg:body(Msg2)), + + Node = atom_to_binary(get_node_config(Config, 0, nodename)), + #{<<"x-routed-by">> := Node, + <<"x-opt-rabbitmq-received-time">> := Millis} = amqp10_msg:message_annotations(Msg1), + ?assertMatch( + #{<<"x-routed-by">> := Node, + <<"x-opt-rabbitmq-received-time">> := Millis}, amqp10_msg:message_annotations(Msg2)), + ?assert(Millis < NowMillis + 4000), + ?assert(Millis > NowMillis - 4000), + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(StreamReceiver), + ok = amqp10_client:detach_link(QQReceiver), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, Stream), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QQName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection), + true = rpc(Config, persistent_term, erase, [Key]). 
+ +trace(Config) -> + Node = atom_to_binary(get_node_config(Config, 0, nodename)), + TraceQ = <<"my trace queue">>, + Q = <<"my queue">>, + Qs = [Q, TraceQ], + RoutingKey = <<"my routing key">>, + Payload = <<"my payload">>, + CorrelationId = <<"my correlation 👀"/utf8>>, + Ch = rabbit_ct_client_helpers:open_channel(Config), + [#'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Q0}) || Q0 <- Qs], + #'queue.bind_ok'{} = amqp_channel:call( + Ch, #'queue.bind'{queue = TraceQ, + exchange = <<"amq.rabbitmq.trace">>, + routing_key = <<"#">>}), + #'queue.bind_ok'{} = amqp_channel:call( + Ch, #'queue.bind'{queue = Q, + exchange = <<"amq.direct">>, + routing_key = RoutingKey}), + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + + %% We expect traced messages for sessions created before and + %% sessions created after tracing is enabled. + {ok, SessionSender} = amqp10_client:begin_session_sync(Connection), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["trace_on"]), + {ok, SessionReceiver} = amqp10_client:begin_session_sync(Connection), + + {ok, Sender} = amqp10_client:attach_sender_link( + SessionSender, + <<"test-sender">>, + rabbitmq_amqp_address:exchange(<<"amq.direct">>, RoutingKey)), + ok = wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link(SessionReceiver, + <<"test-receiver">>, + rabbitmq_amqp_address:queue(Q)), + Msg0 = amqp10_msg:new(<<"tag 1">>, Payload, true), + Msg = amqp10_msg:set_properties(#{correlation_id => CorrelationId}, Msg0), + ok = amqp10_client:send_msg(Sender, Msg), + {ok, _} = amqp10_client:get_msg(Receiver), + + timer:sleep(20), + {#'basic.get_ok'{routing_key = <<"publish.amq.direct">>}, + #amqp_msg{props = #'P_basic'{headers = PublishHeaders}, + payload = Payload}} = + amqp_channel:call(Ch, #'basic.get'{queue = TraceQ}), + ?assertMatch(#{<<"exchange_name">> := <<"amq.direct">>, + <<"routing_keys">> := [RoutingKey], + <<"connection">> := <<"127.0.0.1:", _/binary>>, + <<"node">> := Node, + <<"vhost">> := <<"/">>, + <<"channel">> := 1, + <<"user">> := <<"guest">>, + <<"properties">> := #{<<"correlation_id">> := CorrelationId}, + <<"routed_queues">> := [Q]}, + rabbit_misc:amqp_table(PublishHeaders)), + + {#'basic.get_ok'{routing_key = <<"deliver.", Q/binary>>}, + #amqp_msg{props = #'P_basic'{headers = DeliverHeaders}, + payload = Payload}} = + amqp_channel:call(Ch, #'basic.get'{queue = TraceQ}), + ?assertMatch(#{<<"exchange_name">> := <<"amq.direct">>, + <<"routing_keys">> := [RoutingKey], + <<"connection">> := <<"127.0.0.1:", _/binary>>, + <<"node">> := Node, + <<"vhost">> := <<"/">>, + <<"channel">> := 2, + <<"user">> := <<"guest">>, + <<"properties">> := #{<<"correlation_id">> := CorrelationId}, + <<"redelivered">> := 0}, + rabbit_misc:amqp_table(DeliverHeaders)), + + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["trace_off"]), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag 2">>, Payload, true)), + {ok, _} = amqp10_client:get_msg(Receiver), + timer:sleep(20), + ?assertMatch(#'basic.get_empty'{}, + amqp_channel:call(Ch, #'basic.get'{queue = TraceQ})), + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver), + [delete_queue(SessionSender, Q0) || Q0 <- Qs], + ok = end_session_sync(SessionSender), + ok = end_session_sync(SessionReceiver), + ok = amqp10_client:close_connection(Connection). 
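Aside (illustrative only, not part of the patch): the trace test above binds its trace queue with routing key <<"#">>, so it captures both kinds of firehose events. As its basic.get assertions show, trace messages are published to amq.rabbitmq.trace with routing keys of the form "publish.<exchange>" and "deliver.<queue>", so a binding like the following sketch (hypothetical helper name) would capture only messages entering a given exchange:

%% Bind a trace queue so that it only receives 'publish' firehose events
%% for the given exchange.
bind_publish_trace(Ch, TraceQ, Exchange) ->
    #'queue.bind_ok'{} =
        amqp_channel:call(Ch, #'queue.bind'{
                                 queue = TraceQ,
                                 exchange = <<"amq.rabbitmq.trace">>,
                                 routing_key = <<"publish.", Exchange/binary>>}),
    ok.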
+ +%% https://www.rabbitmq.com/validated-user-id.html +user_id(Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:exchange(<<"amq.direct">>, <<"some-routing-key">>), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + flush(attached), + + Msg1 = amqp10_msg:set_properties(#{user_id => <<"guest">>}, + amqp10_msg:new(<<"t1">>, <<"m1">>, true)), + ok = amqp10_client:send_msg(Sender, Msg1), + receive Unexpected -> ct:fail({received_unexpected_message, Unexpected}) + after 10 -> ok + end, + + Msg2 = amqp10_msg:set_properties(#{user_id => <<"fake user">>}, + amqp10_msg:new(<<"t2">>, <<"m2">>, true)), + ok = amqp10_client:send_msg(Sender, Msg2), + receive + {amqp10_event, + {session, Session, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + description = {utf8, <<"user_id property set to 'fake user' but authenticated user was 'guest'">>}}}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail("did not receive expected error") + end, + + ok = amqp10_client:close_connection(Connection). + +message_ttl(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch), + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"test-receiver">>, Address), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), + wait_for_credit(Sender), + + M1 = amqp10_msg:set_headers(#{ttl => 1}, amqp10_msg:new(<<"t1">>, <<"m1">>, false)), + M2 = amqp10_msg:set_headers(#{ttl => 60 * 1000}, amqp10_msg:new(<<"t2">>, <<"m2">>, false)), + ok = amqp10_client:send_msg(Sender, M1), + ok = amqp10_client:send_msg(Sender, M2), + ok = wait_for_accepts(2), + %% Wait for the first message to expire. + timer:sleep(50), + flush(pre_receive), + ok = amqp10_client:flow_link_credit(Receiver, 2, never, true), + receive {amqp10_msg, Receiver, Msg} -> + ?assertEqual([<<"m2">>], amqp10_msg:body(Msg)) + after 5000 -> ct:fail(delivery_timeout) + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive Unexpected -> ct:fail({received_unexpected_message, Unexpected}) + after 5 -> ok + end, + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver), + ok = delete_queue(Session, QName), + ok = amqp10_client:close_connection(Connection). + +%% For backward compatibility, deployment tools should be able to +%% enable and disable the deprecated no-op AMQP 1.0 plugin. +plugin(Config) -> + Node = 0, + Plugin = rabbitmq_amqp1_0, + %% rabbit/Makefile and rabbit/BUILD.bazel declare a test dependency on the rabbitmq_amqp1_0 plugin. + %% Therefore, we first disable, and then enable. + ?assertEqual(ok, rabbit_ct_broker_helpers:disable_plugin(Config, Node, Plugin)), + ?assertEqual(ok, rabbit_ct_broker_helpers:enable_plugin(Config, Node, Plugin)). + +%% Test that the idle timeout threshold is exceeded on the server +%% when no frames are sent from client to server. 
+idle_time_out_on_server(Config) -> + App = rabbit, + Par = heartbeat, + {ok, DefaultVal} = rpc(Config, application, get_env, [App, Par]), + %% Configure RabbitMQ to use an idle-time-out of 1 second. + ok = rpc(Config, application, set_env, [App, Par, 1]), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Mock the server socket to not have received any bytes. + rabbit_ct_broker_helpers:setup_meck(Config), + Mod = rabbit_net, + ok = rpc(Config, meck, new, [Mod, [no_link, passthrough]]), + ok = rpc(Config, meck, expect, [Mod, getstat, 2, {ok, [{recv_oct, 999}]}]), + %% The server "SHOULD try to gracefully close the connection using a close + %% frame with an error explaining why" [2.4.5]. + %% Since we chose a heartbeat value of 1 second, the server should easily + %% close the connection within 5 seconds. + receive + {amqp10_event, + {connection, Connection, + {closed, + {resource_limit_exceeded, + <<"no frame received from client within idle timeout threshold">>}}}} -> ok + after 5000 -> + ct:fail({missing_event, ?LINE}) + end, + + ?assert(rpc(Config, meck, validate, [Mod])), + ok = rpc(Config, meck, unload, [Mod]), + ok = rpc(Config, application, set_env, [App, Par, DefaultVal]). + +%% Test that the idle timeout threshold is exceeded on the client +%% when no frames are sent from server to client. +idle_time_out_on_client(Config) -> + OpnConf0 = connection_config(Config), + %% Request the server to send us frames every second. + OpnConf = OpnConf0#{idle_time_out => 1000}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + receive Unexpected -> ct:fail({unexpected, Unexpected}) + after 3100 -> ok + end, + ?assert(is_process_alive(Connection)), + %% All good, the server sent us frames every second. + + %% Mock the server to not send anything. + rabbit_ct_broker_helpers:setup_meck(Config), + Mod = rabbit_net, + ok = rpc(Config, meck, new, [Mod, [no_link, passthrough]]), + ok = rpc(Config, meck, expect, [Mod, send, 2, ok]), + + %% Our client should time out within less than 5 seconds given that the + %% idle-time-out is 1 second. + receive + {amqp10_event, + {connection, Connection, + {closed, + {resource_limit_exceeded, + <<"remote idle-time-out">>}}}} -> ok + after 5000 -> + ct:fail({missing_event, ?LINE}) + end, + + ?assert(rpc(Config, meck, validate, [Mod])), + ok = rpc(Config, meck, unload, [Mod]). + +%% Test that RabbitMQ does not support idle timeouts smaller than 1 second. +idle_time_out_too_short(Config) -> + OpnConf0 = connection_config(Config), + OpnConf = OpnConf0#{idle_time_out => 900}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, _}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end. + +rabbit_status_connection_count(Config) -> + %% Close any open AMQP 0.9.1 connections from previous test cases. 
+ ok = rabbit_ct_client_helpers:close_channels_and_connection(Config, 0), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + {ok, String} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["status"]), + ?assertNotEqual(nomatch, string:find(String, "Connection count: 1")), + + ok = amqp10_client:close_connection(Connection). + +handshake_timeout(Config) -> + App = rabbit, + Par = ?FUNCTION_NAME, + {ok, DefaultVal} = rpc(Config, application, get_env, [App, Par]), + ok = rpc(Config, application, set_env, [App, Par, 200]), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + {ok, Socket} = gen_tcp:connect("localhost", Port, [{active, false}]), + ?assertEqual({error, closed}, gen_tcp:recv(Socket, 0, 400)), + ok = rpc(Config, application, set_env, [App, Par, DefaultVal]). + +credential_expires(Config) -> + rabbit_ct_broker_helpers:setup_meck(Config), + Mod = rabbit_auth_backend_internal, + ok = rpc(Config, meck, new, [Mod, [no_link, passthrough]]), + ExpiryTimestamp = os:system_time(second) + 3, + ok = rpc(Config, meck, expect, [Mod, expiry_timestamp, 1, ExpiryTimestamp]), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 2000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Since we don't renew our credential, we expect the server to close our connection. + receive + {amqp10_event, + {connection, Connection, + {closed, + {unauthorized_access, <<"credential expired">>}}}} -> ok + after 10_000 -> + flush(?LINE), + ct:fail({missing_event, ?LINE}) + end, + + ?assert(rpc(Config, meck, validate, [Mod])), + ok = rpc(Config, meck, unload, [Mod]). + +%% Attaching to an exclusive source queue should fail. +attach_to_exclusive_queue(Config) -> + QName = <<"my queue">>, + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = QName, + durable = true, + exclusive = true}), + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:queue(QName), + {ok, _Receiver} = amqp10_client:attach_receiver_link(Session, <<"test-receiver">>, Address), + receive + {amqp10_event, + {session, Session, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_RESOURCE_LOCKED, + description = {utf8, <<"cannot obtain exclusive access to locked " + "queue 'my queue' in vhost '/'">>}}}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + ok = amqp10_client:close_connection(Connection), + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch). + +priority_classic_queue(Config) -> + QArgs = #{<<"x-queue-type">> => {utf8, <<"classic">>}, + <<"x-max-priority">> => {ulong, 10}}, + priority(QArgs, Config). + +priority_quorum_queue(Config) -> + QArgs = #{<<"x-queue-type">> => {utf8, <<"quorum">>}}, + priority(QArgs, Config). 
+ +priority(QArgs, Config) -> + {Connection, Session, LinkPair} = init(Config), + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{arguments => QArgs}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), + wait_for_credit(Sender), + + %% We don't set a priority on Msg1. + %% According to the AMQP spec, the default priority is 4. + Msg1 = amqp10_msg:set_headers( + #{durable => true}, + amqp10_msg:new(<<"t1">>, <<"low prio">>)), + %% Quorum queues implement 2 distinct priority levels. + %% "if 2 distinct priorities are implemented, then levels 0 to 4 are equivalent, + %% and levels 5 to 9 are equivalent and levels 4 and 5 are distinct." [§3.2.1] + %% Therefore, when we set a priority of 5 on Msg2, Msg2 will have a higher priority + %% than the default priority 4 of Msg1. + Msg2 = amqp10_msg:set_headers( + #{priority => 5, + durable => true}, + amqp10_msg:new(<<"t2">>, <<"high prio">>)), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = amqp10_client:send_msg(Sender, Msg2), + ok = wait_for_accepts(2), + flush(accepted), + + %% The high prio Msg2 should overtake the low prio Msg1 and therefore be delivered first. + {ok, Receiver1} = amqp10_client:attach_receiver_link(Session, <<"receiver 1">>, Address, unsettled), + {ok, In1} = amqp10_client:get_msg(Receiver1), + ?assertEqual([<<"high prio">>], amqp10_msg:body(In1)), + ?assertEqual(5, amqp10_msg:header(priority, In1)), + ?assert(amqp10_msg:header(durable, In1)), + ok = amqp10_client:accept_msg(Receiver1, In1), + + {ok, Receiver2} = amqp10_client:attach_receiver_link(Session, <<"receiver 2">>, Address, settled), + {ok, In2} = amqp10_client:get_msg(Receiver2), + ?assertEqual([<<"low prio">>], amqp10_msg:body(In2)), + ?assert(amqp10_msg:header(durable, In2)), + + ok = amqp10_client:detach_link(Receiver1), + ok = amqp10_client:detach_link(Receiver2), + ok = amqp10_client:detach_link(Sender), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +dead_letter_headers_exchange(Config) -> + {Connection, Session, LinkPair} = init(Config), + QName1 = <<"q1">>, + QName2 = <<"q2">>, + {ok, _} = rabbitmq_amqp_client:declare_queue( + LinkPair, + QName1, + #{arguments => #{<<"x-dead-letter-exchange">> => {utf8, <<"amq.headers">>}, + <<"x-message-ttl">> => {ulong, 0}}}), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName2, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName2, <<"amq.headers">>, <<>>, + #{<<"my key">> => {uint, 5}, + <<"x-my key">> => {uint, 6}, + <<"x-match">> => {utf8, <<"all-with-x">>}}), + + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, rabbitmq_amqp_address:queue(QName1)), + wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"my receiver">>, rabbitmq_amqp_address:queue(QName2), settled), + + %% Test M1 with properties section. + M1 = amqp10_msg:set_message_annotations( + #{<<"x-my key">> => 6}, + amqp10_msg:set_properties( + #{message_id => <<"my ID">>}, + amqp10_msg:set_application_properties( + #{<<"my key">> => 5}, + amqp10_msg:new(<<"tag 1">>, <<"m1">>, false)))), + %% Test M2 without properties section. 
+ M2 = amqp10_msg:set_message_annotations( + #{<<"x-my key">> => 6}, + amqp10_msg:set_application_properties( + #{<<"my key">> => 5}, + amqp10_msg:new(<<"tag 2">>, <<"m2">>, false))), + %% M3 should be dropped due to missing x-header. + M3 = amqp10_msg:set_application_properties( + #{<<"my key">> => 5}, + amqp10_msg:new(<<"tag 3">>, <<"m3">>, false)), + %% M4 should be dropped due to missing header. + M4 = amqp10_msg:set_message_annotations( + #{<<"x-my key">> => 6}, + amqp10_msg:new(<<"tag 4">>, <<"m4">>, false)), + + Now = os:system_time(millisecond), + [ok = amqp10_client:send_msg(Sender, M) || M <- [M1, M2, M3, M4]], + ok = wait_for_accepts(4), + flush(accepted), + + ok = amqp10_client:flow_link_credit(Receiver, 4, never), + [Msg1, Msg2] = receive_messages(Receiver, 2), + ?assertEqual(<<"m1">>, amqp10_msg:body_bin(Msg1)), + ?assertEqual(<<"m2">>, amqp10_msg:body_bin(Msg2)), + ?assertEqual(#{message_id => <<"my ID">>}, amqp10_msg:properties(Msg1)), + ?assertEqual(0, maps:size(amqp10_msg:properties(Msg2))), + case rpc(Config, rabbit_feature_flags, is_enabled, [message_containers_deaths_v2]) of + true -> + ?assertMatch( + #{<<"x-first-death-queue">> := QName1, + <<"x-first-death-exchange">> := <<>>, + <<"x-first-death-reason">> := <<"expired">>, + <<"x-last-death-queue">> := QName1, + <<"x-last-death-exchange">> := <<>>, + <<"x-last-death-reason">> := <<"expired">>, + <<"x-opt-deaths">> := {array, + map, + [{map, + [ + {{symbol, <<"queue">>}, {utf8, QName1}}, + {{symbol, <<"reason">>}, {symbol, <<"expired">>}}, + {{symbol, <<"count">>}, {ulong, 1}}, + {{symbol, <<"first-time">>}, {timestamp, Timestamp}}, + {{symbol, <<"last-time">>}, {timestamp, Timestamp}}, + {{symbol, <<"exchange">>},{utf8, <<>>}}, + {{symbol, <<"routing-keys">>}, {array, utf8, [{utf8, QName1}]}} + ]}]} + } when is_integer(Timestamp) andalso + Timestamp > Now - 5000 andalso + Timestamp < Now + 5000, + amqp10_msg:message_annotations(Msg1)); + false -> + ok + end, + + %% We expect M3 and M4 to get dropped. + receive Unexp -> ct:fail({unexpected, Unexp}) + after 10 -> ok + end, + + ok = amqp10_client:detach_link(Receiver), + ok = amqp10_client:detach_link(Sender), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName1), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName2), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). 
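Aside (illustrative only, not part of the patch): dead_letter_headers_exchange above and dead_letter_reject below both inspect the x-first-death-*, x-last-death-* and x-opt-deaths message annotations that RabbitMQ attaches when dead-lettering. A small accessor, with a hypothetical name, keeps such assertions readable:

%% Summarise the dead-lettering annotations of a received AMQP 1.0 message.
death_summary(Msg) ->
    Anns = amqp10_msg:message_annotations(Msg),
    #{first_reason => maps:get(<<"x-first-death-reason">>, Anns),
      first_queue  => maps:get(<<"x-first-death-queue">>, Anns),
      last_reason  => maps:get(<<"x-last-death-reason">>, Anns),
      last_queue   => maps:get(<<"x-last-death-queue">>, Anns)}.

For the expired-then-rejected message asserted below, this would return a map with first_reason and last_reason both set to <<"expired">> and both queue fields set to <<"q1">>.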
+ +dead_letter_reject(Config) -> + {Connection, Session, LinkPair} = init(Config), + QName1 = <<"q1">>, + QName2 = <<"q2">>, + QName3 = <<"q3">>, + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + QName1, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}, + <<"x-message-ttl">> => {ulong, 20}, + <<"x-overflow">> => {utf8, <<"reject-publish">>}, + <<"x-dead-letter-strategy">> => {utf8, <<"at-least-once">>}, + <<"x-dead-letter-exchange">> => {utf8, <<>>}, + <<"x-dead-letter-routing-key">> => {utf8, QName2} + }}), + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + QName2, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}, + <<"x-dead-letter-exchange">> => {utf8, <<>>}, + <<"x-dead-letter-routing-key">> => {utf8, QName3} + }}), + {ok, #{type := <<"classic">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + QName3, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"classic">>}, + <<"x-message-ttl">> => {ulong, 20}, + <<"x-dead-letter-exchange">> => {utf8, <<>>}, + <<"x-dead-letter-routing-key">> => {utf8, QName1} + }}), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"receiver">>, rabbitmq_amqp_address:queue(QName2), unsettled), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"sender">>, rabbitmq_amqp_address:queue(QName1), unsettled), + wait_for_credit(Sender), + Tag = <<"my tag">>, + Body = <<"my body">>, + M = amqp10_msg:new(Tag, Body), + ok = amqp10_client:send_msg(Sender, M), + ok = wait_for_accepted(Tag), + + {ok, Msg1} = amqp10_client:get_msg(Receiver), + ?assertMatch(#{delivery_count := 0}, amqp10_msg:headers(Msg1)), + ok = amqp10_client:settle_msg(Receiver, Msg1, rejected), + + {ok, Msg2} = amqp10_client:get_msg(Receiver), + ?assertMatch(#{delivery_count := 1, + first_acquirer := false}, amqp10_msg:headers(Msg2)), + ok = amqp10_client:settle_msg(Receiver, Msg2, + {modified, true, true, + #{<<"x-opt-thekey">> => <<"val">>}}), + + {ok, Msg3} = amqp10_client:get_msg(Receiver), + ?assertMatch(#{delivery_count := 2, + first_acquirer := false}, amqp10_msg:headers(Msg3)), + ?assertEqual(Body, amqp10_msg:body_bin(Msg3)), + Annotations = amqp10_msg:message_annotations(Msg3), + ?assertMatch( + #{<<"x-opt-thekey">> := <<"val">>, + <<"x-first-death-queue">> := QName1, + <<"x-first-death-exchange">> := <<>>, + <<"x-first-death-reason">> := <<"expired">>, + <<"x-last-death-queue">> := QName1, + <<"x-last-death-exchange">> := <<>>, + <<"x-last-death-reason">> := <<"expired">>}, + Annotations), + %% The array should be ordered by death recency. 
+ {ok, {array, map, [D1, D3, D2]}} = maps:find(<<"x-opt-deaths">>, Annotations), + {map, [ + {{symbol, <<"queue">>}, {utf8, QName1}}, + {{symbol, <<"reason">>}, {symbol, <<"expired">>}}, + {{symbol, <<"count">>}, {ulong, 3}}, + {{symbol, <<"first-time">>}, {timestamp, Ts1}}, + {{symbol, <<"last-time">>}, {timestamp, Ts2}}, + {{symbol, <<"exchange">>},{utf8, <<>>}}, + {{symbol, <<"routing-keys">>}, {array, utf8, [{utf8, QName1}]}} + ]} = D1, + {map, [ + {{symbol, <<"queue">>}, {utf8, QName2}}, + {{symbol, <<"reason">>}, {symbol, <<"rejected">>}}, + {{symbol, <<"count">>}, {ulong, 2}}, + {{symbol, <<"first-time">>}, {timestamp, Ts3}}, + {{symbol, <<"last-time">>}, {timestamp, Ts4}}, + {{symbol, <<"exchange">>},{utf8, <<>>}}, + {{symbol, <<"routing-keys">>}, {array, utf8, [{utf8, QName2}]}} + ]} = D2, + {map, [ + {{symbol, <<"queue">>}, {utf8, QName3}}, + {{symbol, <<"reason">>}, {symbol, <<"expired">>}}, + {{symbol, <<"count">>}, {ulong, 2}}, + {{symbol, <<"first-time">>}, {timestamp, Ts5}}, + {{symbol, <<"last-time">>}, {timestamp, Ts6}}, + {{symbol, <<"exchange">>},{utf8, <<>>}}, + {{symbol, <<"routing-keys">>}, {array, utf8, [{utf8, QName3}]}} + ]} = D3, + ?assertEqual([Ts1, Ts3, Ts5, Ts4, Ts6, Ts2], + lists:sort([Ts1, Ts2, Ts3, Ts4, Ts5, Ts6])), + ok = amqp10_client:settle_msg(Receiver, Msg3, accepted), + + ok = amqp10_client:detach_link(Receiver), + ok = amqp10_client:detach_link(Sender), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName1), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName2), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName3), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +dead_letter_reject_message_order_classic_queue(Config) -> + dead_letter_reject_message_order(<<"classic">>, Config). + +dead_letter_reject_message_order_quorum_queue(Config) -> + dead_letter_reject_message_order(<<"quorum">>, Config). + +dead_letter_reject_message_order(QType, Config) -> + {Connection, Session, LinkPair} = init(Config), + QName1 = <<"q1">>, + QName2 = <<"q2">>, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + QName1, + #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-dead-letter-exchange">> => {utf8, <<>>}, + <<"x-dead-letter-routing-key">> => {utf8, QName2} + }}), + %% We don't care about the target dead letter queue type. 
+ {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName2, #{}), + + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"sender">>, rabbitmq_amqp_address:queue(QName1), unsettled), + wait_for_credit(Sender), + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, <<"receiver 1">>, rabbitmq_amqp_address:queue(QName1), unsettled), + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, <<"receiver 2">>, rabbitmq_amqp_address:queue(QName2), unsettled), + + [begin + Bin = integer_to_binary(N), + Msg = amqp10_msg:new(Bin, Bin, true), + ok = amqp10_client:send_msg(Sender, Msg) + end || N <- lists:seq(1, 5)], + + {ok, Msg1} = amqp10_client:get_msg(Receiver1), + {ok, Msg2} = amqp10_client:get_msg(Receiver1), + {ok, _Msg3} = amqp10_client:get_msg(Receiver1), + {ok, Msg4} = amqp10_client:get_msg(Receiver1), + {ok, Msg5} = amqp10_client:get_msg(Receiver1), + assert_messages(QName1, 5, 5, Config), + + %% Reject messages in the following order: 2, 3, 4, 1, 5 + ok = amqp10_client_session:disposition( + Receiver1, + amqp10_msg:delivery_id(Msg2), + amqp10_msg:delivery_id(Msg4), + true, + rejected), + ok = amqp10_client_session:disposition( + Receiver1, + amqp10_msg:delivery_id(Msg1), + amqp10_msg:delivery_id(Msg5), + true, + rejected), + + assert_messages(QName1, 0, 0, Config), + %% All 5 messages should be in the dead letter queue. + assert_messages(QName2, 5, 0, Config), + + {ok, MsgDead2} = amqp10_client:get_msg(Receiver2), + {ok, MsgDead3} = amqp10_client:get_msg(Receiver2), + {ok, MsgDead4} = amqp10_client:get_msg(Receiver2), + {ok, MsgDead1} = amqp10_client:get_msg(Receiver2), + {ok, MsgDead5} = amqp10_client:get_msg(Receiver2), + assert_messages(QName2, 5, 5, Config), + + %% Messages should be dead lettered in the order we rejected. + ?assertEqual(<<"2">>, amqp10_msg:body_bin(MsgDead2)), + ?assertEqual(<<"3">>, amqp10_msg:body_bin(MsgDead3)), + ?assertEqual(<<"4">>, amqp10_msg:body_bin(MsgDead4)), + ?assertEqual(<<"1">>, amqp10_msg:body_bin(MsgDead1)), + ?assertEqual(<<"5">>, amqp10_msg:body_bin(MsgDead5)), + + %% Accept all messages in the dead letter queue. + ok = amqp10_client_session:disposition( + Receiver2, + amqp10_msg:delivery_id(MsgDead2), + amqp10_msg:delivery_id(MsgDead5), + true, + accepted), + assert_messages(QName2, 0, 0, Config), + + ok = amqp10_client:detach_link(Receiver1), + ok = amqp10_client:detach_link(Receiver2), + ok = amqp10_client:detach_link(Sender), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName1), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName2), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +dead_letter_reject_many_message_order_classic_queue(Config) -> + dead_letter_reject_many_message_order(<<"classic">>, Config). + +dead_letter_reject_many_message_order_quorum_queue(Config) -> + dead_letter_reject_many_message_order(<<"quorum">>, Config). + +dead_letter_reject_many_message_order(QType, Config) -> + {Connection, Session, LinkPair} = init(Config), + QName1 = <<"q1">>, + QName2 = <<"q2">>, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + QName1, + #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-dead-letter-exchange">> => {utf8, <<>>}, + <<"x-dead-letter-routing-key">> => {utf8, QName2} + }}), + %% We don't care about the target dead letter queue type. 
+ {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName2, #{}), + + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"sender">>, rabbitmq_amqp_address:queue(QName1), unsettled), + wait_for_credit(Sender), + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, <<"receiver 1">>, rabbitmq_amqp_address:queue(QName1), unsettled), + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, <<"receiver 2">>, rabbitmq_amqp_address:queue(QName2), unsettled), + + Num = 100, + Bins = [integer_to_binary(N) || N <- lists:seq(1, Num)], + [begin + Msg = amqp10_msg:new(Bin, Bin, true), + ok = amqp10_client:send_msg(Sender, Msg) + end || Bin <- Bins], + + ok = amqp10_client:flow_link_credit(Receiver1, Num, never), + Msgs = receive_messages(Receiver1, Num), + [begin + {ExpectedBody, Msg} = Elem, + ?assertEqual(ExpectedBody, amqp10_msg:body_bin(Msg)) + end || Elem <- lists:zip(Bins, Msgs)], + assert_messages(QName1, Num, Num, Config), + + %% Accept the 2nd message. + ok = amqp10_client:accept_msg(Receiver1, lists:nth(2, Msgs)), + %% Reject all other messages. + %% Here, we intentionally settle a range larger than the number of unacked messages. + ok = amqp10_client_session:disposition( + Receiver1, + amqp10_msg:delivery_id(hd(Msgs)), + amqp10_msg:delivery_id(lists:last(Msgs)), + true, + rejected), + + assert_messages(QName1, 0, 0, Config), + assert_messages(QName2, Num - 1, 0, Config), + + ok = amqp10_client:flow_link_credit(Receiver2, Num, never), + DeadLetteredMsgs = receive_messages(Receiver2, Num - 1), + %% Messages should be dead lettered in the order we rejected. + ExpectedBodies = [hd(Bins) | lists:nthtail(2, Bins)], + [begin + {ExpectedBody, Msg} = Elem, + ?assertEqual(ExpectedBody, amqp10_msg:body_bin(Msg)) + end || Elem <- lists:zip(ExpectedBodies, DeadLetteredMsgs)], + assert_messages(QName2, Num - 1, Num - 1, Config), + + %% Accept the 10th message in the dead letter queue. + ok = amqp10_client:accept_msg(Receiver2, lists:nth(10, DeadLetteredMsgs)), + assert_messages(QName2, Num - 2, Num - 2, Config), + %% Accept all other messages. + %% Here, we intentionally settle a range larger than the number of unacked messages. + ok = amqp10_client_session:disposition( + Receiver2, + amqp10_msg:delivery_id(hd(DeadLetteredMsgs)), + amqp10_msg:delivery_id(lists:last(DeadLetteredMsgs)), + true, + accepted), + assert_messages(QName2, 0, 0, Config), + + ok = amqp10_client:detach_link(Receiver1), + ok = amqp10_client:detach_link(Receiver2), + ok = amqp10_client:detach_link(Sender), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName1), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName2), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% Dead letter from a quorum queue into a stream. 
+dead_letter_into_stream(Config) -> + {Connection0, Session0, LinkPair0} = init(0, Config), + {Connection1, Session1, LinkPair1} = init(1, Config), + QName0 = <<"q0">>, + QName1 = <<"q1">>, + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair0, + QName0, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}, + <<"x-quorum-initial-group-size">> => {ulong, 1}, + <<"x-dead-letter-exchange">> => {utf8, <<>>}, + <<"x-dead-letter-routing-key">> => {utf8, QName1} + }}), + {ok, #{type := <<"stream">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair1, + QName1, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}, + <<"x-initial-cluster-size">> => {ulong, 1} + }}), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session1, <<"receiver">>, <<"/amq/queue/", QName1/binary>>, + settled, configuration, + #{<<"rabbitmq:stream-offset-spec">> => <<"first">>}), + {ok, Sender} = amqp10_client:attach_sender_link( + Session0, <<"sender">>, rabbitmq_amqp_address:queue(QName0)), + wait_for_credit(Sender), + Ttl = 10, + M = amqp10_msg:set_headers( + #{durable => true, + ttl => Ttl}, + amqp10_msg:new(<<"tag">>, <<"msg">>, true)), + Now = os:system_time(millisecond), + ok = amqp10_client:send_msg(Sender, M), + + {ok, Msg} = amqp10_client:get_msg(Receiver), + ?assertEqual(<<"msg">>, amqp10_msg:body_bin(Msg)), + ?assertMatch( + #{<<"x-first-death-queue">> := QName0, + <<"x-first-death-exchange">> := <<>>, + <<"x-first-death-reason">> := <<"expired">>, + <<"x-last-death-queue">> := QName0, + <<"x-last-death-exchange">> := <<>>, + <<"x-last-death-reason">> := <<"expired">>, + <<"x-opt-deaths">> := {array, + map, + [{map, + [ + {{symbol, <<"ttl">>}, {uint, Ttl}}, + {{symbol, <<"queue">>}, {utf8, QName0}}, + {{symbol, <<"reason">>}, {symbol, <<"expired">>}}, + {{symbol, <<"count">>}, {ulong, 1}}, + {{symbol, <<"first-time">>}, {timestamp, Timestamp}}, + {{symbol, <<"last-time">>}, {timestamp, Timestamp}}, + {{symbol, <<"exchange">>},{utf8, <<>>}}, + {{symbol, <<"routing-keys">>}, {array, utf8, [{utf8, QName0}]}} + ]}]} + } when is_integer(Timestamp) andalso + Timestamp > Now - 5000 andalso + Timestamp < Now + 5000, + amqp10_msg:message_annotations(Msg)), + + ok = amqp10_client:detach_link(Receiver), + ok = amqp10_client:detach_link(Sender), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair0, QName0), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair1, QName1), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair0), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair1), + ok = end_session_sync(Session0), + ok = end_session_sync(Session1), + ok = amqp10_client:close_connection(Connection0), + ok = amqp10_client:close_connection(Connection1). + +accept_multiple_message_order_classic_queue(Config) -> + accept_multiple_message_order(<<"classic">>, Config). + +accept_multiple_message_order_quorum_queue(Config) -> + accept_multiple_message_order(<<"quorum">>, Config). 
+ +accept_multiple_message_order(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address, settled), + ok = wait_for_credit(Sender), + [begin + Bin = integer_to_binary(N), + Msg = amqp10_msg:new(Bin, Bin, true), + ok = amqp10_client:send_msg(Sender, Msg) + end || N <- lists:seq(1, 5)], + ok = amqp10_client:detach_link(Sender), + assert_messages(QName, 5, 0, Config), + + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, QName, unsettled), + {ok, Msg1} = amqp10_client:get_msg(Receiver), + {ok, Msg2} = amqp10_client:get_msg(Receiver), + {ok, _Msg3} = amqp10_client:get_msg(Receiver), + {ok, Msg4} = amqp10_client:get_msg(Receiver), + {ok, Msg5} = amqp10_client:get_msg(Receiver), + assert_messages(QName, 5, 5, Config), + + %% Accept messages out of order. + ok = amqp10_client_session:disposition( + Receiver, + amqp10_msg:delivery_id(Msg2), + amqp10_msg:delivery_id(Msg4), + true, + accepted), + assert_messages(QName, 2, 2, Config), + + ok = amqp10_client:accept_msg(Receiver, Msg5), + assert_messages(QName, 1, 1, Config), + + ok = amqp10_client:accept_msg(Receiver, Msg1), + assert_messages(QName, 0, 0, Config), + + ok = amqp10_client:detach_link(Receiver), + ?assertMatch({ok, #{message_count := 0}}, rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +release_multiple_message_order_classic_queue(Config) -> + release_multiple_message_order(<<"classic">>, Config). + +release_multiple_message_order_quorum_queue(Config) -> + release_multiple_message_order(<<"quorum">>, Config). + +release_multiple_message_order(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address, settled), + ok = wait_for_credit(Sender), + [begin + Bin = integer_to_binary(N), + Msg = amqp10_msg:new(Bin, Bin, true), + ok = amqp10_client:send_msg(Sender, Msg) + end || N <- lists:seq(1, 4)], + ok = amqp10_client:detach_link(Sender), + assert_messages(QName, 4, 0, Config), + + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, QName, unsettled), + {ok, Msg1} = amqp10_client:get_msg(Receiver), + {ok, Msg2} = amqp10_client:get_msg(Receiver), + {ok, Msg3} = amqp10_client:get_msg(Receiver), + {ok, Msg4} = amqp10_client:get_msg(Receiver), + assert_messages(QName, 4, 4, Config), + + %% Release messages out of order. + ok = amqp10_client_session:disposition( + Receiver, + amqp10_msg:delivery_id(Msg2), + amqp10_msg:delivery_id(Msg3), + true, + released), + %% Both messages should be requeued and redelivered. 
+ assert_messages(QName, 4, 2, Config), + + {ok, Msg2b} = amqp10_client:get_msg(Receiver), + {ok, Msg3b} = amqp10_client:get_msg(Receiver), + assert_messages(QName, 4, 4, Config), + ?assertEqual([<<"2">>], amqp10_msg:body(Msg2b)), + ?assertEqual([<<"3">>], amqp10_msg:body(Msg3b)), + + ok = amqp10_client_session:disposition( + Receiver, + amqp10_msg:delivery_id(Msg4), + amqp10_msg:delivery_id(Msg3b), + true, + accepted), + assert_messages(QName, 1, 1, Config), + + ok = amqp10_client:accept_msg(Receiver, Msg1), + assert_messages(QName, 0, 0, Config), + + ok = amqp10_client:detach_link(Receiver), + ?assertMatch({ok, #{message_count := 0}}, rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + + +%% This test asserts the following §3.2 requirement: +%% "The bare message is immutable within the AMQP network. That is, none of the sections can be +%% changed by any node acting as an AMQP intermediary. If a section of the bare message is +%% omitted, one MUST NOT be inserted by an intermediary. The exact encoding of sections of the +%% bare message MUST NOT be modified. This preserves message hashes, HMACs and signatures based +%% on the binary encoding of the bare message." +immutable_bare_message(Config) -> + footer_checksum(crc32, Config), + footer_checksum(adler32, Config). + +footer_checksum(FooterOpt, Config) -> + ExpectedKey = case FooterOpt of + crc32 -> <<"x-opt-crc-32">>; + adler32 -> <<"x-opt-adler-32">> + end, + + {Connection, Session, LinkPair} = init(Config), + QName = atom_to_binary(FooterOpt), + Addr = rabbitmq_amqp_address:queue(QName), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + RecvAttachArgs = #{name => <<"my receiver">>, + role => {receiver, #{address => Addr, + durable => configuration}, self()}, + snd_settle_mode => settled, + rcv_settle_mode => first, + filter => #{}, + footer_opt => FooterOpt}, + SndAttachArgs = #{name => <<"my sender">>, + role => {sender, #{address => Addr, + durable => configuration}}, + snd_settle_mode => settled, + rcv_settle_mode => first, + footer_opt => FooterOpt}, + {ok, Receiver} = amqp10_client:attach_link(Session, RecvAttachArgs), + {ok, Sender} = amqp10_client:attach_link(Session, SndAttachArgs), + wait_for_credit(Sender), + + Now = erlang:system_time(millisecond), + %% with properties and application-properties + M1 = amqp10_msg:set_headers( + #{durable => true, + priority => 7, + ttl => 100_000}, + amqp10_msg:set_delivery_annotations( + #{"a" => "b"}, + amqp10_msg:set_message_annotations( + #{"x-string" => "string-value", + "x-int" => 3, + "x-bool" => true}, + amqp10_msg:set_properties( + #{message_id => {ulong, 999}, + user_id => <<"guest">>, + to => Addr, + subject => <<"high prio">>, + reply_to => rabbitmq_amqp_address:queue(<<"a">>), + correlation_id => <<"correlation">>, + content_type => <<"text/plain">>, + content_encoding => <<"some encoding">>, + absolute_expiry_time => Now + 100_000, + creation_time => Now, + group_id => <<"my group ID">>, + group_sequence => 16#ff_ff_ff_ff, + reply_to_group_id => <<"other group ID">>}, + amqp10_msg:set_application_properties( + #{"string" => "string-val", + "int" => 2, + "true" => true, + "false" => false}, + amqp10_msg:new(<<"t1">>, <<"m1">>)))))), + ok = amqp10_client:send_msg(Sender, M1), + ok = wait_for_accepted(<<"t1">>), + + {ok, Msg1} = amqp10_client:get_msg(Receiver), + ?assertEqual(<<"m1">>, 
amqp10_msg:body_bin(Msg1)), + + %% without properties + M2 = amqp10_msg:set_application_properties( + #{"string" => "string-val", + "int" => 2, + "true" => true, + "false" => false}, + amqp10_msg:new(<<"t2">>, <<"m2">>)), + ok = amqp10_client:send_msg(Sender, M2), + ok = wait_for_accepted(<<"t2">>), + + {ok, Msg2} = amqp10_client:get_msg(Receiver), + ?assertEqual(<<"m2">>, amqp10_msg:body_bin(Msg2)), + + %% bare message consists of single data section + M3 = amqp10_msg:new(<<"t3">>, <<"m3">>), + ok = amqp10_client:send_msg(Sender, M3), + ok = wait_for_accepted(<<"t3">>), + + {ok, Msg3} = amqp10_client:get_msg(Receiver), + ?assertEqual(<<"m3">>, amqp10_msg:body_bin(Msg3)), + + %% bare message consists of multiple data sections + M4 = amqp10_msg:new(<<"t4">>, [#'v1_0.data'{content = <<"m4 a">>}, + #'v1_0.data'{content = <<"m4 b">>}]), + ok = amqp10_client:send_msg(Sender, M4), + ok = wait_for_accepted(<<"t4">>), + + {ok, Msg4} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m4 a">>, <<"m4 b">>], amqp10_msg:body(Msg4)), + + %% bare message consists of multiple sequence sections + M5 = amqp10_msg:new(<<"t5">>, + [#'v1_0.amqp_sequence'{content = [{ubyte, 255}]}, + %% Our serialiser uses 2 byte boolean encoding + #'v1_0.amqp_sequence'{content = [{boolean, true}, {boolean, false}]}, + %% Our serialiser uses 1 byte boolean encoding + #'v1_0.amqp_sequence'{content = [true, false, undefined]}]), + ok = amqp10_client:send_msg(Sender, M5), + ok = wait_for_accepted(<<"t5">>), + + {ok, Msg5} = amqp10_client:get_msg(Receiver), + ?assertEqual([#'v1_0.amqp_sequence'{content = [{ubyte, 255}]}, + %% Our parser returns the Erlang boolean term. + %% However, the important assertion is that RabbitMQ sent us back + %% the bare message unmodified, i.e. that the checksum holds. + #'v1_0.amqp_sequence'{content = [true, false]}, + #'v1_0.amqp_sequence'{content = [true, false, undefined]}], + amqp10_msg:body(Msg5)), + + %% with footer + M6 = amqp10_msg:from_amqp_records( + [#'v1_0.transfer'{delivery_tag = {binary, <<"t6">>}, + settled = false, + message_format = {uint, 0}}, + #'v1_0.properties'{correlation_id = {ulong, 16#ff_ff_ff_ff_ff_ff_ff_ff}}, + #'v1_0.data'{content = <<"m6 a">>}, + #'v1_0.data'{content = <<"m6 b">>}, + #'v1_0.footer'{ + content = [ + {{symbol, <<"x-opt-rabbit">>}, {char, $🐇}}, + {{symbol, <<"x-opt-carrot">>}, {char, $🥕}} + ]}]), + ok = amqp10_client:send_msg(Sender, M6), + ok = wait_for_accepted(<<"t6">>), + + {ok, Msg6} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m6 a">>, <<"m6 b">>], amqp10_msg:body(Msg6)), + ?assertMatch(#{ExpectedKey := _, + <<"x-opt-rabbit">> := $🐇, + <<"x-opt-carrot">> := $🥕}, + amqp10_msg:footer(Msg6)), + + %% We only sanity check here that the footer annotation we received from the server + %% contains a checksum. The AMQP Erlang client library will assert for us that the + %% received checksum matches the actual checksum. + lists:foreach(fun(Msg) -> + Map = amqp10_msg:footer(Msg), + {ok, Checksum} = maps:find(ExpectedKey, Map), + ?assert(is_integer(Checksum)) + end, [Msg1, Msg2, Msg3, Msg4, Msg5, Msg6]), + + ok = amqp10_client:detach_link(Receiver), + ok = amqp10_client:detach_link(Sender), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +receive_many_made_available_over_time_classic_queue(Config) -> + receive_many_made_available_over_time(<<"classic">>, Config). 
+ +receive_many_made_available_over_time_quorum_queue(Config) -> + receive_many_made_available_over_time(<<"quorum">>, Config). + +receive_many_made_available_over_time_stream(Config) -> + receive_many_made_available_over_time(<<"stream">>, Config). + +%% This test grants many credits to the queue once while +%% messages are being made available at the source over time. +receive_many_made_available_over_time(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), + wait_for_credit(Sender), + + %% Send first batch of messages. + ok = send_messages(Sender, 10, false), + ok = wait_for_accepts(10), + Filter = consume_from_first(QType), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"receiver">>, Address, + settled, configuration, Filter), + flush(attached), + %% Grant many credits to the queue once. + ok = amqp10_client:flow_link_credit(Receiver, 5000, never), + %% We expect to receive the first batch of messages. + receive_messages(Receiver, 10), + + %% Make next batch of messages available. + ok = send_messages(Sender, 2990, false), + ok = wait_for_accepts(2990), + %% We expect to receive this batch of messages. + receive_messages(Receiver, 2990), + + %% Make next batch of messages available. + ok = send_messages(Sender, 1999, false), + ok = wait_for_accepts(1999), + %% We expect to receive this batch of messages. + receive_messages(Receiver, 1999), + + %% Make next batch of messages available. + ok = send_messages(Sender, 2, false), + ok = wait_for_accepts(2), + %% At this point, we only have 2 messages in the queue, but only 1 credit left. + ?assertEqual(1, count_received_messages(Receiver)), + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:close_connection(Connection). + +receive_many_auto_flow_classic_queue(Config) -> + receive_many_auto_flow(<<"classic">>, Config). + +receive_many_auto_flow_quorum_queue(Config) -> + receive_many_auto_flow(<<"quorum">>, Config). + +receive_many_auto_flow_stream(Config) -> + receive_many_auto_flow(<<"stream">>, Config). + +receive_many_auto_flow(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), + wait_for_credit(Sender), + + %% Send many messages. + Num = 10_000, + ok = send_messages(Sender, Num, false), + ok = wait_for_accepts(Num), + + Filter = consume_from_first(QType), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"receiver">>, Address, + settled, configuration, Filter), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail(missing_attached) + end, + flush(receiver_attached), + + %% Let's auto top up relatively often, but in large batches. 
+ ok = amqp10_client:flow_link_credit(Receiver, 1300, 1200), + receive_messages(Receiver, Num), + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:detach_link(Receiver), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:close_connection(Connection). + +%% This test ensures that the server sends us TRANSFER and FLOW frames in the correct order +%% even if the server is temporarily not allowed to send us any TRANSFERs due to our session +%% incoming-window being closed. +incoming_window_closed_transfer_flow_order(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName}), + ok = rabbit_ct_client_helpers:close_channel(Ch), + Address = rabbitmq_amqp_address:queue(QName), + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + DTag = <<"my tag">>, + Body = <<"my body">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag, Body, false)), + ok = wait_for_accepted(DTag), + ok = amqp10_client:detach_link(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address, unsettled), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail(missing_attached) + end, + flush(receiver_attached), + + ok = close_incoming_window(Session), + ok = amqp10_client:flow_link_credit(Receiver, 2, never, true), + %% Given our incoming window is closed, we shouldn't receive the TRANSFER yet, and therefore + %% must not yet receive the FLOW that comes thereafter with drain=true, credit=0, and advanced delivery-count. + receive Unexpected -> ct:fail({unexpected, Unexpected}) + after 300 -> ok + end, + + %% Open our incoming window + gen_statem:cast(Session, {flow_session, #'v1_0.flow'{incoming_window = {uint, 5}}}), + %% Important: We should first receive the TRANSFER, + %% and only thereafter the FLOW (and hence the credit_exhausted notification). + receive First -> + {amqp10_msg, Receiver, Msg} = First, + ?assertEqual([Body], amqp10_msg:body(Msg)) + after 5000 -> ct:fail("timeout receiving message") + end, + receive Second -> + ?assertEqual({amqp10_event, {link, Receiver, credit_exhausted}}, Second) + after 5000 -> ct:fail("timeout receiving credit_exhausted") + end, + + ok = delete_queue(Session, QName), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). 
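The incoming_window_closed_* tests above and below juggle two independent AMQP 1.0 flow-control mechanisms at once: the session-level incoming-window and per-link credit. As a reading aid only, a minimal sketch of the two client-side knobs, mirroring calls that already appear in this suite (the flow_session cast and amqp10_client:flow_link_credit/3), might look like:

    %% Sketch for illustration only; mirrors existing calls in this suite.
    %% Session flow control: advertise how many incoming TRANSFER frames we are
    %% willing to accept. A window of 0 stops all deliveries on every link of the session.
    set_incoming_window(Session, Size) ->
        gen_statem:cast(Session, {flow_session, #'v1_0.flow'{incoming_window = {uint, Size}}}).

    %% Link flow control: grant the queue behind Receiver N more deliveries on that link.
    grant_link_credit(Receiver, N) ->
        ok = amqp10_client:flow_link_credit(Receiver, N, never).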
+ +incoming_window_closed_stop_link(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + + OpnConf0 = connection_config(Config), + OpnConf = OpnConf0#{transfer_limit_margin => -1}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t1">>, <<"m1">>, false)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t2">>, <<"m2">>, false)), + ok = wait_for_accepted(<<"t1">>), + ok = wait_for_accepted(<<"t2">>), + ok = amqp10_client:detach_link(Sender), + + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address, unsettled), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail(missing_attached) + end, + flush(receiver_attached), + + ok = close_incoming_window(Session), + %% We first grant a credit in drain mode. + ok = amqp10_client:flow_link_credit(Receiver, 1, never, true), + %% Then, we change our mind and stop the link. + ok = amqp10_client:stop_receiver_link(Receiver), + %% Given our incoming window is closed, we shouldn't receive any TRANSFER. + receive {amqp10_msg, _, _} = Unexp1 -> ct:fail({?LINE, unexpected_msg, Unexp1}) + after 10 -> ok + end, + + %% Open our incoming window + gen_statem:cast(Session, {flow_session, #'v1_0.flow'{incoming_window = {uint, 5}}}), + + %% Since we decreased link credit dynamically, we may or may not receive the 1st message. + receive {amqp10_msg, Receiver, Msg1} -> + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)) + after 500 -> ok + end, + %% We must not receive the 2nd message. + receive {amqp10_msg, _, _} = Unexp2 -> ct:fail({?LINE, unexpected_msg, Unexp2}) + after 5 -> ok + end, + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% Test that we can close a link while our session incoming-window is closed. +incoming_window_closed_close_link(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + + {Connection, Session, LinkPair} = init(Config), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + DTag = <<"my tag">>, + Body = <<"my body">>, + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag, Body, false)), + ok = wait_for_accepted(DTag), + ok = amqp10_client:detach_link(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address, unsettled), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail(missing_attached) + end, + flush(receiver_attached), + + ok = close_incoming_window(Session), + ok = amqp10_client:flow_link_credit(Receiver, 2, never, true), + %% Given our incoming window is closed, we shouldn't receive the TRANSFER yet, and therefore + %% must not yet receive the FLOW that comes thereafter with drain=true, credit=0, and advanced delivery-count. 
+ receive Unexpected1 -> ct:fail({unexpected, Unexpected1}) + after 300 -> ok + end, + %% Close the link while our session incoming-window is closed. + ok = detach_link_sync(Receiver), + %% Open our incoming window. + gen_statem:cast(Session, {flow_session, #'v1_0.flow'{incoming_window = {uint, 5}}}), + %% Given that both endpoints have now destroyed the link, we do not + %% expect to receive any TRANSFER or FLOW frame referencing the destroyed link. + receive Unexpected2 -> ct:fail({unexpected, Unexpected2}) + after 300 -> ok + end, + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +incoming_window_closed_rabbitmq_internal_flow_classic_queue(Config) -> + incoming_window_closed_rabbitmq_internal_flow(<<"classic">>, Config). + +incoming_window_closed_rabbitmq_internal_flow_quorum_queue(Config) -> + incoming_window_closed_rabbitmq_internal_flow(<<"quorum">>, Config). + +incoming_window_closed_rabbitmq_internal_flow(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + %% Send many messages. + Num = 5_000, + ok = send_messages(Sender, Num, false), + ok = wait_for_accepts(Num), + ok = detach_link_sync(Sender), + + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address, settled), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail(missing_attached) + end, + flush(receiver_attached), + + ok = close_incoming_window(Session), + %% Grant all link credit at once. + ok = amqp10_client:flow_link_credit(Receiver, Num, never), + %% Given our incoming window is closed, we shouldn't receive any TRANSFER yet. + receive Unexpected -> ct:fail({unexpected, Unexpected}) + after 200 -> ok + end, + + %% Here, we do a bit of white box testing: We assert that RabbitMQ has some form of internal + %% flow control by checking that the queue did not send all its messages to the server session + %% process. In other words, there should be ready messages in the queue. + MsgsReady = ready_messages(QName, Config), + ?assert(MsgsReady > 0), + + %% Open our incoming window. + gen_statem:cast(Session, {flow_session, #'v1_0.flow'{incoming_window = {uint, Num}}}), + receive_messages(Receiver, Num), + + ok = detach_link_sync(Receiver), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +tcp_back_pressure_rabbitmq_internal_flow_classic_queue(Config) -> + tcp_back_pressure_rabbitmq_internal_flow(<<"classic">>, Config). + +tcp_back_pressure_rabbitmq_internal_flow_quorum_queue(Config) -> + tcp_back_pressure_rabbitmq_internal_flow(<<"quorum">>, Config). + +%% Test that RabbitMQ can handle clients that do not receive fast enough +%% causing TCP back-pressure to the server. 
RabbitMQ's internal flow control +%% writer proc <- session proc <- queue proc +%% should be able to protect the server by having the queue not send out all messages at once. +tcp_back_pressure_rabbitmq_internal_flow(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + + OpnConf0 = connection_config(Config), + %% We also want to test the code path where large messages are split into multiple transfer frames. + OpnConf = OpnConf0#{max_frame_size => 600}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), + + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + %% Send many messages. + %% The messages should be somewhat large to fill up buffers causing TCP back-pressure. + BodySuffix = binary:copy(<<"x">>, 1000), + Num = 5000, + ok = send_messages(Sender, Num, false, BodySuffix), + ok = wait_for_accepts(Num), + ok = detach_link_sync(Sender), + + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address, settled), + receive {amqp10_event, {link, Receiver, attached}} -> ok + after 5000 -> ct:fail(missing_attached) + end, + flush(receiver_attached), + + {_GenStatemState, + #{reader := ReaderPid, + socket := {tcp, Socket}}} = formatted_state(Session), + + %% Provoke TCP back-pressure from client to server by using very small buffers. + ok = inet:setopts(Socket, [{recbuf, 256}, + {buffer, 256}]), + %% Suspend the receiving client such that it stops reading from its socket + %% causing TCP back-pressure to the server being applied. + true = erlang:suspend_process(ReaderPid), + + ok = amqp10_client:flow_link_credit(Receiver, Num, never), + %% We give the queue time to send messages to the session proc and writer proc. + timer:sleep(1000), + + %% Here, we do a bit of white box testing: We assert that RabbitMQ has some form of internal + %% flow control by checking that the queue sent some but, more importantly, not all its + %% messages to the server session and writer processes. In other words, there should be + %% ready messages in the queue. + MsgsReady = ready_messages(QName, Config), + ?assert(MsgsReady > 0), + ?assert(MsgsReady < Num), + + %% Use large buffers. This will considerably speed up receiving all messages (on Linux). + ok = inet:setopts(Socket, [{recbuf, 65536}, + {buffer, 65536}]), + %% When we resume the receiving client, we expect to receive all messages. + true = erlang:resume_process(ReaderPid), + receive_messages(Receiver, Num), + + ok = detach_link_sync(Receiver), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% internal +%% + +init(Config) -> + init(0, Config). + +init(Node, Config) -> + OpnConf = connection_config(Node, Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), + {Connection, Session, LinkPair}. 
+ +receive_all_messages(Receiver, Accept) -> + receive_all_messages0(Receiver, Accept, []). + +receive_all_messages0(Receiver, Accept, Acc) -> + receive {amqp10_msg, Receiver, Msg} -> + case Accept of + true -> ok = amqp10_client:accept_msg(Receiver, Msg); + false -> ok + end, + receive_all_messages0(Receiver, Accept, [Msg | Acc]) + after 1000 -> + lists:reverse(Acc) + end. + +connection_config(Config) -> + connection_config(0, Config). + +connection_config(Node, Config) -> + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_amqp), + #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, <<"guest">>, <<"guest">>}}. + +flush(Prefix) -> + receive + Msg -> + ct:pal("~p flushed: ~p~n", [Prefix, Msg]), + flush(Prefix) + after 1 -> + ok + end. + +open_and_close_connection(Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail(opened_timeout) + end, + ok = close_connection_sync(Connection). + +% before we can send messages we have to wait for credit from the server +wait_for_credit(Sender) -> + receive + {amqp10_event, {link, Sender, credited}} -> + ok + after 5000 -> + flush("wait_for_credit timed out"), + ct:fail(credited_timeout) + end. + +detach_link_sync(Link) -> + ok = amqp10_client:detach_link(Link), + ok = wait_for_link_detach(Link). + +wait_for_link_detach(Link) -> + receive + {amqp10_event, {link, Link, {detached, normal}}} -> + flush(?FUNCTION_NAME), + ok + after 5000 -> + flush("wait_for_link_detach timed out"), + ct:fail({link_detach_timeout, Link}) + end. + +end_session_sync(Session) -> + ok = amqp10_client:end_session(Session), + ok = wait_for_session_end(Session). + +wait_for_session_end(Session) -> + receive + {amqp10_event, {session, Session, {ended, _}}} -> + flush(?FUNCTION_NAME), + ok + after 5000 -> + flush("wait_for_session_end timed out"), + ct:fail({session_end_timeout, Session}) + end. + +close_connection_sync(Connection) -> + ok = amqp10_client:close_connection(Connection), + ok = wait_for_connection_close(Connection). + +wait_for_connection_close(Connection) -> + receive + {amqp10_event, {connection, Connection, {closed, normal}}} -> + flush(?FUNCTION_NAME), + ok + after 5000 -> + flush("wait_for_connection_close timed out"), + ct:fail({connection_close_timeout, Connection}) + end. + +wait_for_accepted(Tag) -> + wait_for_settlement(Tag, accepted). + +wait_for_settlement(Tag, State) -> + receive + {amqp10_disposition, {State, Tag}} -> + ok + after 5000 -> + flush("wait_for_settlement timed out"), + ct:fail({settled_timeout, Tag}) + end. + +wait_for_accepts(0) -> + ok; +wait_for_accepts(N) -> + receive + {amqp10_disposition,{accepted,_}} -> + wait_for_accepts(N - 1) + after 5000 -> + ct:fail({missing_accepted, N}) + end. + +delete_queue(Session, QName) -> + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync( + Session, <<"delete queue">>), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair). + +create_amqp10_sender(Session, Address) -> + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, Address), + wait_for_credit(Sender), + {ok, Sender}. 
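Taken together, the helpers above capture the handshake every publishing test in this suite performs: attach a sender link, wait for the server to grant link credit, send, then wait for the accepted disposition. A condensed sketch of that pattern (illustrative only, reusing the helpers defined above) is:

    publish_one(Session, Address, Tag, Body) ->
        {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address, unsettled),
        ok = wait_for_credit(Sender),
        ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Tag, Body)),
        ok = wait_for_accepted(Tag),
        ok = amqp10_client:detach_link(Sender).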
+ +drain_queue(Session, Address, N) -> + flush("Before drain_queue"), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver">>, + Address, + settled, + configuration), + ok = amqp10_client:flow_link_credit(Receiver, 1000, never, true), + Msgs = receive_messages(Receiver, N), + flush("after drain"), + ok = amqp10_client:detach_link(Receiver), + {ok, Msgs}. + +receive_messages(Receiver, N) -> + receive_messages0(Receiver, N, []). + +receive_messages0(_Receiver, 0, Acc) -> + lists:reverse(Acc); +receive_messages0(Receiver, N, Acc) -> + receive + {amqp10_msg, Receiver, Msg} -> + receive_messages0(Receiver, N - 1, [Msg | Acc]) + after 5000 -> + ct:fail({timeout, {num_received, length(Acc)}, {num_missing, N}}) + end. + +count_received_messages(Receiver) -> + count_received_messages0(Receiver, 0). + +count_received_messages0(Receiver, Count) -> + receive + {amqp10_msg, Receiver, _Msg} -> + count_received_messages0(Receiver, Count + 1) + after 1000 -> + Count + end. + +send_messages(Sender, Left, Settled) -> + send_messages(Sender, Left, Settled, <<>>). + +send_messages(_, 0, _, _) -> + ok; +send_messages(Sender, Left, Settled, BodySuffix) -> + Bin = integer_to_binary(Left), + Body = <<Bin/binary, BodySuffix/binary>>, + Msg = amqp10_msg:new(Bin, Body, Settled), + case amqp10_client:send_msg(Sender, Msg) of + ok -> + send_messages(Sender, Left - 1, Settled, BodySuffix); + {error, insufficient_credit} -> + ok = wait_for_credit(Sender), + %% The credited event we just processed could have been received some time ago, + %% i.e. we might have 0 credits right now. This happens in the following scenario: + %% 1. We (test case proc) send a message successfully, the client session proc decrements remaining link credit from 1 to 0. + %% 2. The server grants our client session proc new credits. + %% 3. The client session proc sends us (test case proc) a credited event. + %% 4. We didn't even notice that we ran out of credits temporarily. We send the next message, it succeeds, + %% but we do not process the credited event in our mailbox. + %% So, we must be defensive here and assume that the next amqp10_client:send_msg/2 call might return {error, insufficient_credit} + %% again, causing us to genuinely wait for a credited event (instead of just processing an old credited event). + send_messages(Sender, Left, Settled, BodySuffix) + end. + +assert_link_credit_runs_out(_Sender, 0) -> + ct:fail(sufficient_link_credit); +assert_link_credit_runs_out(Sender, Left) -> + Bin = integer_to_binary(Left), + Msg = amqp10_msg:new(Bin, Bin, true), + case amqp10_client:send_msg(Sender, Msg) of + ok -> + assert_link_credit_runs_out(Sender, Left - 1); + {error, insufficient_credit} -> + receive {amqp10_event, {link, Sender, credited}} -> + ct:pal("credited with ~b messages left", [Left]), + assert_link_credit_runs_out(Sender, Left - 1) + after 500 -> + ct:pal("insufficient link credit with ~b messages left", [Left]), + ok + end + end. + +send_messages_with_group_id(Sender, N, GroupId) -> + [begin + Bin = integer_to_binary(I), + Msg0 = amqp10_msg:new(Bin, Bin, true), + Props = #{group_id => GroupId}, + Msg = amqp10_msg:set_properties(Props, Msg0), + ok = amqp10_client:send_msg(Sender, Msg) + end || I <- lists:seq(1, N)]. + +assert_messages(QNameBin, NumTotalMsgs, NumUnackedMsgs, Config) -> + assert_messages(QNameBin, NumTotalMsgs, NumUnackedMsgs, Config, 0).
+ +assert_messages(QNameBin, NumTotalMsgs, NumUnackedMsgs, Config, Node) -> + Vhost = ?config(rmq_vhost, Config), + eventually( + ?_assertEqual( + lists:sort([{messages, NumTotalMsgs}, {messages_unacknowledged, NumUnackedMsgs}]), + begin + {ok, Q} = rpc(Config, Node, rabbit_amqqueue, lookup, [QNameBin, Vhost]), + Infos = rpc(Config, Node, rabbit_amqqueue, info, [Q, [messages, messages_unacknowledged]]), + lists:sort(Infos) + end + ), 500, 5). + +serial_number_increment(S) -> + case S + 1 of + 16#ffffffff + 1 -> 0; + S1 -> S1 + end. + +consume_from_first(<<"stream">>) -> + #{<<"rabbitmq:stream-offset-spec">> => <<"first">>}; +consume_from_first(_) -> + #{}. + +%% Return the formatted state of a gen_server or gen_statem via sys:get_status/1. +%% (sys:get_state/1 is unformatted) +formatted_state(Pid) -> + {status, _, _, L0} = sys:get_status(Pid, 20_000), + L1 = lists:last(L0), + {data, L2} = lists:last(L1), + proplists:get_value("State", L2). + +get_global_counters(Config) -> + get_global_counters0(Config, [{protocol, amqp10}]). + +get_global_counters(Config, QType) -> + get_global_counters0(Config, [{protocol, amqp10}, + {queue_type, QType}]). + +get_global_counters0(Config, Key) -> + Overview = rpc(Config, rabbit_global_counters, overview, []), + maps:get(Key, Overview). + +get_available_messages({link_ref, receiver, Session, OutputHandle}) -> + {status, _Pid, _Mod, [_, _, _, _, Misc]} = sys:get_status(Session), + [State] = [S || {data, [{"State", S}]} <- Misc], + {_StateName, StateData} = State, + {ok, Links} = maps:find(links, StateData), + {ok, Link} = maps:find(OutputHandle, Links), + {ok, Available} = maps:find(available, Link), + Available. + +ready_messages(QName, Config) + when is_binary(QName) -> + {ok, Q} = rpc(Config, rabbit_amqqueue, lookup, [QName, <<"/">>]), + {ok, MsgsReady, _ConsumerCount} = rpc(Config, rabbit_queue_type, stat, [Q]), + ?assert(is_integer(MsgsReady)), + ct:pal("Queue ~s has ~b ready messages.", [QName, MsgsReady]), + MsgsReady. + +ra_name(Q) -> + binary_to_atom(<<"%2F_", Q/binary>>). + +has_local_member(QName) -> + case rabbit_amqqueue:lookup(QName) of + {ok, Q} -> + #{name := StreamId} = amqqueue:get_type_state(Q), + case rabbit_stream_coordinator:local_pid(StreamId) of + {ok, Pid} -> + is_process_alive(Pid); + {error, _} -> + false + end; + {error, _} -> + false + end. + +-spec find_event(Type, Props, Events) -> Ret when + Type :: atom(), + Props :: proplists:proplist(), + Events :: [#event{}], + Ret :: {value, #event{}} | false. + +find_event(Type, Props, Events) when is_list(Props), is_list(Events) -> + lists:search( + fun(#event{type = EventType, props = EventProps}) -> + Type =:= EventType andalso + lists:all( + fun({Key, _Value}) -> + lists:keymember(Key, 1, EventProps) + end, Props) + end, Events). + +close_incoming_window(Session) -> + gen_statem:cast(Session, {flow_session, #'v1_0.flow'{incoming_window = {uint, 0}}}). diff --git a/deps/rabbit/test/amqp_credit_api_v2_SUITE.erl b/deps/rabbit/test/amqp_credit_api_v2_SUITE.erl new file mode 100644 index 000000000000..76a12873e715 --- /dev/null +++ b/deps/rabbit/test/amqp_credit_api_v2_SUITE.erl @@ -0,0 +1,212 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. + +-module(amqp_credit_api_v2_SUITE). + +-compile([export_all, nowarn_export_all]). 
+ +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +all() -> + [ + {group, cluster_size_1} + ]. + +groups() -> + [ + {cluster_size_1, [], + [credit_api_v2]} + ]. + +suite() -> + [ + {timetrap, {minutes, 10}} + ]. + +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config, []). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_Group, Config0) -> + Config = rabbit_ct_helpers:merge_app_env( + Config0, {rabbit, [{forced_feature_flags_on_init, []}]}), + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_Group, Config) -> + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + + +credit_api_v2(Config) -> + %% Feature flag rabbitmq_4.0.0 enables credit API v2. + FeatureFlag = 'rabbitmq_4.0.0', + ?assertNot(rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, FeatureFlag)), + + CQ = <<"classic queue">>, + QQ = <<"quorum queue">>, + CQAddr = rabbitmq_amqp_address:queue(CQ), + QQAddr = rabbitmq_amqp_address:queue(QQ), + + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = CQ}), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{ + queue = QQ, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}), + ok = rabbit_ct_client_helpers:close_channel(Ch), + + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + OpnConf = #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, <<"guest">>, <<"guest">>}}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + + {ok, CQSender} = amqp10_client:attach_sender_link(Session, <<"cq sender">>, CQAddr), + {ok, QQSender} = amqp10_client:attach_sender_link(Session, <<"qq sender">>, QQAddr), + receive {amqp10_event, {link, CQSender, credited}} -> ok + after 5000 -> ct:fail(credited_timeout) + end, + receive {amqp10_event, {link, QQSender, credited}} -> ok + after 5000 -> ct:fail(credited_timeout) + end, + + %% Send 40 messages to each queue. 
+ NumMsgs = 40, + [begin + Bin = integer_to_binary(N), + ok = amqp10_client:send_msg(CQSender, amqp10_msg:new(Bin, Bin, true)), + ok = amqp10_client:send_msg(QQSender, amqp10_msg:new(Bin, Bin, true)) + end || N <- lists:seq(1, NumMsgs)], + ok = amqp10_client:detach_link(CQSender), + ok = amqp10_client:detach_link(QQSender), + + %% Consume with credit API v1 + CQAttachArgs = #{handle => 300, + name => <<"cq receiver 1">>, + role => {receiver, #{address => CQAddr, + durable => configuration}, self()}, + snd_settle_mode => unsettled, + rcv_settle_mode => first, + filter => #{}}, + {ok, CQReceiver1} = amqp10_client:attach_link(Session, CQAttachArgs), + QQAttachArgs = #{handle => 400, + name => <<"qq receiver 1">>, + role => {receiver, #{address => QQAddr, + durable => configuration}, self()}, + snd_settle_mode => unsettled, + rcv_settle_mode => first, + filter => #{}}, + {ok, QQReceiver1} = amqp10_client:attach_link(Session, QQAttachArgs), + + ok = consume_and_accept(10, CQReceiver1), + ok = consume_and_accept(10, QQReceiver1), + + ?assertEqual(ok, rabbit_ct_broker_helpers:enable_feature_flag(Config, FeatureFlag)), + flush(enabled_feature_flag), + + %% Consume with credit API v2 + {ok, CQReceiver2} = amqp10_client:attach_receiver_link( + Session, <<"cq receiver 2">>, CQAddr, unsettled), + {ok, QQReceiver2} = amqp10_client:attach_receiver_link( + Session, <<"qq receiver 2">>, QQAddr, unsettled), + ok = consume_and_accept(10, CQReceiver2), + ok = consume_and_accept(10, QQReceiver2), + + %% Consume via with credit API v1 + ok = consume_and_accept(10, CQReceiver1), + ok = consume_and_accept(10, QQReceiver1), + + %% Detach the credit API v1 links and attach with the same output handle. + ok = detach_sync(CQReceiver1), + ok = detach_sync(QQReceiver1), + {ok, CQReceiver3} = amqp10_client:attach_link(Session, CQAttachArgs), + {ok, QQReceiver3} = amqp10_client:attach_link(Session, QQAttachArgs), + + %% The new links should use credit API v2 + ok = consume_and_accept(10, CQReceiver3), + ok = consume_and_accept(10, QQReceiver3), + + flush(pre_drain), + %% Draining should also work. + ok = amqp10_client:flow_link_credit(CQReceiver3, 10, never, true), + receive {amqp10_event, {link, CQReceiver3, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_credit_exhausted, ?LINE}) + end, + receive Unexpected1 -> ct:fail({unexpected, ?LINE, Unexpected1}) + after 20 -> ok + end, + + ok = amqp10_client:flow_link_credit(QQReceiver3, 10, never, true), + receive {amqp10_event, {link, QQReceiver3, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_credit_exhausted, ?LINE}) + end, + receive Unexpected2 -> ct:fail({unexpected, ?LINE, Unexpected2}) + after 20 -> ok + end, + + ok = detach_sync(CQReceiver2), + ok = detach_sync(QQReceiver2), + ok = detach_sync(CQReceiver3), + ok = detach_sync(QQReceiver3), + ok = amqp10_client:end_session(Session), + receive {amqp10_event, {session, Session, {ended, _}}} -> ok + after 5000 -> ct:fail(missing_ended) + end, + ok = amqp10_client:close_connection(Connection), + receive {amqp10_event, {connection, Connection, {closed, normal}}} -> ok + after 5000 -> ct:fail(missing_closed) + end. + +consume_and_accept(NumMsgs, Receiver) -> + ok = amqp10_client:flow_link_credit(Receiver, NumMsgs, never), + Msgs = receive_messages(Receiver, NumMsgs), + ok = amqp10_client_session:disposition( + Receiver, + amqp10_msg:delivery_id(hd(Msgs)), + amqp10_msg:delivery_id(lists:last(Msgs)), + true, + accepted). + +receive_messages(Receiver, N) -> + receive_messages0(Receiver, N, []). 
+ +receive_messages0(_Receiver, 0, Acc) -> + lists:reverse(Acc); +receive_messages0(Receiver, N, Acc) -> + receive + {amqp10_msg, Receiver, Msg} -> + receive_messages0(Receiver, N - 1, [Msg | Acc]) + after 5000 -> + exit({timeout, {num_received, length(Acc)}, {num_missing, N}}) + end. + +detach_sync(Receiver) -> + ok = amqp10_client:detach_link(Receiver), + receive {amqp10_event, {link, Receiver, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_detached, Receiver}) + end. + +flush(Prefix) -> + receive + Msg -> + ct:pal("~ts flushed: ~p~n", [Prefix, Msg]), + flush(Prefix) + after 1 -> + ok + end. diff --git a/deps/rabbitmq_amqp1_0/test/proxy_protocol_SUITE.erl b/deps/rabbit/test/amqp_proxy_protocol_SUITE.erl similarity index 63% rename from deps/rabbitmq_amqp1_0/test/proxy_protocol_SUITE.erl rename to deps/rabbit/test/amqp_proxy_protocol_SUITE.erl index 489a8f3b02f5..7743af325ffb 100644 --- a/deps/rabbitmq_amqp1_0/test/proxy_protocol_SUITE.erl +++ b/deps/rabbit/test/amqp_proxy_protocol_SUITE.erl @@ -2,91 +2,86 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(proxy_protocol_SUITE). +-module(amqp_proxy_protocol_SUITE). --include_lib("common_test/include/ct.hrl"). +-compile([export_all, nowarn_export_all]). --compile(export_all). +-include_lib("eunit/include/eunit.hrl"). + +-import(rabbit_ct_helpers, [eventually/3]). +-import(rabbit_ct_broker_helpers, [rpc/4]). -define(TIMEOUT, 5000). all() -> - [ - {group, sequential_tests} - ]. - -groups() -> [ - {sequential_tests, [], [ - proxy_protocol_v1, - proxy_protocol_v1_tls, - proxy_protocol_v2_local - ]} + [{group, tests}]. + +groups() -> + [{tests, [shuffle], + [ + v1, + v1_tls, + v2_local + ]} ]. init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, ?MODULE} - ]), - Config2 = rabbit_ct_helpers:merge_app_env(Config1, [ - {rabbit, [ - {proxy_protocol, true} - ]} - ]), - Config3 = rabbit_ct_helpers:set_config(Config2, {rabbitmq_ct_tls_verify, verify_none}), - rabbit_ct_helpers:run_setup_steps(Config3, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). + Config1 = rabbit_ct_helpers:set_config( + Config, + [{rmq_nodename_suffix, ?MODULE}, + {rabbitmq_ct_tls_verify, verify_none}]), + Config2 = rabbit_ct_helpers:merge_app_env( + Config1, + [{rabbit, [{proxy_protocol, true}]}]), + rabbit_ct_helpers:run_setup_steps( + Config2, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config, rabbit_ct_client_helpers:teardown_steps() ++ rabbit_ct_broker_helpers:teardown_steps()). -init_per_group(_, Config) -> Config. -end_per_group(_, Config) -> Config. - init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(Testcase, Config) -> + eventually(?_assertEqual(0, rpc(Config, ets, info, [connection_created, size])), 1000, 10), rabbit_ct_helpers:testcase_finished(Config, Testcase). 
-proxy_protocol_v1(Config) -> +v1(Config) -> Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port, - [binary, {active, false}, {packet, raw}]), + [binary, {active, false}, {packet, raw}]), ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"), [ok = inet:send(Socket, amqp_1_0_frame(FrameType)) - || FrameType <- [header_sasl, sasl_init, header_amqp, open, 'begin']], + || FrameType <- [header_sasl, sasl_init, header_amqp, open]], {ok, _Packet} = gen_tcp:recv(Socket, 0, ?TIMEOUT), - ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, connection_name, []), + ConnectionName = rpc(Config, ?MODULE, connection_name, []), match = re:run(ConnectionName, <<"^192.168.1.1:80 -> 192.168.1.2:81$">>, [{capture, none}]), - gen_tcp:close(Socket), - ok. + ok = gen_tcp:close(Socket). -proxy_protocol_v1_tls(Config) -> +v1_tls(Config) -> app_utils:start_applications([asn1, crypto, public_key, ssl]), Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls), {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port, - [binary, {active, false}, {packet, raw}]), - ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"), + [binary, {active, false}, {packet, raw}]), + ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 82\r\n"), {ok, SslSocket} = ssl:connect(Socket, [{verify, verify_none}], ?TIMEOUT), [ok = ssl:send(SslSocket, amqp_1_0_frame(FrameType)) - || FrameType <- [header_sasl, sasl_init, header_amqp, open, 'begin']], + || FrameType <- [header_sasl, sasl_init, header_amqp, open]], {ok, _Packet} = ssl:recv(SslSocket, 0, ?TIMEOUT), timer:sleep(1000), - ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, connection_name, []), - match = re:run(ConnectionName, <<"^192.168.1.1:80 -> 192.168.1.2:81$">>, [{capture, none}]), - gen_tcp:close(Socket), - ok. + ConnectionName = rpc(Config, ?MODULE, connection_name, []), + match = re:run(ConnectionName, <<"^192.168.1.1:80 -> 192.168.1.2:82$">>, [{capture, none}]), + ok = gen_tcp:close(Socket). -proxy_protocol_v2_local(Config) -> +v2_local(Config) -> ProxyInfo = #{ command => local, version => 2 @@ -96,14 +91,11 @@ proxy_protocol_v2_local(Config) -> [binary, {active, false}, {packet, raw}]), ok = inet:send(Socket, ranch_proxy_header:header(ProxyInfo)), [ok = inet:send(Socket, amqp_1_0_frame(FrameType)) - || FrameType <- [header_sasl, sasl_init, header_amqp, open, 'begin']], + || FrameType <- [header_sasl, sasl_init, header_amqp, open]], {ok, _Packet} = gen_tcp:recv(Socket, 0, ?TIMEOUT), - ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, connection_name, []), + ConnectionName = rpc(Config, ?MODULE, connection_name, []), match = re:run(ConnectionName, <<"^127.0.0.1:\\d+ -> 127.0.0.1:\\d+$">>, [{capture, none}]), - gen_tcp:close(Socket), - ok. - + ok = gen_tcp:close(Socket). %% hex frames to send to have the connection recorded in RabbitMQ %% use wireshark with one of the Java tests to record those @@ -114,9 +106,7 @@ amqp_1_0_frame(header_amqp) -> amqp_1_0_frame(sasl_init) -> hex_frame_to_binary("0000001902010000005341c00c01a309414e4f4e594d4f5553"); amqp_1_0_frame(open) -> - hex_frame_to_binary("0000003f02000000005310c03202a12438306335323662332d653530662d343835352d613564302d336466643738623537633730a1096c6f63616c686f7374"); -amqp_1_0_frame('begin') -> - hex_frame_to_binary("0000002002000000005311c01305405201707fffffff707fffffff700000ffff"). 
+ hex_frame_to_binary("0000003f02000000005310c03202a12438306335323662332d653530662d343835352d613564302d336466643738623537633730a1096c6f63616c686f7374"). hex_frame_to_binary(HexsString) -> Hexs = split(HexsString, []), @@ -135,16 +125,16 @@ connection_name() -> %% hence the retry case retry(fun connection_registered/0, 20) of true -> - Connections = ets:tab2list(connection_created), - {_Key, Values} = lists:nth(1, Connections), + [{_Key, Values}] = ets:tab2list(connection_created), {_, Name} = lists:keyfind(name, 1, Values), Name; false -> - error + ct:fail("not 1 connection registered") end. connection_registered() -> - length(ets:tab2list(connection_created)) > 0. + Size = ets:info(connection_created, size), + Size =:= 1. retry(_Function, 0) -> false; diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE.erl b/deps/rabbit/test/amqp_system_SUITE.erl similarity index 61% rename from deps/rabbitmq_amqp1_0/test/system_SUITE.erl rename to deps/rabbit/test/amqp_system_SUITE.erl index 73204172d98f..e1bf5abea72b 100644 --- a/deps/rabbitmq_amqp1_0/test/system_SUITE.erl +++ b/deps/rabbit/test/amqp_system_SUITE.erl @@ -2,10 +2,10 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(system_SUITE). +-module(amqp_system_SUITE). -include_lib("common_test/include/ct.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). @@ -34,6 +34,7 @@ groups() -> %% TODO at_most_once, reject, redelivery, + released, routing, invalid_routes, auth_failure, @@ -58,37 +59,23 @@ init_per_suite(Config) -> end_per_suite(Config) -> Config. -init_per_group(streams, Config) -> - case rabbit_ct_helpers:is_mixed_versions() of - false -> - Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, Suffix}, - {amqp10_client_library, dotnet} - ]), - rabbit_ct_helpers:run_setup_steps(Config1, [ - fun build_dotnet_test_project/1 - ] ++ - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()); - _ -> - {skip, "stream tests are skipped in mixed mode"} - end; init_per_group(Group, Config) -> Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, Suffix}, - {amqp10_client_library, Group} + {amqp_client_library, Group} ]), GroupSetupStep = case Group of dotnet -> fun build_dotnet_test_project/1; java -> fun build_maven_test_project/1 end, - rabbit_ct_helpers:run_setup_steps(Config1, [ - GroupSetupStep - ] ++ - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). + Config2 = rabbit_ct_helpers:run_setup_steps( + Config1, + [GroupSetupStep] ++ + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + ok = rabbit_ct_broker_helpers:enable_feature_flag(Config2, 'rabbitmq_4.0.0'), + Config2. 
end_per_group(_, Config) -> rabbit_ct_helpers:run_teardown_steps(Config, @@ -103,13 +90,13 @@ end_per_testcase(Testcase, Config) -> build_dotnet_test_project(Config) -> TestProjectDir = filename:join( - [?config(data_dir, Config), "fsharp-tests"]), + [?config(data_dir, Config), "fsharp-tests"]), Ret = rabbit_ct_helpers:exec(["dotnet", "restore"], - [{cd, TestProjectDir}]), + [{cd, TestProjectDir}]), case Ret of {ok, _} -> - rabbit_ct_helpers:set_config(Config, - {dotnet_test_project_dir, TestProjectDir}); + rabbit_ct_helpers:set_config( + Config, {dotnet_test_project_dir, TestProjectDir}); _ -> {skip, "Failed to fetch .NET Core test project dependencies"} end. @@ -131,75 +118,64 @@ build_maven_test_project(Config) -> %% ------------------------------------------------------------------- roundtrip(Config) -> - run(Config, [ - {dotnet, "roundtrip"}, - {java, "RoundTripTest"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "quorum"), + run(Config, [{dotnet, "roundtrip"}, + {java, "RoundTripTest"}]). streams(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), - amqp_channel:call(Ch, #'queue.declare'{queue = <<"stream_q">>, - durable = true, - arguments = [{<<"x-queue-type">>, longstr, "stream"}]}), - run(Config, [ - {dotnet, "streams"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "stream"), + run(Config, [{dotnet, "streams"}]). roundtrip_to_amqp_091(Config) -> - run(Config, [ - {dotnet, "roundtrip_to_amqp_091"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "classic"), + run(Config, [{dotnet, "roundtrip_to_amqp_091"}]). default_outcome(Config) -> - run(Config, [ - {dotnet, "default_outcome"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "classic"), + run(Config, [{dotnet, "default_outcome"}]). no_routes_is_released(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Ch = rabbit_ct_client_helpers:open_channel(Config), amqp_channel:call(Ch, #'exchange.declare'{exchange = <<"no_routes_is_released">>, durable = true}), - run(Config, [ - {dotnet, "no_routes_is_released"} - ]). + run(Config, [{dotnet, "no_routes_is_released"}]). outcomes(Config) -> - run(Config, [ - {dotnet, "outcomes"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "classic"), + run(Config, [{dotnet, "outcomes"}]). fragmentation(Config) -> - run(Config, [ - {dotnet, "fragmentation"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "classic"), + run(Config, [{dotnet, "fragmentation"}]). message_annotations(Config) -> - run(Config, [ - {dotnet, "message_annotations"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "classic"), + run(Config, [{dotnet, "message_annotations"}]). footer(Config) -> - run(Config, [ - {dotnet, "footer"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "classic"), + run(Config, [{dotnet, "footer"}]). data_types(Config) -> - run(Config, [ - {dotnet, "data_types"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "classic"), + run(Config, [{dotnet, "data_types"}]). reject(Config) -> - run(Config, [ - {dotnet, "reject"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "classic"), + run(Config, [{dotnet, "reject"}]). redelivery(Config) -> - run(Config, [ - {dotnet, "redelivery"} - ]). + declare_queue(Config, ?FUNCTION_NAME, "quorum"), + run(Config, [{dotnet, "redelivery"}]). + +released(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "quorum"), + run(Config, [{dotnet, "released"}]). 
routing(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Ch = rabbit_ct_client_helpers:open_channel(Config), + amqp_channel:call(Ch, #'queue.declare'{queue = <<"test">>, + durable = true}), amqp_channel:call(Ch, #'queue.declare'{queue = <<"transient_q">>, durable = false}), amqp_channel:call(Ch, #'queue.declare'{queue = <<"durable_q">>, @@ -210,23 +186,20 @@ routing(Config) -> amqp_channel:call(Ch, #'queue.declare'{queue = <<"stream_q">>, durable = true, arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]}), - amqp_channel:call(Ch, #'queue.declare'{queue = <<"stream_q2">>, - durable = true, - arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]}), amqp_channel:call(Ch, #'queue.declare'{queue = <<"autodel_q">>, auto_delete = true}), - run(Config, [ - {dotnet, "routing"} - ]). + amqp_channel:call(Ch, #'queue.declare'{queue = <<"fanout_q">>, + durable = false}), + amqp_channel:call(Ch, #'queue.bind'{queue = <<"fanout_q">>, + exchange = <<"amq.fanout">> + }), + amqp_channel:call(Ch, #'queue.declare'{queue = <<"direct_q">>, + durable = false}), + amqp_channel:call(Ch, #'queue.bind'{queue = <<"direct_q">>, + exchange = <<"amq.direct">>, + routing_key = <<"direct_q">> + }), -%% TODO: this tests doesn't test anything that the standard routing test -%% already does. We should test stream specific things here like attaching -%% to a given offset -stream_interop_basics(Config) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), - amqp_channel:call(Ch, #'queue.declare'{queue = <<"stream_q">>, - durable = true, - arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]}), run(Config, [ {dotnet, "routing"} ]). @@ -240,7 +213,7 @@ auth_failure(Config) -> run(Config, [ {dotnet, "auth_failure"} ]). access_failure(Config) -> - User = <<"access_failure">>, + User = atom_to_binary(?FUNCTION_NAME), rabbit_ct_broker_helpers:add_user(Config, User, <<"boo">>), rabbit_ct_broker_helpers:set_permissions(Config, User, <<"/">>, <<".*">>, %% configure @@ -250,12 +223,12 @@ access_failure(Config) -> run(Config, [ {dotnet, "access_failure"} ]). access_failure_not_allowed(Config) -> - User = <<"access_failure_not_allowed">>, + User = atom_to_binary(?FUNCTION_NAME), rabbit_ct_broker_helpers:add_user(Config, User, <<"boo">>), run(Config, [ {dotnet, "access_failure_not_allowed"} ]). access_failure_send(Config) -> - User = <<"access_failure_send">>, + User = atom_to_binary(?FUNCTION_NAME), rabbit_ct_broker_helpers:add_user(Config, User, <<"boo">>), rabbit_ct_broker_helpers:set_permissions(Config, User, <<"/">>, <<".*">>, %% configure @@ -265,15 +238,13 @@ access_failure_send(Config) -> run(Config, [ {dotnet, "access_failure_send"} ]). run(Config, Flavors) -> - ClientLibrary = ?config(amqp10_client_library, Config), + ClientLibrary = ?config(amqp_client_library, Config), Fun = case ClientLibrary of - dotnet -> fun run_dotnet_test/2; - java -> fun run_java_test/2 - end, - case proplists:get_value(ClientLibrary, Flavors) of - false -> ok; - TestName -> Fun(Config, TestName) - end. + dotnet -> fun run_dotnet_test/2; + java -> fun run_java_test/2 + end, + {ClientLibrary, TestName} = proplists:lookup(ClientLibrary, Flavors), + Fun(Config, TestName). run_dotnet_test(Config, Method) -> TestProjectDir = ?config(dotnet_test_project_dir, Config), @@ -282,6 +253,7 @@ run_dotnet_test(Config, Method) -> [ {cd, TestProjectDir} ]), + ct:pal("~s: result ~p", [?FUNCTION_NAME, Ret]), {ok, _} = Ret. 
run_java_test(Config, Class) -> @@ -294,3 +266,13 @@ run_java_test(Config, Class) -> ], [{cd, TestProjectDir}]), {ok, _} = Ret. + +declare_queue(Config, Name, Type) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = + amqp_channel:call(Ch, #'queue.declare'{queue = atom_to_binary(Name, utf8), + durable = true, + arguments = [{<<"x-queue-type">>, + longstr, Type}]}), + rabbit_ct_client_helpers:close_channel(Ch), + ok. diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/console/Program.cs b/deps/rabbit/test/amqp_system_SUITE_data/console/Program.cs similarity index 100% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/console/Program.cs rename to deps/rabbit/test/amqp_system_SUITE_data/console/Program.cs diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/console/README.md b/deps/rabbit/test/amqp_system_SUITE_data/console/README.md similarity index 100% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/console/README.md rename to deps/rabbit/test/amqp_system_SUITE_data/console/README.md diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/console/standalone.csproj b/deps/rabbit/test/amqp_system_SUITE_data/console/standalone.csproj similarity index 100% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/console/standalone.csproj rename to deps/rabbit/test/amqp_system_SUITE_data/console/standalone.csproj diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/Program.fs b/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/Program.fs similarity index 77% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/Program.fs rename to deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/Program.fs index cf8fb2e6d9d5..3f322dfbb029 100755 --- a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/Program.fs +++ b/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/Program.fs @@ -48,8 +48,13 @@ module AmqpClient = let s = Session c { Conn = c; Session = s } - let connectWithOpen uri opn = - let c = Connection(Address uri, null, opn, null) + let connectAnon uri = + let c = Connection(Address uri, SaslProfile.Anonymous, null, null) + let s = Session c + { Conn = c; Session = s } + + let connectAnonWithOpen uri opn = + let c = Connection(Address uri, SaslProfile.Anonymous, opn, null) let s = Session c { Conn = c; Session = s } @@ -114,7 +119,7 @@ module Test = ] let testOutcome uri (attach: Attach) (cond: string) = - use ac = connect uri + use ac = connectAnon uri let trySet (mre: AutoResetEvent) = try mre.Set() |> ignore with _ -> () @@ -135,8 +140,8 @@ module Test = let no_routes_is_released uri = // tests that a message sent to an exchange that resolves no routes for the // binding key returns the Released outcome, rather than Accepted - use ac = connect uri - let address = "/exchange/no_routes_is_released" + use ac = connectAnon uri + let address = "/exchanges/no_routes_is_released" let sender = SenderLink(ac.Session, "released-sender", address) let trySet (mre: AutoResetEvent) = try mre.Set() |> ignore with _ -> () @@ -160,8 +165,8 @@ module Test = () let roundtrip uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "roundtrip-q" + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/roundtrip" for body in sampleTypes do let corr = "correlation" new Message(body, @@ -175,9 +180,9 @@ module Test = () let streams uri = - use c = connect uri + use c = connectAnon uri let name = "streams-test" - let address = "/amq/queue/stream_q2" + let address = 
"/queues/streams" let sender = SenderLink(c.Session, name + "-sender" , address) //for body in sampleTypes do let body = "hi"B :> obj @@ -203,9 +208,11 @@ module Test = receiver.SetCredit(100, true) let rtd = receiver.Receive() assertNotNull rtd - assertTrue (rtd.MessageAnnotations.Map.Count = 1) - let (result, _) = rtd.MessageAnnotations.Map.TryGetValue("x-stream-offset") - assertTrue result + assertEqual 3 rtd.MessageAnnotations.Map.Count + assertTrue (rtd.MessageAnnotations.Map.ContainsKey(Symbol "x-stream-offset")) + assertTrue (rtd.MessageAnnotations.Map.ContainsKey(Symbol "x-exchange")) + assertTrue (rtd.MessageAnnotations.Map.ContainsKey(Symbol "x-routing-key")) + assertEqual body rtd.Body assertEqual rtd.Properties.CorrelationId corr receiver.Close() @@ -214,10 +221,11 @@ module Test = open RabbitMQ.Client let roundtrip_to_amqp_091 uri = - use c = connect uri - let q = "roundtrip-091-q" - let corr = "corrlation" - let sender = SenderLink(c.Session, q + "-sender" , q) + use c = connectAnon uri + let q = "roundtrip_to_amqp_091" + let target = "/queues/roundtrip_to_amqp_091" + let corr = "correlation" + let sender = SenderLink(c.Session, q + "-sender" , target) new Message("hi"B, Header = Header(), Properties = new Properties(CorrelationId = corr)) @@ -240,13 +248,13 @@ module Test = assertEqual id corr () - let defaultOutcome uri = + let default_outcome uri = for (defOut, cond, defObj) in ["amqp:accepted:list", null, Accepted() :> Outcome "amqp:rejected:list", null, Rejected() :> Outcome "amqp:released:list", null, Released() :> Outcome] do - let source = new Source(Address = "default_outcome_q", + let source = new Source(Address = "/queues/default_outcome", DefaultOutcome = defObj) let attach = new Attach (Source = source, Target = Target()) @@ -261,7 +269,7 @@ module Test = "amqp:modified:list", null "amqp:madeup:list", "amqp:not-implemented"] do - let source = new Source(Address = "outcomes_q", + let source = new Source(Address = "/queues/outcomes", Outcomes = [| Symbol outcome |]) let attach = new Attach (Source = source, Target = Target()) @@ -271,24 +279,24 @@ module Test = let fragmentation uri = for frameSize, size in - [512u, 512 - 512u, 600 - 512u, 1024 - 1024u, 1024] do + [1024u, 1024 + 1024u, 1100 + 1024u, 2048 + 2048u, 2048] do let addr = Address uri let opn = Open(ContainerId = Guid.NewGuid().ToString(), HostName = addr.Host, ChannelMax = 256us, MaxFrameSize = frameSize) - use c = connectWithOpen uri opn - let sender, receiver = senderReceiver c "test" "framentation-q" + use c = connectAnonWithOpen uri opn + let sender, receiver = senderReceiver c "test" "/queues/fragmentation" let m = new Message(String.replicate size "a") sender.Send m let m' = receive receiver assertEqual (m.Body) (m'.Body) - let messageAnnotations uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "annotations-q" + let message_annotations uri = + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/message_annotations" let ann = MessageAnnotations() let k1 = Symbol "key1" let k2 = Symbol "key2" @@ -300,19 +308,20 @@ module Test = assertEqual m.Body m'.Body assertEqual (m.MessageAnnotations.Descriptor) (m'.MessageAnnotations.Descriptor) - assertEqual 2 (m'.MessageAnnotations.Map.Count) + // our 2 custom annotations + x-exchange + x-routing-key = 4 + assertEqual 4 (m'.MessageAnnotations.Map.Count) assertTrue (m.MessageAnnotations.[k1] = m'.MessageAnnotations.[k1]) assertTrue (m.MessageAnnotations.[k2] = m'.MessageAnnotations.[k2]) let footer uri = 
- use c = connect uri - let sender, receiver = senderReceiver c "test" "footer-q" + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/footer" let footer = Footer() let k1 = Symbol "key1" let k2 = Symbol "key2" footer.[Symbol "key1"] <- "value1" footer.[Symbol "key2"] <- "value2" - let m = new Message("testing annotations", Footer = footer) + let m = new Message("testing footer", Footer = footer) sender.Send m let m' = receive receiver @@ -322,9 +331,9 @@ module Test = assertTrue (m.Footer.[k1] = m'.Footer.[k1]) assertTrue (m.Footer.[k2] = m'.Footer.[k2]) - let datatypes uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "datatypes-q" + let data_types uri = + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/data_types" let aSeq = amqpSequence sampleTypes (new Message(aSeq)) |> sender.Send let rtd = receive receiver @@ -333,86 +342,81 @@ module Test = List.exists ((=) a) sampleTypes |> assertTrue let reject uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "reject-q" + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/reject" new Message "testing reject" |> sender.Send let m = receiver.Receive() receiver.Reject(m) assertEqual null (receiver.Receive(TimeSpan.FromMilliseconds 100.)) let redelivery uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "redelivery-q" + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/redelivery" new Message "testing redelivery" |> sender.Send let m = receiver.Receive() assertTrue (m.Header.FirstAcquirer) - receiver.Close() c.Session.Close() + let session = Session(c.Conn) - let receiver = ReceiverLink(session, "test-receiver", "redelivery-q") + let receiver = ReceiverLink(session, "test-receiver", "/queues/redelivery") let m' = receive receiver assertEqual (m.Body :?> string) (m'.Body :?> string) assertTrue (not m'.Header.FirstAcquirer) + assertEqual 1u (m'.Header.DeliveryCount) assertEqual null (receiver.Receive(TimeSpan.FromMilliseconds 100.)) session.Close() + let released uri = + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/released" + new Message "testing released" |> sender.Send + let m = receiver.Receive() + assertTrue (m.Header.FirstAcquirer) + receiver.SetCredit(0, false) + receiver.Release m + + let m' = receive receiver + assertEqual (m.Body :?> string) (m'.Body :?> string) + assertTrue (not m'.Header.FirstAcquirer) + assertEqual 0u (m'.Header.DeliveryCount) + assertEqual null (receiver.Receive(TimeSpan.FromMilliseconds 100.)) + c.Session.Close() + let routing uri = - for target, source, routingKey, succeed in - ["/queue/test", "test", "", true - "test", "/queue/test", "", true - "test", "test", "", true - - "/topic/a.b.c.d", "/topic/#.c.*", "", true - "/exchange/amq.topic", "/topic/#.c.*", "a.b.c.d", true - "/topic/w.x.y.z", "/exchange/amq.topic/#.y.*", "", true - "/exchange/amq.topic", "/exchange/amq.topic/#.y.*", "w.x.y.z", true - - "/exchange/amq.fanout", "/exchange/amq.fanout/", "", true - "/exchange/amq.direct", "/exchange/amq.direct/", "", true - "/exchange/amq.direct", "/exchange/amq.direct/a", "a", true - - (* FIXME: The following three tests rely on the queue "test" - * created by previous tests in this function. 
*) - "/queue/test", "/amq/queue/test", "", true - "/amq/queue/test", "/queue/test", "", true - "/amq/queue/test", "/amq/queue/test", "", true - - (* The following tests verify that a queue created out-of-band - * in AMQP is reachable from the AMQP 1.0 world. Queues are created - * from the common_test suite. *) - "/amq/queue/transient_q", "/amq/queue/transient_q", "", true - "/amq/queue/durable_q", "/amq/queue/durable_q", "", true - "/amq/queue/quorum_q", "/amq/queue/quorum_q", "", true - "/amq/queue/stream_q", "/amq/queue/stream_q", "", true - "/amq/queue/autodel_q", "/amq/queue/autodel_q", "", true] do + for target, source, toProp in + [ + "/queues/test", "/queues/test", "" + "/exchanges/amq.fanout", "/queues/fanout_q", "" + "/exchanges/amq.direct/direct_q", "/queues/direct_q", "" + null, "/queues/direct_q", "/exchanges/amq.direct/direct_q" + "/queues/transient_q", "/queues/transient_q", "" + "/queues/durable_q", "/queues/durable_q", "" + "/queues/quorum_q", "/queues/quorum_q", "" + "/queues/stream_q", "/queues/stream_q", "" + "/queues/autodel_q", "/queues/autodel_q", ""] do let rnd = Random() - use c = connect uri + use c = connectAnon uri let sender = SenderLink(c.Session, "test-sender", target) let receiver = ReceiverLink(c.Session, "test-receiver", source) receiver.SetCredit(100, true) - use m = new Message(rnd.Next(10000), Properties = Properties(Subject = routingKey)) + use m = new Message(rnd.Next(10000), + Properties = Properties(To = toProp)) sender.Send m - (* printfn "%s %s %s %A" target source routingKey succeed *) - - if succeed then - let m' = receiver.Receive(TimeSpan.FromMilliseconds 3000.) - receiver.Accept m' - assertTrue (m' <> null) - assertEqual (m.Body :?> int) (m'.Body :?> int) - else - use m' = receiver.Receive(TimeSpan.FromMilliseconds 100.) - assertEqual null m' - + (* printfn "%s %s %s %A" target source routingKey *) + let m' = receiver.Receive(TimeSpan.FromMilliseconds 3000.) + receiver.Accept m' + assertTrue (m' <> null) + assertEqual (m.Body :?> int) (m'.Body :?> int) let invalidRoutes uri = for dest, cond in - ["/exchange/missing", "amqp:not-found" + ["/exchanges/missing", "amqp:not-found" "/fruit/orange", "amqp:invalid-field"] do - use ac = connect uri + use ac = connectAnon uri let trySet (mre: AutoResetEvent) = try mre.Set() |> ignore with _ -> () @@ -432,7 +436,7 @@ module Test = receiver.Close() with | :? Amqp.AmqpException as ae -> - assertEqual (ae.Error.Condition) (Symbol cond) + assertEqual (Symbol cond) (ae.Error.Condition) | _ -> failwith "invalid expection thrown" let authFailure uri = @@ -450,14 +454,12 @@ module Test = let u = Uri uri let uri = sprintf "amqp://access_failure:boo@%s:%i" u.Host u.Port use ac = connect uri - let dest = "/amq/queue/test" + let dest = "/queues/test" ac.Session.add_Closed ( new ClosedCallback (fun _ err -> printfn "session err %A" err.Condition )) let sender = new SenderLink(ac.Session, "test-sender", dest) sender.Send(new Message "hi", TimeSpan.FromSeconds 15.) - - failwith "expected exception not received" with | :? 
Amqp.AmqpException as ex -> @@ -469,7 +471,7 @@ module Test = let u = Uri uri let uri = sprintf "amqp://access_failure:boo@%s:%i" u.Host u.Port use ac = connect uri - let dest = "/amq/queue/test" + let dest = "/queues/test" let receiver = ReceiverLink(ac.Session, "test-receiver", dest) receiver.Close() failwith "expected exception not received" @@ -483,7 +485,7 @@ module Test = let u = Uri uri let uri = sprintf "amqp://access_failure_not_allowed:boo@%s:%i" u.Host u.Port use ac = connect uri - let dest = "/amq/queue/test" + let dest = "/queues/test" let receiver = ReceiverLink(ac.Session, "test-receiver", dest) receiver.Close() failwith "expected exception not received" @@ -519,10 +521,10 @@ let main argv = roundtrip_to_amqp_091 uri 0 | [AsLower "data_types"; uri] -> - datatypes uri + data_types uri 0 | [AsLower "default_outcome"; uri] -> - defaultOutcome uri + default_outcome uri 0 | [AsLower "outcomes"; uri] -> outcomes uri @@ -531,7 +533,7 @@ let main argv = fragmentation uri 0 | [AsLower "message_annotations"; uri] -> - messageAnnotations uri + message_annotations uri 0 | [AsLower "footer"; uri] -> footer uri @@ -542,6 +544,9 @@ let main argv = | [AsLower "redelivery"; uri] -> redelivery uri 0 + | [AsLower "released"; uri] -> + released uri + 0 | [AsLower "routing"; uri] -> routing uri 0 diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/fsharp-tests.fsproj b/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/fsharp-tests.fsproj similarity index 83% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/fsharp-tests.fsproj rename to deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/fsharp-tests.fsproj index 157790aa61e0..bd832eaac890 100755 --- a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/fsharp-tests.fsproj +++ b/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/fsharp-tests.fsproj @@ -8,7 +8,7 @@ - - + + diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/global.json b/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/global.json similarity index 100% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/global.json rename to deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/global.json diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.gitignore b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/.gitignore similarity index 100% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.gitignore rename to deps/rabbit/test/amqp_system_SUITE_data/java-tests/.gitignore diff --git a/deps/rabbit/test/amqp_system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties new file mode 100755 index 000000000000..f95f1ee80715 --- /dev/null +++ b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +wrapperVersion=3.3.2 +distributionType=only-script +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.8/apache-maven-3.9.8-bin.zip diff --git a/deps/rabbit/test/amqp_system_SUITE_data/java-tests/mvnw b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/mvnw new file mode 100755 index 000000000000..19529ddf8c6e --- /dev/null +++ b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/mvnw @@ -0,0 +1,259 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Apache Maven Wrapper startup batch script, version 3.3.2 +# +# Optional ENV vars +# ----------------- +# JAVA_HOME - location of a JDK home dir, required when download maven via java source +# MVNW_REPOURL - repo url base for downloading maven distribution +# MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +# MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output +# ---------------------------------------------------------------------------- + +set -euf +[ "${MVNW_VERBOSE-}" != debug ] || set -x + +# OS specific support. +native_path() { printf %s\\n "$1"; } +case "$(uname)" in +CYGWIN* | MINGW*) + [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")" + native_path() { cygpath --path --windows "$1"; } + ;; +esac + +# set JAVACMD and JAVACCMD +set_java_home() { + # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched + if [ -n "${JAVA_HOME-}" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACCMD="$JAVA_HOME/jre/sh/javac" + else + JAVACMD="$JAVA_HOME/bin/java" + JAVACCMD="$JAVA_HOME/bin/javac" + + if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then + echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." >&2 + echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." 
>&2 + return 1 + fi + fi + else + JAVACMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v java + )" || : + JAVACCMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v javac + )" || : + + if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then + echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2 + return 1 + fi + fi +} + +# hash string like Java String::hashCode +hash_string() { + str="${1:-}" h=0 + while [ -n "$str" ]; do + char="${str%"${str#?}"}" + h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296)) + str="${str#?}" + done + printf %x\\n $h +} + +verbose() { :; } +[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; } + +die() { + printf %s\\n "$1" >&2 + exit 1 +} + +trim() { + # MWRAPPER-139: + # Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds. + # Needed for removing poorly interpreted newline sequences when running in more + # exotic environments such as mingw bash on Windows. + printf "%s" "${1}" | tr -d '[:space:]' +} + +# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties +while IFS="=" read -r key value; do + case "${key-}" in + distributionUrl) distributionUrl=$(trim "${value-}") ;; + distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;; + esac +done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties" +[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties" + +case "${distributionUrl##*/}" in +maven-mvnd-*bin.*) + MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ + case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in + *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;; + :Darwin*x86_64) distributionPlatform=darwin-amd64 ;; + :Darwin*arm64) distributionPlatform=darwin-aarch64 ;; + :Linux*x86_64*) distributionPlatform=linux-amd64 ;; + *) + echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2 + distributionPlatform=linux-amd64 + ;; + esac + distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip" + ;; +maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;; +*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;; +esac + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}" +distributionUrlName="${distributionUrl##*/}" +distributionUrlNameMain="${distributionUrlName%.*}" +distributionUrlNameMain="${distributionUrlNameMain%-bin}" +MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}" +MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")" + +exec_maven() { + unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || : + exec "$MAVEN_HOME/bin/$MVN_CMD" "$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD" +} + +if [ -d "$MAVEN_HOME" ]; then + verbose "found existing MAVEN_HOME at $MAVEN_HOME" + exec_maven "$@" +fi + +case "${distributionUrl-}" in +*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;; +*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;; +esac + +# prepare tmp dir +if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then + clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; } + trap clean HUP INT TERM EXIT 
+else + die "cannot create temp dir" +fi + +mkdir -p -- "${MAVEN_HOME%/*}" + +# Download and Install Apache Maven +verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +verbose "Downloading from: $distributionUrl" +verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +# select .zip or .tar.gz +if ! command -v unzip >/dev/null; then + distributionUrl="${distributionUrl%.zip}.tar.gz" + distributionUrlName="${distributionUrl##*/}" +fi + +# verbose opt +__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR='' +[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v + +# normalize http auth +case "${MVNW_PASSWORD:+has-password}" in +'') MVNW_USERNAME='' MVNW_PASSWORD='' ;; +has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;; +esac + +if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then + verbose "Found wget ... using wget" + wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl" +elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then + verbose "Found curl ... using curl" + curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl" +elif set_java_home; then + verbose "Falling back to use Java to download" + javaSource="$TMP_DOWNLOAD_DIR/Downloader.java" + targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName" + cat >"$javaSource" <<-END + public class Downloader extends java.net.Authenticator + { + protected java.net.PasswordAuthentication getPasswordAuthentication() + { + return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() ); + } + public static void main( String[] args ) throws Exception + { + setDefault( new Downloader() ); + java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() ); + } + } + END + # For Cygwin/MinGW, switch paths to Windows format before running javac and java + verbose " - Compiling Downloader.java ..." + "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java" + verbose " - Running Downloader.java ..." + "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")" +fi + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +if [ -n "${distributionSha256Sum-}" ]; then + distributionSha256Result=false + if [ "$MVN_CMD" = mvnd.sh ]; then + echo "Checksum validation is not supported for maven-mvnd." >&2 + echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + elif command -v sha256sum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then + distributionSha256Result=true + fi + elif command -v shasum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then + distributionSha256Result=true + fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." 
>&2 + echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $distributionSha256Result = false ]; then + echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2 + echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2 + exit 1 + fi +fi + +# unzip and move +if command -v unzip >/dev/null; then + unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip" +else + tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar" +fi +printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url" +mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME" + +clean || : +exec_maven "$@" diff --git a/deps/rabbit/test/amqp_system_SUITE_data/java-tests/mvnw.cmd b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/mvnw.cmd new file mode 100644 index 000000000000..b150b91ed500 --- /dev/null +++ b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/mvnw.cmd @@ -0,0 +1,149 @@ +<# : batch portion +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. 
+@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Optional ENV vars +@REM MVNW_REPOURL - repo url base for downloading maven distribution +@REM MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +@REM MVNW_VERBOSE - true: enable verbose log; others: silence the output +@REM ---------------------------------------------------------------------------- + +@IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0) +@SET __MVNW_CMD__= +@SET __MVNW_ERROR__= +@SET __MVNW_PSMODULEP_SAVE=%PSModulePath% +@SET PSModulePath= +@FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @( + IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B) +) +@SET PSModulePath=%__MVNW_PSMODULEP_SAVE% +@SET __MVNW_PSMODULEP_SAVE= +@SET __MVNW_ARG0_NAME__= +@SET MVNW_USERNAME= +@SET MVNW_PASSWORD= +@IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*) +@echo Cannot start maven from wrapper >&2 && exit /b 1 +@GOTO :EOF +: end batch / begin powershell #> + +$ErrorActionPreference = "Stop" +if ($env:MVNW_VERBOSE -eq "true") { + $VerbosePreference = "Continue" +} + +# calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties +$distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl +if (!$distributionUrl) { + Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties" +} + +switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) { + "maven-mvnd-*" { + $USE_MVND = $true + $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip" + $MVN_CMD = "mvnd.cmd" + break + } + default { + $USE_MVND = $false + $MVN_CMD = $script -replace '^mvnw','mvn' + break + } +} + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +if ($env:MVNW_REPOURL) { + $MVNW_REPO_PATTERN = if ($USE_MVND) { "/org/apache/maven/" } else { "/maven/mvnd/" } + $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')" +} +$distributionUrlName = $distributionUrl -replace '^.*/','' +$distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$','' +$MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain" +if ($env:MAVEN_USER_HOME) { + $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain" +} +$MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | ForEach-Object {$_.ToString("x2")}) -join '' +$MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME" + +if (Test-Path -Path "$MAVEN_HOME" -PathType Container) { + Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME" + Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" + exit $? +} + +if (! 
$distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) { + Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl" +} + +# prepare tmp dir +$TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile +$TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir" +$TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null +trap { + if ($TMP_DOWNLOAD_DIR.Exists) { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } + } +} + +New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null + +# Download and Install Apache Maven +Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +Write-Verbose "Downloading from: $distributionUrl" +Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +$webclient = New-Object System.Net.WebClient +if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) { + $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD) +} +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 +$webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +$distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum +if ($distributionSha256Sum) { + if ($USE_MVND) { + Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." + } + Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash + if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) { + Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property." + } +} + +# unzip and move +Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null +Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null +try { + Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null +} catch { + if (! 
(Test-Path -Path "$MAVEN_HOME" -PathType Container)) { + Write-Error "fail to move MAVEN_HOME" + } +} finally { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } +} + +Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/pom.xml similarity index 88% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml rename to deps/rabbit/test/amqp_system_SUITE_data/java-tests/pom.xml index 54a031857b02..1cf102431b64 100644 --- a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml +++ b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/pom.xml @@ -8,13 +8,13 @@ rabbitmq-amqp1.0-java-tests https://www.rabbitmq.com - 5.10.0 - 2.4.0 - 1.2.12 - 2.39.0 + 5.10.2 + 2.5.0 + 1.2.13 + 2.43.0 1.17.0 - 3.11.0 - 3.1.2 + 3.12.1 + 3.2.5 diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/jms/RoundTripTest.java b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/jms/RoundTripTest.java similarity index 100% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/jms/RoundTripTest.java rename to deps/rabbit/test/amqp_system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/jms/RoundTripTest.java diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/resources/logback-test.xml b/deps/rabbit/test/amqp_system_SUITE_data/java-tests/src/test/resources/logback-test.xml similarity index 100% rename from deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/resources/logback-test.xml rename to deps/rabbit/test/amqp_system_SUITE_data/java-tests/src/test/resources/logback-test.xml diff --git a/deps/rabbit/test/amqpl_consumer_ack_SUITE.erl b/deps/rabbit/test/amqpl_consumer_ack_SUITE.erl new file mode 100644 index 000000000000..f907e77e0a26 --- /dev/null +++ b/deps/rabbit/test/amqpl_consumer_ack_SUITE.erl @@ -0,0 +1,259 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(amqpl_consumer_ack_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile([nowarn_export_all, + export_all]). + +-import(rabbit_ct_broker_helpers, + [rpc/4]). +-import(rabbit_ct_helpers, + [eventually/3]). + +all() -> + [ + {group, tests} + ]. + +groups() -> + [ + {tests, [shuffle], + [ + requeue_one_channel_classic_queue, + requeue_one_channel_quorum_queue, + requeue_two_channels_classic_queue, + requeue_two_channels_quorum_queue + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:merge_app_env( + Config, {rabbit, [{quorum_tick_interval, 1000}]}). + +end_per_suite(Config) -> + Config. 
+ +init_per_group(_Group, Config) -> + Nodes = 1, + Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), + Config1 = rabbit_ct_helpers:set_config( + Config, [{rmq_nodes_count, Nodes}, + {rmq_nodename_suffix, Suffix}]), + rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_Group, Config) -> + rabbit_ct_helpers:run_teardown_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +requeue_one_channel_classic_queue(Config) -> + requeue_one_channel(<<"classic">>, Config). + +requeue_one_channel_quorum_queue(Config) -> + requeue_one_channel(<<"quorum">>, Config). + +requeue_one_channel(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Ctag = <<"my consumer tag">>, + Ch = rabbit_ct_client_helpers:open_channel(Config), + + #'queue.declare_ok'{} = amqp_channel:call( + Ch, + #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QType}]}), + + amqp_channel:subscribe(Ch, + #'basic.consume'{queue = QName, + consumer_tag = Ctag}, + self()), + + receive #'basic.consume_ok'{consumer_tag = Ctag} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + [begin + amqp_channel:cast( + Ch, + #'basic.publish'{routing_key = QName}, + #amqp_msg{payload = integer_to_binary(N)}) + end || N <- lists:seq(1, 4)], + + receive {#'basic.deliver'{}, + #amqp_msg{payload = <<"1">>}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {#'basic.deliver'{}, + #amqp_msg{payload = <<"2">>}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + D3 = receive {#'basic.deliver'{delivery_tag = Del3}, + #amqp_msg{payload = <<"3">>}} -> Del3 + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {#'basic.deliver'{}, + #amqp_msg{payload = <<"4">>}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + assert_messages(QName, 4, 4, Config), + + %% Requeue the first 3 messages. + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = D3, + requeue = true, + multiple = true}), + + %% First 3 messages should be redelivered. + receive {#'basic.deliver'{}, + #amqp_msg{payload = P1}} -> + ?assertEqual(<<"1">>, P1) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {#'basic.deliver'{}, + #amqp_msg{payload = P2}} -> + ?assertEqual(<<"2">>, P2) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + D3b = receive {#'basic.deliver'{delivery_tag = Del3b}, + #amqp_msg{payload = P3}} -> + ?assertEqual(<<"3">>, P3), + Del3b + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + assert_messages(QName, 4, 4, Config), + + %% Ack all 4 messages. + amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = D3b, + multiple = true}), + assert_messages(QName, 0, 0, Config), + + ?assertMatch(#'queue.delete_ok'{}, + amqp_channel:call(Ch, #'queue.delete'{queue = QName})). + +requeue_two_channels_classic_queue(Config) -> + requeue_two_channels(<<"classic">>, Config). + +requeue_two_channels_quorum_queue(Config) -> + requeue_two_channels(<<"quorum">>, Config). 
+ +requeue_two_channels(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + Ctag1 = <<"consumter tag 1">>, + Ctag2 = <<"consumter tag 2">>, + Ch1 = rabbit_ct_client_helpers:open_channel(Config), + Ch2 = rabbit_ct_client_helpers:open_channel(Config), + + #'queue.declare_ok'{} = amqp_channel:call( + Ch1, + #'queue.declare'{ + queue = QName, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, QType}]}), + + amqp_channel:subscribe(Ch1, + #'basic.consume'{queue = QName, + consumer_tag = Ctag1}, + self()), + + receive #'basic.consume_ok'{consumer_tag = Ctag1} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + amqp_channel:subscribe(Ch2, + #'basic.consume'{queue = QName, + consumer_tag = Ctag2}, + self()), + receive #'basic.consume_ok'{consumer_tag = Ctag2} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + [begin + amqp_channel:cast( + Ch1, + #'basic.publish'{routing_key = QName}, + #amqp_msg{payload = integer_to_binary(N)}) + end || N <- lists:seq(1,4)], + + %% Queue should deliver round robin. + receive {#'basic.deliver'{consumer_tag = C1}, + #amqp_msg{payload = <<"1">>}} -> + ?assertEqual(Ctag1, C1) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {#'basic.deliver'{consumer_tag = C2}, + #amqp_msg{payload = <<"2">>}} -> + ?assertEqual(Ctag2, C2) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {#'basic.deliver'{consumer_tag = C3}, + #amqp_msg{payload = <<"3">>}} -> + ?assertEqual(Ctag1, C3) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {#'basic.deliver'{consumer_tag = C4}, + #amqp_msg{payload = <<"4">>}} -> + ?assertEqual(Ctag2, C4) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + assert_messages(QName, 4, 4, Config), + + %% Closing Ch1 should cause both messages to be requeued and delivered to the Ch2. + ok = rabbit_ct_client_helpers:close_channel(Ch1), + + receive {#'basic.deliver'{consumer_tag = C5}, + #amqp_msg{payload = <<"1">>}} -> + ?assertEqual(Ctag2, C5) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + DelTag = receive {#'basic.deliver'{consumer_tag = C6, + delivery_tag = D}, + #amqp_msg{payload = <<"3">>}} -> + ?assertEqual(Ctag2, C6), + D + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + assert_messages(QName, 4, 4, Config), + + %% Ch2 acks all 4 messages + amqp_channel:cast(Ch2, #'basic.ack'{delivery_tag = DelTag, + multiple = true}), + assert_messages(QName, 0, 0, Config), + + ?assertMatch(#'queue.delete_ok'{}, + amqp_channel:call(Ch2, #'queue.delete'{queue = QName})). + +assert_messages(QNameBin, NumTotalMsgs, NumUnackedMsgs, Config) -> + Vhost = ?config(rmq_vhost, Config), + eventually( + ?_assertEqual( + lists:sort([{messages, NumTotalMsgs}, {messages_unacknowledged, NumUnackedMsgs}]), + begin + {ok, Q} = rpc(Config, rabbit_amqqueue, lookup, [QNameBin, Vhost]), + Infos = rpc(Config, rabbit_amqqueue, info, [Q, [messages, messages_unacknowledged]]), + lists:sort(Infos) + end + ), 500, 5). diff --git a/deps/rabbit/test/amqpl_direct_reply_to_SUITE.erl b/deps/rabbit/test/amqpl_direct_reply_to_SUITE.erl new file mode 100644 index 000000000000..8cd607966951 --- /dev/null +++ b/deps/rabbit/test/amqpl_direct_reply_to_SUITE.erl @@ -0,0 +1,255 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. +%% + +-module(amqpl_direct_reply_to_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile([nowarn_export_all, + export_all]). + +-import(rabbit_ct_helpers, [eventually/1]). + +all() -> + [ + {group, cluster_size_1}, + {group, cluster_size_3} + ]. + +groups() -> + [ + {cluster_size_1, [shuffle], + [ + trace + ]}, + {cluster_size_3, [shuffle], + [ + rpc_new_to_old_node, + rpc_old_to_new_node + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + Config. + +end_per_suite(Config) -> + Config. + +init_per_group(Group, Config) -> + Nodes = case Group of + cluster_size_1 -> 1; + cluster_size_3 -> 3 + end, + Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), + Config1 = rabbit_ct_helpers:set_config( + Config, [{rmq_nodes_count, Nodes}, + {rmq_nodename_suffix, Suffix}]), + rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_Group, Config) -> + rabbit_ct_helpers:run_teardown_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% Test case for +%% https://github.com/rabbitmq/rabbitmq-server/discussions/11662 +trace(Config) -> + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["trace_on"]), + + Node = atom_to_binary(rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)), + TraceQueue = <<"tests.amqpl_direct_reply_to.trace.tracing">>, + RequestQueue = <<"tests.amqpl_direct_reply_to.trace.requests">>, + %% This is the pseudo queue that is specially interpreted by RabbitMQ. + ReplyQueue = <<"amq.rabbitmq.reply-to">>, + RequestPayload = <<"my request">>, + ReplyPayload = <<"my reply">>, + CorrelationId = <<"my correlation ID">>, + Qs = [RequestQueue, TraceQueue], + Ch = rabbit_ct_client_helpers:open_channel(Config), + RequesterCh = rabbit_ct_client_helpers:open_channel(Config, 0), + ResponderCh = rabbit_ct_client_helpers:open_channel(Config, 0), + + [#'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Q0}) || Q0 <- Qs], + #'queue.bind_ok'{} = amqp_channel:call( + Ch, #'queue.bind'{ + queue = TraceQueue, + exchange = <<"amq.rabbitmq.trace">>, + %% We subscribe only to messages entering RabbitMQ. + routing_key = <<"publish.#">>}), + + %% There is no need to declare this pseudo queue first. + amqp_channel:subscribe(RequesterCh, + #'basic.consume'{queue = ReplyQueue, + no_ack = true}, + self()), + CTag = receive #'basic.consume_ok'{consumer_tag = CTag0} -> CTag0 + end, + #'confirm.select_ok'{} = amqp_channel:call(RequesterCh, #'confirm.select'{}), + amqp_channel:register_confirm_handler(RequesterCh, self()), + + %% Send the request. + amqp_channel:cast( + RequesterCh, + #'basic.publish'{routing_key = RequestQueue}, + #amqp_msg{props = #'P_basic'{reply_to = ReplyQueue, + correlation_id = CorrelationId}, + payload = RequestPayload}), + receive #'basic.ack'{} -> ok + after 5000 -> ct:fail(confirm_timeout) + end, + + %% Receive the request. 
+ {#'basic.get_ok'{}, + #amqp_msg{props = #'P_basic'{reply_to = ReplyTo, + correlation_id = CorrelationId}, + payload = RequestPayload} + } = amqp_channel:call(ResponderCh, #'basic.get'{queue = RequestQueue}), + + %% Send the reply. + amqp_channel:cast( + ResponderCh, + #'basic.publish'{routing_key = ReplyTo}, + #amqp_msg{props = #'P_basic'{correlation_id = CorrelationId}, + payload = ReplyPayload}), + + %% Receive the reply. + receive {#'basic.deliver'{consumer_tag = CTag}, + #amqp_msg{payload = ReplyPayload, + props = #'P_basic'{correlation_id = CorrelationId}}} -> + ok + after 5000 -> ct:fail(missing_reply) + end, + + %% 2 messages should have entered RabbitMQ: + %% 1. the RPC request + %% 2. the RPC reply + + {#'basic.get_ok'{routing_key = <<"publish.">>}, + #amqp_msg{props = #'P_basic'{headers = RequestHeaders}, + payload = RequestPayload} + } = amqp_channel:call(Ch, #'basic.get'{queue = TraceQueue}), + ?assertMatch(#{ + <<"exchange_name">> := <<>>, + <<"routing_keys">> := [RequestQueue], + <<"connection">> := <<"127.0.0.1:", _/binary>>, + <<"node">> := Node, + <<"vhost">> := <<"/">>, + <<"user">> := <<"guest">>, + <<"properties">> := #{<<"correlation_id">> := CorrelationId}, + <<"routed_queues">> := [RequestQueue] + }, + rabbit_misc:amqp_table(RequestHeaders)), + + {#'basic.get_ok'{routing_key = <<"publish.">>}, + #amqp_msg{props = #'P_basic'{headers = ResponseHeaders}, + payload = ReplyPayload} + } = amqp_channel:call(Ch, #'basic.get'{queue = TraceQueue}), + ?assertMatch(#{ + <<"exchange_name">> := <<>>, + <<"routing_keys">> := [<<"amq.rabbitmq.reply-to.", _/binary>>], + <<"connection">> := <<"127.0.0.1:", _/binary>>, + <<"node">> := Node, + <<"vhost">> := <<"/">>, + <<"user">> := <<"guest">>, + <<"properties">> := #{<<"correlation_id">> := CorrelationId}, + <<"routed_queues">> := [<<"amq.rabbitmq.reply-to.", _/binary>>] + }, + rabbit_misc:amqp_table(ResponseHeaders)), + + [#'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = Q0}) || Q0 <- Qs], + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["trace_off"]). + +%% "new" and "old" refers to new and old RabbitMQ versions in mixed version tests. +rpc_new_to_old_node(Config) -> + rpc(0, 1, Config). + +rpc_old_to_new_node(Config) -> + rpc(1, 0, Config). + +rpc(RequesterNode, ResponderNode, Config) -> + RequestQueue = <<"tests.amqpl_direct_reply_to.rpc.requests">>, + %% This is the pseudo queue that is specially interpreted by RabbitMQ. + ReplyQueue = <<"amq.rabbitmq.reply-to">>, + RequestPayload = <<"my request">>, + ReplyPayload = <<"my reply">>, + CorrelationId = <<"my correlation ID">>, + RequesterCh = rabbit_ct_client_helpers:open_channel(Config, RequesterNode), + ResponderCh = rabbit_ct_client_helpers:open_channel(Config, ResponderNode), + + %% There is no need to declare this pseudo queue first. + amqp_channel:subscribe(RequesterCh, + #'basic.consume'{queue = ReplyQueue, + no_ack = true}, + self()), + CTag = receive #'basic.consume_ok'{consumer_tag = CTag0} -> CTag0 + end, + #'queue.declare_ok'{} = amqp_channel:call( + RequesterCh, + #'queue.declare'{queue = RequestQueue}), + #'confirm.select_ok'{} = amqp_channel:call(RequesterCh, #'confirm.select'{}), + amqp_channel:register_confirm_handler(RequesterCh, self()), + %% Send the request. 
+ amqp_channel:cast( + RequesterCh, + #'basic.publish'{routing_key = RequestQueue}, + #amqp_msg{props = #'P_basic'{reply_to = ReplyQueue, + correlation_id = CorrelationId}, + payload = RequestPayload}), + receive #'basic.ack'{} -> ok + after 5000 -> ct:fail(confirm_timeout) + end, + + ok = wait_for_queue_declared(RequestQueue, ResponderNode, Config), + %% Receive the request. + {#'basic.get_ok'{}, + #amqp_msg{props = #'P_basic'{reply_to = ReplyTo, + correlation_id = CorrelationId}, + payload = RequestPayload} + } = amqp_channel:call(ResponderCh, #'basic.get'{queue = RequestQueue}), + %% Send the reply. + amqp_channel:cast( + ResponderCh, + #'basic.publish'{routing_key = ReplyTo}, + #amqp_msg{props = #'P_basic'{correlation_id = CorrelationId}, + payload = ReplyPayload}), + + %% Receive the reply. + receive {#'basic.deliver'{consumer_tag = CTag}, + #amqp_msg{payload = ReplyPayload, + props = #'P_basic'{correlation_id = CorrelationId}}} -> + ok + after 5000 -> ct:fail(missing_reply) + end. + +wait_for_queue_declared(Queue, Node, Config) -> + eventually( + ?_assert( + begin + Ch = rabbit_ct_client_helpers:open_channel(Config, Node), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = Queue, + passive = true}), + rabbit_ct_client_helpers:close_channel(Ch), + true + end)). diff --git a/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl b/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl index db2a4c8e4855..ddf3cc93e321 100644 --- a/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl +++ b/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl @@ -1,6 +1,5 @@ -module(amqqueue_backward_compatibility_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include("amqqueue.hrl"). diff --git a/deps/rabbit/test/backing_queue_SUITE.erl b/deps/rabbit/test/backing_queue_SUITE.erl index 68568ee8b76e..10129201b9dc 100644 --- a/deps/rabbit/test/backing_queue_SUITE.erl +++ b/deps/rabbit/test/backing_queue_SUITE.erl @@ -1,8 +1,8 @@ - % This Source Code Form is subject to the terms of the Mozilla Public +%% This Source Code Form is subject to the terms of the Mozilla Public %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(backing_queue_SUITE). @@ -27,17 +27,13 @@ variable_queue_drop, variable_queue_fold_msg_on_disk, variable_queue_dropfetchwhile, - variable_queue_dropwhile_varying_ram_duration, variable_queue_dropwhile_restart, variable_queue_dropwhile_sync_restart, - variable_queue_fetchwhile_varying_ram_duration, variable_queue_ack_limiting, variable_queue_purge, variable_queue_requeue, variable_queue_requeue_ram_beta, - variable_queue_fold, - variable_queue_batch_publish, - variable_queue_batch_publish_delivered + variable_queue_fold ]). -define(BACKING_QUEUE_TESTCASES, [ @@ -66,8 +62,8 @@ groups() -> [ {backing_queue_tests, [], [ msg_store, - {backing_queue_v2, [], Common ++ V2Only}, - {backing_queue_v1, [], Common} + msg_store_file_scan, + {backing_queue_v2, [], Common ++ V2Only} ]} ]. 
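Stepping back to the amqpl_direct_reply_to_SUITE added earlier in this patch: it exercises RabbitMQ's Direct Reply-To convention, where the requester consumes from the pseudo queue amq.rabbitmq.reply-to in no-ack mode on the same channel it publishes on, sets reply_to and correlation_id on the request, and the responder publishes the reply to the received reply_to address on the default exchange. The sketch below restates that sequence outside a Common Test suite; it is illustrative only and assumes a local broker reachable with default credentials through the Erlang amqp_client library.

-module(direct_reply_to_example).
-include_lib("amqp_client/include/amqp_client.hrl").
-export([call/2]).

%% Publish Payload to RequestQueue and wait for a Direct Reply-To response.
call(RequestQueue, Payload) ->
    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
    {ok, Ch} = amqp_connection:open_channel(Conn),
    %% Consume from the pseudo queue before publishing; it must not be
    %% declared, and no_ack must be true for Direct Reply-To to work.
    amqp_channel:subscribe(Ch,
                           #'basic.consume'{queue = <<"amq.rabbitmq.reply-to">>,
                                            no_ack = true},
                           self()),
    receive #'basic.consume_ok'{} -> ok
    after 5000 -> exit(consume_timeout)
    end,
    CorrelationId = integer_to_binary(erlang:unique_integer([positive])),
    amqp_channel:cast(Ch,
                      #'basic.publish'{routing_key = RequestQueue},
                      #amqp_msg{props = #'P_basic'{reply_to = <<"amq.rabbitmq.reply-to">>,
                                                   correlation_id = CorrelationId},
                                payload = Payload}),
    Reply = receive
                {#'basic.deliver'{},
                 #amqp_msg{props = #'P_basic'{correlation_id = CorrelationId},
                           payload = ReplyPayload}} ->
                    ReplyPayload
            after 5000 ->
                    {error, timeout}
            end,
    ok = amqp_connection:close(Conn),
    Reply.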
@@ -93,7 +89,7 @@ end_per_suite(Config) -> init_per_group(Group, Config) -> case lists:member({group, Group}, all()) of true -> - ClusterSize = 2, + ClusterSize = 1, Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, Group}, {rmq_nodes_count, ClusterSize} @@ -101,8 +97,7 @@ init_per_group(Group, Config) -> rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps() ++ [ - fun(C) -> init_per_group1(Group, C) end, - fun setup_file_handle_cache/1 + fun(C) -> init_per_group1(Group, C) end ]); false -> rabbit_ct_helpers:run_steps(Config, [ @@ -123,14 +118,6 @@ init_per_group1(backing_queue_tests, Config) -> "Backing queue module not supported by this test group: ~tp~n", [Module])} end; -init_per_group1(backing_queue_v1, Config) -> - ok = rabbit_ct_broker_helpers:rpc(Config, 0, - application, set_env, [rabbit, classic_queue_default_version, 1]), - Config; -init_per_group1(backing_queue_v2, Config) -> - ok = rabbit_ct_broker_helpers:rpc(Config, 0, - application, set_env, [rabbit, classic_queue_default_version, 2]), - Config; init_per_group1(backing_queue_embed_limit_0, Config) -> ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbit, queue_index_embed_msgs_below, 0]), @@ -149,17 +136,6 @@ init_per_group1(from_cluster_node2, Config) -> init_per_group1(_, Config) -> Config. -setup_file_handle_cache(Config) -> - ok = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, setup_file_handle_cache1, []), - Config. - -setup_file_handle_cache1() -> - %% FIXME: Why are we doing this? - application:set_env(rabbit, file_handles_high_watermark, 100), - ok = file_handle_cache:set_limit(100), - ok. - end_per_group(Group, Config) -> case lists:member({group, Group}, all()) of true -> @@ -175,12 +151,6 @@ end_per_group1(backing_queue_tests, Config) -> rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, teardown_backing_queue_test_group, [Config]); end_per_group1(Group, Config) -when Group =:= backing_queue_v1 -orelse Group =:= backing_queue_v2 -> - ok = rabbit_ct_broker_helpers:rpc(Config, 0, - application, unset_env, [rabbit, classic_queue_default_version]), - Config; -end_per_group1(Group, Config) when Group =:= backing_queue_embed_limit_0 orelse Group =:= backing_queue_embed_limit_1024 -> ok = rabbit_ct_broker_helpers:rpc(Config, 0, @@ -192,18 +162,12 @@ end_per_group1(_, Config) -> init_per_testcase(Testcase, Config) when Testcase == variable_queue_requeue; Testcase == variable_queue_fold -> - ok = rabbit_ct_broker_helpers:rpc( - Config, 0, application, set_env, - [rabbit, queue_explicit_gc_run_operation_threshold, 0]), rabbit_ct_helpers:testcase_started(Config, Testcase); init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(Testcase, Config) when Testcase == variable_queue_requeue; Testcase == variable_queue_fold -> - ok = rabbit_ct_broker_helpers:rpc( - Config, 0, application, set_env, - [rabbit, queue_explicit_gc_run_operation_threshold, 1000]), rabbit_ct_helpers:testcase_finished(Config, Testcase); end_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_finished(Config, Testcase). @@ -535,6 +499,191 @@ test_msg_store_client_delete_and_terminate(GenRef) -> ok = rabbit_msg_store:client_delete_and_terminate(MSCState), passed. +%% ------------------------------------------------------------------- +%% Message store file scanning. 
+%% ------------------------------------------------------------------- + +%% While it is possible although very unlikely that this test case +%% produces false positives, all failures of this test case should +%% be investigated thoroughly as they test an algorithm that is +%% central to the reliability of the data in the shared message store. +%% Failing files can be found in the CT private data. +msg_store_file_scan(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, msg_store_file_scan1, [Config]). + +msg_store_file_scan1(Config) -> + Scan = fun (Blocks) -> + Expected = gen_result(Blocks), + Path = gen_msg_file(Config, Blocks), + Result = rabbit_msg_store:scan_file_for_valid_messages(Path), + case Result of + Expected -> ok; + _ -> {expected, Expected, got, Result} + end + end, + %% Empty files. + ok = Scan([]), + ok = Scan([{pad, 1024}]), + ok = Scan([{pad, 1024 * 1024}]), + %% One-message files. + ok = Scan([{msg, gen_id(), <<0>>}]), + ok = Scan([{msg, gen_id(), <<255>>}]), + ok = Scan([{msg, gen_id(), gen_msg()}]), + ok = Scan([{pad, 1024}, {msg, gen_id(), gen_msg()}]), + ok = Scan([{pad, 1024 * 1024}, {msg, gen_id(), gen_msg()}]), + ok = Scan([{msg, gen_id(), gen_msg()}, {pad, 1024}]), + ok = Scan([{msg, gen_id(), gen_msg()}, {pad, 1024 * 1024}]), + %% Multiple messages. + ok = Scan([{msg, gen_id(), gen_msg()} || _ <- lists:seq(1, 2)]), + ok = Scan([{msg, gen_id(), gen_msg()} || _ <- lists:seq(1, 5)]), + ok = Scan([{msg, gen_id(), gen_msg()} || _ <- lists:seq(1, 20)]), + ok = Scan([{msg, gen_id(), gen_msg()} || _ <- lists:seq(1, 100)]), + %% Multiple messages with padding. + ok = Scan([ + {pad, 1024}, + {msg, gen_id(), gen_msg()}, + {msg, gen_id(), gen_msg()} + ]), + ok = Scan([ + {msg, gen_id(), gen_msg()}, + {pad, 1024}, + {msg, gen_id(), gen_msg()} + ]), + ok = Scan([ + {msg, gen_id(), gen_msg()}, + {msg, gen_id(), gen_msg()}, + {pad, 1024} + ]), + ok = Scan([ + {pad, 1024}, + {msg, gen_id(), gen_msg()}, + {pad, 1024}, + {msg, gen_id(), gen_msg()} + ]), + ok = Scan([ + {msg, gen_id(), gen_msg()}, + {pad, 1024}, + {msg, gen_id(), gen_msg()}, + {pad, 1024} + ]), + ok = Scan([ + {pad, 1024}, + {msg, gen_id(), gen_msg()}, + {msg, gen_id(), gen_msg()}, + {pad, 1024} + ]), + ok = Scan([ + {pad, 1024}, + {msg, gen_id(), gen_msg()}, + {pad, 1024}, + {msg, gen_id(), gen_msg()}, + {pad, 1024} + ]), + OneOf = fun(A, B) -> + case rand:uniform() of + F when F < +0.5 -> A; + _ -> B + end + end, + ok = Scan([OneOf({msg, gen_id(), gen_msg()}, {pad, 1024}) || _ <- lists:seq(1, 2)]), + ok = Scan([OneOf({msg, gen_id(), gen_msg()}, {pad, 1024}) || _ <- lists:seq(1, 5)]), + ok = Scan([OneOf({msg, gen_id(), gen_msg()}, {pad, 1024}) || _ <- lists:seq(1, 20)]), + ok = Scan([OneOf({msg, gen_id(), gen_msg()}, {pad, 1024}) || _ <- lists:seq(1, 100)]), + %% Duplicate messages. + Msg = {msg, gen_id(), gen_msg()}, + ok = Scan([Msg, Msg]), + ok = Scan([Msg, Msg, Msg, Msg, Msg]), + ok = Scan([Msg, {pad, 1024}, Msg]), + ok = Scan([Msg] + ++ [OneOf({msg, gen_id(), gen_msg()}, {pad, 1024}) || _ <- lists:seq(1, 100)] + ++ [Msg]), + %% Truncated start of message. 
+ ok = Scan([{bin, <<21:56, "deadbeefdeadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<21:48, "deadbeefdeadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<21:40, "deadbeefdeadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<21:32, "deadbeefdeadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<21:24, "deadbeefdeadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<21:16, "deadbeefdeadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<21:8, "deadbeefdeadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<"deadbeefdeadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<"beefdeadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<"deadbeef", "hello", 255>>}]), + ok = Scan([{bin, <<"beef", "hello", 255>>}]), + ok = Scan([{bin, <<"hello", 255>>}]), + ok = Scan([{bin, <<255>>}]), + %% Truncated end of message (unlikely). + ok = Scan([{bin, <<255>>}]), + ok = Scan([{bin, <<255, 255>>}]), + ok = Scan([{bin, <<255, 255, 255>>}]), + ok = Scan([{bin, <<255, 255, 255, 255>>}]), + ok = Scan([{bin, <<255, 255, 255, 255, 255>>}]), + ok = Scan([{bin, <<255, 255, 255, 255, 255, 255>>}]), + ok = Scan([{bin, <<255, 255, 255, 255, 255, 255, 255>>}]), + ok = Scan([{bin, <<255, 255, 255, 255, 255, 255, 255, 255>>}]), + ok = Scan([{bin, <<15:64, "deadbeefdeadbee">>}]), + ok = Scan([{bin, <<16:64, "deadbeefdeadbeef">>}]), + ok = Scan([{bin, <<17:64, "deadbeefdeadbeef", 0>>}]), + ok = Scan([{bin, <<17:64, "deadbeefdeadbeef", 255>>}]), + ok = Scan([{bin, <<17:64, "deadbeefdeadbeef", 255, 254>>}]), + %% Messages with no content. + ok = Scan([{bin, <<0:64, "deadbeefdeadbeef", 255>>}]), + ok = Scan([{msg, gen_id(), <<>>}]), + %% All good!! + passed. + +gen_id() -> + rand:bytes(16). + +gen_msg() -> + gen_msg(1024 * 1024). + +gen_msg(MaxSize) -> + %% This might generate false positives but very rarely + %% so we don't do anything to prevent them. + rand:bytes(rand:uniform(MaxSize)). + +gen_msg_file(Config, Blocks) -> + PrivDir = ?config(priv_dir, Config), + TmpFile = integer_to_list(erlang:unique_integer([positive])), + Path = filename:join(PrivDir, TmpFile), + ok = file:write_file(Path, [case Block of + {bin, Bin} -> + Bin; + {pad, Size} -> + %% This might generate false positives although very unlikely. + rand:bytes(Size); + {msg, MsgId, Msg} -> + Size = 16 + byte_size(Msg), + [<<Size:64>>, MsgId, Msg, <<255>>] + end || Block <- Blocks]), + Path. + +gen_result(Blocks) -> + Messages = gen_result(Blocks, 0, []), + case Messages of + [] -> + {ok, [], 0}; + [{_, TotalSize, Offset}|_] -> + {ok, Messages, Offset + TotalSize} + end. + +gen_result([], _, Acc) -> + Acc; +gen_result([{bin, Bin}|Tail], Offset, Acc) -> + gen_result(Tail, Offset + byte_size(Bin), Acc); +gen_result([{pad, Size}|Tail], Offset, Acc) -> + gen_result(Tail, Offset + Size, Acc); +gen_result([{msg, MsgId, Msg}|Tail], Offset, Acc) -> + Size = 9 + 16 + byte_size(Msg), + %% Only the first MsgId found is returned when duplicates exist. + case lists:keymember(MsgId, 1, Acc) of + false -> + gen_result(Tail, Offset + Size, [{MsgId, Size, Offset}|Acc]); + true -> + gen_result(Tail, Offset + Size, Acc) + end. + %% ------------------------------------------------------------------- %% Backing queue. %% ------------------------------------------------------------------- @@ -565,10 +714,7 @@ bq_queue_index(Config) -> ?MODULE, bq_queue_index1, [Config]). index_mod() -> - case application:get_env(rabbit, classic_queue_default_version) of - {ok, 1} -> rabbit_queue_index; - {ok, 2} -> rabbit_classic_queue_index_v2 - end. + rabbit_classic_queue_index_v2.
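The gen_msg_file/2 and gen_result/3 helpers above pin down the entry layout the scanner is expected to parse: an 8-byte size prefix (whose value counts the 16-byte message id plus the body), the id itself, the body, and a single 255 end-of-entry byte. A message with an N-byte body therefore occupies 8 + 16 + N + 1 bytes in the file, which is exactly the 9 + 16 + byte_size(Msg) used when computing offsets in gen_result/3. The fragment below merely restates that arithmetic as a standalone check; the function names are illustrative.

%% Standalone restatement of the entry layout built by gen_msg_file/2 above.
entry(MsgId, Body) when byte_size(MsgId) =:= 16 ->
    Size = 16 + byte_size(Body),
    iolist_to_binary([<<Size:64>>, MsgId, Body, <<255>>]).

entry_size_check() ->
    Body = <<"hello">>,               %% 5-byte body
    Entry = entry(<<0:128>>, Body),   %% <<0:128>> is a dummy 16-byte id
    %% 8 (size prefix) + 16 (id) + 5 (body) + 1 (end marker) = 30 bytes,
    %% i.e. 9 + 16 + byte_size(Body), matching gen_result/3.
    30 = byte_size(Entry),
    ok.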
bq_queue_index1(_Config) -> init_queue_index(), @@ -581,10 +727,7 @@ bq_queue_index1(_Config) -> SeqIdsC = lists:seq(0, trunc(SegmentSize/2)), SeqIdsD = lists:seq(0, SegmentSize*4), - VerifyReadWithPublishedFun = case IndexMod of - rabbit_queue_index -> fun verify_read_with_published_v1/3; - rabbit_classic_queue_index_v2 -> fun verify_read_with_published_v2/3 - end, + VerifyReadWithPublishedFun = fun verify_read_with_published_v2/3, with_empty_test_queue( fun (Qi0, QName) -> @@ -674,8 +817,7 @@ bq_queue_index1(_Config) -> end), %% d) get messages in all states to a segment, then flush, then do - %% the same again, don't flush and read. CQ v1: this will hit all - %% possibilities in combining the segment with the journal. + %% the same again, don't flush and read. with_empty_test_queue( fun (Qi0, _QName) -> {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7], @@ -702,8 +844,7 @@ bq_queue_index1(_Config) -> Qi10 end), - %% e) as for (d), but use terminate instead of read, which (CQ v1) will - %% exercise journal_minus_segment, not segment_plus_journal. + %% e) as for (d), but use terminate instead of read. with_empty_test_queue( fun (Qi0, QName) -> {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7], @@ -729,15 +870,6 @@ bq_queue_index1(_Config) -> passed. -verify_read_with_published_v1(_Persistent, [], _) -> - ok; -verify_read_with_published_v1(Persistent, - [{MsgId, SeqId, _Location, _Props, Persistent}|Read], - [{SeqId, MsgId}|Published]) -> - verify_read_with_published_v1(Persistent, Read, Published); -verify_read_with_published_v1(_Persistent, _Read, _Published) -> - ko. - %% The v2 index does not store the MsgId unless required. %% We therefore do not check it. verify_read_with_published_v2(_Persistent, [], _) -> @@ -872,8 +1004,6 @@ bq_variable_queue_delete_msg_store_files_callback1(Config) -> Count = 30, QTState = publish_and_confirm(Q, Payload, Count), - rabbit_amqqueue:set_ram_duration_target(QPid, 0), - {ok, Limiter} = rabbit_limiter:start_link(no_id), CountMinusOne = Count - 1, @@ -965,17 +1095,16 @@ variable_queue_partial_segments_delta_thing2(VQ0, _QName) -> HalfSegment = SegmentSize div 2, OneAndAHalfSegment = SegmentSize + HalfSegment, VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0), - {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1), + VQ2 = rabbit_variable_queue:update_rates(VQ1), VQ3 = check_variable_queue_status( - variable_queue_set_ram_duration_target(0, VQ2), + VQ2, %% We only have one message in memory because the amount in memory %% depends on the consume rate, which is nil in this test. [{delta, {delta, 1, OneAndAHalfSegment - 1, 0, OneAndAHalfSegment}}, {q3, 1}, {len, OneAndAHalfSegment}]), - VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3), VQ5 = check_variable_queue_status( - variable_queue_publish(true, 1, VQ4), + variable_queue_publish(true, 1, VQ3), %% one alpha, but it's in the same segment as the deltas %% @todo That's wrong now! 
v1/v2 [{delta, {delta, 1, OneAndAHalfSegment, 0, OneAndAHalfSegment + 1}}, @@ -1012,9 +1141,8 @@ variable_queue_all_the_bits_not_covered_elsewhere_A2(VQ0, QName) -> Count = 2 * IndexMod:next_segment_boundary(0), VQ1 = variable_queue_publish(true, Count, VQ0), VQ2 = variable_queue_publish(false, Count, VQ1), - VQ3 = variable_queue_set_ram_duration_target(0, VQ2), {VQ4, _AckTags} = variable_queue_fetch(Count, true, false, - Count + Count, VQ3), + Count + Count, VQ2), {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false, Count, VQ4), _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5), @@ -1022,8 +1150,7 @@ variable_queue_all_the_bits_not_covered_elsewhere_A2(VQ0, QName) -> {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7), Count1 = rabbit_variable_queue:len(VQ8), VQ9 = variable_queue_publish(false, 1, VQ8), - VQ10 = variable_queue_set_ram_duration_target(0, VQ9), - {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10), + {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ9), {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11), VQ12. @@ -1036,8 +1163,7 @@ variable_queue_all_the_bits_not_covered_elsewhere_B1(Config) -> fun variable_queue_all_the_bits_not_covered_elsewhere_B2/2, ?config(variable_queue_type, Config)). -variable_queue_all_the_bits_not_covered_elsewhere_B2(VQ0, QName) -> - VQ1 = variable_queue_set_ram_duration_target(0, VQ0), +variable_queue_all_the_bits_not_covered_elsewhere_B2(VQ1, QName) -> VQ2 = variable_queue_publish(false, 4, VQ1), {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2), {_Guids, VQ4} = @@ -1218,51 +1344,6 @@ variable_queue_dropwhile_sync_restart2(VQ0, QName) -> VQ5. -variable_queue_dropwhile_varying_ram_duration(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, variable_queue_dropwhile_varying_ram_duration1, [Config]). - -variable_queue_dropwhile_varying_ram_duration1(Config) -> - with_fresh_variable_queue( - fun variable_queue_dropwhile_varying_ram_duration2/2, - ?config(variable_queue_type, Config)). - -variable_queue_dropwhile_varying_ram_duration2(VQ0, _QName) -> - test_dropfetchwhile_varying_ram_duration( - fun (VQ1) -> - {_, VQ2} = rabbit_variable_queue:dropwhile( - fun (_) -> false end, VQ1), - VQ2 - end, VQ0). - -variable_queue_fetchwhile_varying_ram_duration(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, variable_queue_fetchwhile_varying_ram_duration1, [Config]). - -variable_queue_fetchwhile_varying_ram_duration1(Config) -> - with_fresh_variable_queue( - fun variable_queue_fetchwhile_varying_ram_duration2/2, - ?config(variable_queue_type, Config)). - -variable_queue_fetchwhile_varying_ram_duration2(VQ0, _QName) -> - test_dropfetchwhile_varying_ram_duration( - fun (VQ1) -> - {_, ok, VQ2} = rabbit_variable_queue:fetchwhile( - fun (_) -> false end, - fun (_, _, A) -> A end, - ok, VQ1), - VQ2 - end, VQ0). - -test_dropfetchwhile_varying_ram_duration(Fun, VQ0) -> - VQ1 = variable_queue_publish(false, 1, VQ0), - VQ2 = variable_queue_set_ram_duration_target(0, VQ1), - VQ3 = Fun(VQ2), - VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3), - VQ5 = variable_queue_publish(false, 1, VQ4), - VQ6 = Fun(VQ5), - VQ6. - variable_queue_ack_limiting(Config) -> passed = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, variable_queue_ack_limiting1, [Config]). 
@@ -1281,8 +1362,8 @@ variable_queue_ack_limiting2(VQ0, _Config) -> Churn = Len div 32, VQ2 = publish_fetch_and_ack(Churn, Len, VQ1), - %% update stats for duration - {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2), + %% update stats + VQ3 = rabbit_variable_queue:update_rates(VQ2), %% fetch half the messages {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3), @@ -1291,9 +1372,7 @@ variable_queue_ack_limiting2(VQ0, _Config) -> %% that's the only predictable stats we got. VQ5 = check_variable_queue_status(VQ4, [{len, Len div 2}]), - VQ6 = variable_queue_set_ram_duration_target(0, VQ5), - - VQ6. + VQ5. variable_queue_purge(Config) -> passed = rabbit_ct_broker_helpers:rpc(Config, 0, @@ -1363,8 +1442,7 @@ variable_queue_requeue_ram_beta2(VQ0, _Config) -> {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1), {Back, Front} = lists:split(Count div 2, AcksR), {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2), - VQ4 = variable_queue_set_ram_duration_target(0, VQ3), - {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4), + {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ3), VQ6 = requeue_one_by_one(Front, VQ5), {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6), {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7), @@ -1403,36 +1481,6 @@ test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) -> Expected = lists:reverse(Acc), %% assertion VQ1. -variable_queue_batch_publish(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, variable_queue_batch_publish1, [Config]). - -variable_queue_batch_publish1(Config) -> - with_fresh_variable_queue( - fun variable_queue_batch_publish2/2, - ?config(variable_queue_type, Config)). - -variable_queue_batch_publish2(VQ, _Config) -> - Count = 10, - VQ1 = variable_queue_batch_publish(true, Count, VQ), - Count = rabbit_variable_queue:len(VQ1), - VQ1. - -variable_queue_batch_publish_delivered(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, variable_queue_batch_publish_delivered1, [Config]). - -variable_queue_batch_publish_delivered1(Config) -> - with_fresh_variable_queue( - fun variable_queue_batch_publish_delivered2/2, - ?config(variable_queue_type, Config)). - -variable_queue_batch_publish_delivered2(VQ, _Config) -> - Count = 10, - VQ1 = variable_queue_batch_publish_delivered(true, Count, VQ), - Count = rabbit_variable_queue:depth(VQ1), - VQ1. - %% same as test_variable_queue_requeue_ram_beta but randomly changing %% the queue mode after every step. 
variable_queue_mode_change(Config) -> @@ -1454,8 +1502,7 @@ variable_queue_mode_change2(VQ0, _Config) -> {Back, Front} = lists:split(Count div 2, AcksR), {_, VQ5} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ4), VQ6 = maybe_switch_queue_mode(VQ5), - VQ7 = variable_queue_set_ram_duration_target(0, VQ6), - VQ8 = maybe_switch_queue_mode(VQ7), + VQ8 = maybe_switch_queue_mode(VQ6), {_, VQ9} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ8), VQ10 = maybe_switch_queue_mode(VQ9), VQ11 = requeue_one_by_one(Front, VQ10), @@ -1609,7 +1656,7 @@ publish_and_confirm(Q, Payload, Count) -> Payload), Content = BMsg#basic_message.content, Ex = BMsg#basic_message.exchange_name, - Msg = mc_amqpl:message(Ex, <<>>, Content), + {ok, Msg} = mc_amqpl:message(Ex, <<>>, Content), Options = #{correlation => Seq}, {ok, Acc, _Actions} = rabbit_queue_type:deliver([Q], Msg, Options, Acc0), @@ -1682,44 +1729,9 @@ variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) -> rabbit_variable_queue:publish( Msg, PropFun(N, #message_properties{size = 10}), - false, self(), noflow, VQN) + false, self(), VQN) end, VQ, lists:seq(Start, Start + Count - 1))). -variable_queue_batch_publish(IsPersistent, Count, VQ) -> - variable_queue_batch_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ). - -variable_queue_batch_publish(IsPersistent, Count, PropFun, VQ) -> - variable_queue_batch_publish(IsPersistent, 1, Count, PropFun, - fun (_N) -> <<>> end, VQ). - -variable_queue_batch_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) -> - variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, - PayloadFun, fun make_publish/4, - fun rabbit_variable_queue:batch_publish/4, - VQ). - -variable_queue_batch_publish_delivered(IsPersistent, Count, VQ) -> - variable_queue_batch_publish_delivered(IsPersistent, Count, fun (_N, P) -> P end, VQ). - -variable_queue_batch_publish_delivered(IsPersistent, Count, PropFun, VQ) -> - variable_queue_batch_publish_delivered(IsPersistent, 1, Count, PropFun, - fun (_N) -> <<>> end, VQ). - -variable_queue_batch_publish_delivered(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) -> - variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, - PayloadFun, fun make_publish_delivered/4, - fun rabbit_variable_queue:batch_publish_delivered/4, - VQ). - -variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, PayloadFun, - MakePubFun, PubFun, VQ) -> - Publishes = - [MakePubFun(IsPersistent, PayloadFun, PropFun, N) - || N <- lists:seq(Start, Start + Count - 1)], - Res = PubFun(Publishes, self(), noflow, VQ), - VQ1 = pub_res(Res), - variable_queue_wait_for_shuffling_end(VQ1). - variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) -> lists:foldl(fun (N, {VQN, AckTagsAcc}) -> Rem = Len - N, @@ -1746,10 +1758,6 @@ assert_props(List, PropVals) -> Error -> error(Error -- [ok]) end. -variable_queue_set_ram_duration_target(Duration, VQ) -> - variable_queue_wait_for_shuffling_end( - rabbit_variable_queue:set_ram_duration_target(Duration, VQ)). 
- publish_fetch_and_ack(0, _Len, VQ0) -> VQ0; publish_fetch_and_ack(N, Len, VQ0) -> @@ -1802,8 +1810,7 @@ variable_queue_with_holes(VQ0) -> VQ1 = variable_queue_publish( false, 1, Count, fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ0), - VQ2 = variable_queue_set_ram_duration_target(0, VQ1), - {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2), + {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1), Acks = lists:reverse(AcksR), AckSeqs = lists:zip(Acks, Seq), [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] = @@ -1815,11 +1822,10 @@ variable_queue_with_holes(VQ0) -> VQ5 = requeue_one_by_one(Subset1, VQ4), %% by now we have some messages (and holes) in delta VQ6 = requeue_one_by_one(Subset2, VQ5), - VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6), %% add the q1 tail VQ8 = variable_queue_publish( true, Count + 1, Interval, - fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7), + fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ6), %% assertions vq_with_holes_assertions(VQ8), Depth = Count + Interval, @@ -1863,4 +1869,5 @@ message(IsPersistent, PayloadFun, N) -> false -> 1 end}, PayloadFun(N)), - mc_amqpl:message(Ex, <<>>, Content, #{id => Id}). + {ok, Msg} = mc_amqpl:message(Ex, <<>>, Content, #{id => Id}), + Msg. diff --git a/deps/rabbit/test/bindings_SUITE.erl b/deps/rabbit/test/bindings_SUITE.erl index 536a1c5f84f4..b80a09eb1afc 100644 --- a/deps/rabbit/test/bindings_SUITE.erl +++ b/deps/rabbit/test/bindings_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(bindings_SUITE). @@ -20,16 +20,18 @@ suite() -> all() -> [ - {group, mnesia_store}, - {group, mnesia_cluster} + % {group, tests}, + {group, khepri_migration}, + {group, cluster} ]. groups() -> [ - {mnesia_store, [], all_tests()}, - {mnesia_cluster, [], [ - transient_queue_on_node_down_mnesia - ]} + % {tests, [], all_tests()}, + {khepri_migration, [], [ + from_mnesia_to_khepri + ]}, + {cluster, [], all_tests()} ]. all_tests() -> @@ -51,7 +53,8 @@ all_tests() -> bind_and_unbind_exchange, bind_and_delete_exchange_source, bind_and_delete_exchange_destination, - bind_to_unknown_exchange + bind_to_unknown_exchange, + transient_queue_on_node_down ]. %% ------------------------------------------------------------------- @@ -65,16 +68,24 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(mnesia_store = Group, Config) -> - init_per_group_common(Group, Config, 1); -init_per_group(mnesia_cluster = Group, Config) -> +% init_per_group(tests = Group, Config) -> +% init_per_group_common(Group, Config, 1); +init_per_group(khepri_migration = Group, Config) -> + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + {khepri, _} -> + {skip, "skip khepri migration test when khepri already configured"}; + mnesia -> + init_per_group_common(Group, Config, 1) + end; +init_per_group(cluster = Group, Config) -> init_per_group_common(Group, Config, 3). 
init_per_group_common(Group, Config, Size) -> Config1 = rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, Size}, {rmq_nodename_suffix, Group}, - {tcp_ports_base}]), + {tcp_ports_base, {skip_n_nodes, Size}} + ]), rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps()). end_per_group(_, Config) -> @@ -107,7 +118,7 @@ end_per_testcase(Testcase, Config) -> %% ------------------------------------------------------------------- bind_and_unbind(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), @@ -115,35 +126,35 @@ bind_and_unbind(Config) -> DefaultExchange = rabbit_misc:r(<<"/">>, exchange, <<>>), QResource = rabbit_misc:r(<<"/">>, queue, Q), DefaultBinding = binding_record(DefaultExchange, QResource, Q, []), - + %% Binding to the default exchange, it's always present ?assertEqual([DefaultBinding], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), - + %% Let's bind to other exchange #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, queue = Q, routing_key = Q}), - + DirectBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), QResource, Q, []), Bindings = lists:sort([DefaultBinding, DirectBinding]), - + ?assertEqual(Bindings, lists:sort( rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>]))), - + #'queue.unbind_ok'{} = amqp_channel:call(Ch, #'queue.unbind'{exchange = <<"amq.direct">>, queue = Q, routing_key = Q}), - + ?assertEqual([DefaultBinding], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), ok. bind_and_delete(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), @@ -151,34 +162,34 @@ bind_and_delete(Config) -> DefaultExchange = rabbit_misc:r(<<"/">>, exchange, <<>>), QResource = rabbit_misc:r(<<"/">>, queue, Q), DefaultBinding = binding_record(DefaultExchange, QResource, Q, []), - + %% Binding to the default exchange, it's always present ?assertEqual([DefaultBinding], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), - + %% Let's bind to other exchange #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, queue = Q, routing_key = Q}), - + DirectBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), QResource, Q, []), Bindings = lists:sort([DefaultBinding, DirectBinding]), - + ?assertEqual(Bindings, lists:sort( rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>]))), - + ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch, #'queue.delete'{queue = Q})), - + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), ok. 
bind_and_delete_source_exchange(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), Q = ?config(queue_name, Config), X = ?config(exchange_name, Config), @@ -188,26 +199,26 @@ bind_and_delete_source_exchange(Config) -> DefaultExchange = rabbit_misc:r(<<"/">>, exchange, <<>>), QResource = rabbit_misc:r(<<"/">>, queue, Q), DefaultBinding = binding_record(DefaultExchange, QResource, Q, []), - + %% Binding to the default exchange, it's always present ?assertEqual([DefaultBinding], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), - + %% Let's bind to other exchange #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = X, queue = Q, routing_key = Q}), - + XBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, X), QResource, Q, []), Bindings = lists:sort([DefaultBinding, XBinding]), - + ?assertEqual(Bindings, lists:sort( rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>]))), - + ?assertMatch(#'exchange.delete_ok'{}, amqp_channel:call(Ch, #'exchange.delete'{exchange = X})), - + ?assertEqual([DefaultBinding], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), ok. @@ -226,7 +237,7 @@ list_bindings(Config) -> %% Binding to the default exchange, it's always present ?assertEqual([DefaultBinding], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), - + %% Let's bind to all other exchanges #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, queue = Q, @@ -246,7 +257,7 @@ list_bindings(Config) -> #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.topic">>, queue = Q, routing_key = Q}), - + DirectBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), QResource, Q, []), FanoutBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.fanout">>), @@ -261,7 +272,7 @@ list_bindings(Config) -> QResource, Q, []), Bindings = lists:sort([DefaultBinding, DirectBinding, FanoutBinding, HeadersBinding, MatchBinding, TraceBinding, TopicBinding]), - + ?assertEqual(Bindings, lists:sort( rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>]))), @@ -276,10 +287,10 @@ list_for_source(Config) -> QAlt = ?config(alt_queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), ?assertEqual({'queue.declare_ok', QAlt, 0, 0}, declare(Ch, QAlt, [])), - + QResource = rabbit_misc:r(<<"/">>, queue, Q), QAltResource = rabbit_misc:r(<<"/">>, queue, QAlt), - + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, queue = Q, routing_key = Q}), @@ -301,7 +312,7 @@ list_for_source(Config) -> TopicABinding = binding_record(TopicExchange, QAltResource, QAlt, []), DirectBindings = lists:sort([DirectBinding, DirectABinding]), TopicBindings = lists:sort([TopicBinding, TopicABinding]), - + ?assertEqual( DirectBindings, lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list_for_source, @@ -309,7 +320,7 @@ list_for_source(Config) -> ?assertEqual( TopicBindings, lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list_for_source, - [TopicExchange]))). + [TopicExchange]))). 
list_with_multiple_vhosts(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), @@ -334,13 +345,13 @@ list_with_multiple_vhosts(Config) -> ?assertEqual({'queue.declare_ok', QAlt, 0, 0}, declare(Ch1, QAlt, [])), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch2, Q, [])), ?assertEqual({'queue.declare_ok', QAlt, 0, 0}, declare(Ch2, QAlt, [])), - + QResource = rabbit_misc:r(<<"/">>, queue, Q), QAltResource = rabbit_misc:r(<<"/">>, queue, QAlt), QAltResource1 = rabbit_misc:r(VHost1, queue, QAlt), QResource2 = rabbit_misc:r(VHost2, queue, Q), QAltResource2 = rabbit_misc:r(VHost2, queue, QAlt), - + %% Default vhost: %% direct - queue %% topic - altqueue @@ -405,6 +416,12 @@ list_with_multiple_vhosts(Config) -> [QAltResource2]))). list_with_multiple_arguments(Config) -> + %% Bindings are made of source, destination, routing key and arguments. + %% Arguments are difficult to use on khepri paths and also are not relevant to any + %% existing query. Thus, internally the bindings in Khepri are indexed using + %% source, destination and key. Each entry on Khepri contains a set of bindings. + %% For the `rabbit_binding` API nothing has changed, let's test here listing outputs + %% with multiple arguments for the same source, destination and routing key. Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), @@ -414,11 +431,11 @@ list_with_multiple_arguments(Config) -> DefaultExchange = rabbit_misc:r(<<"/">>, exchange, <<>>), QResource = rabbit_misc:r(<<"/">>, queue, Q), DefaultBinding = binding_record(DefaultExchange, QResource, Q, []), - + %% Binding to the default exchange, it's always present ?assertEqual([DefaultBinding], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), - + %% Let's bind with multiple arguments #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.headers">>, queue = Q, @@ -428,7 +445,7 @@ list_with_multiple_arguments(Config) -> queue = Q, routing_key = Q, arguments = [{<<"x-match">>, longstr, <<"any">>}]}), - + AllBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.headers">>), QResource, Q, [{<<"x-match">>, longstr, <<"all">>}]), AnyBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.headers">>), @@ -449,10 +466,10 @@ list_for_destination(Config) -> QAlt = ?config(alt_queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), ?assertEqual({'queue.declare_ok', QAlt, 0, 0}, declare(Ch, QAlt, [])), - + QResource = rabbit_misc:r(<<"/">>, queue, Q), QAltResource = rabbit_misc:r(<<"/">>, queue, QAlt), - + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, queue = Q, routing_key = Q}), @@ -478,7 +495,7 @@ list_for_destination(Config) -> Bindings = lists:sort([DefaultBinding, DirectBinding, TopicBinding]), AltBindings = lists:sort([DefaultABinding, DirectABinding, TopicABinding]), - + ?assertEqual( Bindings, lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list_for_destination, @@ -496,10 +513,10 @@ list_for_source_and_destination(Config) -> QAlt = ?config(alt_queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), ?assertEqual({'queue.declare_ok', QAlt, 0, 0}, declare(Ch, QAlt, [])), - + QResource = rabbit_misc:r(<<"/">>, queue, Q), QAltResource = rabbit_misc:r(<<"/">>, queue, QAlt), - + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, queue = Q, routing_key = 
Q}), @@ -519,7 +536,7 @@ list_for_source_and_destination(Config) -> DirectBinding = binding_record(DirectExchange, QResource, Q, []), TopicBinding = binding_record(TopicExchange, QResource, Q, []), DefaultABinding = binding_record(DefaultExchange, QAltResource, QAlt, []), - + ?assertEqual( [DirectBinding], lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, @@ -544,10 +561,10 @@ list_for_source_and_destination(Config) -> ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), QResource = rabbit_misc:r(<<"/">>, queue, Q), - + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list_explicit, [])), - + %% Let's bind to other exchanges #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, queue = Q, @@ -555,22 +572,22 @@ list_for_source_and_destination(Config) -> #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.fanout">>, queue = Q, routing_key = Q}), - + DirectBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), QResource, Q, []), FanoutBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.fanout">>), QResource, Q, []), Bindings = lists:sort([DirectBinding, FanoutBinding]), - + ?assertEqual(Bindings, lists:sort( rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list_explicit, []))), - + ok. info_all(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), @@ -593,7 +610,7 @@ info_all(Config) -> {routing_key,<<"info_all">>}, {arguments,[]}, {vhost,<<"/">>}], - + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, queue = Q, routing_key = Q}), @@ -602,9 +619,69 @@ info_all(Config) -> ?assertEqual(Infos, lists:sort( rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, info_all, [<<"/">>]))), - + ok. 
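The comment added to list_with_multiple_arguments above explains the data model this suite is really probing: in Khepri, bindings are indexed by source, destination and routing key only, and every argument variant for that triple lives in one set stored at that path. The toy model below illustrates the grouping with plain maps and ordsets; it is not the actual rabbit_db_binding schema, just a restatement of the idea.

%% Toy model: group bindings by {Source, Destination, RoutingKey} so that
%% variants differing only in arguments share a single entry.
add_binding(Index, {Src, Dst, Key, _Args} = Binding) ->
    maps:update_with({Src, Dst, Key},
                     fun(Set) -> ordsets:add_element(Binding, Set) end,
                     ordsets:from_list([Binding]),
                     Index).

%% The two amq.headers bindings from the test differ only in x-match,
%% so they end up in one entry containing two bindings:
%% I1 = add_binding(#{}, {<<"amq.headers">>, Q, Q, [{<<"x-match">>, longstr, <<"all">>}]}),
%% I2 = add_binding(I1, {<<"amq.headers">>, Q, Q, [{<<"x-match">>, longstr, <<"any">>}]}),
%% 1 = maps:size(I2),
%% 2 = ordsets:size(maps:get({<<"amq.headers">>, Q, Q}, I2)).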
+from_mnesia_to_khepri(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + Q = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), + AltQ = ?config(alt_queue_name, Config), + ?assertEqual({'queue.declare_ok', AltQ, 0, 0}, declare(Ch, AltQ, [], false)), + + %% Combine durable and transient queues and exchanges to test the migration of durable, + %% semi-durable and transient bindings + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, + queue = Q, + routing_key = Q}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, + queue = AltQ, + routing_key = AltQ}), + + X = ?config(exchange_name, Config), + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = X, + durable = false}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = X, + queue = Q, + routing_key = Q}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = X, + queue = AltQ, + routing_key = AltQ}), + + + DefaultExchange = rabbit_misc:r(<<"/">>, exchange, <<>>), + QResource = rabbit_misc:r(<<"/">>, queue, Q), + AltQResource = rabbit_misc:r(<<"/">>, queue, AltQ), + DefaultBinding = binding_record(DefaultExchange, QResource, Q, []), + DirectBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), + QResource, Q, []), + AltDefaultBinding = binding_record(DefaultExchange, AltQResource, AltQ, []), + AltDirectBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), + AltQResource, AltQ, []), + XBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, X), QResource, Q, []), + AltXBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, X), + AltQResource, AltQ, []), + Bindings = lists:sort([DefaultBinding, DirectBinding, AltDefaultBinding, AltDirectBinding, + XBinding, AltXBinding]), + + ?assertEqual(Bindings, + lists:sort( + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>]))), + + case rabbit_ct_broker_helpers:enable_feature_flag(Config, khepri_db) of + ok -> + rabbit_ct_helpers:await_condition( + fun() -> + Bindings == + lists:sort( + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])) + end); + Skip -> + Skip + end. 
+ bind_to_unknown_queue(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), @@ -622,32 +699,32 @@ bind_to_unknown_queue(Config) -> bind_and_unbind_exchange(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), X = ?config(exchange_name, Config), - + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), - + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = X}), %% Let's bind to other exchange #'exchange.bind_ok'{} = amqp_channel:call(Ch, #'exchange.bind'{destination = X, source = <<"amq.direct">>, routing_key = <<"key">>}), - + DirectBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), rabbit_misc:r(<<"/">>, exchange, X), <<"key">>, []), - + ?assertEqual([DirectBinding], lists:sort( rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>]))), - + #'exchange.unbind_ok'{} = amqp_channel:call(Ch, #'exchange.unbind'{destination = X, source = <<"amq.direct">>, routing_key = <<"key">>}), - + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), ok. @@ -672,63 +749,63 @@ bind_to_unknown_exchange(Config) -> bind_and_delete_exchange_destination(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), X = ?config(exchange_name, Config), - + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), - + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = X}), %% Let's bind to other exchange #'exchange.bind_ok'{} = amqp_channel:call(Ch, #'exchange.bind'{destination = X, source = <<"amq.direct">>, routing_key = <<"key">>}), - + DirectBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), rabbit_misc:r(<<"/">>, exchange, X), <<"key">>, []), - + ?assertEqual([DirectBinding], lists:sort( rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>]))), - + #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = X}), - + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), ok. bind_and_delete_exchange_source(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), X = ?config(exchange_name, Config), - + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), - + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = X}), %% Let's bind to other exchange #'exchange.bind_ok'{} = amqp_channel:call(Ch, #'exchange.bind'{destination = <<"amq.direct">>, source = X, routing_key = <<"key">>}), - + DirectBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, X), rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), <<"key">>, []), - + ?assertEqual([DirectBinding], lists:sort( rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>]))), - + #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = X}), - + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), ok. 
-transient_queue_on_node_down_mnesia(Config) -> +transient_queue_on_node_down(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), @@ -759,27 +836,33 @@ transient_queue_on_node_down_mnesia(Config) -> QResource, Q, []), DirectAltBinding = binding_record(rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), QAltResource, QAlt, []), - Bindings = lists:sort([DefaultBinding, DirectBinding, DefaultAltBinding, DirectAltBinding]), - ?assertEqual(Bindings, - lists:sort( - rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_binding, list, [<<"/">>]))), + Bindings1 = lists:sort([DefaultBinding, DirectBinding, DefaultAltBinding, DirectAltBinding]), + ?awaitMatch(Bindings1, + lists:sort( + rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_binding, list, [<<"/">>])), + 30000), - rabbit_ct_broker_helpers:stop_node(Config, Server), - Bindings1 = lists:sort([DefaultBinding, DirectBinding]), - ?assertEqual([DirectBinding], - lists:sort(rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_binding, list, [<<"/">>]))), - ?assertMatch([], - rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_amqqueue, list, [<<"/">>])), + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Server)), - rabbit_ct_broker_helpers:start_node(Config, Server), + ?awaitMatch([DirectBinding], + lists:sort( + rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_binding, list, [<<"/">>])), + 30000), + ?awaitMatch([], + rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_amqqueue, list, [<<"/">>]), + 30000), - ?awaitMatch(Bindings1, + ?assertEqual(ok, rabbit_control_helper:command(start_app, Server)), + + Bindings2 = lists:sort([DefaultBinding, DirectBinding]), + ?awaitMatch(Bindings2, lists:sort( rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_binding, list, [<<"/">>])), 30000), - ?awaitMatch([_], rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_amqqueue, list, [<<"/">>]), + ?awaitMatch([_], + rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_amqqueue, list, [<<"/">>]), 30000), ok. @@ -790,7 +873,8 @@ delete_queues() -> || Q <- rabbit_amqqueue:list()]. delete_exchange(Name) -> - _ = rabbit_exchange:delete(rabbit_misc:r(<<"/">>, exchange, Name), false, <<"dummy">>). + ok = rabbit_exchange:ensure_deleted( + rabbit_misc:r(<<"/">>, exchange, Name), false, <<"dummy">>). declare(Ch, Q, Args) -> declare(Ch, Q, Args, true). diff --git a/deps/rabbit/test/channel_interceptor_SUITE.erl b/deps/rabbit/test/channel_interceptor_SUITE.erl index dfda16aeb210..2a62a388cbf3 100644 --- a/deps/rabbit/test/channel_interceptor_SUITE.erl +++ b/deps/rabbit/test/channel_interceptor_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(channel_interceptor_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -23,6 +22,7 @@ groups() -> {non_parallel_tests, [], [ register_interceptor, register_interceptor_failing_with_amqp_error, + register_interceptor_crashing_with_amqp_error_exception, register_failing_interceptors ]} ]. 
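For orientation, the way the interceptor tests switch interceptors on and off at runtime can be summarised with the calls already used in the suite; the registered module (dummy_interceptor here) must implement RabbitMQ's channel interceptor behaviour, which is not shown in this diff.

%% Register an interceptor so that open and newly opened channels pick it
%% up, and unregister it again; names taken from the suite above.
enable_dummy_interceptor(Interceptor) ->
    ok = rabbit_registry:register(channel_interceptor,
                                  <<"dummy interceptor">>, Interceptor).

disable_dummy_interceptor() ->
    ok = rabbit_registry:unregister(channel_interceptor,
                                    <<"dummy interceptor">>).

%% A channel reports the interceptors that currently apply to it:
%% [{interceptors, Interceptors}] = rabbit_channel:info(ChannelPid, [interceptors]).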
@@ -120,7 +120,7 @@ register_interceptor_failing_with_amqp_error1(Config, Interceptor) -> #'queue.declare_ok'{} = amqp_channel:call(Ch1, #'queue.declare'{queue = Q1}), - Q2 = <<"failing-q">>, + Q2 = <<"failing-with-amqp-error-q">>, try amqp_channel:call(Ch1, #'queue.declare'{queue = Q2}) catch @@ -145,6 +145,55 @@ register_interceptor_failing_with_amqp_error1(Config, Interceptor) -> passed. +register_interceptor_crashing_with_amqp_error_exception(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, register_interceptor_crashing_with_amqp_error_exception1, + [Config, dummy_interceptor]). + +register_interceptor_crashing_with_amqp_error_exception1(Config, Interceptor) -> + PredefinedChannels = rabbit_channel:list(), + + Ch1 = rabbit_ct_client_helpers:open_channel(Config, 0), + + [ChannelProc] = rabbit_channel:list() -- PredefinedChannels, + + [{interceptors, []}] = rabbit_channel:info(ChannelProc, [interceptors]), + + ok = rabbit_registry:register(channel_interceptor, + <<"dummy interceptor">>, + Interceptor), + [{interceptors, [{Interceptor, undefined}]}] = + rabbit_channel:info(ChannelProc, [interceptors]), + + Q1 = <<"succeeding-q">>, + #'queue.declare_ok'{} = + amqp_channel:call(Ch1, #'queue.declare'{queue = Q1}), + + Q2 = <<"crashing-with-amqp-exception-q">>, + try + amqp_channel:call(Ch1, #'queue.declare'{queue = Q2}) + catch + _:Reason -> + ?assertMatch( + {{shutdown, {_, _, <<"PRECONDITION_FAILED - inequivalent arg 'durable' for queue 'crashing-with-amqp-exception-q' in vhost '/': received 'false' but current is 'true'">>}}, _}, + Reason) + end, + + Ch2 = rabbit_ct_client_helpers:open_channel(Config, 0), + [ChannelProc1] = rabbit_channel:list() -- PredefinedChannels, + + ok = rabbit_registry:unregister(channel_interceptor, + <<"dummy interceptor">>), + [{interceptors, []}] = rabbit_channel:info(ChannelProc1, [interceptors]), + + #'queue.declare_ok'{} = + amqp_channel:call(Ch2, #'queue.declare'{queue = Q2}), + + #'queue.delete_ok'{} = amqp_channel:call(Ch2, #'queue.delete' {queue = Q1}), + #'queue.delete_ok'{} = amqp_channel:call(Ch2, #'queue.delete' {queue = Q2}), + + passed. + register_failing_interceptors(Config) -> passed = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, register_interceptor1, [Config, failing_dummy_interceptor]). diff --git a/deps/rabbit/test/channel_operation_timeout_SUITE.erl b/deps/rabbit/test/channel_operation_timeout_SUITE.erl index 3fe12b013996..7772ea1df830 100644 --- a/deps/rabbit/test/channel_operation_timeout_SUITE.erl +++ b/deps/rabbit/test/channel_operation_timeout_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(channel_operation_timeout_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include("amqqueue.hrl"). diff --git a/deps/rabbit/test/channel_operation_timeout_test_queue.erl b/deps/rabbit/test/channel_operation_timeout_test_queue.erl index 1bae6ca25747..9756bd448d64 100644 --- a/deps/rabbit/test/channel_operation_timeout_test_queue.erl +++ b/deps/rabbit/test/channel_operation_timeout_test_queue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @todo This module also needs to be updated when variable queue changes. @@ -10,12 +10,11 @@ -export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1, purge/1, purge_acks/1, - publish/6, publish_delivered/5, - batch_publish/4, batch_publish_delivered/4, - discard/4, drain_confirmed/1, + publish/5, publish_delivered/4, + discard/3, drain_confirmed/1, dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3, len/1, is_empty/1, depth/1, - set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1, + update_rates/1, needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1, msg_rates/1, info/2, invoke/3, is_duplicate/2, set_queue_mode/2, set_queue_version/2, @@ -119,8 +118,6 @@ -include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbit_common/include/rabbit_framing.hrl"). - -define(QUEUE, lqueue). -define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>). @@ -226,19 +223,13 @@ purge(State) -> purge_acks(State) -> rabbit_variable_queue:purge_acks(State). -publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) -> - rabbit_variable_queue:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State). - -batch_publish(Publishes, ChPid, Flow, State) -> - rabbit_variable_queue:batch_publish(Publishes, ChPid, Flow, State). +publish(Msg, MsgProps, IsDelivered, ChPid, State) -> + rabbit_variable_queue:publish(Msg, MsgProps, IsDelivered, ChPid, State). -publish_delivered(Msg, MsgProps, ChPid, Flow, State) -> - rabbit_variable_queue:publish_delivered(Msg, MsgProps, ChPid, Flow, State). +publish_delivered(Msg, MsgProps, ChPid, State) -> + rabbit_variable_queue:publish_delivered(Msg, MsgProps, ChPid, State). -batch_publish_delivered(Publishes, ChPid, Flow, State) -> - rabbit_variable_queue:batch_publish_delivered(Publishes, ChPid, Flow, State). - -discard(_MsgId, _ChPid, _Flow, State) -> State. +discard(_MsgId, _ChPid, State) -> State. drain_confirmed(State) -> rabbit_variable_queue:drain_confirmed(State). @@ -287,11 +278,8 @@ is_empty(State) -> 0 == len(State). depth(State) -> rabbit_variable_queue:depth(State). -set_ram_duration_target(DurationTarget, State) -> - rabbit_variable_queue:set_ram_duration_target(DurationTarget, State). - -ram_duration(State) -> - rabbit_variable_queue:ram_duration(State). +update_rates(State) -> + rabbit_variable_queue:update_rates(State). needs_timeout(State) -> rabbit_variable_queue:needs_timeout(State). diff --git a/deps/rabbit/test/classic_queue_SUITE.erl b/deps/rabbit/test/classic_queue_SUITE.erl new file mode 100644 index 000000000000..5b54d7150fb0 --- /dev/null +++ b/deps/rabbit/test/classic_queue_SUITE.erl @@ -0,0 +1,135 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +-module(classic_queue_SUITE). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile([nowarn_export_all, export_all]). 
+
+-import(rabbit_ct_broker_helpers,
+        [get_node_config/3,
+         rpc/4,
+         rpc/5]).
+
+all() ->
+    [
+     {group, cluster_size_3}
+    ].
+
+groups() ->
+    [
+     {cluster_size_3, [], [
+                           leader_locator_client_local,
+                           leader_locator_balanced,
+                           locator_deprecated
+                          ]
+     }].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config, []).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config,
+                                           [
+                                            {rmq_nodename_suffix, Group},
+                                            {rmq_nodes_count, 3},
+                                            {rmq_nodes_clustered, true},
+                                            {tcp_ports_base, {skip_n_nodes, 3}}
+                                           ]),
+    Config2 = rabbit_ct_helpers:run_steps(
+                Config1,
+                rabbit_ct_broker_helpers:setup_steps() ++
+                rabbit_ct_client_helpers:setup_steps()),
+    Config2.
+
+end_per_group(_, Config) ->
+    rabbit_ct_helpers:run_steps(Config,
+                                rabbit_ct_client_helpers:teardown_steps() ++
+                                rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(T, Config) ->
+    case rabbit_ct_broker_helpers:enable_feature_flag(Config, 'rabbitmq_4.0.0') of
+        ok ->
+            rabbit_ct_helpers:testcase_started(Config, T);
+        Skip ->
+            Skip
+    end.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+leader_locator_client_local(Config) ->
+    Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    Q = <<"q1">>,
+
+    [begin
+         Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+         ?assertEqual({'queue.declare_ok', Q, 0, 0},
+                      declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"classic">>},
+                                      {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+         {ok, Leader0} = rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_amqqueue, lookup, [rabbit_misc:r(<<"/">>, queue, Q)]),
+         Leader = amqqueue:qnode(Leader0),
+         ?assertEqual(Server, Leader),
+         ?assertMatch(#'queue.delete_ok'{},
+                      amqp_channel:call(Ch, #'queue.delete'{queue = Q}))
+     end || Server <- Servers].
+
+leader_locator_balanced(Config) ->
+    test_leader_locator(Config, <<"x-queue-leader-locator">>, [<<"balanced">>]).
+
+%% This test can be deleted once we remove x-queue-master-locator support.
+locator_deprecated(Config) ->
+    test_leader_locator(Config, <<"x-queue-master-locator">>, [<<"least-leaders">>,
+                                                               <<"random">>,
+                                                               <<"min-masters">>]).
+
+test_leader_locator(Config, Argument, Strategies) ->
+    Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+    Qs = [<<"q1">>, <<"q2">>, <<"q3">>],
+
+    [begin
+         Leaders = [begin
+                        ?assertMatch({'queue.declare_ok', Q, 0, 0},
+                                     declare(Ch, Q,
+                                             [{<<"x-queue-type">>, longstr, <<"classic">>},
+                                              {Argument, longstr, Strategy}])),
+
+                        {ok, Leader0} = rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_amqqueue, lookup, [rabbit_misc:r(<<"/">>, queue, Q)]),
+                        Leader = amqqueue:qnode(Leader0),
+                        Leader
+                    end || Q <- Qs],
+         ?assertEqual(3, sets:size(sets:from_list(Leaders))),
+
+         [?assertMatch(#'queue.delete_ok'{},
+                       amqp_channel:call(Ch, #'queue.delete'{queue = Q}))
+          || Q <- Qs]
+     end || Strategy <- Strategies ].
+
+declare(Ch, Q) ->
+    declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+    amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+                                           durable = true,
+                                           auto_delete = false,
+                                           arguments = Args}).
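As an aside on the leader locator tests above: the same placement behaviour can often be driven by an operator policy instead of a per-declare argument. The sketch below is illustrative only and is not part of this patch; it assumes the queue-leader-locator policy key is honoured for classic queues on the broker version under test, and it reuses the set_policy/6 and clear_policy/3 helpers that other suites in this changeset call.

    %% Illustrative sketch, not part of the patch: set leader placement
    %% for matching queues via a vhost policy rather than passing
    %% x-queue-leader-locator on every queue.declare. The policy key is
    %% an assumption for classic queues on the version under test.
    set_balanced_locator_policy(Config) ->
        ok = rabbit_ct_broker_helpers:set_policy(
               Config, 0, <<"cq-leader-locator">>, <<"^q.*">>, <<"queues">>,
               [{<<"queue-leader-locator">>, <<"balanced">>}]).

Clearing it afterwards with rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"cq-leader-locator">>) would keep later cases unaffected.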
+ +delete_queues() -> + [rabbit_amqqueue:delete(Q, false, false, <<"dummy">>) + || Q <- rabbit_amqqueue:list()]. + diff --git a/deps/rabbit/test/classic_queue_prop_SUITE.erl b/deps/rabbit/test/classic_queue_prop_SUITE.erl index 1f45b4a8b075..4cff9e0ec67e 100644 --- a/deps/rabbit/test/classic_queue_prop_SUITE.erl +++ b/deps/rabbit/test/classic_queue_prop_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(classic_queue_prop_SUITE). @@ -23,7 +23,6 @@ -record(cq, { amq = undefined :: amqqueue:amqqueue(), name :: atom(), - version :: 1 | 2, %% We have one queue per way of publishing messages (such as channels). %% We can only confirm the publish order on a per-channel level because @@ -73,19 +72,12 @@ %% Common Test. all() -> - [{group, classic_queue_tests}, {group, classic_queue_regressions}]. + [{group, classic_queue_tests}]. groups() -> [{classic_queue_tests, [], [ % manual%, - classic_queue_v1, classic_queue_v2 - ]}, - {classic_queue_regressions, [], [ - reg_v1_full_recover_only_journal, - reg_v1_no_del_jif, - reg_v1_no_del_idx, - reg_v1_no_del_idx_unclean ]} ]. @@ -136,10 +128,10 @@ instrs_to_manual([Instrs]) -> io:format("~ndo_manual(Config) ->~n~n"), lists:foreach(fun ({init, CQ}) -> - #cq{name=Name, version=Version} = CQ, - io:format(" St0 = #cq{name=~0p, version=~0p,~n" + #cq{name=Name} = CQ, + io:format(" St0 = #cq{name=~0p,~n" " config=minimal_config(Config)},~n~n", - [Name, Version]); + [Name]); ({set, {var,Var}, {call, ?MODULE, cmd_setup_queue, _}}) -> Res = "Res" ++ integer_to_list(Var), PrevSt = "St" ++ integer_to_list(Var - 1), @@ -197,15 +189,6 @@ manual(Config) -> do_manual(Config) -> Config =:= Config. -classic_queue_v1(Config) -> - true = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, do_classic_queue_v1, [Config]). - -do_classic_queue_v1(Config) -> - true = proper:quickcheck(prop_classic_queue_v1(Config), - [{on_output, on_output_fun()}, - {numtests, ?NUM_TESTS}]). - classic_queue_v2(Config) -> true = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, do_classic_queue_v2, [Config]). @@ -225,16 +208,11 @@ on_output_fun() -> %% Properties. -prop_classic_queue_v1(Config) -> - {ok, LimiterPid} = rabbit_limiter:start_link(no_id), - InitialState = #cq{name=?FUNCTION_NAME, version=1, - config=minimal_config(Config), limiter=LimiterPid}, - prop_common(InitialState). - prop_classic_queue_v2(Config) -> {ok, LimiterPid} = rabbit_limiter:start_link(no_id), - InitialState = #cq{name=?FUNCTION_NAME, version=2, - config=minimal_config(Config), limiter=LimiterPid}, + InitialState = #cq{name=?FUNCTION_NAME, + config=minimal_config(Config), + limiter=LimiterPid}, prop_common(InitialState). 
prop_common(InitialState) -> @@ -343,8 +321,8 @@ next_state(St=#cq{q=Q0, confirmed=Confirmed, uncertain=Uncertain0}, AMQ, {call, St#cq{amq=AMQ, q=Q, restarted=true, crashed=true, uncertain=Uncertain}; next_state(St, _, {call, _, cmd_set_v2_check_crc32, _}) -> St; -next_state(St, _, {call, _, cmd_set_version, [Version]}) -> - St#cq{version=Version}; +next_state(St, _, {call, _, cmd_set_version, _}) -> + St; next_state(St=#cq{q=Q}, Msg, {call, _, cmd_publish_msg, _}) -> IntQ = maps:get(internal, Q, queue:new()), St#cq{q=Q#{internal => queue:in(Msg, IntQ)}}; @@ -530,8 +508,10 @@ postcondition(_, {call, _, Cmd, _}, Q) when element(1, Q) =:= amqqueue; postcondition(_, {call, _, cmd_set_v2_check_crc32, _}, Res) -> Res =:= ok; -postcondition(#cq{amq=AMQ}, {call, _, cmd_set_version, [Version]}, _) -> - do_check_queue_version(AMQ, Version) =:= ok; +postcondition(#cq{amq=AMQ}, {call, _, cmd_set_version, _}, _) -> + %% We cannot use CQv1 anymore so we always + %% expect the queue to use v2. + do_check_queue_version(AMQ, 2) =:= ok; postcondition(_, {call, _, cmd_publish_msg, _}, Msg) -> is_record(Msg, amqp_msg); postcondition(_, {call, _, cmd_purge, _}, Res) -> @@ -698,21 +678,16 @@ crashed_and_previously_received(#cq{crashed=Crashed, received=Received}, Msg) -> %% Helpers. -cmd_setup_queue(St=#cq{name=Name, version=Version}) -> +cmd_setup_queue(St=#cq{name=Name}) -> ?DEBUG("~0p", [St]), IsDurable = true, %% We want to be able to restart the queue process. IsAutoDelete = false, - %% We cannot use args to set the version as the arguments override - %% the policies and we also want to test policy changes. - cmd_set_version(Version), - Args = [ -% {<<"x-queue-version">>, long, Version} - ], + Args = [], QName = rabbit_misc:r(<<"/">>, queue, iolist_to_binary([atom_to_binary(Name, utf8), $_, integer_to_binary(erlang:unique_integer([positive]))])), {new, AMQ} = rabbit_amqqueue:declare(QName, IsDurable, IsAutoDelete, Args, none, <<"acting-user">>), - %% We check that the queue was creating with the right version. - ok = do_check_queue_version(AMQ, Version), + %% We check that the queue was created with the right version. + ok = do_check_queue_version(AMQ, 2), AMQ. cmd_teardown_queue(St=#cq{amq=undefined}) -> @@ -770,7 +745,7 @@ do_wait_updated_amqqueue(Name, Pid) -> end. cmd_set_v2_check_crc32(Value) -> - application:set_env(rabbit, classic_queue_store_v2_check_crc32, Value). + persistent_term:put(classic_queue_store_v2_check_crc32, Value). cmd_set_version(Version) -> ?DEBUG("~0p ~0p", [Version]), @@ -788,7 +763,7 @@ do_check_queue_version(AMQ, Version, N) -> timer:sleep(1), [{backing_queue_status, Status}] = rabbit_amqqueue:info(AMQ, [backing_queue_status]), case proplists:get_value(version, Status) of - Version -> ok; + 2 -> ok; _ -> do_check_queue_version(AMQ, Version, N - 1) end. @@ -801,7 +776,7 @@ cmd_publish_msg(St=#cq{amq=AMQ}, PayloadSize, DeliveryMode, Mandatory, Expiratio expiration = do_encode_expiration(Expiration)}, Payload), - Msg0 = mc_amqpl:message(Ex, <<>>, BasicMsg#basic_message.content), + {ok, Msg0} = mc_amqpl:message(Ex, <<>>, BasicMsg#basic_message.content), Msg = mc:set_annotation(id, BasicMsg#basic_message.id, Msg0), {ok, _, _} = rabbit_queue_type:deliver([AMQ], Msg, #{}, stateless), Content = mc:protocol_state(Msg), @@ -1098,243 +1073,6 @@ queue_fold(Fun, Acc0, {R, F}) when is_function(Fun, 2), is_list(R), is_list(F) - queue_fold(Fun, Acc0, Q) -> erlang:error(badarg, [Fun, Acc0, Q]). -%% Regression tests. 
-%% -%% These tests are hard to reproduce by running the test suite normally -%% because they require a very specific sequence of events. - -reg_v1_full_recover_only_journal(Config) -> - true = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, do_reg_v1_full_recover_only_journal, [Config]). - -do_reg_v1_full_recover_only_journal(Config) -> - - St0 = #cq{name=prop_classic_queue_v1, version=1, - config=minimal_config(Config)}, - - Res1 = cmd_setup_queue(St0), - St3 = St0#cq{amq=Res1}, - - Res4 = cmd_channel_open(St3), - true = postcondition(St3, {call, undefined, cmd_channel_open, [St3]}, Res4), - St7 = next_state(St3, Res4, {call, undefined, cmd_channel_open, [St3]}), - - Res8 = cmd_restart_queue_dirty(St7), - true = postcondition(St7, {call, undefined, cmd_restart_queue_dirty, [St7]}, Res8), - St11 = next_state(St7, Res8, {call, undefined, cmd_restart_queue_dirty, [St7]}), - - Res12 = cmd_channel_publish_many(St11, Res4, 117, 4541, 2, true, undefined), - true = postcondition(St11, {call, undefined, cmd_channel_publish_many, [St11, Res4, 117, 4541, 2, true, undefined]}, Res12), - St14 = next_state(St11, Res12, {call, undefined, cmd_channel_publish_many, [St11, Res4, 117, 4541, 2, true, undefined]}), - - Res15 = cmd_restart_vhost_clean(St14), - true = postcondition(St14, {call, undefined, cmd_restart_vhost_clean, [St14]}, Res15), - St15 = next_state(St14, Res15, {call, undefined, cmd_restart_vhost_clean, [St14]}), - - cmd_teardown_queue(St15), - - true. - -%% The following reg_v1_no_del_* cases test when a classic queue has a -%% published message before an upgrade to 3.10. In that case there is -%% no delivery marker in the v1 queue index. - -%% After upgrade to 3.10 there is a published message in the journal file. -%% Consuming and acknowledging the message should work fine. -reg_v1_no_del_jif(Config) -> - try - true = rabbit_ct_broker_helpers:rpc( - Config, 0, ?MODULE, do_reg_v1_no_del_jif, [Config]) - catch exit:{exception, Reason} -> - exit(Reason) - end. - -do_reg_v1_no_del_jif(Config) -> - St0 = #cq{name=prop_classic_queue_v1, version=1, - config=minimal_config(Config)}, - - Res1 = cmd_setup_queue(St0), - St3 = St0#cq{amq=Res1}, - - {St4, Ch} = cmd(cmd_channel_open, St3, []), - - %% Simulate pre-3.10.0 behaviour by making deliver a noop - ok = meck:new(rabbit_queue_index, [passthrough]), - ok = meck:expect(rabbit_queue_index, deliver, fun(_, State) -> State end), - - {St5, _Res5} = cmd(cmd_channel_publish, St4, [Ch, 4, _Persistent = 2, _NotMandatory = false, _NoExpiration = undefined]), - - %% Enforce syncing journal to disk - %% (Not strictly necessary as vhost restart also triggers a sync) - %% At this point there should be a publish entry in the journal and no segment files - rabbit_amqqueue:pid_of(St5#cq.amq) ! timeout, - - {SyncTime, ok} = timer:tc(fun() -> meck:wait(rabbit_queue_index, sync, '_', 1000) end), - ct:pal("wait for sync took ~p ms", [SyncTime div 1000]), - - %% Simulate RabbitMQ version upgrade by a clean vhost restart - %% (also reset delivery to normal operation) - ok = meck:delete(rabbit_queue_index, deliver, 2), - {St10, _} = cmd(cmd_restart_vhost_clean, St5, []), - - meck:reset(rabbit_queue_index), - - %% Consume the message and acknowledge it - %% The queue index should not crash when finding a pub+ack but no_del in the journal - %% (It used to crash in `action_to_entry/3' with a case_clause) - {St6, _Tag} = cmd(cmd_channel_consume, St10, [Ch]), - receive SomeMsg -> self() ! 
SomeMsg - after 5000 -> ct:fail(no_message_consumed) - end, - {St7, _Msg = #amqp_msg{}} = cmd(cmd_channel_receive_and_ack, St6, [Ch]), - - %% enforce syncing journal to disk - rabbit_amqqueue:pid_of(St7#cq.amq) ! timeout, - - {SyncTime2, ok} = timer:tc(fun() -> meck:wait(rabbit_queue_index, sync, '_', 1000) end), - ct:pal("wait for sync took ~p ms", [SyncTime2 div 1000]), - - validate_and_teaddown(St7). - -%% After upgrade to 3.10 there is a published message in a segment file. -%% Consuming and acknowledging the message inserts an ack entry in the journal file. -%% A subsequent restart (of the queue/vhost/node) should work fine. -reg_v1_no_del_idx(Config) -> - try - true = rabbit_ct_broker_helpers:rpc( - Config, 0, ?MODULE, do_reg_v1_no_del_idx, [Config]) - catch exit:{exception, Reason} -> - exit(Reason) - end. - -do_reg_v1_no_del_idx(Config) -> - St0 = #cq{name=prop_classic_queue_v1, version=1, - config=minimal_config(Config)}, - - Res1 = cmd_setup_queue(St0), - St3 = St0#cq{amq=Res1}, - - {St4, Ch} = cmd(cmd_channel_open, St3, []), - - %% Simulate pre-3.10.0 behaviour by making deliver a noop - ok = meck:new(rabbit_queue_index, [passthrough]), - ok = meck:expect(rabbit_queue_index, deliver, fun(_, State) -> State end), - - ok = meck:new(rabbit_variable_queue, [passthrough]), - - {St5, _Res5} = cmd(cmd_channel_publish, St4, [Ch, 4, _Persistent = 2, _NotMandatory = false, _NoExpiration = undefined]), - - %% Wait for the queue process to get hibernated - %% handle_pre_hibernate syncs and flushes the journal - %% At this point there should be a publish entry in the segment file and an empty journal - {Time, ok} = timer:tc(fun() -> meck:wait(rabbit_variable_queue, handle_pre_hibernate, '_', 10000) end), - ct:pal("wait for hibernate took ~p ms", [Time div 1000]), - ok = meck:unload(rabbit_variable_queue), - - %% Simulate RabbitMQ version upgrade by a clean vhost restart - %% (also reset delivery to normal operation) - ok = meck:delete(rabbit_queue_index, deliver, 2), - {St10, _} = cmd(cmd_restart_vhost_clean, St5, []), - - %% Consume the message and acknowledge it - {St6, _Tag} = cmd(cmd_channel_consume, St10, [Ch]), - receive SomeMsg -> self() ! SomeMsg - after 5000 -> ct:fail(no_message_consumed) - end, - {St7, _Msg = #amqp_msg{}} = cmd(cmd_channel_receive_and_ack, St6, [Ch]), - - meck:reset(rabbit_queue_index), - - %% enforce syncing journal to disk - %% At this point there should be a publish entry in the segment file and an ack in the journal - rabbit_amqqueue:pid_of(St7#cq.amq) ! timeout, - {SyncTime, ok} = timer:tc(fun() -> meck:wait(rabbit_queue_index, sync, '_', 1000) end), - ct:pal("wait for sync took ~p ms", [SyncTime div 1000]), - - meck:reset(rabbit_queue_index), - - %% Another clean vhost restart - %% The queue index should not crash when finding a pub in a - %% segment, an ack in the journal, but no_del - %% (It used to crash in `segment_plus_journal1/2' with a function_clause) - catch cmd(cmd_restart_vhost_clean, St7, []), - - {ReadTime, ok} = timer:tc(fun() -> meck:wait(rabbit_queue_index, read, '_', 1000) end), - ct:pal("wait for queue read took ~p ms", [ReadTime div 1000]), - - validate_and_teaddown(St7). - -%% After upgrade to 3.10 there is a published message in a segment file. -%% Consuming and acknowledging the message inserts an ack entry in the journal file. -%% The recovery after a subsequent unclean shutdown (of the queue/vhost/node) should work fine. 
-reg_v1_no_del_idx_unclean(Config) -> - try - true = rabbit_ct_broker_helpers:rpc( - Config, 0, ?MODULE, do_reg_v1_no_del_idx_unclean, [Config]) - catch exit:{exception, Reason} -> - exit(Reason) - end. - -do_reg_v1_no_del_idx_unclean(Config) -> - St0 = #cq{name=prop_classic_queue_v1, version=1, - config=minimal_config(Config)}, - - Res1 = cmd_setup_queue(St0), - St3 = St0#cq{amq=Res1}, - - {St4, Ch} = cmd(cmd_channel_open, St3, []), - - %% Simulate pre-3.10.0 behaviour by making deliver a noop - ok = meck:new(rabbit_queue_index, [passthrough]), - ok = meck:expect(rabbit_queue_index, deliver, fun(_, State) -> State end), - - ok = meck:new(rabbit_variable_queue, [passthrough]), - - {St5, _Res5} = cmd(cmd_channel_publish, St4, [Ch, 4, _Persistent = 2, _NotMandatory = false, _NoExpiration = undefined]), - - %% Wait for the queue process to get hibernated - %% handle_pre_hibernate syncs and flushes the journal - %% At this point there should be a publish entry in the segment file and an empty journal - {Time, ok} = timer:tc(fun() -> meck:wait(rabbit_variable_queue, handle_pre_hibernate, '_', 10000) end), - ct:pal("wait for hibernate took ~p ms", [Time div 1000]), - ok = meck:unload(rabbit_variable_queue), - - %% Simulate RabbitMQ version upgrade by a clean vhost restart - %% (also reset delivery to normal operation) - ok = meck:delete(rabbit_queue_index, deliver, 2), - {St10, _} = cmd(cmd_restart_vhost_clean, St5, []), - - %% Consume the message and acknowledge it - {St6, _Tag} = cmd(cmd_channel_consume, St10, [Ch]), - receive SomeMsg -> self() ! SomeMsg - after 5000 -> ct:fail(no_message_consumed) - end, - meck:reset(rabbit_queue_index), - {St7, _Msg = #amqp_msg{}} = cmd(cmd_channel_receive_and_ack, St6, [Ch]), - - %% (need to ensure that the queue processed the ack before triggering the sync) - {AckTime, ok} = timer:tc(fun() -> meck:wait(rabbit_queue_index, ack, '_', 1000) end), - ct:pal("wait for ack took ~p ms", [AckTime div 1000]), - - %% enforce syncing journal to disk - %% At this point there should be a publish entry in the segment file and an ack in the journal - rabbit_amqqueue:pid_of(St7#cq.amq) ! timeout, - {SyncTime, ok} = timer:tc(fun() -> meck:wait(rabbit_queue_index, sync, '_', 1000) end), - ct:pal("wait for sync took ~p ms", [SyncTime div 1000]), - - meck:reset(rabbit_queue_index), - - %% Recovery after unclean queue shutdown - %% The queue index should not crash when finding a pub in a - %% segment, an ack in the journal, but no_del - %% (It used to crash in `journal_minus_segment1/2' with a function_clause) - {St20, _} = cmd(cmd_restart_queue_dirty, St7, []), - - {RecoverTime, ok} = timer:tc(fun() -> meck:wait(rabbit_queue_index, recover, '_', 1000) end), - ct:pal("wait for queue recover took ~p ms", [RecoverTime div 1000]), - - validate_and_teaddown(St20). - cmd(CmdName, StIn, ExtraArgs) -> Res0 = apply(?MODULE, CmdName, [StIn | ExtraArgs]), true = postcondition(StIn, {call, undefined, CmdName, [StIn | ExtraArgs]}, Res0), diff --git a/deps/rabbit/test/cli_forget_cluster_node_SUITE.erl b/deps/rabbit/test/cli_forget_cluster_node_SUITE.erl new file mode 100644 index 000000000000..b088cf68daff --- /dev/null +++ b/deps/rabbit/test/cli_forget_cluster_node_SUITE.erl @@ -0,0 +1,388 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(cli_forget_cluster_node_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). + +-compile(nowarn_export_all). +-compile(export_all). + +-import(clustering_utils, [ + assert_cluster_status/2, + assert_clustered/1, + assert_not_clustered/1 + ]). + +all() -> + [ + {group, cluster_size_3} + ]. + +groups() -> + [ + {cluster_size_3, [], [ + forget_cluster_node_with_quorum_queues, + forget_cluster_node_with_one_last_quorum_member, + forget_cluster_node_with_all_last_quorum_member, + forget_cluster_node_with_streams, + forget_cluster_node_with_one_last_stream, + forget_cluster_node_with_all_last_streams, + forget_cluster_node_with_quorum_queues_and_streams, + forget_cluster_node_with_one_last_quorum_member_and_streams, + forget_cluster_node_with_one_last_stream_and_quorum_queues + ]} + ]. + +suite() -> + [ + %% If a testcase hangs, no need to wait for 30 minutes. + {timetrap, {minutes, 5}} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + Config1 = rabbit_ct_helpers:merge_app_env( + Config, {rabbit, [ + {mnesia_table_loading_retry_limit, 2}, + {mnesia_table_loading_retry_timeout,1000} + ]}), + rabbit_ct_helpers:run_setup_steps(Config1). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(cluster_size_3, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}, + {rmq_nodes_clustered, true}]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}, + {keep_pid_file_on_exit, true} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). 
+ +%% ------------------------------------------------------------------- +%% Test cases +%% ------------------------------------------------------------------- +forget_cluster_node_with_quorum_queues(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Rabbit), + QQ1 = <<"quorum-queue-1">>, + QQ2 = <<"quorum-queue-2">>, + declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + + ?awaitMatch(Members when length(Members) == 3, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 3, get_quorum_members(Rabbit, QQ2), 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + %% Leaders are most likely on Rabbit where the queue was declared. + %% But let's wait anyway until a new leader is elected to not have unexpected + %% delete_replica failures. This can easily happen if queues are declared in Bunny + %% as it takes some time to elect a new leader + ?awaitMatch(Members when length(Members) == 2, get_online_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_online_members(Rabbit, QQ2), 30000), + ?awaitMatch(Member when Member =/= '', get_leader(Rabbit, QQ1), 30000), + ?awaitMatch(Member when Member =/= '', get_leader(Rabbit, QQ2), 30000), + + ?assertEqual(ok, forget_cluster_node(Rabbit, Bunny)), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ2), 30000). + +forget_cluster_node_with_one_last_quorum_member(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Bunny), + QQ1 = <<"quorum-queue-1">>, + QQ2 = <<"quorum-queue-2">>, + declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1}]), + declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + + ?awaitMatch(Members when length(Members) == 1, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 3, get_quorum_members(Rabbit, QQ2), 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + %% Leaders are most likely on Bunny where the queue was declared. Let's wait until + %% a new leader is elected to not have unexpected delete_replica failures + ?awaitMatch(Members when length(Members) == 0, get_online_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_online_members(Rabbit, QQ2), 30000), + ?awaitMatch(Member when Member =/= '', get_leader(Rabbit, QQ2), 30000), + + ?assertMatch({error, 69, _}, forget_cluster_node(Rabbit, Bunny)), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch(Members when length(Members) == 1, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ2), 30000). 
+ +forget_cluster_node_with_all_last_quorum_member(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Bunny), + QQ1 = <<"quorum-queue-1">>, + QQ2 = <<"quorum-queue-2">>, + declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1}]), + declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1}]), + + ?awaitMatch(Members when length(Members) == 1, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 1, get_quorum_members(Rabbit, QQ2), 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + + ?assertMatch({error, 69, _}, forget_cluster_node(Rabbit, Bunny)), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch(Members when length(Members) == 1, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 1, get_quorum_members(Rabbit, QQ2), 30000). + +forget_cluster_node_with_streams(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Rabbit), + S1 = <<"stream-1">>, + S2 = <<"stream-2">>, + declare(Ch, S1, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + declare(Ch, S2, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + + ?awaitMatch(Members when length(Members) == 3, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members])), 30000), + ?awaitMatch(Members when length(Members) == 3, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + + ?assertEqual(ok, forget_cluster_node(Rabbit, Bunny)), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch(Members when length(Members) == 2, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members]), 30000), 30000), + ?awaitMatch(Members when length(Members) == 2, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000). 
+ +forget_cluster_node_with_one_last_stream(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Bunny), + S1 = <<"stream-1">>, + S2 = <<"stream-2">>, + declare(Ch, S1, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 1}]), + declare(Ch, S2, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + + ?awaitMatch(Members when length(Members) == 1, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members]), 30000), 30000), + ?awaitMatch(Members when length(Members) == 3, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + ?assertMatch({error, 69, _}, forget_cluster_node(Rabbit, Bunny)), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch(Members when length(Members) == 1, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members]), 30000), 30000), + ?awaitMatch(Members when length(Members) == 2, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000). + +forget_cluster_node_with_all_last_streams(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Bunny), + S1 = <<"stream-1">>, + S2 = <<"stream-2">>, + declare(Ch, S1, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 1}]), + declare(Ch, S2, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 1}]), + + ?awaitMatch(Members when length(Members) == 1, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members]), 30000), 30000), + ?awaitMatch(Members when length(Members) == 1, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + ?assertMatch({error, 69, _}, forget_cluster_node(Rabbit, Bunny)), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch(Members when length(Members) == 1, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members]), 30000), 30000), + ?awaitMatch(Members when length(Members) == 1, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000). 
+ +forget_cluster_node_with_quorum_queues_and_streams(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Rabbit), + QQ1 = <<"quorum-queue-1">>, + QQ2 = <<"quorum-queue-2">>, + S1 = <<"stream-1">>, + S2 = <<"stream-2">>, + declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + declare(Ch, S1, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + declare(Ch, S2, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + + ?awaitMatch(Members when length(Members) == 3, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 3, get_quorum_members(Rabbit, QQ2), 30000), + ?awaitMatch(Members when length(Members) == 3, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members])), 30000), + ?awaitMatch(Members when length(Members) == 3, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + %% Leaders are most likely on Rabbit where the queue was declared. + %% But let's wait anyway until a new leader is elected to not have unexpected + %% delete_replica failures. This can easily happen if queues are declared in Bunny + %% as it takes some time to elect a new leader + ?awaitMatch(Members when length(Members) == 2, get_online_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_online_members(Rabbit, QQ2), 30000), + ?awaitMatch(Member when Member =/= '', get_leader(Rabbit, QQ1), 30000), + ?awaitMatch(Member when Member =/= '', get_leader(Rabbit, QQ2), 30000), + + ?assertEqual(ok, forget_cluster_node(Rabbit, Bunny)), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ2), 30000), + ?awaitMatch(Members when length(Members) == 2, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members])), 30000), +?awaitMatch(Members when length(Members) == 2, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000). 
+ +forget_cluster_node_with_one_last_quorum_member_and_streams(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Bunny), + QQ1 = <<"quorum-queue-1">>, + QQ2 = <<"quorum-queue-2">>, + S1 = <<"stream-1">>, + S2 = <<"stream-2">>, + declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1}]), + declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + declare(Ch, S1, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + declare(Ch, S2, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + + ?awaitMatch(Members when length(Members) == 1, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 3, get_quorum_members(Rabbit, QQ2), 30000), + ?awaitMatch(Members when length(Members) == 3, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members])), 30000), + ?awaitMatch(Members when length(Members) == 3, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + %% Leaders are most likely on Bunny where the queue was declared. Let's wait until + %% a new leader is elected to not have unexpected delete_replica failures + ?awaitMatch(Members when length(Members) == 0, get_online_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_online_members(Rabbit, QQ2), 30000), + ?awaitMatch(Member when Member =/= '', get_leader(Rabbit, QQ2), 30000), + + ?assertMatch({error, 69, _}, forget_cluster_node(Rabbit, Bunny)), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch(Members when length(Members) == 1, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ2), 30000), + ?awaitMatch(Members when length(Members) == 2, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members])), 30000), +?awaitMatch(Members when length(Members) == 2, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000). + +forget_cluster_node_with_one_last_stream_and_quorum_queues(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Bunny), + S1 = <<"stream-1">>, + S2 = <<"stream-2">>, + QQ1 = <<"quorum-queue-1">>, + QQ2 = <<"quorum-queue-2">>, + declare(Ch, S1, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 1}]), + declare(Ch, S2, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + + ?awaitMatch(Members when length(Members) == 1, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members]), 30000), 30000), + ?awaitMatch(Members when length(Members) == 3, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000), + ?awaitMatch(Members when length(Members) == 3, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 3, get_quorum_members(Rabbit, QQ2), 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + %% Leaders are most likely on Rabbit where the queue was declared. 
+ %% But let's wait anyway until a new leader is elected to not have unexpected + %% delete_replica failures. This can easily happen if queues are declared in Bunny + %% as it takes some time to elect a new leader + ?awaitMatch(Members when length(Members) == 2, get_online_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_online_members(Rabbit, QQ2), 30000), + ?awaitMatch(Member when Member =/= '', get_leader(Rabbit, QQ1), 30000), + ?awaitMatch(Member when Member =/= '', get_leader(Rabbit, QQ2), 30000), + + ?assertMatch({error, 69, _}, forget_cluster_node(Rabbit, Bunny)), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch(Members when length(Members) == 1, proplists:get_value(members, find_queue_info(Config, Rabbit, S1, [members]), 30000), 30000), + ?awaitMatch(Members when length(Members) == 2, proplists:get_value(members, find_queue_info(Config, Rabbit, S2, [members]), 30000), 30000), + ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ1), 30000), + ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ2), 30000). + +forget_cluster_node(Node, Removee) -> + rabbit_control_helper:command(forget_cluster_node, Node, [atom_to_list(Removee)], + []). + +get_quorum_members(Server, Q) -> + Info = rpc:call(Server, rabbit_quorum_queue, infos, [rabbit_misc:r(<<"/">>, queue, Q)]), + proplists:get_value(members, Info). + +get_stream_members(Server, Q) -> + Info = rpc:call(Server, rabbit_stream_queue, info, [rabbit_misc:r(<<"/">>, queue, Q), all_keys]), + proplists:get_value(members, Info). + +get_online_members(Server, Q) -> + Info = rpc:call(Server, rabbit_quorum_queue, infos, [rabbit_misc:r(<<"/">>, queue, Q)]), + proplists:get_value(online, Info). + +get_leader(Server, Q) -> + Info = rpc:call(Server, rabbit_quorum_queue, infos, [rabbit_misc:r(<<"/">>, queue, Q)]), + proplists:get_value(leader, Info). + +declare(Ch, Q, Args) -> + amqp_channel:call(Ch, #'queue.declare'{queue = Q, + durable = true, + auto_delete = false, + arguments = Args}). + +find_queue_info(Config, Node, Name, Keys) -> + QName = rabbit_misc:r(<<"/">>, queue, Name), + Infos = rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_amqqueue, info_all, + [<<"/">>, [name] ++ Keys]), + [Info] = [Props || Props <- Infos, lists:member({name, QName}, Props)], + Info. diff --git a/deps/rabbit/test/cluster_SUITE.erl b/deps/rabbit/test/cluster_SUITE.erl index ff72656692c0..dcd4e19f43dc 100644 --- a/deps/rabbit/test/cluster_SUITE.erl +++ b/deps/rabbit/test/cluster_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(cluster_SUITE). @@ -93,6 +93,16 @@ end_per_group(Group, Config) -> Config end. +init_per_testcase(queue_cleanup = Testcase, Config) -> + case lists:any(fun(B) -> B end, + rabbit_ct_broker_helpers:rpc_all( + Config, rabbit_feature_flags, is_enabled, + [khepri_db])) of + true -> + {skip, "Invalid testcase using Khepri. All queues are durable"}; + false -> + rabbit_ct_helpers:testcase_started(Config, Testcase) + end; init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). 
diff --git a/deps/rabbit/test/cluster_limit_SUITE.erl b/deps/rabbit/test/cluster_limit_SUITE.erl
new file mode 100644
index 000000000000..c8aa31614587
--- /dev/null
+++ b/deps/rabbit/test/cluster_limit_SUITE.erl
@@ -0,0 +1,149 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
+
+
+-module(cluster_limit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-compile([nowarn_export_all, export_all]).
+
+
+all() ->
+    [
+     {group, clustered}
+    ].
+
+groups() ->
+    [
+     {clustered, [],
+      [
+       {size_2, [], [queue_limit]}
+      ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config0) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:merge_app_env(
+                Config0, {rabbit, [{quorum_tick_interval, 1000},
+                                   {cluster_queue_limit, 3}]}),
+    rabbit_ct_helpers:run_setup_steps(Config1, []).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+init_per_group(clustered, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(Group, Config) ->
+    ClusterSize = case Group of
+                      size_2 -> 2
+                  end,
+    IsMixed = rabbit_ct_helpers:is_mixed_versions(),
+    case ClusterSize of
+        2 when IsMixed ->
+            {skip, "cluster size 2 isn't mixed versions compatible"};
+        _ ->
+            Config1 = rabbit_ct_helpers:set_config(Config,
+                                                   [{rmq_nodes_count, ClusterSize},
+                                                    {rmq_nodename_suffix, Group},
+                                                    {tcp_ports_base}]),
+            Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]),
+            rabbit_ct_helpers:run_steps(Config1b,
+                                        [fun merge_app_env/1 ] ++
+                                        rabbit_ct_broker_helpers:setup_steps())
+    end.
+
+end_per_group(clustered, Config) ->
+    Config;
+end_per_group(_, Config) ->
+    rabbit_ct_helpers:run_steps(Config,
+                                rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+    rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
+    Q = rabbit_data_coercion:to_binary(Testcase),
+    Config2 = rabbit_ct_helpers:set_config(Config1,
+                                           [{queue_name, Q},
+                                            {alt_queue_name, <<Q/binary, "_alt">>},
+                                            {alt_2_queue_name, <<Q/binary, "_alt_2">>},
+                                            {over_limit_queue_name, <<Q/binary, "_over_limit">>}
+                                           ]),
+    rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()).
+
+merge_app_env(Config) ->
+    rabbit_ct_helpers:merge_app_env(
+      rabbit_ct_helpers:merge_app_env(Config,
+                                      {rabbit, [{core_metrics_gc_interval, 100}]}),
+      {ra, [{min_wal_roll_over_interval, 30000}]}).
+
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(
+                Config,
+                rabbit_ct_client_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% ------------------------------------------------------------------- + +queue_limit(Config) -> + [Server0, Server1] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), + Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server1), + Q1 = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Q1, 0, 0}, + declare(Ch, Q1)), + + Q2 = ?config(alt_queue_name, Config), + ?assertEqual({'queue.declare_ok', Q2, 0, 0}, + declare(Ch, Q2)), + + Q3 = ?config(alt_2_queue_name, Config), + ?assertEqual({'queue.declare_ok', Q3, 0, 0}, + declare(Ch, Q3)), + Q4 = ?config(over_limit_queue_name, Config), + ExpectedError = list_to_binary(io_lib:format("PRECONDITION_FAILED - cannot declare queue '~s': queue limit in cluster (3) is reached", [Q4])), + ?assertExit( + {{shutdown, {server_initiated_close, 406, ExpectedError}}, _}, + declare(Ch, Q4)), + + %% Trying the second server, in the cluster, but no queues on it, + %% but should still fail as the limit is cluster wide. + ?assertExit( + {{shutdown, {server_initiated_close, 406, ExpectedError}}, _}, + declare(Ch2, Q4)), + + %Trying other types of queues + ChQQ = rabbit_ct_client_helpers:open_channel(Config, Server0), + ChStream = rabbit_ct_client_helpers:open_channel(Config, Server1), + ?assertExit( + {{shutdown, {server_initiated_close, 406, ExpectedError}}, _}, + declare(ChQQ, Q4, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ?assertExit( + {{shutdown, {server_initiated_close, 406, ExpectedError}}, _}, + declare(ChStream, Q4, [{<<"x-queue-type">>, longstr, <<"stream">>}])), + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []), + ok. + +declare(Ch, Q) -> + declare(Ch, Q, []). + +declare(Ch, Q, Args) -> + amqp_channel:call(Ch, #'queue.declare'{queue = Q, + durable = true, + auto_delete = false, + arguments = Args}). + +delete_queues() -> + [rabbit_amqqueue:delete(Q, false, false, <<"dummy">>) + || Q <- rabbit_amqqueue:list()]. diff --git a/deps/rabbit/test/cluster_minority_SUITE.erl b/deps/rabbit/test/cluster_minority_SUITE.erl new file mode 100644 index 000000000000..a6a8f4759ba4 --- /dev/null +++ b/deps/rabbit/test/cluster_minority_SUITE.erl @@ -0,0 +1,336 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(cluster_minority_SUITE). + +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-compile([export_all, nowarn_export_all]). + +all() -> + [ + {group, client_operations}, + {group, cluster_operation_add}, + {group, cluster_operation_remove} + ]. + +groups() -> + [ + {client_operations, [], [open_connection, + open_channel, + declare_exchange, + delete_exchange, + declare_binding, + delete_binding, + declare_queue, + publish_to_exchange, + publish_and_consume_to_local_classic_queue, + consume_from_queue, + add_vhost, + update_vhost, + delete_vhost, + add_user, + update_user, + delete_user, + set_policy, + delete_policy, + export_definitions + ]}, + {cluster_operation_add, [], [add_node]}, + {cluster_operation_remove, [], [remove_node]}, + {feature_flags, [], [enable_feature_flag]} + ]. + +suite() -> + [ + %% If a testcase hangs, no need to wait for 30 minutes. 
+ {timetrap, {minutes, 5}} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + mnesia -> + %% This SUITE is meant to test how Khepri behaves in a minority, + %% so mnesia should be skipped. + {skip, "Minority testing not supported by mnesia"}; + _ -> + rabbit_ct_helpers:run_setup_steps( + Config, + [ + fun rabbit_ct_broker_helpers:configure_dist_proxy/1 + ]) + end. + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(Group, Config0) when Group == client_operations; + Group == feature_flags -> + Config = rabbit_ct_helpers:set_config(Config0, [{rmq_nodes_count, 5}, + {rmq_nodename_suffix, Group}, + {tcp_ports_base}, + {net_ticktime, 5}]), + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + case Config1 of + {skip, _} -> + Config1; + _ -> + %% Before partitioning the cluster, create resources that can be used in + %% the test cases. They're needed for delete and consume operations, which can list + %% them but fail to operate anything else. + %% + %% To be used in delete_policy + ok = rabbit_ct_broker_helpers:set_policy(Config1, 0, <<"policy-to-delete">>, <<".*">>, <<"queues">>, [{<<"max-age">>, <<"1Y">>}]), + Ch = rabbit_ct_client_helpers:open_channel(Config1, 0), + %% To be used in consume_from_queue + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = <<"test-queue">>, + arguments = [{<<"x-queue-type">>, longstr, <<"classic">>}]}), + %% To be used in delete_binding + #'exchange.bind_ok'{} = amqp_channel:call(Ch, #'exchange.bind'{destination = <<"amq.fanout">>, + source = <<"amq.direct">>, + routing_key = <<"binding-to-be-deleted">>}), + %% To be used in delete_exchange + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = <<"exchange-to-be-deleted">>}), + + %% Lower the default Khepri command timeout. By default this is set + %% to 30s in `rabbit_khepri:setup/1' which makes the cases in this + %% group run unnecessarily slow. + [ok = rabbit_ct_broker_helpers:rpc( + Config1, N, + application, set_env, + [khepri, default_timeout, 100]) || N <- lists:seq(0, 4)], + + %% Create partition + partition_5_node_cluster(Config1), + Config1 + end; +init_per_group(Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{rmq_nodes_count, 5}, + {rmq_nodename_suffix, Group}, + {rmq_nodes_clustered, false}, + {tcp_ports_base}, + {net_ticktime, 5}]), + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_, Config) -> + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). 
+ +%% ------------------------------------------------------------------- +%% Test cases +%% ------------------------------------------------------------------- +open_connection(Config) -> + [A, B | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ConnA = rabbit_ct_client_helpers:open_unmanaged_connection(Config, A, <<"/">>), + ConnB = rabbit_ct_client_helpers:open_unmanaged_connection(Config, B, <<"/">>), + rabbit_ct_client_helpers:close_connection(ConnA), + rabbit_ct_client_helpers:close_connection(ConnB). + +open_channel(Config) -> + [A, B | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ChA = rabbit_ct_client_helpers:open_channel(Config, A), + ChB = rabbit_ct_client_helpers:open_channel(Config, B), + rabbit_ct_client_helpers:close_channel(ChA), + rabbit_ct_client_helpers:close_channel(ChB). + +declare_exchange(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, + amqp_channel:call(Ch, #'exchange.declare'{exchange = <<"test-exchange">>})). + +delete_exchange(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, + amqp_channel:call(Ch, #'exchange.delete'{exchange = <<"exchange-to-be-deleted">>})). + +declare_binding(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, + amqp_channel:call(Ch, #'exchange.bind'{destination = <<"amq.fanout">>, + source = <<"amq.direct">>, + routing_key = <<"key">>})). + +delete_binding(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, + amqp_channel:call(Ch, #'exchange.unbind'{destination = <<"amq.fanout">>, + source = <<"amq.direct">>, + routing_key = <<"binding-to-be-deleted">>})). + +declare_queue(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, + amqp_channel:call(Ch, #'queue.declare'{queue = <<"test-queue-2">>})). + +publish_to_exchange(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + ?assertEqual(ok, amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"test-queue-2">>}, + #amqp_msg{payload = <<"msg">>})). + +publish_and_consume_to_local_classic_queue(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + ?assertEqual(ok, amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"test-queue">>}, + #amqp_msg{payload = <<"msg">>})), + ?assertMatch({#'basic.get_ok'{}, _}, + amqp_channel:call(Ch, #'basic.get'{queue = <<"test-queue">>})). 
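Most of the client_operations cases above assert that metadata-changing operations attempted from the minority side fail with a server-initiated close carrying reply code 541. A client-side wrapper that wants to treat that condition as retryable could look roughly like the sketch below; it is illustrative only, the function name is hypothetical, and the exit shape is taken from the assertions above (it assumes the amqp_client header is included, as in these suites).

    %% Illustrative sketch, not part of the patch: translate the 541
    %% close observed when the metadata store is in a minority into a
    %% retryable return value instead of letting the caller crash.
    declare_or_retry(Ch, QName) ->
        try
            #'queue.declare_ok'{} =
                amqp_channel:call(Ch, #'queue.declare'{queue = QName,
                                                       durable = true}),
            ok
        catch
            exit:{{shutdown, {connection_closing,
                              {server_initiated_close, 541, Reason}}}, _} ->
                {retry_later, Reason}
        end.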
+ +consume_from_queue(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), + ?assertMatch(#'basic.consume_ok'{}, + amqp_channel:call(Ch, #'basic.consume'{queue = <<"test-queue">>})). + +add_vhost(Config) -> + ?assertMatch({error, timeout}, + rabbit_ct_broker_helpers:add_vhost(Config, <<"vhost1">>)). + +update_vhost(Config) -> + ?assertThrow({error, timeout}, + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, update_tags, + [<<"/">>, [carrots], <<"user">>])). + +delete_vhost(Config) -> + ?assertMatch({'EXIT', _}, rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>)). + +add_user(Config) -> + ?assertMatch({error, timeout}, + rabbit_ct_broker_helpers:add_user(Config, <<"user1">>)). + +update_user(Config) -> + ?assertMatch({error, timeout}, + rabbit_ct_broker_helpers:set_user_tags(Config, 0, <<"user1">>, [<<"admin">>])). + +delete_user(Config) -> + ?assertMatch({error, timeout}, + rabbit_ct_broker_helpers:delete_user(Config, <<"user1">>)). + +set_policy(Config) -> + ?assertError(_, rabbit_ct_broker_helpers:set_policy(Config, 0, <<"max-age-policy">>, <<".*">>, <<"queues">>, [{<<"max-age">>, <<"1Y">>}])). + +delete_policy(Config) -> + ?assertError(_, rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"policy-to-delete">>)). + +add_node(Config) -> + [A, B, C, D, _E] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + + %% Three node cluster: A, B, C + ok = rabbit_control_helper:command(stop_app, B), + ok = rabbit_control_helper:command(join_cluster, B, [atom_to_list(A)], []), + rabbit_control_helper:command(start_app, B), + + ok = rabbit_control_helper:command(stop_app, C), + ok = rabbit_control_helper:command(join_cluster, C, [atom_to_list(A)], []), + rabbit_control_helper:command(start_app, C), + + %% Minority partition: A + Cluster = [A, B, C], + partition_3_node_cluster(Config), + + ok = rabbit_control_helper:command(stop_app, D), + %% The command is appended to the log, but it will be dropped once the connectivity + %% is restored + ?assertMatch(ok, + rabbit_control_helper:command(join_cluster, D, [atom_to_list(A)], [])), + timer:sleep(10000), + join_3_node_cluster(Config), + clustering_utils:assert_cluster_status({Cluster, Cluster}, Cluster). + +remove_node(Config) -> + [A, B, C | _] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + + %% Three node cluster: A, B, C + ok = rabbit_control_helper:command(stop_app, B), + ok = rabbit_control_helper:command(join_cluster, B, [atom_to_list(A)], []), + rabbit_control_helper:command(start_app, B), + + ok = rabbit_control_helper:command(stop_app, C), + ok = rabbit_control_helper:command(join_cluster, C, [atom_to_list(A)], []), + rabbit_control_helper:command(start_app, C), + + %% Minority partition: A + partition_3_node_cluster(Config), + Cluster = [A, B, C], + + ok = rabbit_control_helper:command(forget_cluster_node, A, [atom_to_list(B)], []), + timer:sleep(10000), + join_3_node_cluster(Config), + clustering_utils:assert_cluster_status({Cluster, Cluster}, Cluster). + +enable_feature_flag(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ?assertMatch({error, missing_clustered_nodes}, rabbit_ct_broker_helpers:rpc(Config, A, rabbit_feature_flags, enable, [khepri_db])). + +export_definitions(Config) -> + Definitions = rabbit_ct_broker_helpers:rpc( + Config, 0, + rabbit_definitions, all_definitions, []), + ?assert(is_map(Definitions)). 
+ +%% ------------------------------------------------------------------- +%% Internal helpers. +%% ------------------------------------------------------------------- + +partition_3_node_cluster(Config) -> + [A, B, C | _] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + Cluster = [A, B, C], + clustering_utils:assert_cluster_status({Cluster, Cluster}, Cluster), + NodePairs = [{A, B}, + {A, C}], + [rabbit_ct_broker_helpers:block_traffic_between(X, Y) || {X, Y} <- NodePairs], + %% Wait for the network partition to happen + clustering_utils:assert_cluster_status({Cluster, [B, C]}, [B, C]). + +partition_5_node_cluster(Config) -> + [A, B, C, D, E] = All = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + %% Wait for the cluster to be ready + clustering_utils:assert_cluster_status({All, All}, All), + %% Minority partition A, B + NodePairs = [{A, C}, + {A, D}, + {A, E}, + {B, C}, + {B, D}, + {B, E}], + [rabbit_ct_broker_helpers:block_traffic_between(X, Y) || {X, Y} <- NodePairs], + %% Wait for the network partition to happen + clustering_utils:assert_cluster_status({All, [C, D, E]}, [C, D, E]). + +join_3_node_cluster(Config)-> + [A, B, C | _] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + %% Minority partition A + NodePairs = [{A, B}, + {A, C}], + [rabbit_ct_broker_helpers:allow_traffic_between(X, Y) || {X, Y} <- NodePairs]. diff --git a/deps/rabbit/test/cluster_rename_SUITE.erl b/deps/rabbit/test/cluster_rename_SUITE.erl deleted file mode 100644 index b9b3af7f79b4..000000000000 --- a/deps/rabbit/test/cluster_rename_SUITE.erl +++ /dev/null @@ -1,301 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(cluster_rename_SUITE). - --include_lib("common_test/include/ct.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - --compile(export_all). - -all() -> - [ - {group, cluster_size_2}, - {group, cluster_size_3} - ]. - -groups() -> - [ - {cluster_size_2, [], [ - % XXX post_change_nodename, - abortive_rename, - rename_fail, - rename_twice_fail - ]}, - {cluster_size_3, [], [ - rename_cluster_one_by_one, - rename_cluster_big_bang, - partial_one_by_one, - partial_big_bang - ]} - ]. - -suite() -> - [ - %% If a test hangs, no need to wait for 30 minutes. - {timetrap, {minutes, 15}} - ]. - -%% ------------------------------------------------------------------- -%% Testsuite setup/teardown. -%% ------------------------------------------------------------------- - -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -init_per_group(cluster_size_2, Config) -> - rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 2} %% Replaced with a list of node names later. - ]); -init_per_group(cluster_size_3, Config) -> - rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 3} %% Replaced with a list of node names later. - ]). - -end_per_group(_, Config) -> - Config. 
- -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), - ClusterSize = ?config(rmq_nodes_count, Config), - Nodenames = [ - list_to_atom(rabbit_misc:format("~ts-~b", [Testcase, I])) - || I <- lists:seq(1, ClusterSize) - ], - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, Nodenames}, - {rmq_nodes_clustered, true} - ]), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). - -end_per_testcase(Testcase, Config) -> - Config1 = case rabbit_ct_helpers:get_config(Config, save_config) of - undefined -> Config; - C -> C - end, - Config2 = rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config2, Testcase). - -%% ------------------------------------------------------------------- -%% Testcases. -%% ------------------------------------------------------------------- - -%% Rolling rename of a cluster, each node should do a secondary rename. -rename_cluster_one_by_one(Config) -> - [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs( - Config, nodename), - publish_all(Config, - [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]), - - Config1 = stop_rename_start(Config, Node1, [Node1, jessica]), - Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]), - Config3 = stop_rename_start(Config2, Node3, [Node3, flopsy]), - - [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs( - Config3, nodename), - consume_all(Config3, - [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]), - {save_config, Config3}. - -%% Big bang rename of a cluster, Node1 should do a primary rename. -rename_cluster_big_bang(Config) -> - [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - publish_all(Config, - [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]), - - ok = rabbit_ct_broker_helpers:stop_node(Config, Node3), - ok = rabbit_ct_broker_helpers:stop_node(Config, Node2), - ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), - - Map = [Node1, jessica, Node2, hazel, Node3, flopsy], - Config1 = rename_node(Config, Node1, Map), - Config2 = rename_node(Config1, Node2, Map), - Config3 = rename_node(Config2, Node3, Map), - - [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs( - Config3, nodename), - ok = rabbit_ct_broker_helpers:start_node(Config3, Jessica), - ok = rabbit_ct_broker_helpers:start_node(Config3, Hazel), - ok = rabbit_ct_broker_helpers:start_node(Config3, Flopsy), - - consume_all(Config3, - [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]), - {save_config, Config3}. - -%% Here we test that Node1 copes with things being renamed around it. -partial_one_by_one(Config) -> - [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - publish_all(Config, - [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]), - - Config1 = stop_rename_start(Config, Node1, [Node1, jessica]), - Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]), - - [Jessica, Hazel, Node3] = rabbit_ct_broker_helpers:get_node_configs( - Config2, nodename), - consume_all(Config2, - [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Node3, <<"3">>}]), - {save_config, Config2}. - -%% Here we test that Node1 copes with things being renamed around it. 
-partial_big_bang(Config) -> - [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - publish_all(Config, - [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]), - - ok = rabbit_ct_broker_helpers:stop_node(Config, Node3), - ok = rabbit_ct_broker_helpers:stop_node(Config, Node2), - ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), - - Map = [Node2, hazel, Node3, flopsy], - Config1 = rename_node(Config, Node2, Map), - Config2 = rename_node(Config1, Node3, Map), - - [Node1, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(Config2, - nodename), - ok = rabbit_ct_broker_helpers:start_node(Config2, Node1), - ok = rabbit_ct_broker_helpers:start_node(Config2, Hazel), - ok = rabbit_ct_broker_helpers:start_node(Config2, Flopsy), - - consume_all(Config2, - [{Node1, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]), - {save_config, Config2}. - -% XXX %% We should be able to specify the -n parameter on ctl with either -% XXX %% the before or after name for the local node (since in real cases -% XXX %% one might want to invoke the command before or after the hostname -% XXX %% has changed) - usually we test before so here we test after. -% XXX post_change_nodename([Node1, _Bigwig]) -> -% XXX publish(Node1, <<"Node1">>), -% XXX -% XXX Bugs1 = rabbit_test_configs:stop_node(Node1), -% XXX Bugs2 = [{nodename, jessica} | proplists:delete(nodename, Bugs1)], -% XXX Jessica0 = rename_node(Bugs2, jessica, [Node1, jessica]), -% XXX Jessica = rabbit_test_configs:start_node(Jessica0), -% XXX -% XXX consume(Jessica, <<"Node1">>), -% XXX stop_all([Jessica]), -% XXX ok. - -%% If we invoke rename but the node name does not actually change, we -%% should roll back. -abortive_rename(Config) -> - Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - publish(Config, Node1, <<"Node1">>), - - ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), - _Config1 = rename_node(Config, Node1, [Node1, jessica]), - ok = rabbit_ct_broker_helpers:start_node(Config, Node1), - - consume(Config, Node1, <<"Node1">>), - ok. - -%% And test some ways the command can fail. -rename_fail(Config) -> - [Node1, Node2] = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), - %% Rename from a node that does not exist - ok = rename_node_fail(Config, Node1, [bugzilla, jessica]), - %% Rename to a node which does - ok = rename_node_fail(Config, Node1, [Node1, Node2]), - %% Rename two nodes to the same thing - ok = rename_node_fail(Config, Node1, [Node1, jessica, Node2, jessica]), - %% Rename while impersonating a node not in the cluster - Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Node1, - {nodename, 'rabbit@localhost'}), - ok = rename_node_fail(Config1, Node1, [Node1, jessica]), - ok. - -rename_twice_fail(Config) -> - Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - ok = rabbit_ct_broker_helpers:stop_node(Config, Node1), - Config1 = rename_node(Config, Node1, [Node1, indecisive]), - ok = rename_node_fail(Config, Node1, [indecisive, jessica]), - {save_config, Config1}. - -%% ---------------------------------------------------------------------------- - -stop_rename_start(Config, Nodename, Map) -> - ok = rabbit_ct_broker_helpers:stop_node(Config, Nodename), - Config1 = rename_node(Config, Nodename, Map), - ok = rabbit_ct_broker_helpers:start_node(Config1, Nodename), - Config1. 
- -rename_node(Config, Nodename, Map) -> - {ok, Config1} = do_rename_node(Config, Nodename, Map), - Config1. - -rename_node_fail(Config, Nodename, Map) -> - {error, _, _} = do_rename_node(Config, Nodename, Map), - ok. - -do_rename_node(Config, Nodename, Map) -> - Map1 = [ - begin - NStr = atom_to_list(N), - case lists:member($@, NStr) of - true -> N; - false -> rabbit_nodes:make({NStr, "localhost"}) - end - end - || N <- Map - ], - Ret = rabbit_ct_broker_helpers:rabbitmqctl(Config, Nodename, - ["rename_cluster_node" | Map1], 120000), - case Ret of - {ok, _} -> - Config1 = update_config_after_rename(Config, Map1), - {ok, Config1}; - {error, _, _} = Error -> - Error - end. - -update_config_after_rename(Config, [Old, New | Rest]) -> - Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Old, - {nodename, New}), - update_config_after_rename(Config1, Rest); -update_config_after_rename(Config, []) -> - Config. - -publish(Config, Node, Q) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, Node), - amqp_channel:call(Ch, #'confirm.select'{}), - amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}), - amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q}, - #amqp_msg{props = #'P_basic'{delivery_mode = 2}, - payload = Q}), - amqp_channel:wait_for_confirms(Ch), - rabbit_ct_client_helpers:close_channels_and_connection(Config, Node). - -consume(Config, Node, Q) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, Node), - amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}), - {#'basic.get_ok'{}, #amqp_msg{payload = Q}} = - amqp_channel:call(Ch, #'basic.get'{queue = Q}), - rabbit_ct_client_helpers:close_channels_and_connection(Config, Node). - - -publish_all(Config, Nodes) -> - [publish(Config, Node, Key) || {Node, Key} <- Nodes]. - -consume_all(Config, Nodes) -> - [consume(Config, Node, Key) || {Node, Key} <- Nodes]. - -set_node(Nodename, Cfg) -> - [{nodename, Nodename} | proplists:delete(nodename, Cfg)]. diff --git a/deps/rabbit/test/cluster_upgrade_SUITE.erl b/deps/rabbit/test/cluster_upgrade_SUITE.erl new file mode 100644 index 000000000000..2b78f119c904 --- /dev/null +++ b/deps/rabbit/test/cluster_upgrade_SUITE.erl @@ -0,0 +1,158 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(cluster_upgrade_SUITE). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-compile([export_all, nowarn_export_all]). + +all() -> + [ + {group, all_tests} + ]. + +groups() -> + [ + {all_tests, [], all_tests()} + ]. + +all_tests() -> + [ + queue_upgrade + ]. + +%% ------------------------------------------------------------------- +%% Test suite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + case rabbit_ct_helpers:is_mixed_versions() of + true -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config); + false -> + {skip, "cluster upgrade tests must be run in mixed versions " + "testing only"} + end. + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. 
+ +init_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Testcase}, + {rmq_nodes_count, 3}, + {force_secondary_umbrella, true} + ]), + Config2 = rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + rabbit_ct_helpers:testcase_started(Config2, Testcase). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% --------------------------------------------------------------------------- +%% Test Cases +%% --------------------------------------------------------------------------- + +queue_upgrade(Config) -> + ok = print_cluster_versions(Config), + + %% Declare some resources before upgrading. + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config), + ClassicQName = <<"classic-q">>, + QQName = <<"quorum-q">>, + StreamQName = <<"stream-q">>, + declare(Ch, ClassicQName, [{<<"x-queue-type">>, longstr, <<"classic">>}]), + declare(Ch, QQName, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + declare(Ch, StreamQName, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + [begin + #'queue.bind_ok'{} = amqp_channel:call( + Ch, + #'queue.bind'{queue = Name, + exchange = <<"amq.fanout">>, + routing_key = Name}) + end || Name <- [ClassicQName, QQName, StreamQName]], + Msgs = [<<"msg">>, <<"msg">>, <<"msg">>], + publish_confirm(Ch, <<"amq.fanout">>, <<>>, Msgs), + ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch), + + %% Restart the servers + Config1 = upgrade_cluster(Config), + ok = print_cluster_versions(Config1), + + %% Check that the resources are still there + queue_utils:wait_for_messages(Config, [[ClassicQName, <<"3">>, <<"3">>, <<"0">>], + [QQName, <<"3">>, <<"3">>, <<"0">>], + [StreamQName, <<"3">>, <<"3">>, <<"0">>]]), + + ok. + +%% ---------------------------------------------------------------------------- +%% Internal utils +%% ---------------------------------------------------------------------------- + +declare(Ch, Q, Args) -> + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = Q, + durable = true, + auto_delete = false, + arguments = Args}). + +publish(Ch, X, RK, Msg) -> + ok = amqp_channel:cast(Ch, + #'basic.publish'{exchange = X, + routing_key = RK}, + #amqp_msg{props = #'P_basic'{delivery_mode = 2}, + payload = Msg}). + +publish_confirm(Ch, X, RK, Msgs) -> + #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), + amqp_channel:register_confirm_handler(Ch, self()), + [publish(Ch, X, RK, Msg) || Msg <- Msgs], + amqp_channel:wait_for_confirms(Ch, 5). + +cluster_members(Config) -> + rabbit_ct_broker_helpers:get_node_configs(Config, nodename). + +upgrade_cluster(Config) -> + Cluster = cluster_members(Config), + ct:pal(?LOW_IMPORTANCE, "Stopping cluster ~p", [Cluster]), + [ok = rabbit_ct_broker_helpers:stop_node(Config, N) + || N <- Cluster], + ct:pal(?LOW_IMPORTANCE, "Restarting cluster ~p", [Cluster]), + Config1 = rabbit_ct_helpers:set_config( + Config, {force_secondary_umbrella, false}), + [ok = rabbit_ct_broker_helpers:async_start_node(Config1, N) + || N <- Cluster], + [ok = rabbit_ct_broker_helpers:wait_for_async_start_node(N) + || N <- Cluster], + Config1. 
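queue_upgrade above only verifies message counts after the rolling restart (each wait_for_messages row is queue name, total, ready and unacknowledged counts). If payload integrity also needed checking for the classic and quorum queues, a sketch along these lines could be appended to the test; drain_and_check/3 is hypothetical:

%% Hypothetical follow-up check: read the messages back and compare payloads.
%% The stream queue is skipped because basic.get does not apply to streams.
drain_and_check(Config, QName, ExpectedPayloads) ->
    [Node | _] = cluster_members(Config),
    Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
    Got = [begin
               {#'basic.get_ok'{}, #amqp_msg{payload = P}} =
                   amqp_channel:call(Ch, #'basic.get'{queue = QName,
                                                      no_ack = true}),
               P
           end || _ <- ExpectedPayloads],
    ?assertEqual(ExpectedPayloads, Got).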
+ +print_cluster_versions(Config) -> + Cluster = cluster_members(Config), + Versions = [begin + Version = rabbit_ct_broker_helpers:rpc( + Config, N, + rabbit, product_version, []), + {N, Version} + end || N <- Cluster], + ct:pal("Cluster versions: ~p", [Versions]), + ok. diff --git a/deps/rabbit/test/clustering_events_SUITE.erl b/deps/rabbit/test/clustering_events_SUITE.erl new file mode 100644 index 000000000000..a12c0b5af42f --- /dev/null +++ b/deps/rabbit/test/clustering_events_SUITE.erl @@ -0,0 +1,117 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(clustering_events_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). + +-import(rabbit_ct_helpers, [eventually/3]). +-import(event_recorder, + [assert_event_type/2, + assert_event_prop/2]). + +-compile(export_all). + +all() -> + [ + {group, tests} + ]. + +groups() -> + [ + {tests, [], [ + node_added_event, + node_deleted_event + ]} + ]. + +%% ------------------------------------------------------------------- +%% Per Suite +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +%% +%% Per Group +%% + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +%% +%% Per Test Case +%% +init_per_testcase(node_added_event = TestCase, Config) -> + Config1 = configure_cluster_essentials(Config, TestCase, false), + Config2 = rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + rabbit_ct_helpers:testcase_started(Config2, TestCase); +init_per_testcase(node_deleted_event = TestCase, Config) -> + Config1 = configure_cluster_essentials(Config, TestCase, true), + Config2 = rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + rabbit_ct_helpers:testcase_started(Config2, TestCase). + +end_per_testcase(TestCase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, TestCase). + +%% +%% Helpers +%% +configure_cluster_essentials(Config, Group, Clustered) -> + rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Group}, + {rmq_nodes_count, 3}, + {rmq_nodes_clustered, Clustered} + ]). + +node_added_event(Config) -> + [Server1, Server2, _Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ok = event_recorder:start(Config), + join_cluster(Server2, Server1), + E = event_recorder:get_events(Config), + ok = event_recorder:stop(Config), + ?assert(lists:any(fun(#event{type = node_added}) -> + true; + (_) -> + false + end, E)). 
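node_added_event above and node_deleted_event just below repeat the same "did an event of this type fire?" scan over the recorded events. A hedged refactoring sketch; assert_event_fired/2 is hypothetical and assumes the #event{} record matched above is in scope for this suite:

%% Hypothetical shared helper for the two event tests.
assert_event_fired(Type, Events) ->
    ?assert(lists:any(fun(#event{type = T}) -> T =:= Type;
                         (_)                -> false
                      end, Events)).

node_added_event would then end with assert_event_fired(node_added, E), and node_deleted_event with assert_event_fired(node_deleted, E).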
+ +node_deleted_event(Config) -> + [Server1, Server2, _Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ok = event_recorder:start(Config), + ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), + ok = rabbit_control_helper:command(forget_cluster_node, Server1, [atom_to_list(Server2)], + []), + E = event_recorder:get_events(Config), + ok = event_recorder:stop(Config), + ?assert(lists:any(fun(#event{type = node_deleted}) -> + true; + (_) -> + false + end, E)). + +join_cluster(Node, Cluster) -> + ok = rabbit_control_helper:command(stop_app, Node), + ok = rabbit_control_helper:command(join_cluster, Node, [atom_to_list(Cluster)], []), + rabbit_control_helper:command(start_app, Node). diff --git a/deps/rabbit/test/clustering_management_SUITE.erl b/deps/rabbit/test/clustering_management_SUITE.erl index 44b94aeea139..9f72008c34a9 100644 --- a/deps/rabbit/test/clustering_management_SUITE.erl +++ b/deps/rabbit/test/clustering_management_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(clustering_management_SUITE). @@ -10,10 +10,11 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). - +-compile(nowarn_export_all). -compile(export_all). -import(clustering_utils, [ + assert_status/2, assert_cluster_status/2, assert_clustered/1, assert_not_clustered/1 @@ -21,43 +22,89 @@ all() -> [ - {group, unclustered_2_nodes}, - {group, unclustered_3_nodes}, - {group, clustered_2_nodes} + {group, mnesia_store}, + {group, khepri_store} ]. 
groups() -> - [ - {unclustered_2_nodes, [], [ - {cluster_size_2, [], [ - classic_config_discovery_node_list - ]} - ]}, - {unclustered_3_nodes, [], [ - {cluster_size_3, [], [ - join_and_part_cluster, - join_cluster_bad_operations, - join_to_start_interval, - forget_cluster_node, - change_cluster_node_type, - change_cluster_when_node_offline, - update_cluster_nodes, - force_reset_node - ]} - ]}, - {clustered_2_nodes, [], [ - {cluster_size_2, [], [ - forget_removes_things, - reset_removes_things, - forget_offline_removes_things, - force_boot, - status_with_alarm, - pid_file_and_await_node_startup, - await_running_count, - start_with_invalid_schema_in_path, - persistent_cluster_id - ]} - ]} + [{mnesia_store, [], [ + {unclustered_2_nodes, [], + [ + {cluster_size_2, [], [ + classic_config_discovery_node_list + ]} + ]}, + {unclustered_3_nodes, [], + [ + {cluster_size_3, [], [ + join_and_part_cluster, + join_cluster_bad_operations, + join_to_start_interval, + forget_cluster_node, + change_cluster_node_type, + change_cluster_when_node_offline + ]} + ]}, + {clustered_2_nodes, [], + [ + {cluster_size_2, [], [ + forget_removes_things, + reset_removes_things, + forget_offline_removes_things, + forget_unavailable_node, + force_boot, + status_with_alarm, + pid_file_and_await_node_startup, + await_running_count, + start_with_invalid_schema_in_path, + persistent_cluster_id, + reset_last_disc_node + ]} + ]} + ]}, + {khepri_store, [], [ + {clustered_2_nodes, [], + [ + {cluster_size_2, [], [ + change_cluster_node_type_in_khepri, + forget_node_in_khepri, + forget_removes_things_in_khepri, + reset_in_khepri, + reset_removes_things_in_khepri, + reset_in_minority, + force_boot_in_khepri, + status_with_alarm, + pid_file_and_await_node_startup_in_khepri, + await_running_count_in_khepri, + start_with_invalid_schema_in_path, + persistent_cluster_id, + stop_start_cluster_node, + restart_cluster_node, + unsupported_forget_cluster_node_offline + + ]} + ]}, + {clustered_3_nodes, [], + [{cluster_size_3, [], [ + forget_unavailable_node, + forget_unavailable_node_in_minority + ]}]}, + {unclustered_3_nodes, [], + [ + {cluster_size_3, [], [ + join_and_part_cluster_in_khepri, + join_cluster_bad_operations_in_khepri, + join_cluster_in_minority, + join_cluster_with_rabbit_stopped, + force_reset_node_in_khepri, + join_to_start_interval, + forget_cluster_node_in_khepri, + start_nodes_in_reverse_order, + start_nodes_in_stop_order_in_khepri, + start_nodes_in_stop_order_with_force_boot + ]} + ]} + ]} ]. suite() -> @@ -82,12 +129,28 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). 
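With the suite now split into mnesia_store and khepri_store top-level groups, init_per_group (below) skips whichever group does not match the configured metadata store, so a run against one store only executes the relevant half. A hedged example of selecting a single group directly with Common Test; the repository's own build tooling drives the real runs, so treat this as illustrative only:

%% Illustrative only: run just the khepri_store group from an Erlang shell.
ct:run_test([{suite, clustering_management_SUITE},
             {group, khepri_store}]).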
+init_per_group(khepri_store, Config) -> + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + mnesia -> + {skip, "These tests target Khepri"}; + _ -> + Config + end; +init_per_group(mnesia_store, Config) -> + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + {khepri, _} -> + {skip, "These tests target mnesia"}; + _ -> + Config + end; init_per_group(unclustered_2_nodes, Config) -> rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]); init_per_group(unclustered_3_nodes, Config) -> rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]); init_per_group(clustered_2_nodes, Config) -> rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]); +init_per_group(clustered_3_nodes, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]); init_per_group(clustered_4_nodes, Config) -> rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]); init_per_group(cluster_size_2, Config) -> @@ -100,7 +163,17 @@ init_per_group(cluster_size_4, Config) -> end_per_group(_, Config) -> Config. +init_per_testcase(create_bad_schema = Testcase, Config) -> + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + mnesia -> + init_per_testcase0(Testcase, Config); + _ -> + {skip, "Mnesia operations not supported by Khepri"} + end; init_per_testcase(Testcase, Config) -> + init_per_testcase0(Testcase, Config). + +init_per_testcase0(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase), ClusterSize = ?config(rmq_nodes_count, Config), TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), @@ -125,13 +198,13 @@ end_per_testcase(Testcase, Config) -> start_with_invalid_schema_in_path(Config) -> [Rabbit, Hare] = cluster_members(Config), - stop_app(Rabbit), - stop_app(Hare), + stop_app(Config, Rabbit), + stop_app(Config, Hare), create_bad_schema(Rabbit, Hare, Config), - start_app(Hare), - case start_app(Rabbit) of + spawn(fun() -> start_app(Config, Hare) end), + case start_app(Config, Rabbit) of ok -> ok; ErrRabbit -> error({unable_to_start_with_bad_schema_in_work_dir, ErrRabbit}) end. @@ -182,28 +255,115 @@ join_and_part_cluster(Config) -> assert_not_clustered(Hare), assert_not_clustered(Bunny), - stop_join_start(Rabbit, Bunny), + stop_join_start(Config, Rabbit, Bunny), assert_clustered([Rabbit, Bunny]), - stop_join_start(Hare, Bunny, true), + stop_join_start(Config, Hare, Bunny, true), assert_cluster_status( {[Bunny, Hare, Rabbit], [Bunny, Rabbit], [Bunny, Hare, Rabbit]}, [Rabbit, Hare, Bunny]), %% Allow clustering with already clustered node - ok = stop_app(Rabbit), - {ok, <<"The node is already a member of this cluster">>} = - join_cluster(Rabbit, Hare), - ok = start_app(Rabbit), + ok = stop_app(Config, Rabbit), + ok = join_cluster(Config, Rabbit, Hare), + ok = start_app(Config, Rabbit), - stop_reset_start(Rabbit), + stop_reset_start(Config, Rabbit), assert_not_clustered(Rabbit), assert_cluster_status({[Bunny, Hare], [Bunny], [Bunny, Hare]}, [Hare, Bunny]), - stop_reset_start(Hare), + stop_reset_start(Config, Hare), + assert_not_clustered(Hare), + assert_not_clustered(Bunny), + + %% Using `join_cluster` is allowed without stopping `rabbit` first since + %% 3.13.0. It will only work if all nodes support it: check if they all + %% expose `rabbit_ff_controller:is_running/0`. 
+ Supported = lists:all( + fun(R) -> R end, + rabbit_ct_broker_helpers:rpc_all( + Config, erlang, function_exported, + [rabbit_ff_controller, is_running, 0])), + ct:pal( + "Do all nodes support `join_cluster` without stopping `rabbit` " + "first? ~p", + [Supported]), + case Supported of + true -> + ?assertEqual(ok, join_cluster(Config, Rabbit, Bunny)), + assert_clustered([Rabbit, Bunny]), + + ?assertEqual(ok, join_cluster(Config, Hare, Bunny)), + assert_clustered([Rabbit, Bunny, Hare]); + false -> + ok + end. + +stop_start_cluster_node(Config) -> + [Rabbit, Hare] = cluster_members(Config), + + assert_clustered([Rabbit, Hare]), + + ok = stop_app(Config, Rabbit), + ok = start_app(Config, Rabbit), + + assert_clustered([Rabbit, Hare]), + + ok = stop_app(Config, Hare), + ok = start_app(Config, Hare), + + assert_clustered([Rabbit, Hare]). + +restart_cluster_node(Config) -> + [Rabbit, Hare] = cluster_members(Config), + + assert_clustered([Rabbit, Hare]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:start_node(Config, Hare), + + assert_clustered([Rabbit, Hare]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit), + + assert_clustered([Rabbit, Hare]). + +join_and_part_cluster_in_khepri(Config) -> + [Rabbit, Hare, Bunny] = cluster_members(Config), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare), + assert_not_clustered(Bunny), + + stop_join_start(Config, Rabbit, Bunny), + assert_clustered([Rabbit, Bunny]), + + stop_join_start(Config, Hare, Bunny), + assert_clustered([Rabbit, Bunny, Hare]), + + %% Allow clustering with already clustered node + ok = stop_app(Config, Rabbit), + ?assertEqual(ok, join_cluster(Config, Rabbit, Hare)), + ok = start_app(Config, Rabbit), + + assert_clustered([Rabbit, Bunny, Hare]), + + stop_reset_start(Config, Bunny), + assert_not_clustered(Bunny), + assert_clustered([Hare, Rabbit]), + + stop_reset_start(Config, Rabbit), + assert_not_clustered(Rabbit), assert_not_clustered(Hare), - assert_not_clustered(Bunny). + + %% Using `join_cluster` is allowed without stopping `rabbit` first since + %% 3.13.0. + ?assertEqual(ok, join_cluster(Config, Rabbit, Bunny)), + assert_clustered([Rabbit, Bunny]), + + ?assertEqual(ok, join_cluster(Config, Hare, Bunny)), + assert_clustered([Rabbit, Bunny, Hare]). 
join_cluster_bad_operations(Config) -> [Rabbit, Hare, Bunny] = cluster_members(Config), @@ -214,162 +374,418 @@ join_cluster_bad_operations(Config) -> [rabbit_prelaunch, get_context, 0]), %% Nonexistent node - ok = stop_app(Rabbit), - assert_failure(fun () -> join_cluster(Rabbit, non@existent) end), - ok = start_app(Rabbit), - assert_not_clustered(Rabbit), - - %% Trying to cluster with mnesia running - assert_failure(fun () -> join_cluster(Rabbit, Bunny) end), + ok = stop_app(Config, Rabbit), + assert_failure(fun () -> join_cluster(Config, Rabbit, non@existent) end), + ok = start_app(Config, Rabbit), assert_not_clustered(Rabbit), %% Trying to cluster the node with itself - ok = stop_app(Rabbit), - assert_failure(fun () -> join_cluster(Rabbit, Rabbit) end), - ok = start_app(Rabbit), + ok = stop_app(Config, Rabbit), + assert_failure(fun () -> join_cluster(Config, Rabbit, Rabbit) end), + ok = start_app(Config, Rabbit), assert_not_clustered(Rabbit), %% Do not let the node leave the cluster or reset if it's the only %% ram node - stop_join_start(Hare, Rabbit, true), + stop_join_start(Config, Hare, Rabbit, true), assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]}, [Rabbit, Hare]), - ok = stop_app(Hare), - assert_failure(fun () -> join_cluster(Rabbit, Bunny) end), - assert_failure(fun () -> reset(Rabbit) end), - ok = start_app(Hare), + ok = stop_app(Config, Hare), + assert_failure(fun () -> join_cluster(Config, Rabbit, Bunny) end), + assert_failure(fun () -> reset(Config, Rabbit) end), + ok = start_app(Config, Hare), assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]}, [Rabbit, Hare]), %% Cannot start RAM-only node first - ok = stop_app(Rabbit), - ok = stop_app(Hare), - assert_failure(fun () -> start_app(Hare) end), - ok = start_app(Rabbit), + ok = stop_app(Config, Rabbit), + ok = stop_app(Config, Hare), + assert_failure(fun () -> start_app(Config, Hare) end), + ok = start_app(Config, Rabbit), case UsePrelaunch of true -> - ok = start_app(Hare); + ok = start_app(Config, Hare); false -> %% The Erlang VM has stopped after previous rabbit app failure ok = rabbit_ct_broker_helpers:start_node(Config, Hare) end, ok. +join_cluster_bad_operations_in_khepri(Config) -> + [Rabbit, _Hare, _Bunny] = cluster_members(Config), + + %% Nonexistent node + ok = stop_app(Config, Rabbit), + assert_failure(fun () -> join_cluster(Config, Rabbit, non@existent) end), + ok = start_app(Config, Rabbit), + assert_not_clustered(Rabbit), + + %% Trying to cluster the node with itself + ok = stop_app(Config, Rabbit), + assert_failure(fun () -> join_cluster(Config, Rabbit, Rabbit) end), + ok = start_app(Config, Rabbit), + assert_not_clustered(Rabbit), + + ok. + %% This tests that the nodes in the cluster are notified immediately of a node %% join, and not just after the app is started. join_to_start_interval(Config) -> [Rabbit, Hare, _Bunny] = cluster_members(Config), - ok = stop_app(Rabbit), - ok = join_cluster(Rabbit, Hare), + ok = stop_app(Config, Rabbit), + ok = join_cluster(Config, Rabbit, Hare), assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Rabbit, Hare]), - ok = start_app(Rabbit), + ok = start_app(Config, Rabbit), assert_clustered([Rabbit, Hare]). 
+join_cluster_in_minority(Config) -> + [Rabbit, Hare, Bunny] = cluster_members(Config), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare), + assert_not_clustered(Bunny), + + stop_join_start(Config, Bunny, Rabbit), + assert_clustered([Rabbit, Bunny]), + ok = rabbit_ct_broker_helpers:stop_node(Config, Bunny), + + ok = stop_app(Config, Hare), + ?assertEqual(ok, join_cluster(Config, Hare, Rabbit, false)), + + ok = rabbit_ct_broker_helpers:start_node(Config, Bunny), + ?assertEqual(ok, join_cluster(Config, Hare, Rabbit, false)), + ?assertEqual(ok, start_app(Config, Hare)), + + assert_clustered([Rabbit, Bunny, Hare]). + +join_cluster_with_rabbit_stopped(Config) -> + [Rabbit, Hare, Bunny] = cluster_members(Config), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare), + assert_not_clustered(Bunny), + + stop_join_start(Config, Bunny, Rabbit), + assert_clustered([Rabbit, Bunny]), + ok = stop_app(Config, Bunny), + + ok = stop_app(Config, Hare), + ?assertEqual(ok, join_cluster(Config, Hare, Rabbit, false)), + + ok = start_app(Config, Bunny), + ?assertEqual(ok, join_cluster(Config, Hare, Rabbit, false)), + ?assertEqual(ok, start_app(Config, Hare)), + + assert_clustered([Rabbit, Bunny, Hare]). + forget_cluster_node(Config) -> [Rabbit, Hare, Bunny] = cluster_members(Config), %% Trying to remove a node not in the cluster should fail - assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end), + assert_failure(fun () -> forget_cluster_node(Config, Hare, Rabbit) end), - stop_join_start(Rabbit, Hare), + stop_join_start(Config, Rabbit, Hare), assert_clustered([Rabbit, Hare]), %% Trying to remove an online node should fail - assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end), + assert_failure(fun () -> forget_cluster_node(Config, Hare, Rabbit) end), - ok = stop_app(Rabbit), + ok = stop_app(Config, Rabbit), %% We're passing the --offline flag, but Hare is online - assert_failure(fun () -> forget_cluster_node(Hare, Rabbit, true) end), + assert_failure(fun () -> forget_cluster_node(Config, Hare, Rabbit, true) end), %% Removing some nonexistent node will fail - assert_failure(fun () -> forget_cluster_node(Hare, non@existent) end), - ok = forget_cluster_node(Hare, Rabbit), + assert_failure(fun () -> forget_cluster_node(Config, Hare, non@existent) end), + ok = forget_cluster_node(Config, Hare, Rabbit), assert_not_clustered(Hare), assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Rabbit]), %% Now we can't start Rabbit since it thinks that it's still in the cluster %% with Hare, while Hare disagrees. - assert_failure(fun () -> start_app(Rabbit) end), + assert_failure(fun () -> start_app(Config, Rabbit) end), - ok = reset(Rabbit), - ok = start_app(Rabbit), + ok = reset(Config, Rabbit), + ok = start_app(Config, Rabbit), assert_not_clustered(Rabbit), %% Now we remove Rabbit from an offline node. 
- stop_join_start(Bunny, Hare), - stop_join_start(Rabbit, Hare), + stop_join_start(Config, Bunny, Hare), + stop_join_start(Config, Rabbit, Hare), assert_clustered([Rabbit, Hare, Bunny]), - ok = stop_app(Hare), - ok = stop_app(Rabbit), - ok = stop_app(Bunny), + ok = stop_app(Config, Hare), + ok = stop_app(Config, Rabbit), + ok = stop_app(Config, Bunny), %% This is fine but we need the flag - assert_failure(fun () -> forget_cluster_node(Hare, Bunny) end), + assert_failure(fun () -> forget_cluster_node(Config, Hare, Bunny) end), %% Also fails because hare node is still running - assert_failure(fun () -> forget_cluster_node(Hare, Bunny, true) end), + assert_failure(fun () -> forget_cluster_node(Config, Hare, Bunny, true) end), %% But this works ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), - {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare, - ["forget_cluster_node", "--offline", Bunny]), + ok = forget_cluster_node(Config, Hare, Bunny, true), ok = rabbit_ct_broker_helpers:start_node(Config, Hare), - ok = start_app(Rabbit), + ok = start_app(Config, Rabbit), %% Bunny still thinks its clustered with Rabbit and Hare - assert_failure(fun () -> start_app(Bunny) end), - ok = reset(Bunny), - ok = start_app(Bunny), + assert_failure(fun () -> start_app(Config, Bunny) end), + ok = reset(Config, Bunny), + ok = start_app(Config, Bunny), assert_not_clustered(Bunny), assert_clustered([Rabbit, Hare]). +forget_cluster_node_in_khepri(Config) -> + [Rabbit, Hare, _Bunny] = cluster_members(Config), + + %% Trying to remove a node not in the cluster should fail + assert_failure(fun () -> forget_cluster_node(Config, Hare, Rabbit) end), + + stop_join_start(Config, Rabbit, Hare), + assert_clustered([Rabbit, Hare]), + + %% Trying to remove an online node should fail + assert_failure(fun () -> forget_cluster_node(Config, Hare, Rabbit) end), + + ok = stop_app(Config, Rabbit), + %% Removing some nonexistent node will fail + assert_failure(fun () -> forget_cluster_node(Config, Hare, non@existent) end), + ok = forget_cluster_node(Config, Hare, Rabbit), + assert_not_clustered(Hare), + + ok = start_app(Config, Rabbit), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare). + +unsupported_forget_cluster_node_offline(Config) -> + [Rabbit, Hare] = cluster_members(Config), + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = stop_app(Config, Rabbit), + Ret0 = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare, + ["forget_cluster_node", "--offline", Rabbit]), + is_not_supported(Ret0). + forget_removes_things(Config) -> - test_removes_things(Config, fun (R, H) -> ok = forget_cluster_node(H, R) end). + test_removes_things(Config, fun (R, H) -> ok = forget_cluster_node(Config, H, R) end). reset_removes_things(Config) -> - test_removes_things(Config, fun (R, _H) -> ok = reset(R) end). + test_removes_things(Config, fun (R, _H) -> ok = reset(Config, R) end). 
test_removes_things(Config, LoseRabbit) -> - Unmirrored = <<"unmirrored-queue">>, - [Rabbit, Hare] = cluster_members(Config), + Classic = <<"classic-queue">>, + [Rabbit, Hare | _] = cluster_members(Config), RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit), - declare(RCh, Unmirrored), - ok = stop_app(Rabbit), + declare(RCh, Classic), + ok = stop_app(Config, Rabbit), HCh = rabbit_ct_client_helpers:open_channel(Config, Hare), {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} = - (catch declare(HCh, Unmirrored)), + (catch declare(HCh, Classic)), ok = LoseRabbit(Rabbit, Hare), HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare), - declare(HCh2, Unmirrored), + declare(HCh2, Classic), + ok. + +forget_node_in_khepri(Config) -> + [Rabbit, Hare] = cluster_members(Config), + + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + + ok = stop_app(Config, Rabbit), + ok = forget_cluster_node(Config, Hare, Rabbit), + + assert_cluster_status({[Hare], [Hare]}, [Hare]), + + ok. + +forget_removes_things_in_khepri(Config) -> + ClassicQueue = <<"classic-queue">>, + [Rabbit, Hare | _] = cluster_members(Config), + + RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit), + ?assertMatch(#'queue.declare_ok'{}, declare(RCh, ClassicQueue)), + + ok = stop_app(Config, Rabbit), + ok = forget_cluster_node(Config, Hare, Rabbit), + + HCh = rabbit_ct_client_helpers:open_channel(Config, Hare), + ?assertExit( + {{shutdown, {server_initiated_close, 404, _}}, _}, + declare_passive(HCh, ClassicQueue)), + ok. +forget_unavailable_node(Config) -> + [Rabbit, Hare | _] = Nodes = cluster_members(Config), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ?assertMatch(ok, forget_cluster_node(Config, Hare, Rabbit)), + + NNodes = lists:nthtail(1, Nodes), + + assert_cluster_status({NNodes, NNodes}, NNodes). + +forget_unavailable_node_in_minority(Config) -> + All = [Rabbit, Hare, Bunny] = cluster_members(Config), + + assert_cluster_status({All, All}, All), + + %% Find out the raft status of the soon to be only + %% running node + RaftStatus = get_raft_status(Config, Hare), + + %% Stop other two nodes + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:stop_node(Config, Bunny), + + %% If Hare was the leader, it is able to forget one of the nodes. Change takes place as soon as it is written on the log. The other membership change will be rejected until the last change has consensus. + ct:pal("Initial Raft status: ~p", [RaftStatus]), + case RaftStatus of + leader -> + ?assertMatch(ok, forget_cluster_node(Config, Hare, Rabbit)), + not_permitted(forget_cluster_node(Config, Hare, Bunny)); + follower -> + %% Follower might have been promoted before the second node goes down, check the status again + RaftStatus1 = get_raft_status(Config, Hare), + ct:pal("Latest Raft status: ~p", [RaftStatus1]), + case RaftStatus1 of + leader -> + ?assertMatch(ok, forget_cluster_node(Config, Hare, Rabbit)), + not_permitted(forget_cluster_node(Config, Hare, Bunny)); + _ -> + is_in_minority(forget_cluster_node(Config, Hare, Rabbit)) + end + end. + +not_permitted(Ret) -> + ?assertMatch({error, 69, _}, Ret), + {error, _, Msg} = Ret, + ?assertMatch(match, re:run(Msg, ".*not_permitted.*", [{capture, none}])). 
+ +get_raft_status(Config, Node) -> + AllStatus = rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_khepri, status, []), + case lists:filter(fun(S) -> + proplists:get_value(<<"Node Name">>, S) == Node + end, AllStatus) of + [NodeStatus] -> + proplists:get_value(<<"Raft State">>, NodeStatus); + [] -> + unknown + end. + +reset_in_khepri(Config) -> + ClassicQueue = <<"classic-queue">>, + [Rabbit, Hare | _] = cluster_members(Config), + + RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit), + ?assertMatch(#'queue.declare_ok'{}, declare(RCh, ClassicQueue)), + + stop_app(Config, Hare), + ok = reset(Config, Hare), + + %% Rabbit is a 1-node cluster. The classic queue is still there. + assert_cluster_status({[Rabbit], [Rabbit]}, [Rabbit]), + ?assertMatch(#'queue.declare_ok'{}, declare_passive(RCh, ClassicQueue)), + + %% Can't reset a running node + ?assertMatch({error, 64, _}, reset(Config, Rabbit)), + + %% Start Hare, it should work as standalone node. + start_app(Config, Hare), + + assert_cluster_status({[Hare], [Hare]}, [Hare]), + + ok. + +reset_removes_things_in_khepri(Config) -> + ClassicQueue = <<"classic-queue">>, + [Rabbit, Hare | _] = cluster_members(Config), + + RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit), + ?assertMatch(#'queue.declare_ok'{}, declare(RCh, ClassicQueue)), + + stop_app(Config, Rabbit), + ok = reset(Config, Rabbit), + + assert_cluster_status({[Hare], [Hare]}, [Hare]), + + start_app(Config, Rabbit), + assert_cluster_status({[Rabbit], [Rabbit]}, [Rabbit]), + + %% The classic queue was declared in Rabbit, once that node is reset + %% the queue needs to be removed from the rest of the cluster + HCh = rabbit_ct_client_helpers:open_channel(Config, Hare), + ?assertExit( + {{shutdown, {server_initiated_close, 404, _}}, _}, + declare_passive(HCh, ClassicQueue)), + + ok. + +reset_in_minority(Config) -> + [Rabbit, Hare | _] = cluster_members(Config), + + rabbit_ct_broker_helpers:stop_node(Config, Hare), + + ok = rpc:call(Rabbit, application, set_env, + [rabbit, khepri_leader_wait_retry_timeout, 1000]), + ok = rpc:call(Rabbit, application, set_env, + [rabbit, khepri_leader_wait_retry_limit, 3]), + stop_app(Config, Rabbit), + + is_in_minority(reset(Config, Rabbit)), + + ok. + +is_in_minority(Ret) -> + ?assertMatch({error, 75, _}, Ret), + {error, _, Msg} = Ret, + ?assertMatch(match, re:run(Msg, ".*timed out.*minority.*", [{capture, none}])). + +reset_last_disc_node(Config) -> + Servers = [Rabbit, Hare | _] = cluster_members(Config), + + stop_app(Config, Hare), + ?assertEqual(ok, change_cluster_node_type(Config, Hare, ram)), + start_app(Config, Hare), + + case rabbit_ct_broker_helpers:enable_feature_flag(Config, Servers, khepri_db) of + ok -> + %% The reset works after the switch to Khepri because the RAM node was + %% implicitly converted to a disc one as Khepri always writes data on disc. + stop_app(Config, Rabbit), + ?assertEqual(ok, reset(Config, Rabbit)), + start_app(Config, Rabbit), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare), + ok; + {skip, _} = Skip -> + Skip + end. 
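reset_in_minority above shortens the Khepri leader-wait retries at runtime via application:set_env so the reset fails quickly instead of blocking on an unreachable majority. Outside of a test, the same application environment keys would normally come from configuration; a hedged, advanced.config-style sketch using the parameter names from the rpc calls above:

%% Sketch only: static equivalent of the two knobs set at runtime above.
[
 {rabbit, [
           {khepri_leader_wait_retry_timeout, 1000},
           {khepri_leader_wait_retry_limit, 3}
          ]}
].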
+ forget_offline_removes_things(Config) -> [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Unmirrored = <<"unmirrored-queue">>, + Classic = <<"classic-queue">>, X = <<"X">>, RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit), - declare(RCh, Unmirrored), + declare(RCh, Classic), amqp_channel:call(RCh, #'exchange.declare'{durable = true, exchange = X, auto_delete = true}), - amqp_channel:call(RCh, #'queue.bind'{queue = Unmirrored, + amqp_channel:call(RCh, #'queue.bind'{queue = Classic, exchange = X}), ok = rabbit_ct_broker_helpers:stop_broker(Config, Rabbit), HCh = rabbit_ct_client_helpers:open_channel(Config, Hare), {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} = - (catch declare(HCh, Unmirrored)), + (catch declare(HCh, Classic)), ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), - {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare, - ["forget_cluster_node", "--offline", Rabbit]), + ok = forget_cluster_node(Config, Hare, Rabbit, true), ok = rabbit_ct_broker_helpers:start_node(Config, Hare), HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare), - declare(HCh2, Unmirrored), + declare(HCh2, Classic), {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} = (catch amqp_channel:call(HCh2,#'exchange.declare'{durable = true, exchange = X, @@ -377,49 +793,6 @@ forget_offline_removes_things(Config) -> passive = true})), ok. -set_ha_policy(Config, QName, Master, Slaves) -> - Nodes = [list_to_binary(atom_to_list(N)) || N <- [Master | Slaves]], - HaPolicy = {<<"nodes">>, Nodes}, - rabbit_ct_broker_helpers:set_ha_policy(Config, Master, QName, HaPolicy), - await_followers(QName, Master, Slaves). - -await_followers(QName, Master, Slaves) -> - await_followers_0(QName, Master, Slaves, 10). - -await_followers_0(QName, Master, Slaves0, Tries) -> - {ok, Queue} = await_followers_lookup_queue(QName, Master), - SPids = amqqueue:get_slave_pids(Queue), - ActMaster = amqqueue:qnode(Queue), - ActSlaves = lists:usort([node(P) || P <- SPids]), - Slaves1 = lists:usort(Slaves0), - await_followers_1(QName, ActMaster, ActSlaves, Master, Slaves1, Tries). - -await_followers_1(QName, _ActMaster, _ActSlaves, _Master, _Slaves, 0) -> - error({timeout_waiting_for_followers, QName}); -await_followers_1(QName, ActMaster, ActSlaves, Master, Slaves, Tries) -> - case {Master, Slaves} of - {ActMaster, ActSlaves} -> - ok; - _ -> - timer:sleep(250), - await_followers_0(QName, Master, Slaves, Tries - 1) - end. - -await_followers_lookup_queue(QName, Master) -> - await_followers_lookup_queue(QName, Master, 10). - -await_followers_lookup_queue(QName, _Master, 0) -> - error({timeout_looking_up_queue, QName}); -await_followers_lookup_queue(QName, Master, Tries) -> - RpcArgs = [rabbit_misc:r(<<"/">>, queue, QName)], - case rpc:call(Master, rabbit_amqqueue, lookup, RpcArgs) of - {error, not_found} -> - timer:sleep(250), - await_followers_lookup_queue(QName, Master, Tries - 1); - {ok, Q} -> - {ok, Q} - end. - force_boot(Config) -> [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -433,156 +806,163 @@ force_boot(Config) -> ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit), ok. 
+force_boot_in_khepri(Config) -> + [Rabbit, _Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + stop_app(Config, Rabbit), + %% It executes force boot for mnesia, currently Khepri does nothing + ?assertMatch({ok, []}, rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, ["force_boot"])), + ok. + change_cluster_node_type(Config) -> [Rabbit, Hare, _Bunny] = cluster_members(Config), %% Trying to change the node to the ram type when not clustered should always fail - ok = stop_app(Rabbit), - assert_failure(fun () -> change_cluster_node_type(Rabbit, ram) end), - ok = start_app(Rabbit), + ok = stop_app(Config, Rabbit), + assert_failure(fun () -> change_cluster_node_type(Config, Rabbit, ram) end), + ok = start_app(Config, Rabbit), - ok = stop_app(Rabbit), - join_cluster(Rabbit, Hare), + ok = stop_app(Config, Rabbit), + join_cluster(Config, Rabbit, Hare), assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Rabbit, Hare]), - change_cluster_node_type(Rabbit, ram), - assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]}, + change_cluster_node_type(Config, Rabbit, ram), + assert_cluster_status({[Rabbit, Hare], [Hare], [Rabbit, Hare], [Hare], [Hare]}, [Rabbit, Hare]), - change_cluster_node_type(Rabbit, disc), + change_cluster_node_type(Config, Rabbit, disc), assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Rabbit, Hare]), - change_cluster_node_type(Rabbit, ram), - ok = start_app(Rabbit), + change_cluster_node_type(Config, Rabbit, ram), + ok = start_app(Config, Rabbit), assert_cluster_status({[Rabbit, Hare], [Hare], [Hare, Rabbit]}, [Rabbit, Hare]), %% Changing to ram when you're the only ram node should fail - ok = stop_app(Hare), - assert_failure(fun () -> change_cluster_node_type(Hare, ram) end), - ok = start_app(Hare). + ok = stop_app(Config, Hare), + assert_failure(fun () -> change_cluster_node_type(Config, Hare, ram) end), + ok = start_app(Config, Hare). + +change_cluster_node_type_in_khepri(Config) -> + [Rabbit, Hare] = cluster_members(Config), + + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + + ok = stop_app(Config, Rabbit), + {error, 69, Msg} = change_cluster_node_type(Config, Rabbit, ram), + ?assertEqual( + match, + re:run( + Msg, "Feature `ram_node_type` is deprecated", + [{capture, none}])), + + ok = change_cluster_node_type(Config, Rabbit, disc), + ok = start_app(Config, Rabbit), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]). 
change_cluster_when_node_offline(Config) -> [Rabbit, Hare, Bunny] = cluster_members(Config), %% Cluster the three notes - stop_join_start(Rabbit, Hare), + stop_join_start(Config, Rabbit, Hare), assert_clustered([Rabbit, Hare]), - stop_join_start(Bunny, Hare), + stop_join_start(Config, Bunny, Hare), assert_clustered([Rabbit, Hare, Bunny]), %% Bring down Rabbit, and remove Bunny from the cluster while %% Rabbit is offline - ok = stop_app(Rabbit), - ok = stop_app(Bunny), - ok = reset(Bunny), + ok = stop_app(Config, Rabbit), + ok = stop_app(Config, Bunny), + ok = reset(Config, Bunny), assert_cluster_status({[Bunny], [Bunny], []}, [Bunny]), assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Hare]), assert_cluster_status( - {[Rabbit, Hare, Bunny], [Hare], [Rabbit, Hare, Bunny], - [Rabbit, Hare, Bunny], [Hare, Bunny]}, [Rabbit]), + {[Rabbit, Hare, Bunny], [Hare], [Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Hare, Bunny]}, [Rabbit]), %% Bring Rabbit back up - ok = start_app(Rabbit), + ok = start_app(Config, Rabbit), assert_clustered([Rabbit, Hare]), - ok = start_app(Bunny), - assert_not_clustered(Bunny), - - %% Now the same, but Rabbit is a RAM node, and we bring up Bunny - %% before - ok = stop_app(Rabbit), - ok = change_cluster_node_type(Rabbit, ram), - ok = start_app(Rabbit), - stop_join_start(Bunny, Hare), - assert_cluster_status( - {[Rabbit, Hare, Bunny], [Hare, Bunny], [Rabbit, Hare, Bunny]}, - [Rabbit, Hare, Bunny]), - ok = stop_app(Rabbit), - ok = stop_app(Bunny), - ok = reset(Bunny), - ok = start_app(Bunny), + ok = start_app(Config, Bunny), assert_not_clustered(Bunny), - assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]}, [Hare]), - assert_cluster_status( - {[Rabbit, Hare, Bunny], [Hare, Bunny], [Hare, Bunny]}, - [Rabbit]), - ok = start_app(Rabbit), - assert_cluster_status({[Rabbit, Hare], [Hare], [Rabbit, Hare]}, - [Rabbit, Hare]), - assert_not_clustered(Bunny). - -update_cluster_nodes(Config) -> - [Rabbit, Hare, Bunny] = cluster_members(Config), + ok. - %% Mnesia is running... - assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end), - - ok = stop_app(Rabbit), - ok = join_cluster(Rabbit, Hare), - ok = stop_app(Bunny), - ok = join_cluster(Bunny, Hare), - ok = start_app(Bunny), - stop_reset_start(Hare), - assert_failure(fun () -> start_app(Rabbit) end), - %% Bogus node - assert_failure(fun () -> update_cluster_nodes(Rabbit, non@existent) end), - %% Inconsistent node - assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end), - ok = update_cluster_nodes(Rabbit, Bunny), - ok = start_app(Rabbit), - assert_not_clustered(Hare), - assert_clustered([Rabbit, Bunny]). +is_not_supported(Ret) -> + ?assertMatch({error, _, _}, Ret), + {error, _, Msg} = Ret, + ?assertMatch(match, re:run(Msg, ".*not_supported.*", [{capture, none}])). classic_config_discovery_node_list(Config) -> [Rabbit, Hare] = cluster_members(Config), - ok = stop_app(Hare), - ok = reset(Hare), + %% We restart the node that is reconfigured during this testcase to make + %% sure it has the latest start time. This ensures that peer discovery will + %% always select the other node as the one to join. + %% + %% We do this because this testcase does not really reflect a real world + %% situation. Indeed, both nodes have inconsistent peer discovery + %% configuration and the configuration is changed at runtime using internal + %% calls (which we don't support). + %% + %% Without this, if node 2 was started first, it will select itself and + %% thus boot as a standalone node, expecting node 1 to join it. 
But node 1 + %% is ready and never restarted/reconfigured. + rabbit_ct_broker_helpers:restart_node(Config, Hare), + + ok = stop_app(Config, Hare), + ok = reset(Config, Hare), ok = rpc:call(Hare, application, set_env, [rabbit, cluster_nodes, {[Rabbit], disc}]), - ok = start_app(Hare), + ok = start_app(Config, Hare), assert_clustered([Rabbit, Hare]), - ok = stop_app(Hare), - ok = reset(Hare), + ok = stop_app(Config, Hare), + ok = reset(Config, Hare), ok = rpc:call(Hare, application, set_env, [rabbit, cluster_nodes, {[Rabbit], ram}]), - ok = start_app(Hare), + ok = start_app(Config, Hare), assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]}, [Rabbit, Hare]), %% List of nodes [node()] is equivalent to {[node()], disk} - ok = stop_app(Hare), - ok = reset(Hare), + ok = stop_app(Config, Hare), + ok = reset(Config, Hare), ok = rpc:call(Hare, application, set_env, [rabbit, cluster_nodes, [Rabbit]]), - ok = start_app(Hare), + ok = start_app(Config, Hare), assert_clustered([Rabbit, Hare]), - ok = stop_app(Hare), - ok = reset(Hare), + ok = stop_app(Config, Hare), + ok = reset(Config, Hare), %% If we use an invalid cluster_nodes conf, the node fails to start. ok = rpc:call(Hare, application, set_env, [rabbit, cluster_nodes, "Yes, please"]), - assert_failure(fun () -> start_app(Hare) end), + assert_failure(fun () -> start_app(Config, Hare) end), assert_not_clustered(Rabbit). -force_reset_node(Config) -> +force_reset_node_in_khepri(Config) -> [Rabbit, Hare, _Bunny] = cluster_members(Config), - stop_join_start(Rabbit, Hare), - stop_app(Rabbit), - force_reset(Rabbit), + stop_join_start(Config, Rabbit, Hare), + stop_app(Config, Rabbit), + ok = force_reset(Config, Rabbit), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Hare]), + %% Khepri is stopped, so it won't report anything. + assert_status({[Rabbit], [], [Rabbit], [Rabbit], []}, [Rabbit]), %% Hare thinks that Rabbit is still clustered assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Hare]), - %% %% ...but it isn't - assert_cluster_status({[Rabbit], [Rabbit], []}, [Rabbit]), - %% We can rejoin Rabbit and Hare - update_cluster_nodes(Rabbit, Hare), - start_app(Rabbit), - assert_clustered([Rabbit, Hare]). + ok = start_app(Config, Rabbit), + assert_not_clustered(Rabbit), + %% We can rejoin Rabbit and Hare. Unlike with Mnesia, we try to solve the + %% inconsistency instead of returning an error. + ok = stop_app(Config, Rabbit), + ?assertEqual(ok, join_cluster(Config, Rabbit, Hare, false)), + ok = start_app(Config, Rabbit), + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]). status_with_alarm(Config) -> [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, @@ -642,6 +1022,34 @@ pid_file_and_await_node_startup(Config) -> {error, _, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, ["wait", RabbitPidFile]). +pid_file_and_await_node_startup_in_khepri(Config) -> + [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + RabbitConfig = rabbit_ct_broker_helpers:get_node_config(Config,Rabbit), + RabbitPidFile = ?config(pid_file, RabbitConfig), + %% ensure pid file is readable + {ok, _} = file:read_file(RabbitPidFile), + %% ensure wait works on running node + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, + ["wait", RabbitPidFile]), + %% stop both nodes + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + %% start first node in the background. 
It will wait for Khepri + %% and then Mnesia tables (which will already be available) + spawn_link(fun() -> + rabbit_ct_broker_helpers:start_node(Config, Rabbit) + end), + PreviousPid = pid_from_file(RabbitPidFile), + Attempts = 200, + Timeout = 50, + wait_for_pid_file_to_change(RabbitPidFile, PreviousPid, Attempts, Timeout), + %% The node is blocked waiting for Khepri, so this will timeout. Mnesia + %% alone would fail here as it wasn't the last node to stop + %% Let's make it a short wait. + {error, timeout, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, + ["wait", RabbitPidFile], 10000). + await_running_count(Config) -> [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -682,6 +1090,135 @@ await_running_count(Config) -> rabbit_nodes, await_running_count, [5, 1000])). +await_running_count_in_khepri(Config) -> + [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, + nodename), + RabbitConfig = rabbit_ct_broker_helpers:get_node_config(Config,Rabbit), + RabbitPidFile = ?config(pid_file, RabbitConfig), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, + ["wait", RabbitPidFile]), + %% stop both nodes + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + %% start one node in the background + %% One khepri node in minority won't finish starting up, but will wait a reasonable + %% amount of time for a new leader to be elected. Hopefully on that time + %% a second (or more) node is brought up so they can reach consensus + %% Kind of similar to the wait for tables that we had on mnesia + rabbit_ct_broker_helpers:async_start_node(Config, Rabbit), + rabbit_ct_broker_helpers:start_node(Config, Hare), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, + ["wait", RabbitPidFile]), + %% this now succeeds + ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit, + rabbit_nodes, + await_running_count, [2, 30000])), + %% this still succeeds + ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit, + rabbit_nodes, + await_running_count, [1, 30000])), + %% this still fails + ?assertEqual({error, timeout}, + rabbit_ct_broker_helpers:rpc(Config, Rabbit, + rabbit_nodes, + await_running_count, [5, 1000])). + +start_nodes_in_reverse_order(Config) -> + [Rabbit, Hare, Bunny] = cluster_members(Config), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare), + assert_not_clustered(Bunny), + + stop_join_start(Config, Rabbit, Bunny), + stop_join_start(Config, Hare, Bunny), + assert_clustered([Rabbit, Hare, Bunny]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:stop_node(Config, Bunny), + + spawn(fun() -> ok = rabbit_ct_broker_helpers:start_node(Config, Bunny) end), + ok = rabbit_ct_broker_helpers:start_node(Config, Hare), + assert_cluster_status({[Bunny, Hare, Rabbit], [Bunny, Hare, Rabbit], [Bunny, Hare]}, + [Bunny, Hare]), + + ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit), + assert_clustered([Rabbit, Hare, Bunny]). + +%% Test booting nodes in the wrong order for Mnesia. Interesting... 
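A note on the rabbit_nodes:await_running_count/2 calls above: the first argument is the minimum number of running cluster members to wait for, the second a timeout in milliseconds, and the result is ok or {error, timeout}. A minimal sketch of using it over RPC outside the test helpers (the wrapper name is hypothetical):

%% Hypothetical wrapper, mirroring the rpc calls in the testcase above:
%% block until at least MinCount cluster members report as running.
wait_for_running_members(Node, MinCount, TimeoutMs) ->
    case rpc:call(Node, rabbit_nodes, await_running_count, [MinCount, TimeoutMs]) of
        ok -> ok;
        {error, timeout} = Err -> Err;
        {badrpc, _} = Err -> Err
    end.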
+start_nodes_in_stop_order(Config) -> + [Rabbit, Hare, Bunny] = cluster_members(Config), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare), + assert_not_clustered(Bunny), + + stop_join_start(Config, Rabbit, Bunny), + stop_join_start(Config, Hare, Bunny), + assert_clustered([Rabbit, Hare, Bunny]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:stop_node(Config, Bunny), + + Self = self(), + spawn(fun() -> + Reply = rabbit_ct_broker_helpers:start_node(Config, Rabbit), + Self ! {start_node_reply, Reply} + end), + ?assertMatch({error, {skip, _}}, rabbit_ct_broker_helpers:start_node(Config, Hare)), + receive + {start_node_reply, Reply} -> + ?assertMatch({error, {skip, _}}, Reply) + end. + +start_nodes_in_stop_order_in_khepri(Config) -> + [Rabbit, Hare, Bunny] = cluster_members(Config), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare), + assert_not_clustered(Bunny), + + stop_join_start(Config, Rabbit, Bunny), + stop_join_start(Config, Hare, Bunny), + assert_clustered([Rabbit, Hare, Bunny]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:stop_node(Config, Bunny), + + ok = rabbit_ct_broker_helpers:async_start_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:async_start_node(Config, Hare), + ok = rabbit_ct_broker_helpers:async_start_node(Config, Bunny), + + ?assertMatch(ok, rabbit_ct_broker_helpers:wait_for_async_start_node(Rabbit)), + ?assertMatch(ok, rabbit_ct_broker_helpers:wait_for_async_start_node(Hare)), + ?assertMatch(ok, rabbit_ct_broker_helpers:wait_for_async_start_node(Bunny)). + +%% TODO test force_boot with Khepri involved +start_nodes_in_stop_order_with_force_boot(Config) -> + [Rabbit, Hare, Bunny] = cluster_members(Config), + assert_not_clustered(Rabbit), + assert_not_clustered(Hare), + assert_not_clustered(Bunny), + + stop_join_start(Config, Rabbit, Bunny), + stop_join_start(Config, Hare, Bunny), + assert_clustered([Rabbit, Hare, Bunny]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:stop_node(Config, Bunny), + + {ok, []} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit, + ["force_boot"]), + + spawn(fun() -> rabbit_ct_broker_helpers:start_node(Config, Rabbit) end), + ok = rabbit_ct_broker_helpers:start_node(Config, Hare), + assert_cluster_status({[Bunny, Hare, Rabbit], [Bunny, Hare, Rabbit], [Rabbit, Hare]}, + [Rabbit, Hare]), + + ok = rabbit_ct_broker_helpers:start_node(Config, Bunny), + assert_clustered([Rabbit, Hare, Bunny]). + %% ---------------------------------------------------------------------------- %% Internal utils %% ---------------------------------------------------------------------------- @@ -723,49 +1260,84 @@ assert_failure(Fun) -> Other -> error({expected_failure, Other}) end. -stop_app(Node) -> - rabbit_control_helper:command(stop_app, Node). - -start_app(Node) -> - rabbit_control_helper:command(start_app, Node). - -join_cluster(Node, To) -> - join_cluster(Node, To, false). +stop_app(Config, Node) -> + case rabbit_ct_broker_helpers:rabbitmqctl(Config, Node, ["stop_app"]) of + {ok, _} -> ok; + Error -> Error + end. -join_cluster(Node, To, Ram) -> - rabbit_control_helper:command_with_output(join_cluster, Node, [atom_to_list(To)], [{"--ram", Ram}]). 
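The force_boot case above restarts nodes in the same order they were stopped, using rabbitmqctl force_boot so the first node does not wait for the node that stopped last. A hedged sketch of that sequence as one helper, using only calls that appear in these tests (the helper name is hypothetical):

%% Illustrative only: force the first node to boot, then bring the rest of
%% the cluster back up in the given order.
force_boot_and_restart(Config, [First | _] = Nodes) ->
    {ok, []} = rabbit_ct_broker_helpers:rabbitmqctl(Config, First, ["force_boot"]),
    [ok = rabbit_ct_broker_helpers:start_node(Config, N) || N <- Nodes],
    ok.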
+start_app(Config, Node) -> + case rabbit_ct_broker_helpers:rabbitmqctl(Config, Node, ["start_app"]) of + {ok, _} -> ok; + Error -> Error + end. -reset(Node) -> - rabbit_control_helper:command(reset, Node). +join_cluster(Config, Node, To) -> + join_cluster(Config, Node, To, false). + +join_cluster(Config, Node, To, Ram) -> + Cmd = case Ram of + true -> + ["join_cluster", "--ram", atom_to_list(To)]; + false -> + ["join_cluster", atom_to_list(To)] + end, + case rabbit_ct_broker_helpers:rabbitmqctl(Config, Node, Cmd) of + {ok, _} -> ok; + Error -> Error + end. -force_reset(Node) -> - rabbit_control_helper:command(force_reset, Node). +reset(Config, Node) -> + case rabbit_ct_broker_helpers:rabbitmqctl(Config, Node, ["reset"]) of + {ok, _} -> ok; + Error -> Error + end. -forget_cluster_node(Node, Removee, RemoveWhenOffline) -> - rabbit_control_helper:command(forget_cluster_node, Node, [atom_to_list(Removee)], - [{"--offline", RemoveWhenOffline}]). +force_reset(Config, Node) -> + Ret = rabbit_ct_broker_helpers:rabbitmqctl( + Config, Node, ["force_reset"]), + case Ret of + {ok, _} -> ok; + Error -> Error + end. -forget_cluster_node(Node, Removee) -> - forget_cluster_node(Node, Removee, false). +forget_cluster_node(Config, Node, Removee, RemoveWhenOffline) -> + Cmd = case RemoveWhenOffline of + true -> + ["forget_cluster_node", "--offline", + atom_to_list(Removee)]; + false -> + ["forget_cluster_node", + atom_to_list(Removee)] + end, + case rabbit_ct_broker_helpers:rabbitmqctl(Config, Node, Cmd) of + {ok, _} -> ok; + Error -> Error + end. -change_cluster_node_type(Node, Type) -> - rabbit_control_helper:command(change_cluster_node_type, Node, [atom_to_list(Type)]). +forget_cluster_node(Config, Node, Removee) -> + forget_cluster_node(Config, Node, Removee, false). -update_cluster_nodes(Node, DiscoveryNode) -> - rabbit_control_helper:command(update_cluster_nodes, Node, [atom_to_list(DiscoveryNode)]). +change_cluster_node_type(Config, Node, Type) -> + Ret = rabbit_ct_broker_helpers:rabbitmqctl( + Config, Node, ["change_cluster_node_type", atom_to_list(Type)]), + case Ret of + {ok, _} -> ok; + Error -> Error + end. -stop_join_start(Node, ClusterTo, Ram) -> - ok = stop_app(Node), - ok = join_cluster(Node, ClusterTo, Ram), - ok = start_app(Node). +stop_join_start(Config, Node, ClusterTo, Ram) -> + ok = stop_app(Config, Node), + ok = join_cluster(Config, Node, ClusterTo, Ram), + ok = start_app(Config, Node). -stop_join_start(Node, ClusterTo) -> - stop_join_start(Node, ClusterTo, false). +stop_join_start(Config, Node, ClusterTo) -> + stop_join_start(Config, Node, ClusterTo, false). -stop_reset_start(Node) -> - ok = stop_app(Node), - ok = reset(Node), - ok = start_app(Node). +stop_reset_start(Config, Node) -> + ok = stop_app(Config, Node), + ok = reset(Config, Node), + ok = start_app(Config, Node). declare(Ch, Name) -> Res = amqp_channel:call(Ch, #'queue.declare'{durable = true, @@ -773,3 +1345,8 @@ declare(Ch, Name) -> amqp_channel:call(Ch, #'queue.bind'{queue = Name, exchange = <<"amq.fanout">>}), Res. + +declare_passive(Ch, Name) -> + amqp_channel:call(Ch, #'queue.declare'{durable = true, + passive = true, + queue = Name}). diff --git a/deps/rabbit/test/clustering_recovery_SUITE.erl b/deps/rabbit/test/clustering_recovery_SUITE.erl index ca008d92b1e5..b5dd04260839 100644 --- a/deps/rabbit/test/clustering_recovery_SUITE.erl +++ b/deps/rabbit/test/clustering_recovery_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(clustering_recovery_SUITE). @@ -11,11 +11,20 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). +-compile(nowarn_export_all). -compile(export_all). +-import(clustering_utils, [ + assert_status/2, + assert_cluster_status/2, + assert_clustered/1, + assert_not_clustered/1 + ]). + all() -> [ - {group, mnesia_store} + {group, mnesia_store}, + {group, khepri_store} ]. groups() -> @@ -26,13 +35,29 @@ groups() -> force_shrink_all_quorum_queues ]} ]} + ]}, + {khepri_store, [], [ + {clustered_3_nodes, [], + [{cluster_size_3, [], [ + force_standalone_boot, + force_standalone_boot_and_restart, + force_standalone_boot_and_restart_with_quorum_queues + ]} + ]}, + {clustered_5_nodes, [], + [{cluster_size_5, [], [ + rolling_restart, + rolling_kill_restart, + forget_down_node + ]}] + } ]} ]. suite() -> [ %% If a testcase hangs, no need to wait for 30 minutes. - {timetrap, {minutes, 5}} + {timetrap, {minutes, 10}} ]. %% ------------------------------------------------------------------- @@ -46,12 +71,28 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). +init_per_group(khepri_store, Config) -> + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + mnesia -> + {skip, "These tests target Khepri"}; + _ -> + Config + end; init_per_group(mnesia_store, Config) -> - Config; + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + {khepri, _} -> + {skip, "These tests target mnesia"}; + _ -> + Config + end; init_per_group(clustered_3_nodes, Config) -> rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]); +init_per_group(clustered_5_nodes, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]); init_per_group(cluster_size_3, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]). + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]); +init_per_group(cluster_size_5, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}]). end_per_group(_, Config) -> Config. 
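The khepri_store group above adds disaster-recovery cases (force_standalone_boot and friends). As a rough map of what they exercise, here is a hedged sketch assembled from the helpers used later in this suite; the wrapper name is hypothetical and this is not an official recovery procedure:

%% Illustrative only: shrink the metadata store and all quorum queues down
%% to the surviving node, then restart it (see the testcases below).
recover_surviving_node(Config, Node) ->
    ok = rabbit_control_helper:command(force_standalone_khepri_boot, Node, []),
    ok = rabbit_ct_broker_helpers:rpc(
           Config, Node, rabbit_quorum_queue,
           force_all_queues_shrink_member_to_current_member, []),
    ok = rabbit_ct_broker_helpers:stop_node(Config, Node),
    ok = rabbit_ct_broker_helpers:start_node(Config, Node).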
@@ -82,9 +123,9 @@ end_per_testcase(Testcase, Config) -> force_shrink_all_quorum_queues(Config) -> [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - QName1 = quorum_test_queue(1), - QName2 = quorum_test_queue(2), - QName3 = quorum_test_queue(3), + QName1 = quorum_queue_name(1), + QName2 = quorum_queue_name(2), + QName3 = quorum_queue_name(3), Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}], declare_and_publish_to_queue(Config, Rabbit, QName1, Args), declare_and_publish_to_queue(Config, Rabbit, QName2, Args), @@ -111,7 +152,7 @@ force_shrink_all_quorum_queues(Config) -> force_shrink_quorum_queue(Config) -> [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - QName1 = quorum_test_queue(1), + QName1 = quorum_queue_name(1), Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}], declare_and_publish_to_queue(Config, Rabbit, QName1, Args), @@ -127,7 +168,150 @@ force_shrink_quorum_queue(Config) -> ok = rabbit_ct_broker_helpers:rpc(Config, Rabbit, rabbit_quorum_queue, force_shrink_member_to_current_member, [<<"/">>, QName1]), - ok = consume_from_queue(Config, Rabbit, QName1), + ok = consume_from_queue(Config, Rabbit, QName1). + +force_standalone_boot(Config) -> + %% Test for disaster recovery procedure command + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_cluster_status({[Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny]}, + [Rabbit, Hare, Bunny]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:stop_node(Config, Bunny), + ok = force_standalone_khepri_boot(Rabbit), + + assert_cluster_status({[Rabbit], [Rabbit], [Rabbit], [Rabbit], [Rabbit]}, + [Rabbit]), + + ok. + +force_standalone_boot_and_restart(Config) -> + %% Test for disaster recovery procedure + %% + %% 3-node cluster. Declare and publish to a classic queue on node 1. + %% Stop the two remaining nodes. Force standalone boot on the node + %% left. Restart it. Consume all the messages. + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_cluster_status({[Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny]}, + [Rabbit, Hare, Bunny]), + + QName = classic_queue_name(Rabbit), + Args = [{<<"x-queue-type">>, longstr, <<"classic">>}], + declare_and_publish_to_queue(Config, Rabbit, QName, Args), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:stop_node(Config, Bunny), + ok = force_standalone_khepri_boot(Rabbit), + + assert_cluster_status({[Rabbit], [Rabbit], [Rabbit], [Rabbit], [Rabbit]}, + [Rabbit]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit), + + consume_from_queue(Config, Rabbit, QName), + + ok. + +force_standalone_boot_and_restart_with_quorum_queues(Config) -> + %% Test for disaster recovery procedure + %% + %% 3-node cluster. Declare and publish to a classic queue on node 1. + %% Stop the two remaining nodes. Force standalone boot on the node + %% left. Restart it. Consume all the messages. 
+ [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_cluster_status({[Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny]}, + [Rabbit, Hare, Bunny]), + + QName1 = quorum_queue_name(1), + QName2 = quorum_queue_name(2), + Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}], + declare_and_publish_to_queue(Config, Rabbit, QName1, Args), + declare_and_publish_to_queue(Config, Rabbit, QName2, Args), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Hare), + ok = rabbit_ct_broker_helpers:stop_node(Config, Bunny), + + ok = force_standalone_khepri_boot(Rabbit), + ok = rabbit_ct_broker_helpers:rpc(Config, Rabbit, rabbit_quorum_queue, force_all_queues_shrink_member_to_current_member, []), + + assert_cluster_status({[Rabbit], [Rabbit], [Rabbit], [Rabbit], [Rabbit]}, + [Rabbit]), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit), + + consume_from_queue(Config, Rabbit, QName1), + consume_from_queue(Config, Rabbit, QName2), + + ok. + +rolling_restart(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Args = [{<<"x-queue-type">>, longstr, <<"classic">>}], + [begin + QName = classic_queue_name(N), + declare_and_publish_to_queue(Config, N, QName, Args) + end || N <- Nodes], + + [begin + ok = rabbit_ct_broker_helpers:stop_node(Config, N), + ok = rabbit_ct_broker_helpers:start_node(Config, N) + end || N <- Nodes], + + assert_cluster_status({Nodes, Nodes, Nodes}, Nodes), + [begin + QName = classic_queue_name(N), + consume_from_queue(Config, N, QName) + end || N <- Nodes], + + ok. + +rolling_kill_restart(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Args = [{<<"x-queue-type">>, longstr, <<"classic">>}], + [begin + QName = classic_queue_name(N), + declare_and_publish_to_queue(Config, N, QName, Args) + end || N <- Nodes], + + Ret0 = + [begin + ok = rabbit_ct_broker_helpers:kill_node(Config, N), + {N, rabbit_ct_broker_helpers:start_node(Config, N)} + end || N <- Nodes], + Failed = [Pair || {_, V} = Pair <- Ret0, V =/= ok], + + ?assert(length(Failed) =< 1), + + case Failed of + [] -> + assert_cluster_status({Nodes, Nodes, Nodes}, Nodes), + [begin + QName = classic_queue_name(N), + consume_from_queue(Config, N, QName) + end || N <- Nodes]; + [{FailedNode, {error, _}}] -> + [Node0 | _] = RemainingNodes = Nodes -- [FailedNode], + ok = forget_cluster_node(Node0, FailedNode), + assert_cluster_status({RemainingNodes, RemainingNodes, RemainingNodes}, RemainingNodes) + end, + ok. + +forget_down_node(Config) -> + [Rabbit, Hare | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit), + ok = forget_cluster_node(Hare, Rabbit), + + NNodes = lists:nthtail(1, Nodes), + + assert_cluster_status({NNodes, NNodes, NNodes}, NNodes), ok. @@ -140,16 +324,16 @@ declare_and_publish_to_queue(Config, Node, QName, Args) -> publish_many(Ch, QName, 10), rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch). -quorum_test_queue(Number) -> +quorum_queue_name(Number) -> list_to_binary(io_lib:format("quorum_queue_~p", [Number])). +classic_queue_name(Node) -> + list_to_binary(io_lib:format("classic_queue_~p", [Node])). + declare(Ch, Name, Args) -> - Res = amqp_channel:call(Ch, #'queue.declare'{durable = true, - queue = Name, - arguments = Args}), - amqp_channel:call(Ch, #'queue.bind'{queue = Name, - exchange = <<"amq.fanout">>}), - Res. 
+ amqp_channel:call(Ch, #'queue.declare'{durable = true, + queue = Name, + arguments = Args}). consume_from_queue(Config, Node, QName) -> {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, Node), @@ -157,6 +341,12 @@ consume_from_queue(Config, Node, QName) -> consume(10), rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch). +force_standalone_khepri_boot(Node) -> + rabbit_control_helper:command(force_standalone_khepri_boot, Node, []). + +forget_cluster_node(Node, Removee) -> + rabbit_control_helper:command(forget_cluster_node, Node, [atom_to_list(Removee)], []). + publish_many(Ch, QName, N) -> amqp_channel:call(Ch, #'confirm.select'{}), [amqp_channel:cast(Ch, #'basic.publish'{routing_key = QName}, diff --git a/deps/rabbit/test/clustering_utils.erl b/deps/rabbit/test/clustering_utils.erl index 5ff479dd5b60..34c2e06e115c 100644 --- a/deps/rabbit/test/clustering_utils.erl +++ b/deps/rabbit/test/clustering_utils.erl @@ -2,11 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(clustering_utils). -export([ + assert_status/2, assert_cluster_status/2, assert_clustered/1, assert_not_clustered/1 @@ -14,9 +15,17 @@ -define(LOOP_RECURSION_DELAY, 100). -assert_cluster_status({All, Disc, Running}, Nodes) -> - assert_cluster_status({All, Running, All, Disc, Running}, Nodes); -assert_cluster_status(Status0, Nodes) -> +assert_status(Tuple, Nodes) -> + assert_cluster_status(Tuple, Nodes, fun verify_status_equal/3). + +assert_cluster_status(Tuple, Nodes) -> + assert_cluster_status(Tuple, Nodes, fun verify_cluster_status_equal/3). + +assert_cluster_status({All, Running}, Nodes, VerifyFun) -> + assert_cluster_status({All, Running, All, All, Running}, Nodes, VerifyFun); +assert_cluster_status({All, Disc, Running}, Nodes, VerifyFun) -> + assert_cluster_status({All, Running, All, Disc, Running}, Nodes, VerifyFun); +assert_cluster_status(Status0, Nodes, VerifyFun) -> Status = sort_cluster_status(Status0), AllNodes = case Status of {undef, undef, All, _, _} -> @@ -25,36 +34,41 @@ assert_cluster_status(Status0, Nodes) -> {All, _, _, _, _} -> All end, - wait_for_cluster_status(Status, AllNodes, Nodes). + wait_for_cluster_status(Status, AllNodes, Nodes, VerifyFun). -wait_for_cluster_status(Status, AllNodes, Nodes) -> +wait_for_cluster_status(Status, AllNodes, Nodes, VerifyFun) -> Max = 10000 / ?LOOP_RECURSION_DELAY, - wait_for_cluster_status(0, Max, Status, AllNodes, Nodes). + wait_for_cluster_status(0, Max, Status, AllNodes, Nodes, VerifyFun). 
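For reference when reading the assertions in these suites: the fully expanded status tuple is {AllMembers, RunningMembers, AllDbNodes, DiscDbNodes, RunningDbNodes} (see cluster_status/1 below), and the two- and three-element forms are padded by the clauses above. A small usage sketch:

%% Two-element form: same membership everywhere and every node running;
%% it expands to {Nodes, Nodes, Nodes, Nodes, Nodes}.
assert_fully_clustered(Nodes) ->
    clustering_utils:assert_cluster_status({Nodes, Nodes}, Nodes).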
-wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes) when N >= Max -> +wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes, _VerifyFun) when N >= Max -> erlang:error({cluster_status_max_tries_failed, [{nodes, Nodes}, {expected_status, Status}, {max_tried, Max}, - {status, sort_cluster_status(cluster_status(hd(Nodes)))}]}); -wait_for_cluster_status(N, Max, Status, AllNodes, Nodes) -> + {status, [{Node, sort_cluster_status(cluster_status(Node))} || Node <- Nodes]}]}); +wait_for_cluster_status(N, Max, Status, AllNodes, Nodes, VerifyFun) -> case lists:all(fun (Node) -> - verify_status_equal(Node, Status, AllNodes) + VerifyFun(Node, Status, AllNodes) end, Nodes) of true -> ok; false -> timer:sleep(?LOOP_RECURSION_DELAY), - wait_for_cluster_status(N + 1, Max, Status, AllNodes, Nodes) + wait_for_cluster_status(N + 1, Max, Status, AllNodes, Nodes, VerifyFun) end. -verify_status_equal(Node, Status, AllNodes) -> +verify_status_equal(Node, Status, _AllNodes) -> NodeStatus = sort_cluster_status(cluster_status(Node)), - IsClustered = case rpc:call(Node, rabbit_db_cluster, is_clustered, []) of - {badrpc, {'EXIT', {undef, _}}} -> - rpc:call(Node, rabbit_mnesia, is_clustered, []); - Ret -> - Ret + equal(Status, NodeStatus). + +verify_cluster_status_equal(Node, Status, AllNodes) -> + NodeStatus = sort_cluster_status(cluster_status(Node)), + %% To be compatible with mixed version clusters in 3.11.x we use here + %% rabbit_mnesia:is_clustered/0 instead of rabbit_db_cluster:is_clustered/0 + IsClustered0 = rpc:call(Node, rabbit_db_cluster, is_clustered, []), + IsClustered = case maybe_undef(IsClustered0) of + undef -> rpc:call(Node, rabbit_mnesia, is_clustered, []); + _ -> IsClustered0 end, - (AllNodes =/= [Node]) =:= IsClustered andalso equal(Status, NodeStatus). + ((AllNodes =/= [Node]) =:= IsClustered andalso equal(Status, NodeStatus)). equal({_, _, A, B, C}, {undef, undef, A, B, C}) -> true; @@ -64,17 +78,36 @@ equal(Status0, Status1) -> Status0 == Status1. cluster_status(Node) -> - AllMembers = rpc:call(Node, rabbit_nodes, list_members, []), - RunningMembers = rpc:call(Node, rabbit_nodes, list_running, []), - - AllDbNodes = case rpc:call(Node, rabbit_db_cluster, members, []) of - {badrpc, {'EXIT', {undef, _}}} -> - rpc:call(Node, rabbit_mnesia, cluster_nodes, [all]); - Ret -> - Ret + %% To be compatible with mixed version clusters in 3.11.x we use here + %% rabbit_nodes:all/0 instead of rabbit_nodes:list_members/0 and + %% rabbit_nodes:all_running/0 instead of rabbit_nodes:list_running/0 + %% which are part of the new API. 
+ AllMembers0 = rpc:call(Node, rabbit_nodes, list_members, []), + AllMembers = case maybe_undef(AllMembers0) of + undef -> rpc:call(Node, rabbit_nodes, all, []); + _ -> AllMembers0 end, - DiscDbNodes = rpc:call(Node, rabbit_mnesia, cluster_nodes, [disc]), - RunningDbNodes = rpc:call(Node, rabbit_mnesia, cluster_nodes, [running]), + RunningMembers0 = rpc:call(Node, rabbit_nodes, list_running, []), + RunningMembers = case maybe_undef(RunningMembers0) of + undef -> rpc:call(Node, rabbit_nodes, all_running, []); + _ -> RunningMembers0 + end, + + %% To be compatible with mixed version clusters in 3.11.x we use here + %% rabbit_mnesia:cluster_nodes/1 instead of rabbit_db_cluster:members/0 + AllDbNodes0 = rpc:call(Node, rabbit_db_cluster, members, []), + AllDbNodes = case maybe_undef(AllDbNodes0) of + undef -> rpc:call(Node, rabbit_mnesia, cluster_nodes, [all]); + _ -> AllDbNodes0 + end, + {DiscDbNodes, RunningDbNodes} = + case rpc:call(Node, rabbit_khepri, is_enabled, []) of + true -> + {AllMembers, RunningMembers}; + _ -> + {rpc:call(Node, rabbit_mnesia, cluster_nodes, [disc]), + rpc:call(Node, rabbit_mnesia, cluster_nodes, [running])} + end, {AllMembers, RunningMembers, @@ -82,14 +115,25 @@ cluster_status(Node) -> DiscDbNodes, RunningDbNodes}. -sort_cluster_status({{badrpc, {'EXIT', {undef, _}}}, {badrpc, {'EXIT', {undef, _}}}, AllM, DiscM, RunningM}) -> - {undef, undef, lists:sort(AllM), lists:sort(DiscM), lists:sort(RunningM)}; sort_cluster_status({All, Running, AllM, DiscM, RunningM}) -> - {lists:sort(All), lists:sort(Running), lists:sort(AllM), lists:sort(DiscM), lists:sort(RunningM)}. + {maybe_sort(All), maybe_sort(Running), maybe_sort(AllM), maybe_sort(DiscM), maybe_sort(RunningM)}. + +maybe_sort({badrpc, {'EXIT', {undef, _}}}) -> + undef; +maybe_sort({badrpc, nodedown}) -> + nodedown; +maybe_sort({badrpc, Reason}) -> + Reason; +maybe_sort(List) -> + lists:sort(List). + +maybe_undef({badrpc, {'EXIT', {undef, _}}}) -> + undef; +maybe_undef(Any) -> + Any. assert_clustered(Nodes) -> assert_cluster_status({Nodes, Nodes, Nodes, Nodes, Nodes}, Nodes). assert_not_clustered(Node) -> assert_cluster_status({[Node], [Node], [Node], [Node], [Node]}, [Node]). - diff --git a/deps/rabbit/test/config_schema_SUITE.erl b/deps/rabbit/test/config_schema_SUITE.erl index b3a0ed54ca7d..3dec2849739e 100644 --- a/deps/rabbit/test/config_schema_SUITE.erl +++ b/deps/rabbit/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). 
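cluster_status/1 above repeats one pattern: call the new API and, when the remote node is older (3.11.x, per the comments) and does not export it, fall back to the previous function. A minimal generalisation of that pattern (the helper name is hypothetical):

%% Hypothetical generalisation of the maybe_undef/1 fallbacks above.
rpc_with_fallback(Node, {M1, F1, A1}, {M2, F2, A2}) ->
    case rpc:call(Node, M1, F1, A1) of
        {badrpc, {'EXIT', {undef, _}}} -> rpc:call(Node, M2, F2, A2);
        Ret -> Ret
    end.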
diff --git a/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets b/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets index 239632630860..79ac25b4d576 100644 --- a/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets +++ b/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets @@ -125,10 +125,53 @@ ssl_options.fail_if_no_peer_cert = true", "disk_free_limit.relative = 1.0", [{rabbit, [{disk_free_limit, {mem_relative, 1.0}}]}],[]}, - {disk_free_limit_only_absolute, + %% + %% Absolute free disk space limit + %% + + {disk_free_limit_only_absolute_integer, "disk_free_limit.absolute = 50000", [{rabbit, [{disk_free_limit, 50000}]}],[]}, + {disk_free_limit_only_absolute_units_gb, + "disk_free_limit.absolute = 2GB", + [{rabbit,[{disk_free_limit, "2GB"}]}], + []}, + {disk_free_limit_only_absolute_units_gib, + "disk_free_limit.absolute = 2GiB", + [{rabbit,[{disk_free_limit, "2GiB"}]}], + []}, + {disk_free_limit_only_absolute_units_g, + "disk_free_limit.absolute = 2G", + [{rabbit,[{disk_free_limit, "2G"}]}], + []}, + + {disk_free_limit_only_absolute_units_tb, + "disk_free_limit.absolute = 2TB", + [{rabbit,[{disk_free_limit, "2TB"}]}], + []}, + {disk_free_limit_only_absolute_units_tib, + "disk_free_limit.absolute = 2TiB", + [{rabbit,[{disk_free_limit, "2TiB"}]}], + []}, + {disk_free_limit_only_absolute_units_t, + "disk_free_limit.absolute = 2T", + [{rabbit,[{disk_free_limit, "2T"}]}], + []}, + + {disk_free_limit_only_absolute_units_pb, + "disk_free_limit.absolute = 2PB", + [{rabbit,[{disk_free_limit, "2PB"}]}], + []}, + {disk_free_limit_only_absolute_units_pib, + "disk_free_limit.absolute = 2PiB", + [{rabbit,[{disk_free_limit, "2PiB"}]}], + []}, + {disk_free_limit_only_absolute_units_p, + "disk_free_limit.absolute = 2P", + [{rabbit,[{disk_free_limit, "2P"}]}], + []}, + {default_users, " default_users.a.vhost_pattern = banana @@ -139,7 +182,7 @@ ssl_options.fail_if_no_peer_cert = true", [{rabbit, [{default_users, [ {<<"a">>, [{<<"vhost_pattern">>, "banana"}, {<<"tags">>, [administrator, operator]}, - {<<"password">>, "SECRET"}, + {<<"password">>, <<"SECRET">>}, {<<"read">>, ".*"}]}]}]}], []}, @@ -151,6 +194,7 @@ ssl_options.fail_if_no_peer_cert = true", default_policies.operator.a.classic_queues.ha_mode = exactly default_policies.operator.a.classic_queues.ha_params = 2 default_policies.operator.a.classic_queues.ha_sync_mode = automatic + default_policies.operator.a.classic_queues.queue_version = 2 ", [{rabbit, [{default_policies, [{operator, [ @@ -158,7 +202,8 @@ ssl_options.fail_if_no_peer_cert = true", {<<"ha_mode">>, <<"exactly">>}, {<<"ha_params">>, 2}, {<<"ha_sync_mode">>, <<"automatic">>}, - {<<"queue_pattern">>, "apple"}, + {<<"queue_pattern">>, <<"apple">>}, + {<<"queue_version">>, 2}, {<<"vhost_pattern">>, "banana"}]}]}]}]}], []}, @@ -175,6 +220,8 @@ ssl_options.fail_if_no_peer_cert = true", {default_user_settings, "default_user = guest default_pass = guest +anonymous_login_user = guest +anonymous_login_pass = guest default_user_tags.administrator = true default_permissions.configure = .* default_permissions.read = .* @@ -182,9 +229,30 @@ default_permissions.write = .*", [{rabbit, [{default_user,<<"guest">>}, {default_pass,<<"guest">>}, + {anonymous_login_user,<<"guest">>}, + {anonymous_login_pass,<<"guest">>}, {default_user_tags,[administrator]}, {default_permissions,[<<".*">>,<<".*">>,<<".*">>]}]}], []}, + {anonymous_login_user, + "anonymous_login_user = none", + [{rabbit, + [{anonymous_login_user, none}]}], + []}, + + {auth_mechanisms_ordered, + "auth_mechanisms.1 = 
PLAIN +auth_mechanisms.2 = AMQPLAIN +auth_mechanisms.3 = ANONYMOUS", + [], + [{rabbit, + %% We expect the mechanisms in the order as declared. + [{auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']}] + }], + [], + nosort + }, + {cluster_formation, "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1 @@ -250,14 +318,55 @@ tcp_listen_options.exit_on_close = false", [{tcp_listen_options, [{backlog,128},{nodelay,true},{exit_on_close,false}]}]}], []}, - {vm_memory_watermark_absolute, + + %% + %% Absolute high runtime memory watermark + %% + + {vm_memory_watermark_absolute_integer, "vm_memory_high_watermark.absolute = 1073741824", [{rabbit,[{vm_memory_high_watermark,{absolute,1073741824}}]}], []}, - {vm_memory_watermark_absolute_units, + + {vm_memory_watermark_absolute_units_mb, "vm_memory_high_watermark.absolute = 1024MB", [{rabbit,[{vm_memory_high_watermark,{absolute,"1024MB"}}]}], []}, + {vm_memory_watermark_absolute_units_mib, + "vm_memory_high_watermark.absolute = 1024MiB", + [{rabbit,[{vm_memory_high_watermark,{absolute,"1024MiB"}}]}], + []}, + {vm_memory_watermark_absolute_units_m, + "vm_memory_high_watermark.absolute = 1024M", + [{rabbit,[{vm_memory_high_watermark,{absolute,"1024M"}}]}], + []}, + + {vm_memory_watermark_absolute_units_gb, + "vm_memory_high_watermark.absolute = 4GB", + [{rabbit,[{vm_memory_high_watermark,{absolute,"4GB"}}]}], + []}, + {vm_memory_watermark_absolute_units_gib, + "vm_memory_high_watermark.absolute = 3GiB", + [{rabbit,[{vm_memory_high_watermark,{absolute,"3GiB"}}]}], + []}, + {vm_memory_watermark_absolute_units_g, + "vm_memory_high_watermark.absolute = 10G", + [{rabbit,[{vm_memory_high_watermark,{absolute,"10G"}}]}], + []}, + + {vm_memory_watermark_absolute_units_tb, + "vm_memory_high_watermark.absolute = 1TB", + [{rabbit,[{vm_memory_high_watermark,{absolute,"1TB"}}]}], + []}, + {vm_memory_watermark_absolute_units_tib, + "vm_memory_high_watermark.absolute = 1TiB", + [{rabbit,[{vm_memory_high_watermark,{absolute,"1TiB"}}]}], + []}, + {vm_memory_watermark_absolute_units_t, + "vm_memory_high_watermark.absolute = 1T", + [{rabbit,[{vm_memory_high_watermark,{absolute,"1T"}}]}], + []}, + {vm_memory_watermark_absolute_priority, "vm_memory_high_watermark.absolute = 1073741824 vm_memory_high_watermark.relative = 0.4", @@ -310,6 +419,22 @@ tcp_listen_options.exit_on_close = false", "channel_max = 16", [{rabbit,[{channel_max, 16}]}], []}, + {channel_max_per_node, + "channel_max_per_node = 16", + [{rabbit,[{channel_max_per_node, 16}]}], + []}, + {channel_max_per_node, + "channel_max_per_node = infinity", + [{rabbit,[{channel_max_per_node, infinity}]}], + []}, + {consumer_max_per_channel, + "consumer_max_per_channel = 16", + [{rabbit,[{consumer_max_per_channel, 16}]}], + []}, + {consumer_max_per_channel, + "consumer_max_per_channel = infinity", + [{rabbit,[{consumer_max_per_channel, infinity}]}], + []}, {max_message_size, "max_message_size = 131072", [{rabbit, [{max_message_size, 131072}]}], @@ -408,7 +533,7 @@ tcp_listen_options.exit_on_close = false", [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, - {password,"t0p$3kRe7"}]}]}], + {password,<<"t0p$3kRe7">>}]}]}], []}, {ssl_options_tls_ver_old, "listeners.ssl.1 = 5671 @@ -645,22 +770,6 @@ tcp_listen_options.exit_on_close = false", [{rabbit,[{tcp_listen_options,[{linger,{false,100}}]}]}], []}, - 
{cluster_formation_randomized_startup_delay_both_values, - "cluster_formation.randomized_startup_delay_range.min = 10 - cluster_formation.randomized_startup_delay_range.max = 30", - [], - []}, - - {cluster_formation_randomized_startup_delay_min_only, - "cluster_formation.randomized_startup_delay_range.min = 10", - [], - []}, - - {cluster_formation_randomized_startup_delay_max_only, - "cluster_formation.randomized_startup_delay_range.max = 30", - [], - []}, - {cluster_formation_internal_lock_retries, "cluster_formation.internal_lock_retries = 10", [{rabbit,[{cluster_formation,[{internal_lock_retries,10}]}]}], @@ -849,9 +958,9 @@ credential_validator.regexp = ^abc\\d+", []}, {raft_segment_max_entries, - "raft.segment_max_entries = 65536", + "raft.segment_max_entries = 32768", [{ra, [ - {segment_max_entries, 65536} + {segment_max_entries, 32768} ]}], []}, @@ -955,6 +1064,38 @@ credential_validator.regexp = ^abc\\d+", {incoming_message_interceptors, [{set_header_routing_node, false}, {set_header_timestamp, false}]} ]}], + []}, + + %% + %% Stream replication port range + %% + + {stream_replication_port_range, + " + stream.replication.port_range.min = 4000 + stream.replication.port_range.max = 4600 + ", + [{osiris, [ + {port_range, {4000, 4600}} + ]}], + []}, + + {stream_replication_port_range, + " + stream.replication.port_range.min = 4000 + ", + [{osiris, [ + {port_range, {4000, 4500}} + ]}], + []}, + + {stream_replication_port_range, + " + stream.replication.port_range.max = 4600 + ", + [{osiris, [ + {port_range, {4100, 4600}} + ]}], []} ]. diff --git a/deps/rabbit/test/consumer_timeout_SUITE.erl b/deps/rabbit/test/consumer_timeout_SUITE.erl index 7d9584525814..c3988571a510 100644 --- a/deps/rabbit/test/consumer_timeout_SUITE.erl +++ b/deps/rabbit/test/consumer_timeout_SUITE.erl @@ -2,19 +2,19 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(consumer_timeout_SUITE). -include_lib("common_test/include/ct.hrl"). --include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). +-compile(nowarn_export_all). -compile(export_all). --define(CONSUMER_TIMEOUT, 3000). --define(RECEIVE_TIMEOUT, 5000). +-define(CONSUMER_TIMEOUT, 2000). +-define(RECEIVE_TIMEOUT, ?CONSUMER_TIMEOUT * 2). -define(GROUP_CONFIG, #{global_consumer_timeout => [{rabbit, [{consumer_timeout, ?CONSUMER_TIMEOUT}]}, @@ -27,7 +27,7 @@ {queue_policy, []}, {queue_arguments, [{<<"x-consumer-timeout">>, long, ?CONSUMER_TIMEOUT}]}]}). --import(quorum_queue_utils, [wait_for_messages/2]). +-import(queue_utils, [wait_for_messages/2]). 
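The ?GROUP_CONFIG above covers both the global consumer_timeout application setting and the per-queue x-consumer-timeout argument. A minimal sketch of declaring a queue with that argument directly over an AMQP 0-9-1 channel (durable and the 2000 ms value are example choices mirroring ?CONSUMER_TIMEOUT):

%% Illustrative only: consumers of this queue must acknowledge deliveries
%% within 2 seconds, or the channel is closed by the consumer timeout.
declare_with_consumer_timeout(Ch, QName) ->
    #'queue.declare_ok'{} =
        amqp_channel:call(
          Ch, #'queue.declare'{queue = QName,
                               durable = true,
                               arguments = [{<<"x-consumer-timeout">>, long, 2000}]}).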
all() -> [ @@ -43,7 +43,6 @@ groups() -> AllTestsParallel = [ {classic_queue, [parallel], AllTests}, - {mirrored_queue, [parallel], AllTests}, {quorum_queue, [parallel], AllTests} ], [ @@ -80,23 +79,15 @@ init_per_group(quorum_queue, Config) -> [{policy_type, <<"quorum_queues">>}, {queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]}, {queue_durable, true}]); -init_per_group(mirrored_queue, Config) -> - rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>, - <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]), - Config1 = rabbit_ct_helpers:set_config( - Config, [{policy_type, <<"classic_queues">>}, - {is_mirrored, true}, - {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]}, - {queue_durable, true}]), - rabbit_ct_helpers:run_steps(Config1, []); init_per_group(Group, Config0) -> case lists:member({group, Group}, all()) of true -> GroupConfig = maps:get(Group, ?GROUP_CONFIG), ClusterSize = 3, Config = rabbit_ct_helpers:merge_app_env( - Config0, {rabbit, [{channel_tick_interval, 1000}, - {quorum_tick_interval, 1000}] ++ ?config(rabbit, GroupConfig)}), + Config0, {rabbit, [{channel_tick_interval, 256}, + {quorum_tick_interval, 256}] ++ + ?config(rabbit, GroupConfig)}), Config1 = rabbit_ct_helpers:set_config( Config, [ {rmq_nodename_suffix, Group}, {rmq_nodes_count, ClusterSize} diff --git a/deps/rabbit/test/crashing_queues_SUITE.erl b/deps/rabbit/test/crashing_queues_SUITE.erl index 55fe8acd54b8..1a7fdf05ce98 100644 --- a/deps/rabbit/test/crashing_queues_SUITE.erl +++ b/deps/rabbit/test/crashing_queues_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(crashing_queues_SUITE). @@ -20,9 +20,9 @@ all() -> groups() -> [ {cluster_size_2, [], [ - crashing_unmirrored, - crashing_mirrored, - give_up_after_repeated_crashes + crashing_durable, + give_up_after_repeated_crashes, + crashing_transient ]} ]. @@ -45,7 +45,17 @@ init_per_group(cluster_size_2, Config) -> end_per_group(_, Config) -> Config. +init_per_testcase(crashing_transient = Testcase, Config) -> + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + mnesia -> + init_per_testcase0(Testcase, Config); + _ -> + {skip, "Transient queues not supported by Khepri"} + end; init_per_testcase(Testcase, Config) -> + init_per_testcase0(Testcase, Config). + +init_per_testcase0(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase), ClusterSize = ?config(rmq_nodes_count, Config), TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), @@ -58,36 +68,31 @@ init_per_testcase(Testcase, Config) -> rabbit_ct_client_helpers:setup_steps()). end_per_testcase(Testcase, Config) -> - Config1 = rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). + rabbit_ct_helpers:testcase_finished(Config, Testcase), + rabbit_ct_helpers:run_teardown_steps(Config, rabbit_ct_broker_helpers:teardown_steps()). %% ------------------------------------------------------------------- %% Testcases. 
%% ------------------------------------------------------------------- -crashing_unmirrored(Config) -> +crashing_durable(Config) -> [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), ChA = rabbit_ct_client_helpers:open_channel(Config, A), ConnB = rabbit_ct_client_helpers:open_connection(Config, B), - QName = <<"crashing_unmirrored-q">>, + QName = <<"crashing-q">>, amqp_channel:call(ChA, #'confirm.select'{}), test_queue_failure(A, ChA, ConnB, 1, 0, #'queue.declare'{queue = QName, durable = true}), - test_queue_failure(A, ChA, ConnB, 0, 0, - #'queue.declare'{queue = QName, durable = false}), ok. -crashing_mirrored(Config) -> +crashing_transient(Config) -> [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<".*">>, <<"all">>), ChA = rabbit_ct_client_helpers:open_channel(Config, A), ConnB = rabbit_ct_client_helpers:open_connection(Config, B), - QName = <<"crashing_mirrored-q">>, + QName = <<"crashing-q">>, amqp_channel:call(ChA, #'confirm.select'{}), - test_queue_failure(A, ChA, ConnB, 2, 1, - #'queue.declare'{queue = QName, durable = true}), + test_queue_failure(A, ChA, ConnB, 0, 0, + #'queue.declare'{queue = QName, durable = false}), ok. test_queue_failure(Node, Ch, RaceConn, MsgCount, FollowerCount, Decl) -> @@ -99,7 +104,6 @@ test_queue_failure(Node, Ch, RaceConn, MsgCount, FollowerCount, Decl) -> QRes = rabbit_misc:r(<<"/">>, queue, QName), rabbit_amqqueue:kill_queue(Node, QRes), assert_message_count(MsgCount, Ch, QName), - assert_follower_count(FollowerCount, Node, QName), stop_declare_racer(Racer) after amqp_channel:call(Ch, #'queue.delete'{queue = QName}) @@ -183,20 +187,3 @@ assert_message_count(Count, Ch, QName) -> #'queue.declare_ok'{message_count = Count} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, passive = true}). - -assert_follower_count(Count, Node, QName) -> - Q = lookup(Node, QName), - [{_, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [slave_pids]]), - RealCount = case Pids of - '' -> 0; - _ -> length(Pids) - end, - case RealCount of - Count -> - ok; - _ when RealCount < Count -> - timer:sleep(10), - assert_follower_count(Count, Node, QName); - _ -> - exit({too_many_replicas, Count, RealCount}) - end. diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index f02ceec72707..853f8fa59c64 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% For the full spec see: https://www.rabbitmq.com/dlx.html %% @@ -15,11 +15,11 @@ -compile([export_all, nowarn_export_all]). --import(quorum_queue_utils, [wait_for_messages/2]). +-import(queue_utils, [wait_for_messages/2]). all() -> [ - {group, dead_letter_tests} + {group, tests} ]. 
groups() -> @@ -28,12 +28,17 @@ groups() -> dead_letter_nack_requeue, dead_letter_nack_requeue_multiple, dead_letter_reject, + dead_letter_reject_expire_expire, dead_letter_reject_many, dead_letter_reject_requeue, dead_letter_max_length_drop_head, dead_letter_reject_requeue_reject_norequeue, + dead_letter_nack_requeue_nack_norequeue_basic_get, + dead_letter_nack_requeue_nack_norequeue_basic_consume, dead_letter_missing_exchange, dead_letter_routing_key, + dead_letter_headers_should_be_appended_for_each_event, + dead_letter_headers_should_not_be_appended_for_republish, dead_letter_routing_key_header_CC, dead_letter_routing_key_header_BCC, dead_letter_routing_key_cycle_max_length, @@ -60,12 +65,10 @@ groups() -> metric_expired_per_msg_msg_ttl], Opts = [shuffle], [ - {dead_letter_tests, Opts, + {tests, Opts, [ {classic_queue, Opts, [{at_most_once, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]}, {disabled, Opts, DisabledMetricTests}]}, - {mirrored_queue, Opts, [{at_most_once, Opts, [dead_letter_max_length_reject_publish_dlx | DeadLetterTests]}, - {disabled, Opts, DisabledMetricTests}]}, {quorum_queue, Opts, [{at_most_once, Opts, DeadLetterTests}, {disabled, Opts, DisabledMetricTests}, {at_least_once, Opts, DeadLetterTests -- @@ -75,10 +78,13 @@ groups() -> dead_letter_routing_key_cycle_max_length, dead_letter_headers_reason_maxlen, %% tested separately in rabbit_fifo_dlx_integration_SUITE - dead_letter_missing_exchange + dead_letter_missing_exchange, + dead_letter_routing_key_cycle_ttl ]} ] - }]}]. + }, + {stream_queue, Opts, [stream]} + ]}]. suite() -> [ @@ -90,9 +96,14 @@ suite() -> %% ------------------------------------------------------------------- init_per_suite(Config0) -> + Tick = 256, rabbit_ct_helpers:log_environment(), Config = rabbit_ct_helpers:merge_app_env( - Config0, {rabbit, [{dead_letter_worker_publisher_confirm_timeout, 2000}]}), + Config0, {rabbit, [{dead_letter_worker_publisher_confirm_timeout, 2000}, + {collect_statistics_interval, Tick}, + {channel_tick_interval, Tick}, + {quorum_tick_interval, Tick}, + {stream_tick_interval, Tick}]}), rabbit_ct_helpers:run_setup_steps(Config). 
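A reading aid for the wait_for_messages/2 assertions throughout this suite: each row appears to be [QueueName, Messages, MessagesReady, MessagesUnacknowledged] (our interpretation of the calls below, not something the patch states). A tiny sketch with a hypothetical helper name:

%% Illustrative only: wait until all N messages on QName are delivered but
%% not yet acknowledged.
wait_until_all_unacked(Config, QName, N) ->
    Bin = integer_to_binary(N),
    queue_utils:wait_for_messages(Config, [[QName, Bin, <<"0">>, Bin]]).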
end_per_suite(Config) -> @@ -103,14 +114,6 @@ init_per_group(classic_queue, Config) -> Config, [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]}, {queue_durable, false}]); -init_per_group(mirrored_queue, Config) -> - rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>, - <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]), - Config1 = rabbit_ct_helpers:set_config( - Config, [{is_mirrored, true}, - {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]}, - {queue_durable, false}]), - rabbit_ct_helpers:run_steps(Config1, []); init_per_group(quorum_queue, Config) -> rabbit_ct_helpers:set_config( Config, @@ -148,17 +151,15 @@ init_per_group(Group, Config) -> case lists:member({group, Group}, all()) of true -> ClusterSize = 3, - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, Group}, - {rmq_nodes_count, ClusterSize} - ]), - Config2 = rabbit_ct_helpers:run_steps( - Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - _ = rabbit_ct_broker_helpers:enable_feature_flag(Config2, - message_containers), - Config2; + Config1 = rabbit_ct_helpers:set_config( + Config, [ + {rmq_nodename_suffix, Group}, + {rmq_nodes_count, ClusterSize} + ]), + rabbit_ct_helpers:run_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()); false -> rabbit_ct_helpers:run_steps(Config, []) end. @@ -173,9 +174,24 @@ end_per_group(Group, Config) -> Config end. +init_per_testcase(T, Config) + when T =:= dead_letter_reject_expire_expire orelse + T =:= stream -> + case rabbit_ct_broker_helpers:enable_feature_flag(Config, message_containers_deaths_v2) of + ok -> + init_per_testcase0(T, Config); + {skip, _} = Skip -> + %% With feature flag message_containers_deaths_v2 disabled, test case: + %% * dead_letter_reject_expire_expire is known to fail due to https://github.com/rabbitmq/rabbitmq-server/issues/11159 + %% * stream is known to fail due to https://github.com/rabbitmq/rabbitmq-server/issues/11173 + Skip + end; init_per_testcase(Testcase, Config) -> + init_per_testcase0(Testcase, Config). + +init_per_testcase0(Testcase, Config) -> Group = proplists:get_value(name, ?config(tc_group_properties, Config)), - Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~tp", [Group, Testcase])), + Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])), Q2 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_2", [Group, Testcase])), Q3 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_3", [Group, Testcase])), Policy = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_policy", [Group, Testcase])), @@ -366,6 +382,65 @@ dead_letter_reject(Config) -> consume_empty(Ch, QName), ?assertEqual(1, counted(messages_dead_lettered_rejected_total, Config)). +dead_letter_reject_expire_expire(Config) -> + {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + %% In 3.13.0 - 3.13.2 there is a bug in mc:is_death_cycle/2 where the queue names matter: + %% https://github.com/rabbitmq/rabbitmq-server/issues/11159 + %% The following queue names triggered the bug because they affect the order returned by maps:keys/1. 
+ Q1 = <<"b">>, + Q2 = <<"a2">>, + Q3 = <<"a3">>, + Args = ?config(queue_args, Config), + Durable = ?config(queue_durable, Config), + + %% Test the followig topology message flow: + %% Q1 --rejected--> Q2 --expired--> Q3 --expired--> + %% Q1 --rejected--> Q2 --expired--> Q3 --expired--> + %% Q1 + + #'queue.declare_ok'{} = amqp_channel:call( + Ch, + #'queue.declare'{ + queue = Q1, + arguments = Args ++ [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Q2}], + durable = Durable}), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, + #'queue.declare'{ + queue = Q2, + arguments = Args ++ [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Q3}, + {<<"x-message-ttl">>, long, 5}], + durable = Durable}), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, + #'queue.declare'{ + queue = Q3, + arguments = Args ++ [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Q1}, + {<<"x-message-ttl">>, long, 5}], + durable = Durable}), + + %% Send a single message. + P = <<"msg">>, + publish(Ch, Q1, [P]), + wait_for_messages(Config, [[Q1, <<"1">>, <<"1">>, <<"0">>]]), + + %% Reject the 1st time. + [DTag1] = consume(Ch, Q1, [P]), + amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag1, + requeue = false}), + %% Message should now flow from Q1 -> Q2 -> Q3 -> Q1 + wait_for_messages(Config, [[Q1, <<"1">>, <<"1">>, <<"0">>]]), + + %% Reject the 2nd time. + [DTag2] = consume(Ch, Q1, [P]), + amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag2, + requeue = false}), + %% Message should again flow from Q1 -> Q2 -> Q3 -> Q1 + wait_for_messages(Config, [[Q1, <<"1">>, <<"1">>, <<"0">>]]). + %% 1) Many messages are rejected. They get dead-lettered in correct order. dead_letter_reject_many(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -501,6 +576,136 @@ dead_letter_reject_requeue_reject_norequeue(Config) -> consume_empty(Ch, QName), ?assertEqual(1, counted(messages_dead_lettered_rejected_total, Config)). +dead_letter_nack_requeue_nack_norequeue_basic_get(Config) -> + {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = ?config(queue_name, Config), + DLXQName = ?config(queue_name_dlx, Config), + ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?config(policy, Config), QName, + <<"queues">>, [{<<"delivery-limit">>, 50}]), + declare_dead_letter_queues(Ch, Config, QName, DLXQName), + + P1 = <<"msg1">>, + P2 = <<"msg2">>, + P3 = <<"msg3">>, + %% Publish 3 messages + publish(Ch, QName, [P1, P2, P3]), + + wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]), + [_DTag1, DTag2, _DTag3] = consume(Ch, QName, [P1, P2, P3]), + wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]), + + %% Nack 2 out of 3 with requeue + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2, + multiple = true, + requeue = true}), + wait_for_messages(Config, [[QName, <<"3">>, <<"2">>, <<"1">>]]), + + [_DTag4, DTag5] = consume(Ch, QName, [P1, P2]), + wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]), + + %% Nack all 3 without requeue + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag5, + multiple = true, + requeue = false}), + wait_for_messages(Config, [[DLXQName, <<"3">>, <<"3">>, <<"0">>]]), + + %% We should receive all 3 messages in the same order as we just nacked. 
+ [_, _, _] = consume(Ch, DLXQName, [P3, P1, P2]), + consume_empty(Ch, DLXQName), + consume_empty(Ch, QName), + ?assertEqual(3, counted(messages_dead_lettered_rejected_total, Config)). + +dead_letter_nack_requeue_nack_norequeue_basic_consume(Config) -> + {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = ?config(queue_name, Config), + DLXQName = ?config(queue_name_dlx, Config), + ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?config(policy, Config), QName, + <<"queues">>, [{<<"delivery-limit">>, 50}]), + declare_dead_letter_queues(Ch, Config, QName, DLXQName), + + %% Publish 3 messages + publish(Ch, QName, [<<"m1">>, <<"m2">>, <<"m3">>]), + + Ctag1 = <<"ctag 1">>, + amqp_channel:subscribe(Ch, + #'basic.consume'{queue = QName, + consumer_tag = Ctag1}, + self()), + receive #'basic.consume_ok'{consumer_tag = Ctag1} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + Ctag2 = <<"ctag 2">>, + amqp_channel:subscribe(Ch, + #'basic.consume'{queue = DLXQName, + consumer_tag = Ctag2}, + self()), + receive #'basic.consume_ok'{consumer_tag = Ctag2} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + receive {#'basic.deliver'{}, + #amqp_msg{payload = <<"m1">>}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + D2 = receive {#'basic.deliver'{delivery_tag = Del2}, + #amqp_msg{payload = <<"m2">>}} -> Del2 + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {#'basic.deliver'{}, + #amqp_msg{payload = <<"m3">>}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]), + + %% Nack 2 out of 3 with requeue + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = D2, + multiple = true, + requeue = true}), + + %% m1 and m2 should be redelivered in the same order. + receive {#'basic.deliver'{}, + #amqp_msg{payload = P1a}} -> + ?assertEqual(<<"m1">>, P1a) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + D5 = receive {#'basic.deliver'{delivery_tag = Del5}, + #amqp_msg{payload = P2a}} -> + ?assertEqual(<<"m2">>, P2a), + Del5 + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Nack all 3 without requeue + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = D5, + multiple = true, + requeue = false}), + + %% We should receive all 3 messages in the same order as we just nacked. + receive {#'basic.deliver'{}, + #amqp_msg{payload = P3b}} -> + ?assertEqual(<<"m3">>, P3b) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {#'basic.deliver'{}, + #amqp_msg{payload = P1b}} -> + ?assertEqual(<<"m1">>, P1b) + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + LastD = receive {#'basic.deliver'{delivery_tag = LastDel}, + #amqp_msg{payload = P2b}} -> + ?assertEqual(<<"m2">>, P2b), + LastDel + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + wait_for_messages(Config, [[DLXQName, <<"3">>, <<"0">>, <<"3">>]]), + + amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = LastD, + multiple = true}), + wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]), + wait_for_messages(Config, [[DLXQName, <<"0">>, <<"0">>, <<"0">>]]), + ?assertEqual(3, counted(messages_dead_lettered_rejected_total, Config)). 
+ %% Another strategy: reject-publish-dlx dead_letter_max_length_reject_publish_dlx(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -741,7 +946,9 @@ dead_letter_routing_key_cycle_max_length(Config) -> DeadLetterArgs = [{<<"x-max-length">>, long, 1}, {<<"x-dead-letter-exchange">>, longstr, <<>>}], - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, + arguments = DeadLetterArgs ++ Args, + durable = Durable}), P1 = <<"msg1">>, P2 = <<"msg2">>, @@ -766,7 +973,9 @@ dead_letter_routing_key_cycle_ttl(Config) -> DeadLetterArgs = [{<<"x-message-ttl">>, long, 1}, {<<"x-dead-letter-exchange">>, longstr, <<>>}], - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, + arguments = DeadLetterArgs ++ Args, + durable = Durable}), P1 = <<"msg1">>, P2 = <<"msg2">>, @@ -785,7 +994,9 @@ dead_letter_routing_key_cycle_with_reject(Config) -> QName = ?config(queue_name, Config), DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, <<>>}], - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, + arguments = DeadLetterArgs ++ Args, + durable = Durable}), P = <<"msg1">>, @@ -1072,6 +1283,101 @@ dead_letter_headers_cycle(Config) -> {array, [{table, Death2}]} = rabbit_misc:table_lookup(Headers2, <<"x-death">>), ?assertEqual({long, 2}, rabbit_misc:table_lookup(Death2, <<"count">>)). 
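The header tests below all dig entries out of the x-death header array. A small lookup sketch built from the rabbit_misc:table_lookup/2 calls used in those tests (the helper name is hypothetical):

%% Illustrative only: return the x-death entries recorded for QName in a
%% dead-lettered message's headers.
x_death_entries_for_queue(Headers, QName) ->
    {array, Entries} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
    [Entry || {table, Entry} <- Entries,
              rabbit_misc:table_lookup(Entry, <<"queue">>) =:= {longstr, QName}].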
+dead_letter_headers_should_be_appended_for_each_event(Config) -> + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + Args = ?config(queue_args, Config), + Durable = ?config(queue_durable, Config), + QName = ?config(queue_name, Config), + Dlx1Name = ?config(queue_name_dlx, Config), + Dlx2Name = ?config(queue_name_dlx_2, Config), + + DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Dlx1Name}], + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}), + DeadLetterArgsDlx = [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Dlx2Name}], + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Dlx1Name, arguments = DeadLetterArgsDlx ++ Args, durable = Durable}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Dlx2Name, arguments = Args, durable = Durable}), + + P = <<"msg1">>, + + %% Publish message + publish(Ch, QName, [P]), + wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]), + [DTag] = consume(Ch, QName, [P]), + wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]), + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag, + multiple = false, + requeue = false}), + wait_for_messages(Config, [[Dlx1Name, <<"1">>, <<"1">>, <<"0">>]]), + {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P, + props = #'P_basic'{headers = Headers1}}} = + amqp_channel:call(Ch, #'basic.get'{queue = Dlx1Name}), + {array, [{table, Death1}]} = rabbit_misc:table_lookup(Headers1, <<"x-death">>), + ?assertEqual({longstr, QName}, rabbit_misc:table_lookup(Death1, <<"queue">>)), + + wait_for_messages(Config, [[Dlx1Name, <<"1">>, <<"0">>, <<"1">>]]), + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1, + multiple = false, + requeue = false}), + %% Message is being republished + wait_for_messages(Config, [[Dlx2Name, <<"1">>, <<"1">>, <<"0">>]]), + {#'basic.get_ok'{}, #amqp_msg{payload = P, + props = #'P_basic'{headers = Headers2}}} = + amqp_channel:call(Ch, #'basic.get'{queue = Dlx2Name}), + {array, [{table, DeathDlx}, {table, _DeathQ}]} = rabbit_misc:table_lookup(Headers2, <<"x-death">>), + ?assertEqual({longstr, Dlx1Name}, rabbit_misc:table_lookup(DeathDlx, <<"queue">>)), + ok = rabbit_ct_client_helpers:close_connection(Conn). 
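For reference, each element of the x-death array inspected above is an AMQP table; a representative entry looks roughly like the following (the field names match the assertions in this suite, the values are made up):

    %% Illustrative only: one x-death entry per queue a message was
    %% dead-lettered from, most recent first.
    example_death_entry() ->
        [{<<"queue">>,        longstr,   <<"source">>},
         {<<"reason">>,       longstr,   <<"rejected">>},
         {<<"exchange">>,     longstr,   <<>>},
         {<<"count">>,        long,      1},
         {<<"routing-keys">>, array,     [{longstr, <<"source">>}]},
         {<<"time">>,         timestamp, 1700000000}].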
+ +dead_letter_headers_should_not_be_appended_for_republish(Config) -> + %% here we (re-)publish a message with the DL headers already set + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + Args = ?config(queue_args, Config), + Durable = ?config(queue_durable, Config), + QName = ?config(queue_name, Config), + DlxName = ?config(queue_name_dlx, Config), + + DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, DlxName}], + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DlxName, arguments = Args, durable = Durable}), + + P = <<"msg1">>, + + %% Publish message + publish(Ch, QName, [P]), + wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]), + [DTag] = consume(Ch, QName, [P]), + wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]), + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag, + multiple = false, + requeue = false}), + wait_for_messages(Config, [[DlxName, <<"1">>, <<"1">>, <<"0">>]]), + {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P, + props = #'P_basic'{headers = Headers1}}} = + amqp_channel:call(Ch, #'basic.get'{queue = DlxName}), + {array, [{table, Death1}]} = rabbit_misc:table_lookup(Headers1, <<"x-death">>), + ?assertEqual({longstr, <<"rejected">>}, rabbit_misc:table_lookup(Death1, <<"reason">>)), + + amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag1}), + + wait_for_messages(Config, [[DlxName, <<"0">>, <<"0">>, <<"0">>]]), + + #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), + DeadLetterArgs1 = DeadLetterArgs ++ [{<<"x-message-ttl">>, long, 1}], + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs1 ++ Args, durable = Durable}), + + publish(Ch, QName, [P], Headers1), + + wait_for_messages(Config, [[DlxName, <<"1">>, <<"1">>, <<"0">>]]), + {#'basic.get_ok'{}, #amqp_msg{payload = P, + props = #'P_basic'{headers = Headers2}}} = + amqp_channel:call(Ch, #'basic.get'{queue = DlxName}), + + {array, [{table, Death2}]} = rabbit_misc:table_lookup(Headers2, <<"x-death">>), + ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Death2, <<"reason">>)), + ok = rabbit_ct_client_helpers:close_connection(Conn). + %% Dead-lettering a message modifies its headers: %% the exchange name is replaced with that of the latest dead-letter exchange, %% the routing key may be replaced with that specified in a queue performing dead lettering, @@ -1117,12 +1423,15 @@ dead_letter_headers_CC(Config) -> multiple = false, requeue = false}), wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]), - {#'basic.get_ok'{}, #amqp_msg{payload = P1, - props = #'P_basic'{headers = Headers3}}} = + {#'basic.get_ok'{delivery_tag = DTag2}, #amqp_msg{payload = P1, + props = #'P_basic'{headers = Headers3}}} = amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}), consume_empty(Ch, QName), ?assertEqual({array, [{longstr, DLXQName}]}, rabbit_misc:table_lookup(Headers3, <<"CC">>)), - ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)). + ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)), + amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag2, + multiple = true}), + wait_for_messages(Config, [[DLXQName, <<"0">>, <<"0">>, <<"0">>]]). 
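The CC/BCC cases below rely on publishing with both sentinel headers set; a rough sketch of that publish (queue name and header values invented) and of the behaviour the updated assertions expect:

    %% Hypothetical sketch, not part of the suite.
    publish_with_cc_and_bcc(Ch) ->
        Headers = [{<<"CC">>,  array, [{longstr, <<"cc 1">>}]},
                   {<<"BCC">>, array, [{longstr, <<"bcc 1">>}]}],
        amqp_channel:call(Ch,
                          #'basic.publish'{routing_key = <<"source">>},
                          #amqp_msg{payload = <<"msg1">>,
                                    props   = #'P_basic'{headers = Headers}}).
    %% After dead lettering, BCC is always removed, CC is kept as of
    %% RabbitMQ 3.13 (message containers), and only the CC keys are recorded
    %% in the routing-keys field of the x-death entry.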
%% 15) CC header is removed when routing key is specified dead_letter_headers_CC_with_routing_key(Config) -> @@ -1133,7 +1442,6 @@ dead_letter_headers_CC_with_routing_key(Config) -> Durable = ?config(queue_durable, Config), DLXExchange = ?config(dlx_exchange, Config), - %% Do not use a specific key for dead lettering, the CC header is passed DeadLetterArgs = [{<<"x-dead-letter-routing-key">>, longstr, DLXQName}, {<<"x-dead-letter-exchange">>, longstr, DLXExchange}], #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}), @@ -1170,10 +1478,9 @@ dead_letter_headers_CC_with_routing_key(Config) -> props = #'P_basic'{headers = Headers3}}} = amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}), consume_empty(Ch, QName), - %% TODO: commented out assert, - %% this only checks that the message was mutated, which is bad not that - %% it wasn't included in routing - % ?assertEqual(undefined, rabbit_misc:table_lookup(Headers3, <<"CC">>)), + %% we keep the CC header as of RabbitMQ 3.13 (with message containers) + %% to avoid mutating the message + ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"CC">>)), ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)). %% 16) the BCC header will always be removed @@ -1195,17 +1502,18 @@ dead_letter_headers_BCC(Config) -> routing_key = DLXQName}), P1 = <<"msg1">>, - BCCHeader = {<<"BCC">>, array, [{longstr, DLXQName}]}, - publish(Ch, QName, [P1], [BCCHeader]), + CCHeader = {<<"CC">>, array, [{longstr, <<"cc 1">>}, {longstr, <<"cc 2">>}]}, + BCCHeader = {<<"BCC">>, array, [{longstr, DLXQName}, {longstr, <<"bcc 2">>}]}, + publish(Ch, QName, [P1], [CCHeader, BCCHeader]), %% Message is published to both queues because of BCC header and DLX queue bound to both %% exchanges wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]), {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P1, props = #'P_basic'{headers = Headers1}}} = - amqp_channel:call(Ch, #'basic.get'{queue = QName}), + amqp_channel:call(Ch, #'basic.get'{queue = QName}), {#'basic.get_ok'{}, #amqp_msg{payload = P1, props = #'P_basic'{headers = Headers2}}} = - amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}), + amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}), %% We check the headers to ensure no dead lettering has happened ?assertEqual(undefined, header_lookup(Headers1, <<"x-death">>)), ?assertEqual(undefined, header_lookup(Headers2, <<"x-death">>)), @@ -1217,10 +1525,15 @@ dead_letter_headers_BCC(Config) -> wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]), {#'basic.get_ok'{}, #amqp_msg{payload = P1, props = #'P_basic'{headers = Headers3}}} = - amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}), + amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}), consume_empty(Ch, QName), ?assertEqual(undefined, rabbit_misc:table_lookup(Headers3, <<"BCC">>)), - ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)). + {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers3, <<"x-death">>), + {array, RKeys0} = rabbit_misc:table_lookup(Death, <<"routing-keys">>), + RKeys = [RKey || {longstr, RKey} <- RKeys0], + %% routing-keys in the death history should include CC but exclude BCC keys + ?assertEqual(lists:sort([QName, <<"cc 1">>, <<"cc 2">>]), + lists:sort(RKeys)). %% Three top-level headers are added for the very first dead-lettering event. 
%% They are @@ -1453,6 +1766,103 @@ metric_expired_per_msg_msg_ttl(Config) -> || Payload <- Payloads], ?awaitMatch(1000, counted(messages_dead_lettered_expired_total, Config), 3000, 300). +%% The final dead letter queue is a stream. +stream(Config) -> + Ch0 = rabbit_ct_client_helpers:open_channel(Config, 0), + Ch1 = rabbit_ct_client_helpers:open_channel(Config, 1), + Q1 = <<"q1">>, + Q2 = <<"q2">>, + Q3 = <<"q3">>, + #'queue.declare_ok'{} = amqp_channel:call( + Ch0, + #'queue.declare'{queue = Q1, + arguments = [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Q2}]}), + #'queue.declare_ok'{} = amqp_channel:call( + Ch0, + #'queue.declare'{queue = Q2, + arguments = [{<<"x-message-ttl">>, long, 2500}, + {<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Q3}]}), + #'queue.declare_ok'{} = amqp_channel:call( + Ch1, + #'queue.declare'{queue = Q3, + arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 1}], + durable = true}), + + Payload = <<"my payload">>, + %% Message should travel Q1 -> Q2 -> Q3 + amqp_channel:call( + Ch0, + #'basic.publish'{routing_key = Q1}, + #amqp_msg{payload = Payload, + props = #'P_basic'{expiration = <<"0">>, + headers = [{<<"CC">>, array, [{longstr, <<"cc 1">>}, + {longstr, <<"cc 2">>}]}, + {<<"BCC">>, array, [{longstr, <<"bcc 1">>}, + {longstr, <<"bcc 2">>}]} + ]} + }), + + #'basic.qos_ok'{} = amqp_channel:call(Ch1, #'basic.qos'{prefetch_count = 1}), + Ctag = <<"my ctag">>, + amqp_channel:subscribe( + Ch1, + #'basic.consume'{queue = Q3, + consumer_tag = Ctag, + arguments = [{<<"x-stream-offset">>, longstr, <<"first">>}]}, + self()), + receive + #'basic.consume_ok'{consumer_tag = Ctag} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + Headers = receive {#'basic.deliver'{delivery_tag = DeliveryTag}, + #amqp_msg{payload = Payload, + props = #'P_basic'{headers = Headers0} + }} -> + ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag, + multiple = false}), + Headers0 + after 10_000 -> ct:fail({missing_event, ?LINE}) + end, + + Reason = <<"expired">>, + ?assertEqual({longstr, Reason}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), + ?assertEqual({longstr, Q1}, rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)), + ?assertEqual({longstr, Reason}, rabbit_misc:table_lookup(Headers, <<"x-last-death-reason">>)), + ?assertEqual({longstr, Q2}, rabbit_misc:table_lookup(Headers, <<"x-last-death-queue">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-last-death-exchange">>)), + + %% We expect the array to be ordered by death recency. 
+ {array, [{table, Death2}, {table, Death1}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>), + + ?assertEqual({longstr, Q1}, rabbit_misc:table_lookup(Death1, <<"queue">>)), + ?assertEqual({longstr, Reason}, rabbit_misc:table_lookup(Death1, <<"reason">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death1, <<"exchange">>)), + ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death1, <<"count">>)), + %% routing-keys in the death history should include CC but exclude BCC keys + ?assertEqual({array, [{longstr, Q1}, + {longstr, <<"cc 1">>}, + {longstr, <<"cc 2">>}]}, + rabbit_misc:table_lookup(Death1, <<"routing-keys">>)), + ?assertEqual({longstr, <<"0">>}, rabbit_misc:table_lookup(Death1, <<"original-expiration">>)), + {timestamp, T1} = rabbit_misc:table_lookup(Death1, <<"time">>), + + ?assertEqual({longstr, Q2}, rabbit_misc:table_lookup(Death2, <<"queue">>)), + ?assertEqual({longstr, Reason}, rabbit_misc:table_lookup(Death2, <<"reason">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death2, <<"exchange">>)), + ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death2, <<"count">>)), + ?assertEqual({array, [{longstr, Q2}]}, rabbit_misc:table_lookup(Death2, <<"routing-keys">>)), + ?assertEqual(undefined, rabbit_misc:table_lookup(Death2, <<"original-expiration">>)), + {timestamp, T2} = rabbit_misc:table_lookup(Death2, <<"time">>), + ?assert(T1 < T2), + + ok = rabbit_ct_client_helpers:close_channel(Ch0), + ok = rabbit_ct_client_helpers:close_channel(Ch1). + %%%%%%%%%%%%%%%%%%%%%%%% %% Test helpers %%%%%%%%%%%%%%%%%%%%%%%% @@ -1500,13 +1910,6 @@ consume(Ch, QName, Payloads) -> consume_empty(Ch, QName) -> #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}). -sync_mirrors(QName, Config) -> - case ?config(is_mirrored, Config) of - true -> - rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]); - _ -> ok - end. - get_global_counters(Config) -> rabbit_ct_broker_helpers:rpc(Config, rabbit_global_counters, overview, []). diff --git a/deps/rabbit/test/definition_import_SUITE.erl b/deps/rabbit/test/definition_import_SUITE.erl index 5276c6a57136..327301f00ede 100644 --- a/deps/rabbit/test/definition_import_SUITE.erl +++ b/deps/rabbit/test/definition_import_SUITE.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(definition_import_SUITE). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). -include_lib("common_test/include/ct.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). @@ -53,9 +52,10 @@ groups() -> import_case17, import_case18, import_case19, - import_case20 + import_case20, + import_case21 ]}, - + {boot_time_import_using_classic_source, [], [ import_on_a_booting_node_using_classic_local_source ]}, @@ -185,9 +185,9 @@ end_per_testcase(Testcase, Config) -> %% Tests %% -import_case1(Config) -> import_file_case(Config, "case1"). +import_case1(Config) -> import_invalid_file_case_in_khepri(Config, "case1"). import_case2(Config) -> import_file_case(Config, "case2"). -import_case3(Config) -> import_file_case(Config, "case3"). +import_case3(Config) -> import_invalid_file_case_in_khepri(Config, "case3"). 
import_case4(Config) -> import_file_case(Config, "case4"). import_case6(Config) -> import_file_case(Config, "case6"). import_case7(Config) -> import_file_case(Config, "case7"). @@ -210,8 +210,9 @@ import_case11(Config) -> import_file_case(Config, "case11"). import_case12(Config) -> import_invalid_file_case(Config, "failing_case12"). import_case13(Config) -> - import_file_case(Config, "case13"), VHost = <<"/">>, + delete_vhost(Config, VHost), + import_file_case(Config, "case13"), QueueName = <<"definitions.import.case13.qq.1">>, QueueIsImported = fun () -> @@ -230,8 +231,9 @@ import_case13(Config) -> amqqueue:get_arguments(Q)). import_case13a(Config) -> - import_file_case(Config, "case13"), VHost = <<"/">>, + delete_vhost(Config, VHost), + import_file_case(Config, "case13"), QueueName = <<"definitions.import.case13.qq.1">>, QueueIsImported = fun () -> @@ -253,20 +255,26 @@ import_case14(Config) -> import_file_case(Config, "case14"). import_case15(Config) -> import_file_case(Config, "case15"). %% contains a virtual host with tags import_case16(Config) -> - import_file_case(Config, "case16"), VHost = <<"tagged">>, + delete_vhost(Config, VHost), + import_file_case(Config, "case16"), VHostIsImported = fun () -> case vhost_lookup(Config, VHost) of - {error, {no_such_vhosts, _}} -> false; + {error, {no_such_vhost, _}} -> false; + {error, _} -> false; _ -> true end end, rabbit_ct_helpers:await_condition(VHostIsImported, 20000), VHostRec = vhost_lookup(Config, VHost), - ?assertEqual(<<"A case16 description">>, vhost:get_description(VHostRec)), - ?assertEqual(<<"quorum">>, vhost:get_default_queue_type(VHostRec)), - ?assertEqual([multi_dc_replication,ab,cde], vhost:get_tags(VHostRec)), + case VHostRec of + {error, _} -> ct:fail("Failed to import virtual host named 'tagged' in case 16"); + Val when is_tuple(Val) -> + ?assertEqual(<<"A case16 description">>, vhost:get_description(VHostRec)), + ?assertEqual(<<"quorum">>, vhost:get_default_queue_type(VHostRec)), + ?assertEqual([multi_dc_replication,ab,cde], vhost:get_tags(VHostRec)) + end, ok. @@ -305,6 +313,8 @@ import_case20(Config) -> {skip, "Should not run in mixed version environments"} end. +import_case21(Config) -> import_invalid_file_case(Config, "failing_case21"). + export_import_round_trip_case1(Config) -> case rabbit_ct_helpers:is_mixed_versions() of false -> @@ -385,6 +395,14 @@ import_file_case(Config, Subdirectory, CaseName) -> ok. import_invalid_file_case(Config, CaseName) -> + CasePath = filename:join(?config(data_dir, Config), CaseName ++ ".json"), + try + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_invalid_import_case, [CasePath]) + catch _:_:_ -> ok + end, + ok. + +import_invalid_file_case_in_khepri(Config, CaseName) -> CasePath = filename:join(?config(data_dir, Config), CaseName ++ ".json"), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_invalid_import_case, [CasePath]), ok. @@ -470,7 +488,6 @@ run_invalid_import_case_if_unchanged(Path) -> {error, _E} -> ok end. - queue_lookup(Config, VHost, Name) -> rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, lookup, [rabbit_misc:r(VHost, queue, Name)]). @@ -479,3 +496,6 @@ vhost_lookup(Config, VHost) -> user_lookup(Config, User) -> rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, lookup_user, [User]). + +delete_vhost(Config, VHost) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, delete, [VHost, <<"CT tests">>]). 
\ No newline at end of file diff --git a/deps/rabbit/test/definition_import_SUITE_data/failing_case21.json b/deps/rabbit/test/definition_import_SUITE_data/failing_case21.json new file mode 100644 index 000000000000..2e318367e4e5 --- /dev/null +++ b/deps/rabbit/test/definition_import_SUITE_data/failing_case21.json @@ -0,0 +1,33 @@ +{ + "rabbit_version": "3.12.8", + "parameters": [], + "policies": [], + "queues": [ + { + "name": "qq.1", + "durable": true, + "auto_delete": false, + "arguments": { + "x-queue-type": "quorum" + } + }, + { + "name": "cq.1", + "durable": true, + "auto_delete": false, + "arguments": { + "x-queue-type": "classic" + } + }, + { + "name": "sq.1", + "durable": true, + "auto_delete": false, + "arguments": { + "x-queue-type": "stream" + } + } + ], + "exchanges": [], + "bindings": [] +} \ No newline at end of file diff --git a/deps/rabbit/test/deprecated_features_SUITE.erl b/deps/rabbit/test/deprecated_features_SUITE.erl index 603f9a88fe05..6d8ead9d371a 100644 --- a/deps/rabbit/test/deprecated_features_SUITE.erl +++ b/deps/rabbit/test/deprecated_features_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(deprecated_features_SUITE). @@ -36,6 +36,8 @@ get_appropriate_warning_when_disconnected/1, get_appropriate_warning_when_removed/1, deprecated_feature_enabled_if_feature_flag_depends_on_it/1, + list_all_deprecated_features/1, + list_used_deprecated_features/1, feature_is_unused/1, feature_is_used/1 @@ -67,7 +69,9 @@ groups() -> get_appropriate_warning_when_denied, get_appropriate_warning_when_disconnected, get_appropriate_warning_when_removed, - deprecated_feature_enabled_if_feature_flag_depends_on_it + deprecated_feature_enabled_if_feature_flag_depends_on_it, + list_all_deprecated_features, + list_used_deprecated_features ], [ {cluster_size_1, [], Tests}, @@ -726,3 +730,50 @@ deprecated_feature_enabled_if_feature_flag_depends_on_it(Config) -> ok end ) || Node <- AllNodes]. + +list_all_deprecated_features(Config) -> + [FirstNode | _] = AllNodes = ?config(nodes, Config), + feature_flags_v2_SUITE:connect_nodes(AllNodes), + feature_flags_v2_SUITE:override_running_nodes(AllNodes), + + FeatureName = ?FUNCTION_NAME, + FeatureFlags = #{FeatureName => + #{provided_by => rabbit, + deprecation_phase => permitted_by_default}}, + ?assertEqual( + ok, + feature_flags_v2_SUITE:inject_on_nodes(AllNodes, FeatureFlags)), + + feature_flags_v2_SUITE:run_on_node( + FirstNode, + fun() -> + Map = rabbit_deprecated_features:list(all), + ?assert(maps:is_key(FeatureName, Map)) + end). 
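A condensed sketch of what this test and list_used_deprecated_features below assert; the feature names here are hypothetical:

    %% Illustrative only.
    check_deprecated_feature_listing() ->
        All  = rabbit_deprecated_features:list(all),
        Used = rabbit_deprecated_features:list(used),
        %% Every known deprecated feature appears in list(all).
        true  = maps:is_key(some_deprecated_feature, All),
        %% Only features whose is_feature_used callback returns true are
        %% included in list(used).
        false = maps:is_key(some_unused_feature, Used),
        ok.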
+ +list_used_deprecated_features(Config) -> + [FirstNode | _] = AllNodes = ?config(nodes, Config), + feature_flags_v2_SUITE:connect_nodes(AllNodes), + feature_flags_v2_SUITE:override_running_nodes(AllNodes), + + UsedFeatureName = used_deprecated_feature, + UnusedFeatureName = unused_deprecated_feature, + FeatureFlags = #{UsedFeatureName => + #{provided_by => rabbit, + deprecation_phase => permitted_by_default, + callbacks => #{is_feature_used => {?MODULE, feature_is_used}}}, + UnusedFeatureName => + #{provided_by => rabbit, + deprecation_phase => permitted_by_default, + callbacks => #{is_feature_used => {?MODULE, feature_is_unused}}}}, + ?assertEqual( + ok, + feature_flags_v2_SUITE:inject_on_nodes(AllNodes, FeatureFlags)), + + feature_flags_v2_SUITE:run_on_node( + FirstNode, + fun() -> + Map = rabbit_deprecated_features:list(used), + ?assertNot(maps:is_key(UnusedFeatureName, Map)), + ?assert(maps:is_key(UsedFeatureName, Map)) + end). diff --git a/deps/rabbit/test/direct_exchange_routing_v2_SUITE.erl b/deps/rabbit/test/direct_exchange_routing_v2_SUITE.erl index 083ad75b66ff..abef0dd18748 100644 --- a/deps/rabbit/test/direct_exchange_routing_v2_SUITE.erl +++ b/deps/rabbit/test/direct_exchange_routing_v2_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(direct_exchange_routing_v2_SUITE). @@ -61,14 +61,21 @@ end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). init_per_group(Group = cluster_size_1, Config0) -> - Config = rabbit_ct_helpers:set_config(Config0, {rmq_nodes_count, 1}), + Config = rabbit_ct_helpers:set_config(Config0, [{rmq_nodes_count, 1}, + {metadata_store, mnesia}]), start_broker(Group, Config); init_per_group(Group = cluster_size_2, Config0) -> - Config = rabbit_ct_helpers:set_config(Config0, {rmq_nodes_count, 2}), + Config = rabbit_ct_helpers:set_config(Config0, [{rmq_nodes_count, 2}, + {metadata_store, mnesia}]), + start_broker(Group, Config); +init_per_group(Group = cluster_size_3, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{rmq_nodes_count, 3}, + {metadata_store, mnesia}]), start_broker(Group, Config); init_per_group(Group = unclustered_cluster_size_2, Config0) -> Config = rabbit_ct_helpers:set_config(Config0, [{rmq_nodes_count, 2}, - {rmq_nodes_clustered, false}]), + {rmq_nodes_clustered, false}, + {metadata_store, mnesia}]), start_broker(Group, Config). 
start_broker(Group, Config0) -> @@ -304,8 +311,8 @@ route_exchange_to_exchange(Config) -> bind_queue(Ch, Q2, FanoutX, <<"ignored">>), publish(Ch, DirectX, RKey), - quorum_queue_utils:wait_for_messages(Config, [[Q1, <<"1">>, <<"1">>, <<"0">>]]), - quorum_queue_utils:wait_for_messages(Config, [[Q2, <<"1">>, <<"1">>, <<"0">>]]), + queue_utils:wait_for_messages(Config, [[Q1, <<"1">>, <<"1">>, <<"0">>]]), + queue_utils:wait_for_messages(Config, [[Q2, <<"1">>, <<"1">>, <<"0">>]]), ?assertEqual(1, table_size(Config, ?INDEX_TABLE_NAME)), %% cleanup diff --git a/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl b/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl index c0f2d15c70c5..b44c6de1440f 100644 --- a/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl +++ b/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(disconnect_detected_during_alarm_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/disk_monitor_SUITE.erl b/deps/rabbit/test/disk_monitor_SUITE.erl index 9e480662f695..609ea283441e 100644 --- a/deps/rabbit/test/disk_monitor_SUITE.erl +++ b/deps/rabbit/test/disk_monitor_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(disk_monitor_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/dummy_event_receiver.erl b/deps/rabbit/test/dummy_event_receiver.erl index 5c8327711fbf..4de78401192b 100644 --- a/deps/rabbit/test/dummy_event_receiver.erl +++ b/deps/rabbit/test/dummy_event_receiver.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(dummy_event_receiver). 
diff --git a/deps/rabbit/test/dummy_interceptor.erl b/deps/rabbit/test/dummy_interceptor.erl index d173e1474a26..836ea744962f 100644 --- a/deps/rabbit/test/dummy_interceptor.erl +++ b/deps/rabbit/test/dummy_interceptor.erl @@ -20,11 +20,15 @@ intercept(#'basic.publish'{} = Method, Content, _IState) -> {Method, Content2}; %% Use 'queue.declare' to test #amqp_error{} handling -intercept(#'queue.declare'{queue = <<"failing-q">>}, _Content, _IState) -> +intercept(#'queue.declare'{queue = <<"failing-with-amqp-error-q">>}, _Content, _IState) -> rabbit_misc:amqp_error( 'precondition_failed', "operation not allowed", [], 'queue.declare'); +intercept(#'queue.declare'{queue = QName = <<"crashing-with-amqp-exception-q">>}, _Content, _IState) -> + QRes = rabbit_misc:r(<<"/">>, queue, QName), + rabbit_misc:assert_field_equivalence(true, false, QRes, durable); + intercept(Method, Content, _VHost) -> {Method, Content}. diff --git a/deps/rabbit/test/dummy_runtime_parameters.erl b/deps/rabbit/test/dummy_runtime_parameters.erl index 6da40891ae09..0db6340dbeb8 100644 --- a/deps/rabbit/test/dummy_runtime_parameters.erl +++ b/deps/rabbit/test/dummy_runtime_parameters.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(dummy_runtime_parameters). diff --git a/deps/rabbit/test/dummy_supervisor2.erl b/deps/rabbit/test/dummy_supervisor2.erl index 7377968f050a..49fbfb309b73 100644 --- a/deps/rabbit/test/dummy_supervisor2.erl +++ b/deps/rabbit/test/dummy_supervisor2.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(dummy_supervisor2). diff --git a/deps/rabbit/test/dynamic_ha_SUITE.erl b/deps/rabbit/test/dynamic_ha_SUITE.erl deleted file mode 100644 index 1d174b9f5ece..000000000000 --- a/deps/rabbit/test/dynamic_ha_SUITE.erl +++ /dev/null @@ -1,1044 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(dynamic_ha_SUITE). - -%% rabbit_tests:test_dynamic_mirroring() is a unit test which should -%% test the logic of what all the policies decide to do, so we don't -%% need to exhaustively test that here. What we need to test is that: -%% -%% * Going from non-mirrored to mirrored works and vice versa -%% * Changing policy can add / remove mirrors and change the master -%% * Adding a node will create a new mirror when there are not enough nodes -%% for the policy -%% * Removing a node will not create a new mirror even if the policy -%% logic wants it (since this gives us a good way to lose messages -%% on cluster shutdown, by repeated failover to new nodes) -%% -%% The first two are change_policy, the last two are change_cluster - --include_lib("common_test/include/ct.hrl"). 
--include_lib("proper/include/proper.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). --include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). - --compile(nowarn_export_all). --compile(export_all). - --define(QNAME, <<"ha.test">>). --define(POLICY, <<"^ha.test$">>). %% " emacs --define(VHOST, <<"/">>). - -all() -> - [ - {group, unclustered}, - {group, clustered} - ]. - -groups() -> - [ - {unclustered, [], [ - {cluster_size_5, [], [ - change_cluster - ]} - ]}, - {clustered, [], [ - {cluster_size_2, [], [ - vhost_deletion, - force_delete_if_no_master, - promote_on_shutdown, - promote_on_failure, - follower_recovers_after_vhost_failure, - follower_recovers_after_vhost_down_and_up, - master_migrates_on_vhost_down, - follower_recovers_after_vhost_down_and_master_migrated, - queue_survive_adding_dead_vhost_mirror, - dynamic_mirroring - ]}, - {cluster_size_3, [], [ - change_policy, - rapid_change, - nodes_policy_should_pick_master_from_its_params, - promote_follower_after_standalone_restart, - queue_survive_adding_dead_vhost_mirror, - rebalance_all, - rebalance_exactly, - rebalance_nodes, - rebalance_multiple_blocked - ]} - ]} - ]. - -%% ------------------------------------------------------------------- -%% Testsuite setup/teardown. -%% ------------------------------------------------------------------- - -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -init_per_group(unclustered, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]); -init_per_group(clustered, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]); -init_per_group(cluster_size_2, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); -init_per_group(cluster_size_3, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]); -init_per_group(cluster_size_5, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}]). - -end_per_group(_, Config) -> - Config. - -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), - ClusterSize = ?config(rmq_nodes_count, Config), - TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, Testcase}, - {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} - ]), - Config2 = rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - case Testcase of - change_cluster -> - %% do not enable message_containers feature flag as it will stop - %% nodes in mixed versions joining later - ok; - _ -> - _ = rabbit_ct_broker_helpers:enable_feature_flag(Config2, message_containers) - end, - Config2. - -end_per_testcase(Testcase, Config) -> - Config1 = rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). - -%% ------------------------------------------------------------------- -%% Test Cases -%% ------------------------------------------------------------------- - -dynamic_mirroring(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, dynamic_mirroring1, [Config]). 
- -dynamic_mirroring1(_Config) -> - %% Just unit tests of the node selection logic, see multi node - %% tests for the rest... - Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params, - {MNode, SNodes, SSNodes}, All) -> - {ok, M} = rabbit_mirror_queue_misc:module(Policy), - {NewM, NewSs0} = M:suggested_queue_nodes( - Params, MNode, SNodes, SSNodes, All), - NewSs1 = lists:sort(NewSs0), - case dm_list_match(NewSs, NewSs1, ExtraSs) of - ok -> ok; - error -> exit({no_match, NewSs, NewSs1, ExtraSs}) - end - end, - - Test({a,[b,c],0},<<"all">>,'_',{a,[], []}, [a,b,c]), - Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]), - Test({a,[b,c],0},<<"all">>,'_',{a,[d], [d]}, [a,b,c]), - - N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end, - - %% Add a node - Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]), - Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]), - %% Add two nodes and drop one - Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]), - %% Don't try to include nodes that are not running - Test({a,[b], 0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]), - %% If we can't find any of the nodes listed then just keep the master - Test({a,[], 0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]), - %% And once that's happened, still keep the master even when not listed, - %% if nothing is synced - Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[], []}, [a,b,c,d]), - Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[b],[]}, [a,b,c,d]), - %% But if something is synced we can lose the master - but make - %% sure we pick the new master from the nodes which are synced! - Test({b,[c], 0},<<"nodes">>,N([b,c]), {a,[b],[b]},[a,b,c,d]), - Test({b,[c], 0},<<"nodes">>,N([c,b]), {a,[b],[b]},[a,b,c,d]), - - Test({a,[], 1},<<"exactly">>,2,{a,[], []}, [a,b,c,d]), - Test({a,[], 2},<<"exactly">>,3,{a,[], []}, [a,b,c,d]), - Test({a,[c], 0},<<"exactly">>,2,{a,[c], [c]}, [a,b,c,d]), - Test({a,[c], 1},<<"exactly">>,3,{a,[c], [c]}, [a,b,c,d]), - Test({a,[c], 0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]), - Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]), - - passed. - -%% Does the first list match the second where the second is required -%% to have exactly Extra superfluous items? -dm_list_match([], [], 0) -> ok; -dm_list_match(_, [], _Extra) -> error; -dm_list_match([H|T1], [H |T2], Extra) -> dm_list_match(T1, T2, Extra); -dm_list_match(L1, [_H|T2], Extra) -> dm_list_match(L1, T2, Extra - 1). - -change_policy(Config) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - - %% When we first declare a queue with no policy, it's not HA. 
- amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}), - timer:sleep(200), - assert_followers(A, ?QNAME, {A, ''}), - - %% Give it policy "all", it becomes HA and gets all mirrors - rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, <<"all">>), - assert_followers(A, ?QNAME, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]), - - %% Give it policy "nodes", it gets specific mirrors - rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, - {<<"nodes">>, [atom_to_binary(A), - atom_to_binary(B)]}), - assert_followers(A, ?QNAME, {A, [B]}, [{A, [B, C]}]), - - %% Now explicitly change the mirrors - rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, - {<<"nodes">>, [atom_to_binary(A), - atom_to_binary(C)]}), - assert_followers(A, ?QNAME, {A, [C]}, [{A, [B, C]}]), - - %% Clear the policy, and we go back to non-mirrored - ok = rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY), - assert_followers(A, ?QNAME, {A, ''}), - - %% Test switching "away" from an unmirrored node - rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, - {<<"nodes">>, [atom_to_binary(B), - atom_to_binary(C)]}), - assert_followers(A, ?QNAME, {B, [C]}, [{A, []}, {A, [B]}, {A, [C]}, {A, [B, C]}]), - - ok. - -change_cluster(Config) -> - [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - rabbit_ct_broker_helpers:cluster_nodes(Config, [A, B, C]), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - - amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}), - assert_followers(A, ?QNAME, {A, ''}), - - %% Give it policy exactly 4, it should mirror to all 3 nodes - rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, {<<"exactly">>, 4}), - assert_followers(A, ?QNAME, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]), - - %% Add D and E, D or E joins in - rabbit_ct_broker_helpers:cluster_nodes(Config, [A, D, E]), - assert_followers(A, ?QNAME, [{A, [B, C, D]}, {A, [B, C, E]}], [{A, [B, C]}]), - - %% Remove one, the other joins in - rabbit_ct_broker_helpers:stop_node(Config, D), - assert_followers(A, ?QNAME, [{A, [B, C, D]}, {A, [B, C, E]}], [{A, [B, C]}]), - - ok. - -rapid_change(Config) -> - A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - {_Pid, MRef} = spawn_monitor( - fun() -> - [rapid_amqp_ops(ACh, I) || I <- lists:seq(1, 100)] - end), - rapid_loop(Config, A, MRef), - ok. - -rapid_amqp_ops(Ch, I) -> - Payload = list_to_binary(integer_to_list(I)), - amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}), - amqp_channel:cast(Ch, #'basic.publish'{exchange = <<"">>, - routing_key = ?QNAME}, - #amqp_msg{payload = Payload}), - amqp_channel:subscribe(Ch, #'basic.consume'{queue = ?QNAME, - no_ack = true}, self()), - receive #'basic.consume_ok'{} -> ok - end, - receive {#'basic.deliver'{}, #amqp_msg{payload = Payload}} -> - ok - end, - amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}). - -rapid_loop(Config, Node, MRef) -> - receive - {'DOWN', MRef, process, _Pid, normal} -> - ok; - {'DOWN', MRef, process, _Pid, Reason} -> - exit({amqp_ops_died, Reason}) - after 0 -> - rabbit_ct_broker_helpers:set_ha_policy(Config, Node, ?POLICY, - <<"all">>), - ok = rabbit_ct_broker_helpers:clear_policy(Config, Node, ?POLICY), - rapid_loop(Config, Node, MRef) - end. 
- -queue_survive_adding_dead_vhost_mirror(Config) -> - rabbit_ct_broker_helpers:force_vhost_failure(Config, 1, <<"/">>), - NodeA = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - ChA = rabbit_ct_client_helpers:open_channel(Config, NodeA), - QName = <<"queue_survive_adding_dead_vhost_mirror-q-1">>, - amqp_channel:call(ChA, #'queue.declare'{queue = QName}), - Q = find_queue(QName, NodeA), - Pid = proplists:get_value(pid, Q), - rabbit_ct_broker_helpers:set_ha_policy_all(Config), - %% Queue should not fail - Q1 = find_queue(QName, NodeA), - Pid = proplists:get_value(pid, Q1). - -%% Vhost deletion needs to successfully tear down policies and queues -%% with policies. At least smoke-test that it doesn't blow up. -vhost_deletion(Config) -> - A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - rabbit_ct_broker_helpers:set_ha_policy_all(Config), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - amqp_channel:call(ACh, #'queue.declare'{queue = <<"vhost_deletion-q">>}), - ok = rpc:call(A, rabbit_vhost, delete, [<<"/">>, <<"acting-user">>]), - ok. - -force_delete_if_no_master(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>, - <<"all">>), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - [begin - amqp_channel:call(ACh, #'queue.declare'{queue = Q, - durable = true}), - rabbit_ct_client_helpers:publish(ACh, Q, 10) - end || Q <- [<<"ha.nopromote.test1">>, <<"ha.nopromote.test2">>]], - ok = rabbit_ct_broker_helpers:restart_node(Config, B), - ok = rabbit_ct_broker_helpers:stop_node(Config, A), - - BCh = rabbit_ct_client_helpers:open_channel(Config, B), - ?assertExit( - {{shutdown, {server_initiated_close, 404, _}}, _}, - amqp_channel:call( - BCh, #'queue.declare'{queue = <<"ha.nopromote.test1">>, - durable = true})), - - BCh1 = rabbit_ct_client_helpers:open_channel(Config, B), - ?assertExit( - {{shutdown, {server_initiated_close, 404, _}}, _}, - amqp_channel:call( - BCh1, #'queue.declare'{queue = <<"ha.nopromote.test2">>, - durable = true})), - BCh2 = rabbit_ct_client_helpers:open_channel(Config, B), - #'queue.delete_ok'{} = - amqp_channel:call(BCh2, #'queue.delete'{queue = <<"ha.nopromote.test1">>}), - %% Delete with if_empty will fail, since we don't know if the queue is empty - ?assertExit( - {{shutdown, {server_initiated_close, 406, _}}, _}, - amqp_channel:call(BCh2, #'queue.delete'{queue = <<"ha.nopromote.test2">>, - if_empty = true})), - BCh3 = rabbit_ct_client_helpers:open_channel(Config, B), - #'queue.delete_ok'{} = - amqp_channel:call(BCh3, #'queue.delete'{queue = <<"ha.nopromote.test2">>}), - ok. 
- -promote_on_failure(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.promote">>, - <<"all">>, [{<<"ha-promote-on-failure">>, <<"always">>}]), - rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>, - <<"all">>, [{<<"ha-promote-on-failure">>, <<"when-synced">>}]), - - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - [begin - amqp_channel:call(ACh, #'queue.declare'{queue = Q, - durable = true}), - rabbit_ct_client_helpers:publish(ACh, Q, 10) - end || Q <- [<<"ha.promote.test">>, <<"ha.nopromote.test">>]], - ok = rabbit_ct_broker_helpers:restart_node(Config, B), - ok = rabbit_ct_broker_helpers:kill_node(Config, A), - BCh = rabbit_ct_client_helpers:open_channel(Config, B), - #'queue.declare_ok'{message_count = 0} = - amqp_channel:call( - BCh, #'queue.declare'{queue = <<"ha.promote.test">>, - durable = true}), - ?assertExit( - {{shutdown, {server_initiated_close, 404, _}}, _}, - amqp_channel:call( - BCh, #'queue.declare'{queue = <<"ha.nopromote.test">>, - durable = true})), - ok = rabbit_ct_broker_helpers:start_node(Config, A), - ACh2 = rabbit_ct_client_helpers:open_channel(Config, A), - #'queue.declare_ok'{message_count = 10} = - amqp_channel:call( - ACh2, #'queue.declare'{queue = <<"ha.nopromote.test">>, - durable = true}), - ok. - -promote_on_shutdown(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.promote">>, - <<"all">>, [{<<"ha-promote-on-shutdown">>, <<"always">>}]), - rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>, - <<"all">>), - rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromoteonfailure">>, - <<"all">>, [{<<"ha-promote-on-failure">>, <<"when-synced">>}, - {<<"ha-promote-on-shutdown">>, <<"always">>}]), - - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - [begin - amqp_channel:call(ACh, #'queue.declare'{queue = Q, - durable = true}), - rabbit_ct_client_helpers:publish(ACh, Q, 10) - end || Q <- [<<"ha.promote.test">>, - <<"ha.nopromote.test">>, - <<"ha.nopromoteonfailure.test">>]], - ok = rabbit_ct_broker_helpers:restart_node(Config, B), - ok = rabbit_ct_broker_helpers:stop_node(Config, A), - BCh = rabbit_ct_client_helpers:open_channel(Config, B), - BCh1 = rabbit_ct_client_helpers:open_channel(Config, B), - #'queue.declare_ok'{message_count = 0} = - amqp_channel:call( - BCh, #'queue.declare'{queue = <<"ha.promote.test">>, - durable = true}), - ?assertExit( - {{shutdown, {server_initiated_close, 404, _}}, _}, - amqp_channel:call( - BCh, #'queue.declare'{queue = <<"ha.nopromote.test">>, - durable = true})), - ?assertExit( - {{shutdown, {server_initiated_close, 404, _}}, _}, - amqp_channel:call( - BCh1, #'queue.declare'{queue = <<"ha.nopromoteonfailure.test">>, - durable = true})), - ok = rabbit_ct_broker_helpers:start_node(Config, A), - ACh2 = rabbit_ct_client_helpers:open_channel(Config, A), - #'queue.declare_ok'{message_count = 10} = - amqp_channel:call( - ACh2, #'queue.declare'{queue = <<"ha.nopromote.test">>, - durable = true}), - #'queue.declare_ok'{message_count = 10} = - amqp_channel:call( - ACh2, #'queue.declare'{queue = <<"ha.nopromoteonfailure.test">>, - durable = true}), - ok. 
- -nodes_policy_should_pick_master_from_its_params(Config) -> - [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, A), - ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A], [all])), - %% --> Master: A - %% Slaves: [B, C] or [C, B] - SSPids = ?awaitMatch(SSPids when is_list(SSPids), - proplists:get_value(synchronised_slave_pids, - find_queue(?QNAME, A)), - 10000), - - %% Choose mirror that isn't the first sync mirror. Cover a bug that always - %% chose the first, even if it was not part of the policy - LastSlave = node(lists:last(SSPids)), - ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A], - [{nodes, [LastSlave]}])), - %% --> Master: B or C (depends on the order of current mirrors ) - %% Slaves: [] - - %% Now choose a new master that isn't synchronised. The previous - %% policy made sure that the queue only runs on one node (the last - %% from the initial synchronised list). Thus, by taking the first - %% node from this list, we know it is not synchronised. - %% - %% Because the policy doesn't cover any synchronised mirror, RabbitMQ - %% should instead use an existing synchronised mirror as the new master, - %% even though that isn't in the policy. - ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A], - [{nodes, [LastSlave, A]}])), - %% --> Master: B or C (same as previous policy) - %% Slaves: [A] - - NewMaster = node(erlang:hd(SSPids)), - ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A], - [{nodes, [NewMaster]}])), - %% --> Master: B or C (the other one compared to previous policy) - %% Slaves: [] - - amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}), - _ = rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY). - -follower_recovers_after_vhost_failure(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - rabbit_ct_broker_helpers:set_ha_policy_all(Config), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - QName = <<"follower_recovers_after_vhost_failure-q">>, - amqp_channel:call(ACh, #'queue.declare'{queue = QName}), - timer:sleep(500), - assert_followers(A, QName, {A, [B]}, [{A, []}]), - - %% Crash vhost on a node hosting a mirror - {ok, Sup} = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_vhost_sup_sup, get_vhost_sup, [<<"/">>]), - exit(Sup, foo), - - assert_followers(A, QName, {A, [B]}, [{A, []}]). - -follower_recovers_after_vhost_down_and_up(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - rabbit_ct_broker_helpers:set_ha_policy_all(Config), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - QName = <<"follower_recovers_after_vhost_down_and_up-q">>, - amqp_channel:call(ACh, #'queue.declare'{queue = QName}), - timer:sleep(200), - assert_followers(A, QName, {A, [B]}, [{A, []}]), - - %% Crash vhost on a node hosting a mirror - rabbit_ct_broker_helpers:force_vhost_failure(Config, B, <<"/">>), - %% rabbit_ct_broker_helpers:force_vhost_failure/2 will retry up to 10 times to - %% make sure that the top vhost supervision tree process did go down. MK. - timer:sleep(500), - %% Vhost is back up - case rabbit_ct_broker_helpers:rpc(Config, B, rabbit_vhost_sup_sup, start_vhost, [<<"/">>]) of - {ok, _Sup} -> ok; - {error,{already_started, _Sup}} -> ok - end, - - assert_followers(A, QName, {A, [B]}, [{A, []}]). 
- -master_migrates_on_vhost_down(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - rabbit_ct_broker_helpers:set_ha_policy_all(Config), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - QName = <<"master_migrates_on_vhost_down-q">>, - amqp_channel:call(ACh, #'queue.declare'{queue = QName}), - timer:sleep(500), - assert_followers(A, QName, {A, [B]}, [{A, []}]), - - %% Crash vhost on the node hosting queue master - rabbit_ct_broker_helpers:force_vhost_failure(Config, A, <<"/">>), - timer:sleep(500), - assert_followers(A, QName, {B, []}). - -follower_recovers_after_vhost_down_and_master_migrated(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - rabbit_ct_broker_helpers:set_ha_policy_all(Config), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - QName = <<"follower_recovers_after_vhost_down_and_master_migrated-q">>, - amqp_channel:call(ACh, #'queue.declare'{queue = QName}), - timer:sleep(500), - assert_followers(A, QName, {A, [B]}, [{A, []}]), - %% Crash vhost on the node hosting queue master - rabbit_ct_broker_helpers:force_vhost_failure(Config, A, <<"/">>), - timer:sleep(500), - assert_followers(B, QName, {B, []}), - - %% Restart the vhost on the node (previously) hosting queue master - case rabbit_ct_broker_helpers:rpc(Config, A, rabbit_vhost_sup_sup, start_vhost, [<<"/">>]) of - {ok, _Sup} -> ok; - {error,{already_started, _Sup}} -> ok - end, - timer:sleep(500), - assert_followers(B, QName, {B, [A]}, [{B, []}]). - -random_policy(Config) -> - run_proper(fun prop_random_policy/1, [Config]). - -failing_random_policies(Config) -> - [A, B | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - %% Those set of policies were found as failing by PropEr in the - %% `random_policy` test above. We add them explicitly here to make - %% sure they get tested. - ?assertEqual(true, test_random_policy(Config, Nodes, - [{nodes, [A, B]}, {nodes, [A]}])), - ?assertEqual(true, test_random_policy(Config, Nodes, - [{exactly, 3}, undefined, all, {nodes, [B]}])), - ?assertEqual(true, test_random_policy(Config, Nodes, - [all, undefined, {exactly, 2}, all, {exactly, 3}, {exactly, 3}, - undefined, {exactly, 3}, all])). - -promote_follower_after_standalone_restart(Config) -> - %% Tests that mirrors can be brought up standalone after forgetting the rest - %% of the cluster. Slave ordering should be irrelevant. 
- %% https://github.com/rabbitmq/rabbitmq-server/issues/1213 - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, A), - - rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, <<"all">>), - amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME, - durable = true}), - - rabbit_ct_client_helpers:publish(Ch, ?QNAME, 15), - rabbit_ct_client_helpers:close_channel(Ch), - - rabbit_ct_helpers:await_condition(fun() -> - 15 =:= proplists:get_value(messages, find_queue(?QNAME, A)) - end, 60000), - - rabbit_ct_broker_helpers:stop_node(Config, C), - rabbit_ct_broker_helpers:stop_node(Config, B), - rabbit_ct_broker_helpers:stop_node(Config, A), - - %% Restart one mirror - forget_cluster_node(Config, B, C), - forget_cluster_node(Config, B, A), - - ok = rabbit_ct_broker_helpers:start_node(Config, B), - rabbit_ct_helpers:await_condition(fun() -> - 15 =:= proplists:get_value(messages, find_queue(?QNAME, B)) - end, 60000), - ok = rabbit_ct_broker_helpers:stop_node(Config, B), - - %% Restart the other - forget_cluster_node(Config, C, B), - forget_cluster_node(Config, C, A), - - ok = rabbit_ct_broker_helpers:start_node(Config, C), - 15 = proplists:get_value(messages, find_queue(?QNAME, C)), - ok = rabbit_ct_broker_helpers:stop_node(Config, C), - - ok. - -rebalance_all(Config) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - - Q1 = <<"q1">>, - Q2 = <<"q2">>, - Q3 = <<"q3">>, - Q4 = <<"q4">>, - Q5 = <<"q5">>, - - amqp_channel:call(ACh, #'queue.declare'{queue = Q1}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q2}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q3}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q4}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q5}), - rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"q.*">>, <<"all">>), - timer:sleep(1000), - - rabbit_ct_client_helpers:publish(ACh, Q1, 5), - rabbit_ct_client_helpers:publish(ACh, Q2, 3), - assert_followers(A, Q1, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]), - assert_followers(A, Q2, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]), - assert_followers(A, Q3, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]), - assert_followers(A, Q4, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]), - assert_followers(A, Q5, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]), - - {ok, Summary} = rpc:call(A, rabbit_amqqueue, rebalance, [classic, ".*", ".*"]), - - %% Check that we have at most 2 queues per node - Condition1 = fun() -> - lists:all(fun(NodeData) -> - lists:all(fun({_, V}) when is_integer(V) -> V =< 2; - (_) -> true end, - NodeData) - end, Summary) - end, - rabbit_ct_helpers:await_condition(Condition1, 60000), - - %% Check that Q1 and Q2 haven't moved - assert_followers(A, Q1, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]), - assert_followers(A, Q2, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]), - - ok. 
- -rebalance_exactly(Config) -> - [A, _, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - - Q1 = <<"q1">>, - Q2 = <<"q2">>, - Q3 = <<"q3">>, - Q4 = <<"q4">>, - Q5 = <<"q5">>, - - amqp_channel:call(ACh, #'queue.declare'{queue = Q1}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q2}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q3}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q4}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q5}), - rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"q.*">>, {<<"exactly">>, 2}), - timer:sleep(1000), - - %% Rebalancing happens with existing mirrors. Thus, before we - %% can verify it works as expected, we need the queues to be on - %% different mirrors. - %% - %% We only test Q3, Q4 and Q5 because the first two are expected to - %% stay where they are. - ensure_queues_are_mirrored_on_different_mirrors([Q3, Q4, Q5], A, ACh), - - rabbit_ct_client_helpers:publish(ACh, Q1, 5), - rabbit_ct_client_helpers:publish(ACh, Q2, 3), - - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q3, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q4, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q5, A)))), - - {ok, Summary} = rpc:call(A, rabbit_amqqueue, rebalance, [classic, ".*", ".*"]), - - %% Check that we have at most 2 queues per node - Condition1 = fun() -> - lists:all(fun(NodeData) -> - lists:all(fun({_, V}) when is_integer(V) -> V =< 2; - (_) -> true end, - NodeData) - end, Summary) - end, - rabbit_ct_helpers:await_condition(Condition1, 60000), - - %% Check that Q1 and Q2 haven't moved - Condition2 = fun () -> - A =:= node(proplists:get_value(pid, find_queue(Q1, A))) andalso - A =:= node(proplists:get_value(pid, find_queue(Q2, A))) - end, - rabbit_ct_helpers:await_condition(Condition2, 40000), - - ok. - -ensure_queues_are_mirrored_on_different_mirrors(Queues, Master, Ch) -> - SNodes = [node(SPid) - || Q <- Queues, - SPid <- proplists:get_value(slave_pids, find_queue(Q, Master))], - UniqueSNodes = lists:usort(SNodes), - case UniqueSNodes of - [_] -> - %% All passed queues are on the same mirror. Let's redeclare - %% one of them and test again. - Q = hd(Queues), - amqp_channel:call(Ch, #'queue.delete'{queue = Q}), - amqp_channel:call(Ch, #'queue.declare'{queue = Q}), - ensure_queues_are_mirrored_on_different_mirrors(Queues, Master, Ch); - _ -> - ok - end. 
- -rebalance_nodes(Config) -> - [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - - Q1 = <<"q1">>, - Q2 = <<"q2">>, - Q3 = <<"q3">>, - Q4 = <<"q4">>, - Q5 = <<"q5">>, - - amqp_channel:call(ACh, #'queue.declare'{queue = Q1}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q2}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q3}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q4}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q5}), - rabbit_ct_broker_helpers:set_ha_policy( - Config, A, <<"q.*">>, - {<<"nodes">>, [atom_to_binary(A), atom_to_binary(B)]}), - timer:sleep(1000), - - rabbit_ct_client_helpers:publish(ACh, Q1, 5), - rabbit_ct_client_helpers:publish(ACh, Q2, 3), - - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q3, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q4, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q5, A)))), - - {ok, Summary} = rpc:call(A, rabbit_amqqueue, rebalance, [classic, ".*", ".*"]), - - %% Check that we have at most 3 queues per node - ?assert(lists:all(fun(NodeData) -> - lists:all(fun({_, V}) when is_integer(V) -> V =< 3; - (_) -> true end, - NodeData) - end, Summary)), - %% Check that Q1 and Q2 haven't moved - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))), - - ok. - -rebalance_multiple_blocked(Config) -> - [A, _, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - Q1 = <<"q1">>, - Q2 = <<"q2">>, - Q3 = <<"q3">>, - Q4 = <<"q4">>, - Q5 = <<"q5">>, - amqp_channel:call(ACh, #'queue.declare'{queue = Q1}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q2}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q3}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q4}), - amqp_channel:call(ACh, #'queue.declare'{queue = Q5}), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q3, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q4, A)))), - ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q5, A)))), - ?assert(rabbit_ct_broker_helpers:rpc( - Config, A, - ?MODULE, rebalance_multiple_blocked1, [Config])). - -rebalance_multiple_blocked1(_) -> - Parent = self(), - Fun = fun() -> - Parent ! rabbit_amqqueue:rebalance(classic, ".*", ".*") - end, - spawn(Fun), - spawn(Fun), - Rets = [receive Ret1 -> Ret1 end, - receive Ret2 -> Ret2 end], - lists:member({error, rebalance_in_progress}, Rets). - -%%---------------------------------------------------------------------------- - -assert_followers(RPCNode, QName, Exp) -> - assert_followers(RPCNode, QName, Exp, []). - -assert_followers(RPCNode, QName, Exp, PermittedIntermediate) -> - assert_followers0(RPCNode, QName, Exp, - [{get(previous_exp_m_node), get(previous_exp_s_nodes)} | - PermittedIntermediate], 1000). 
- -assert_followers0(_RPCNode, _QName, [], _PermittedIntermediate, _Attempts) -> - error(invalid_expectation); -assert_followers0(RPCNode, QName, [{ExpMNode, ExpSNodes}|T], PermittedIntermediate, Attempts) -> - case assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts, nofail) of - ok -> - ok; - failed -> - assert_followers0(RPCNode, QName, T, PermittedIntermediate, Attempts - 1) - end; -assert_followers0(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts) -> - assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts, fail). - -assert_followers1(_RPCNode, _QName, _Exp, _PermittedIntermediate, 0, fail) -> - error(give_up_waiting_for_followers); -assert_followers1(_RPCNode, _QName, _Exp, _PermittedIntermediate, 0, nofail) -> - failed; -assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts, FastFail) -> - Q = find_queue(QName, RPCNode), - Pid = proplists:get_value(pid, Q), - SPids = proplists:get_value(slave_pids, Q), - ActMNode = node(Pid), - ActSNodes = case SPids of - '' -> ''; - _ -> [node(SPid) || SPid <- SPids] - end, - case ExpMNode =:= ActMNode andalso equal_list(ExpSNodes, ActSNodes) of - false -> - %% It's an async change, so if nothing has changed let's - %% just wait - of course this means if something does not - %% change when expected then we time out the test which is - %% a bit tedious - case [{PermMNode, PermSNodes} || {PermMNode, PermSNodes} <- PermittedIntermediate, - PermMNode =:= ActMNode, - equal_list(PermSNodes, ActSNodes)] of - [] -> - case FastFail of - fail -> - ct:fail("Expected ~tp / ~tp, got ~tp / ~tp~nat ~tp~n", - [ExpMNode, ExpSNodes, ActMNode, ActSNodes, - get_stacktrace()]); - nofail -> - failed - end; - State -> - ct:pal("Waiting to leave state ~tp~n Waiting for ~tp~n", - [State, {ExpMNode, ExpSNodes}]), - timer:sleep(200), - assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, - PermittedIntermediate, - Attempts - 1, FastFail) - end; - true -> - put(previous_exp_m_node, ExpMNode), - put(previous_exp_s_nodes, ExpSNodes), - ok - end. - -equal_list('', '') -> true; -equal_list('', _Act) -> false; -equal_list(_Exp, '') -> false; -equal_list([], []) -> true; -equal_list(_Exp, []) -> false; -equal_list([], _Act) -> false; -equal_list([H|T], Act) -> case lists:member(H, Act) of - true -> equal_list(T, Act -- [H]); - false -> false - end. - -find_queue(QName, RPCNode) -> - find_queue(QName, RPCNode, 1000). - -find_queue(QName, RPCNode, 0) -> error({did_not_find_queue, QName, RPCNode}); -find_queue(QName, RPCNode, Attempts) -> - Qs = rpc:call(RPCNode, rabbit_amqqueue, info_all, [?VHOST], infinity), - case find_queue0(QName, Qs) of - did_not_find_queue -> timer:sleep(100), - find_queue(QName, RPCNode, Attempts - 1); - Q -> Q - end. - -find_queue0(QName, Qs) -> - case [Q || Q <- Qs, proplists:get_value(name, Q) =:= - rabbit_misc:r(?VHOST, queue, QName)] of - [R] -> R; - [] -> did_not_find_queue - end. - -get_stacktrace() -> - try - throw(e) - catch - _:e:Stacktrace -> - Stacktrace - end. - -%%---------------------------------------------------------------------------- -run_proper(Fun, Args) -> - ?assertEqual(true, - proper:counterexample(erlang:apply(Fun, Args), - [{numtests, 25}, - {on_output, fun(F, A) -> ct:pal(?LOW_IMPORTANCE, F, A) end}])). 
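For illustration, a wrapper like run_proper/2 above is normally driven from a plain Common Test case; the testcase name below is hypothetical, but prop_random_policy/1 is the property defined in the next hunk:

%% Hypothetical testcase wiring: hand the property over to PropEr via the
%% wrapper above; a counterexample makes the ?assertEqual(true, ...) fail.
random_policy(Config) ->
    run_proper(fun prop_random_policy/1, [Config]).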
- -prop_random_policy(Config) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs( - Config, nodename), - ?FORALL( - Policies, non_empty(list(policy_gen(Nodes))), - test_random_policy(Config, Nodes, Policies)). - -apply_policy_to_declared_queue(Config, Ch, Nodes, Policies) -> - [NodeA | _] = Nodes, - amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}), - %% Add some load so mirrors can be busy synchronising - rabbit_ct_client_helpers:publish(Ch, ?QNAME, 100000), - %% Apply policies in parallel on all nodes - apply_in_parallel(Config, Nodes, Policies), - %% Give it some time to generate all internal notifications - timer:sleep(2000), - %% Check the result - wait_for_last_policy(?QNAME, NodeA, Policies, 30). - -test_random_policy(Config, Nodes, Policies) -> - [NodeA | _] = Nodes, - Ch = rabbit_ct_client_helpers:open_channel(Config, NodeA), - Result = apply_policy_to_declared_queue(Config, Ch, Nodes, Policies), - %% Cleanup - amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}), - _ = rabbit_ct_broker_helpers:clear_policy(Config, NodeA, ?POLICY), - Result. - -apply_in_parallel(Config, Nodes, Policies) -> - Self = self(), - [spawn_link(fun() -> - [begin - - apply_policy(Config, N, Policy) - end || Policy <- Policies], - Self ! parallel_task_done - end) || N <- Nodes], - [receive - parallel_task_done -> - ok - end || _ <- Nodes]. - -%% Proper generators -policy_gen(Nodes) -> - %% Stop mirroring needs to be called often to trigger rabbitmq-server#803 - frequency([{3, undefined}, - {1, all}, - {1, {nodes, nodes_gen(Nodes)}}, - {1, {exactly, choose(1, 3)}} - ]). - -nodes_gen(Nodes) -> - ?LET(List, non_empty(list(oneof(Nodes))), - sets:to_list(sets:from_list(List))). - -%% Checks -wait_for_last_policy(QueueName, NodeA, TestedPolicies, Tries) -> - %% Ensure the owner/master is able to process a call request, - %% which means that all pending casts have been processed. - %% Use the information returned by owner/master to verify the - %% test result - Info = find_queue(QueueName, NodeA), - Pid = proplists:get_value(pid, Info), - Node = node(Pid), - %% Gets owner/master - case rpc:call(Node, gen_server, call, [Pid, info], 5000) of - {badrpc, _} -> - %% The queue is probably being migrated to another node. - %% Let's wait a bit longer. - timer:sleep(1000), - wait_for_last_policy(QueueName, NodeA, TestedPolicies, Tries - 1); - Result -> - FinalInfo = case Result of - {ok, I} -> I; - _ when is_list(Result) -> - Result - end, - %% The last policy is the final state - LastPolicy = lists:last(TestedPolicies), - case verify_policy(LastPolicy, FinalInfo) of - true -> - true; - false when Tries =:= 1 -> - Policies = rpc:call(Node, rabbit_policy, list, [], 5000), - ct:pal( - "Last policy not applied:~n" - " Queue node: ~ts (~tp)~n" - " Queue info: ~tp~n" - " Configured policies: ~tp~n" - " Tested policies: ~tp", - [Node, Pid, FinalInfo, Policies, TestedPolicies]), - false; - false -> - timer:sleep(1000), - wait_for_last_policy(QueueName, NodeA, TestedPolicies, - Tries - 1) - end - end. 
- -verify_policy(undefined, Info) -> - %% If the queue is not mirrored, it returns '' - '' == proplists:get_value(slave_pids, Info); -verify_policy(all, Info) -> - 2 == length(proplists:get_value(slave_pids, Info)); -verify_policy({exactly, 1}, Info) -> - %% If the queue is mirrored, it returns a list - [] == proplists:get_value(slave_pids, Info); -verify_policy({exactly, N}, Info) -> - (N - 1) == length(proplists:get_value(slave_pids, Info)); -verify_policy({nodes, Nodes}, Info) -> - Master = node(proplists:get_value(pid, Info)), - Slaves = [node(P) || P <- proplists:get_value(slave_pids, Info)], - lists:sort(Nodes) == lists:sort([Master | Slaves]). - -%% Policies -apply_policy(Config, N, undefined) -> - _ = rabbit_ct_broker_helpers:clear_policy(Config, N, ?POLICY); -apply_policy(Config, N, all) -> - rabbit_ct_broker_helpers:set_ha_policy( - Config, N, ?POLICY, <<"all">>, - [{<<"ha-sync-mode">>, <<"automatic">>}, {<<"queue-mode">>, <<"lazy">>}]); -apply_policy(Config, N, {nodes, Nodes}) -> - NNodes = [atom_to_binary(Node) || Node <- Nodes], - rabbit_ct_broker_helpers:set_ha_policy( - Config, N, ?POLICY, {<<"nodes">>, NNodes}, - [{<<"ha-sync-mode">>, <<"automatic">>}, {<<"queue-mode">>, <<"lazy">>}]); -apply_policy(Config, N, {exactly, Exactly}) -> - rabbit_ct_broker_helpers:set_ha_policy( - Config, N, ?POLICY, {<<"exactly">>, Exactly}, - [{<<"ha-sync-mode">>, <<"automatic">>}, {<<"queue-mode">>, <<"lazy">>}]). - -forget_cluster_node(Config, Node, NodeToRemove) -> - rabbit_ct_broker_helpers:rabbitmqctl( - Config, Node, ["forget_cluster_node", "--offline", NodeToRemove]). diff --git a/deps/rabbit/test/dynamic_qq_SUITE.erl b/deps/rabbit/test/dynamic_qq_SUITE.erl index 48ea75f67e6a..e87f51c79c46 100644 --- a/deps/rabbit/test/dynamic_qq_SUITE.erl +++ b/deps/rabbit/test/dynamic_qq_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(dynamic_qq_SUITE). @@ -11,8 +11,8 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). --import(quorum_queue_utils, [wait_for_messages_ready/3, - ra_name/1]). +-import(queue_utils, [wait_for_messages_ready/3, + ra_name/1]). -compile(nowarn_export_all). -compile(export_all). @@ -26,12 +26,12 @@ groups() -> [ {clustered, [], [ {cluster_size_3, [], [ - recover_follower_after_standalone_restart, vhost_deletion, + quorum_unaffected_after_vhost_failure, + forget_cluster_node, force_delete_if_no_consensus, takeover_on_failure, - takeover_on_shutdown, - quorum_unaffected_after_vhost_failure + takeover_on_shutdown ]} ]} ]. @@ -49,8 +49,8 @@ end_per_suite(Config) -> init_per_group(clustered, Config) -> rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]); -init_per_group(cluster_size_2, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); +init_per_group(cluster_size_5, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}]); init_per_group(cluster_size_3, Config) -> rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]). 
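Stepping back to the verify_policy/2 clauses at the top of this hunk: for the three-node cluster used in these tests, each tested policy implies a fixed number of mirrors. A hedged sketch of that mapping (expected_mirrors/1 is illustrative, not part of the suite):

%% Assumes a 3-node cluster, matching the checks in verify_policy/2 above.
expected_mirrors(undefined)      -> 0;             %% not mirrored
expected_mirrors(all)            -> 2;             %% mirrored on the other two nodes
expected_mirrors({exactly, N})   -> N - 1;         %% N members including the master
expected_mirrors({nodes, Nodes}) -> length(Nodes) - 1.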
@@ -72,14 +72,13 @@ init_per_testcase(Testcase, Config) -> {rmq_nodename_suffix, Testcase}, {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}, {queue_name, Q}, - {queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]} + {queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 3}]} ]), - Config2 = rabbit_ct_helpers:run_steps( - Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - _ = rabbit_ct_broker_helpers:enable_feature_flag(Config2, message_containers), - Config2 + rabbit_ct_helpers:run_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()) end. end_per_testcase(Testcase, Config) -> @@ -108,17 +107,27 @@ vhost_deletion(Config) -> ok. force_delete_if_no_consensus(Config) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), QName = ?config(queue_name, Config), Args = ?config(queue_args, Config), - amqp_channel:call(ACh, #'queue.declare'{queue = QName, - arguments = Args, - durable = true - }), + amqp_channel:call(Ch, #'queue.declare'{queue = QName, + arguments = Args, + durable = true + }), + rabbit_ct_client_helpers:close_channel(Ch), + + RaName = queue_utils:ra_name(QName), + {ok, [{_, A}, {_, B}, {_, C}], _} = ra:members({RaName, Server}), + + ACh = rabbit_ct_client_helpers:open_channel(Config, A), rabbit_ct_client_helpers:publish(ACh, QName, 10), - ok = rabbit_ct_broker_helpers:restart_node(Config, B), - ok = rabbit_ct_broker_helpers:stop_node(Config, A), + + %% Delete a member on one node + ?assertEqual(ok, + rpc:call(Server, rabbit_quorum_queue, delete_member, + [<<"/">>, QName, B])), + %% stop another node ok = rabbit_ct_broker_helpers:stop_node(Config, C), BCh = rabbit_ct_client_helpers:open_channel(Config, B), @@ -132,6 +141,7 @@ force_delete_if_no_consensus(Config) -> BCh2 = rabbit_ct_client_helpers:open_channel(Config, B), ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(BCh2, #'queue.delete'{queue = QName})), + ok = rabbit_ct_broker_helpers:restart_node(Config, C), ok. takeover_on_failure(Config) -> @@ -141,16 +151,19 @@ takeover_on_shutdown(Config) -> takeover_on(Config, stop_node). takeover_on(Config, Fun) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), QName = ?config(queue_name, Config), Args = ?config(queue_args, Config), - amqp_channel:call(ACh, #'queue.declare'{queue = QName, + amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = Args, durable = true }), - rabbit_ct_client_helpers:publish(ACh, QName, 10), + rabbit_ct_client_helpers:publish(Ch, QName, 10), + + RaName = queue_utils:ra_name(QName), + {ok, [{_, A}, {_, B}, {_, C}], _} = ra:members({RaName, Server}), ok = rabbit_ct_broker_helpers:restart_node(Config, B), ok = rabbit_ct_broker_helpers:Fun(Config, C), @@ -206,53 +219,39 @@ quorum_unaffected_after_vhost_failure(Config) -> end, 60000). -recover_follower_after_standalone_restart(Config) -> - case rabbit_ct_helpers:is_mixed_versions() of - false -> - %% Tests that followers can be brought up standalone after forgetting the - %% rest of the cluster. 
Consensus won't be reached as there is only one node in the - %% new cluster. - Servers = [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, A), - - QName = ?config(queue_name, Config), - Args = ?config(queue_args, Config), - amqp_channel:call(Ch, #'queue.declare'{queue = QName, - arguments = Args, - durable = true - }), - - rabbit_ct_client_helpers:publish(Ch, QName, 15), - rabbit_ct_client_helpers:close_channel(Ch), - - Name = ra_name(QName), - wait_for_messages_ready(Servers, Name, 15), - - rabbit_ct_broker_helpers:stop_node(Config, C), - rabbit_ct_broker_helpers:stop_node(Config, B), - rabbit_ct_broker_helpers:stop_node(Config, A), - - %% Restart one follower - forget_cluster_node(Config, B, C), - forget_cluster_node(Config, B, A), - - ok = rabbit_ct_broker_helpers:start_node(Config, B), - wait_for_messages_ready([B], Name, 15), - ok = rabbit_ct_broker_helpers:stop_node(Config, B), - - %% Restart the other - forget_cluster_node(Config, C, B), - forget_cluster_node(Config, C, A), - - ok = rabbit_ct_broker_helpers:start_node(Config, C), - wait_for_messages_ready([C], Name, 15), - ok = rabbit_ct_broker_helpers:stop_node(Config, C), - ok; - _ -> - {skip, "cannot be run in mixed mode"} - end. +forget_cluster_node(Config) -> + %% Tests that quorum queues shrink when forget_cluster_node + %% operations are issues. + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + + QName = ?config(queue_name, Config), + Args = ?config(queue_args, Config), + amqp_channel:call(Ch, #'queue.declare'{queue = QName, + arguments = Args, + durable = true + }), + + RaName = queue_utils:ra_name(QName), + {ok, [{_, A}, {_, B}, {_, C}], _} = ra:members({RaName, Server}), + Servers = [A, B, C], + + Name = ra_name(QName), + + rabbit_ct_client_helpers:publish(Ch, QName, 15), + wait_for_messages_ready(Servers, Name, 15), + rabbit_ct_client_helpers:close_channel(Ch), + + %% Restart one follower + forget_cluster_node(Config, C, B), + wait_for_messages_ready([C], Name, 15), + forget_cluster_node(Config, C, A), + wait_for_messages_ready([C], Name, 15), + + ok. %%---------------------------------------------------------------------------- forget_cluster_node(Config, Node, NodeToRemove) -> + ok = rabbit_control_helper:command(stop_app, NodeToRemove), rabbit_ct_broker_helpers:rabbitmqctl( - Config, Node, ["forget_cluster_node", "--offline", NodeToRemove]). + Config, Node, ["forget_cluster_node", NodeToRemove]). diff --git a/deps/rabbit/test/eager_sync_SUITE.erl b/deps/rabbit/test/eager_sync_SUITE.erl deleted file mode 100644 index 1ae07d12cbb2..000000000000 --- a/deps/rabbit/test/eager_sync_SUITE.erl +++ /dev/null @@ -1,280 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(eager_sync_SUITE). - --include_lib("common_test/include/ct.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - --compile(export_all). - --define(QNAME, <<"ha.two.test">>). --define(QNAME_AUTO, <<"ha.auto.test">>). --define(MESSAGE_COUNT, 200000). - -all() -> - [ - {group, non_parallel_tests} - ]. 
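Relating to the new forget_cluster_node test above: once a node has been forgotten, the queue's Ra membership should no longer contain a member on that node. A hedged sketch of such a check, reusing only calls already present in the suite (members_exclude/3 itself is hypothetical):

%% Hypothetical check: the Ra members of RaName, as seen from Server,
%% no longer include a member placed on Node.
members_exclude(Server, RaName, Node) ->
    {ok, Members, _Leader} = ra:members({RaName, Server}),
    not lists:any(fun({_, N}) -> N =:= Node end, Members).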
- -groups() -> - [ - {non_parallel_tests, [], [ - eager_sync, - eager_sync_cancel, - eager_sync_auto, - eager_sync_auto_on_policy_change, - eager_sync_requeue - ]} - ]. - -suite() -> - [ - %% If a test hangs, no need to wait for 30 minutes. - {timetrap, {minutes, 15}} - ]. - -%% ------------------------------------------------------------------- -%% Testsuite setup/teardown. -%% ------------------------------------------------------------------- - -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -init_per_group(_, Config) -> - Config. - -end_per_group(_, Config) -> - Config. - -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), - ClusterSize = 3, - TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, ClusterSize}, - {rmq_nodes_clustered, true}, - {rmq_nodename_suffix, Testcase}, - {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} - ]), - Config2 = rabbit_ct_helpers:run_steps( - Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps() ++ [ - fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1, - fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1 - ]), - _ = rabbit_ct_broker_helpers:enable_feature_flag(Config2, message_containers), - Config2. - -end_per_testcase(Testcase, Config) -> - Config1 = rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). - -%% ------------------------------------------------------------------- -%% Testcases. -%% ------------------------------------------------------------------- - -eager_sync(Config) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - %% Queue is on AB but not C. - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - Ch = rabbit_ct_client_helpers:open_channel(Config, C), - amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME, - durable = true}), - - %% Don't sync, lose messages - rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), - restart(Config, A), - restart(Config, B), - rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0), - - %% Sync, keep messages - rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), - restart(Config, A), - ok = sync(C, ?QNAME), - restart(Config, B), - rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT), - - %% Check the no-need-to-sync path - rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), - ok = sync(C, ?QNAME), - rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT), - - %% keep unacknowledged messages - rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), - rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 2), - restart(Config, A), - rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 3), - sync(C, ?QNAME), - restart(Config, B), - rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT), - - ok. - -eager_sync_cancel(Config) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - %% Queue is on AB but not C. 
- ACh = rabbit_ct_client_helpers:open_channel(Config, A), - Ch = rabbit_ct_client_helpers:open_channel(Config, C), - - set_app_sync_batch_size(A), - set_app_sync_batch_size(B), - set_app_sync_batch_size(C), - - amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME, - durable = true}), - {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence - eager_sync_cancel_test2(Config, A, B, C, Ch, 100). - -eager_sync_cancel_test2(_, _, _, _, _, 0) -> - error(no_more_attempts_left); -eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts) -> - %% Sync then cancel - rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), - restart(Config, A), - set_app_sync_batch_size(A), - spawn_link(fun() -> ok = sync_nowait(C, ?QNAME) end), - case wait_for_syncing(C, ?QNAME, 1) of - ok -> - case sync_cancel(C, ?QNAME) of - ok -> - wait_for_running(C, ?QNAME), - restart(Config, B), - set_app_sync_batch_size(B), - rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0), - - {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence - ok; - {ok, not_syncing} -> - %% Damn. Syncing finished between wait_for_syncing/3 and - %% sync_cancel/2 above. Start again. - amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}), - eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts - 1) - end; - synced_already -> - %% Damn. Syncing finished before wait_for_syncing/3. Start again. - amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}), - eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts - 1) - end. - -eager_sync_auto(Config) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - Ch = rabbit_ct_client_helpers:open_channel(Config, C), - amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME_AUTO, - durable = true}), - - %% Sync automatically, don't lose messages - rabbit_ct_client_helpers:publish(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT), - restart(Config, A), - wait_for_sync(C, ?QNAME_AUTO), - restart(Config, B), - wait_for_sync(C, ?QNAME_AUTO), - rabbit_ct_client_helpers:consume(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT), - - ok. - -eager_sync_auto_on_policy_change(Config) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - %% Queue is on AB but not C. - ACh = rabbit_ct_client_helpers:open_channel(Config, A), - Ch = rabbit_ct_client_helpers:open_channel(Config, C), - amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME, - durable = true}), - - %% Sync automatically once the policy is changed to tell us to. - rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT), - restart(Config, A), - Params = [atom_to_binary(N) || N <- [A, B]], - rabbit_ct_broker_helpers:set_ha_policy(Config, - A, <<"^ha.two.">>, {<<"nodes">>, Params}, - [{<<"ha-sync-mode">>, <<"automatic">>}]), - wait_for_sync(C, ?QNAME), - - ok. - -eager_sync_requeue(Config) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - %% Queue is on AB but not C. 
- ACh = rabbit_ct_client_helpers:open_channel(Config, A), - Ch = rabbit_ct_client_helpers:open_channel(Config, C), - amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME, - durable = true}), - - rabbit_ct_client_helpers:publish(Ch, ?QNAME, 2), - {#'basic.get_ok'{delivery_tag = TagA}, _} = - amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}), - {#'basic.get_ok'{delivery_tag = TagB}, _} = - amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}), - amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagA, requeue = true}), - restart(Config, B), - ok = sync(C, ?QNAME), - amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagB, requeue = true}), - rabbit_ct_client_helpers:consume(Ch, ?QNAME, 2), - - ok. - -restart(Config, Node) -> - rabbit_ct_broker_helpers:restart_broker(Config, Node). - -sync(Node, QName) -> - case sync_nowait(Node, QName) of - ok -> wait_for_sync(Node, QName), - ok; - R -> R - end. - -sync_nowait(Node, QName) -> action(Node, sync_queue, QName). -sync_cancel(Node, QName) -> action(Node, cancel_sync_queue, QName). - -wait_for_sync(Node, QName) -> - sync_detection_SUITE:wait_for_sync_status(true, Node, QName). - -action(Node, Action, QName) -> - rabbit_control_helper:command_with_output( - Action, Node, [binary_to_list(QName)], [{"-p", "/"}]). - -queue(Node, QName) -> - QNameRes = rabbit_misc:r(<<"/">>, queue, QName), - {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]), - Q. - -wait_for_syncing(Node, QName, Target) -> - case state(Node, QName) of - {{syncing, _}, _} -> ok; - {running, Target} -> synced_already; - _ -> timer:sleep(100), - wait_for_syncing(Node, QName, Target) - end. - -wait_for_running(Node, QName) -> - case state(Node, QName) of - {running, _} -> ok; - _ -> timer:sleep(100), - wait_for_running(Node, QName) - end. - -state(Node, QName) -> - [{state, State}, {synchronised_slave_pids, Pids}] = - rpc:call(Node, rabbit_amqqueue, info, - [queue(Node, QName), [state, synchronised_slave_pids]]), - {State, length(Pids)}. - -%% eager_sync_cancel_test needs a batch size that's < ?MESSAGE_COUNT -%% in order to pass, because a SyncBatchSize >= ?MESSAGE_COUNT will -%% always finish before the test is able to cancel the sync. -set_app_sync_batch_size(Node) -> - rabbit_control_helper:command( - eval, Node, - ["application:set_env(rabbit, mirroring_sync_batch_size, 1)."]). diff --git a/deps/rabbit/test/event_recorder.erl b/deps/rabbit/test/event_recorder.erl new file mode 100644 index 000000000000..08a621ddcd1c --- /dev/null +++ b/deps/rabbit/test/event_recorder.erl @@ -0,0 +1,71 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% + +-module(event_recorder). +-behaviour(gen_event). + +-include_lib("stdlib/include/assert.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +%% gen_event callbacks +-export([init/1, + handle_event/2, + handle_call/2]). +%% client API +-export([start/1, + stop/1, + get_events/1]). +-export([assert_event_type/2, + assert_event_prop/2]). + +-import(rabbit_ct_broker_helpers, + [get_node_config/3]). + +-define(INIT_STATE, []). + +init(_) -> + {ok, ?INIT_STATE}. + +handle_event(#event{type = T}, State) + when T =:= node_stats orelse + T =:= node_node_stats orelse + T =:= node_node_deleted -> + {ok, State}; +handle_event(Event, State) -> + {ok, [Event | State]}. 
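As a usage illustration for the event_recorder handler introduced in this hunk (the testcase name and event type below are purely illustrative), a suite would typically drive it through its client API:

%% Hypothetical testcase: capture broker events around an operation and
%% assert on the first recorded event (get_events/1 returns them oldest-first).
some_testcase(Config) ->
    ok = event_recorder:start(Config),
    %% ... perform the operation whose events should be captured ...
    Events = event_recorder:get_events(Config),
    ok = event_recorder:stop(Config),
    ?assert(Events =/= []),
    event_recorder:assert_event_type(queue_created, hd(Events)).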
+ +handle_call(take_state, State) -> + {ok, lists:reverse(State), ?INIT_STATE}. + +start(Config) -> + ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config, ?MODULE), + ok = gen_event:add_handler(event_manager_ref(Config), ?MODULE, []). + +stop(Config) -> + ok = gen_event:delete_handler(event_manager_ref(Config), ?MODULE, []). + +get_events(Config) -> + %% events are sent and processed asynchronously + timer:sleep(500), + Result = gen_event:call(event_manager_ref(Config), ?MODULE, take_state), + ?assert(is_list(Result)), + Result. + +event_manager_ref(Config) -> + Node = get_node_config(Config, 0, nodename), + {rabbit_event, Node}. + +assert_event_type(ExpectedType, #event{type = ActualType}) -> + ?assertEqual(ExpectedType, ActualType). + +assert_event_prop(ExpectedProp = {Key, _Value}, #event{props = Props}) -> + ?assertEqual(ExpectedProp, lists:keyfind(Key, 1, Props)); +assert_event_prop(ExpectedProps, Event) + when is_list(ExpectedProps) -> + lists:foreach(fun(P) -> + assert_event_prop(P, Event) + end, ExpectedProps). diff --git a/deps/rabbit/test/exchanges_SUITE.erl b/deps/rabbit/test/exchanges_SUITE.erl index 7f7459cecbfc..e74cd95917e9 100644 --- a/deps/rabbit/test/exchanges_SUITE.erl +++ b/deps/rabbit/test/exchanges_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2022 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(exchanges_SUITE). @@ -19,12 +19,18 @@ suite() -> all() -> [ - {group, mnesia_store} + {group, mnesia_store}, + {group, khepri_store}, + {group, khepri_migration} ]. groups() -> [ - {mnesia_store, [], all_tests()} + {mnesia_store, [], all_tests()}, + {khepri_store, [], all_tests()}, + {khepri_migration, [], [ + from_mnesia_to_khepri + ]} ]. all_tests() -> @@ -47,7 +53,14 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(mnesia_store = Group, Config) -> +init_per_group(mnesia_store = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), + init_per_group_common(Group, Config, 1); +init_per_group(khepri_store = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, khepri}]), + init_per_group_common(Group, Config, 1); +init_per_group(khepri_migration = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), init_per_group_common(Group, Config, 1). 
init_per_group_common(Group, Config, Size) -> @@ -87,7 +100,7 @@ end_per_testcase(Testcase, Config) -> %% ------------------------------------------------------------------- direct_exchange(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), @@ -103,8 +116,8 @@ direct_exchange(Config) -> routing_key = AltQ}), publish(Ch, Direct, Q, <<"msg1">>), publish(Ch, Direct, <<"anyotherkey">>, <<"msg2">>), - - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>], + + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>], [AltQ, <<"0">>, <<"0">>, <<"0">>]]), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg1">>}}, amqp_channel:call(Ch, #'basic.get'{queue = Q})), @@ -116,12 +129,12 @@ direct_exchange(Config) -> topic_exchange(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), - Topic = <<"amq.topic">>, + Topic = <<"amq.topic">>, #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = Topic, queue = Q, routing_key = <<"this.*.rules">>}), @@ -139,7 +152,7 @@ topic_exchange(Config) -> publish(Ch, Topic, <<"simplycarrots">>, <<"msg5">>), publish(Ch, Topic, <<"*.queue.rules">>, <<"msg6">>), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"3">>, <<"3">>, <<"0">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"3">>, <<"3">>, <<"0">>]]), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg1">>}}, amqp_channel:call(Ch, #'basic.get'{queue = Q})), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg2">>}}, @@ -153,21 +166,21 @@ topic_exchange(Config) -> queue = Q, routing_key = <<"#.noclue">>}), publish(Ch, Topic, <<"simplycarrots">>, <<"msg7">>), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"3">>, <<"0">>, <<"3">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"3">>, <<"0">>, <<"3">>]]), publish(Ch, Topic, <<"#.bla">>, <<"msg8">>), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"3">>, <<"0">>, <<"3">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"3">>, <<"0">>, <<"3">>]]), publish(Ch, Topic, <<"#.noclue">>, <<"msg9">>), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"4">>, <<"1">>, <<"3">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"4">>, <<"1">>, <<"3">>]]), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg9">>}}, amqp_channel:call(Ch, #'basic.get'{queue = Q})), ?assertMatch(#'basic.get_empty'{}, amqp_channel:call(Ch, #'basic.get'{queue = Q})), - + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = Topic, queue = Q, routing_key = <<"#">>}), publish(Ch, Topic, <<"simplycarrots">>, <<"msg10">>), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"5">>, <<"1">>, <<"4">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"5">>, <<"1">>, <<"4">>]]), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg10">>}}, amqp_channel:call(Ch, #'basic.get'{queue = Q})), ?assertMatch(#'basic.get_empty'{}, @@ -176,7 +189,7 @@ topic_exchange(Config) -> fanout_exchange(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, 
declare(Ch, Q, [])), @@ -192,8 +205,8 @@ fanout_exchange(Config) -> routing_key = AltQ}), publish(Ch, Fanout, Q, <<"msg1">>), publish(Ch, Fanout, <<"anyotherkey">>, <<"msg2">>), - - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>], + + queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>], [AltQ, <<"2">>, <<"2">>, <<"0">>]]), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg1">>}}, amqp_channel:call(Ch, #'basic.get'{queue = Q})), @@ -241,7 +254,7 @@ headers_exchange(Config) -> publish(Ch, Headers, <<>>, <<"msg2">>, [{<<"foo">>, longstr, <<"bar">>}]), publish(Ch, Headers, <<>>, <<"msg3">>), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>], + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>], [AltQ, <<"2">>, <<"2">>, <<"0">>]]), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg1">>}}, @@ -270,6 +283,56 @@ invalid_exchange(Config) -> queue = Q, routing_key = Q})). +from_mnesia_to_khepri(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + Q = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, [])), + + %% Test transient exchanges + X = ?config(exchange_name, Config), + #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = X, + durable = false}), + + %% Topic is the only exchange type that has its own mnesia/khepri tables. + %% Let's test that the exchange works as expected after migration + Topic = <<"amq.topic">>, + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = Topic, + queue = Q, + routing_key = <<"this.queue.rules">>}), + + Exchanges = lists:sort([rabbit_misc:r(<<"/">>, exchange, <<>>), + rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>), + rabbit_misc:r(<<"/">>, exchange, <<"amq.fanout">>), + rabbit_misc:r(<<"/">>, exchange, <<"amq.headers">>), + rabbit_misc:r(<<"/">>, exchange, <<"amq.match">>), + rabbit_misc:r(<<"/">>, exchange, <<"amq.rabbitmq.trace">>), + rabbit_misc:r(<<"/">>, exchange, <<"amq.topic">>), + rabbit_misc:r(<<"/">>, exchange, X)]), + ?assertEqual( + Exchanges, + lists:sort([X0#exchange.name || + X0 <- rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list, [])])), + + case rabbit_ct_broker_helpers:enable_feature_flag(Config, khepri_db) of + ok -> + rabbit_ct_helpers:await_condition( + fun() -> + RecoveredExchanges = + lists:sort([X0#exchange.name || + X0 <- rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list, [])]), + Exchanges == RecoveredExchanges + end), + publish(Ch, Topic, <<"this.queue.rules">>, <<"msg1">>), + ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg1">>}}, + amqp_channel:call(Ch, #'basic.get'{queue = Q})), + ?assertMatch(#'basic.get_empty'{}, + amqp_channel:call(Ch, #'basic.get'{queue = Q})); + Skip -> + Skip + end. + %% Internal delete_queues() -> @@ -277,7 +340,8 @@ delete_queues() -> || Q <- rabbit_amqqueue:list()]. delete_exchange(Name) -> - _ = rabbit_exchange:delete(rabbit_misc:r(<<"/">>, exchange, Name), false, <<"dummy">>). + ok = rabbit_exchange:ensure_deleted( + rabbit_misc:r(<<"/">>, exchange, Name), false, <<"dummy">>). declare(Ch, Q, Args) -> declare(Ch, Q, Args, true). 
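The from_mnesia_to_khepri case above follows a recurring pattern: enable the khepri_db feature flag, wait until the metadata becomes visible again, and propagate a skip when the flag cannot be enabled (for example in mixed-version clusters). A hedged sketch of that pattern in isolation (migrate_and_check/2 is hypothetical):

%% Hypothetical distilled form of the migration pattern used above.
migrate_and_check(Config, CheckFun) ->
    case rabbit_ct_broker_helpers:enable_feature_flag(Config, khepri_db) of
        ok ->
            rabbit_ct_helpers:await_condition(CheckFun),
            ok;
        Skip ->
            %% typically {skip, Reason}
            Skip
    end.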
diff --git a/deps/rabbit/test/feature_flags_SUITE.erl b/deps/rabbit/test/feature_flags_SUITE.erl index 360959635245..55a469209202 100644 --- a/deps/rabbit/test/feature_flags_SUITE.erl +++ b/deps/rabbit/test/feature_flags_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(feature_flags_SUITE). @@ -25,8 +25,6 @@ registry_general_usage/1, registry_concurrent_reloads/1, - try_to_deadlock_in_registry_reload_1/1, - try_to_deadlock_in_registry_reload_2/1, registry_reset/1, enable_feature_flag_in_a_healthy_situation/1, enable_unsupported_feature_flag_in_a_healthy_situation/1, @@ -104,8 +102,6 @@ groups() -> [ registry_general_usage, registry_concurrent_reloads, - try_to_deadlock_in_registry_reload_1, - try_to_deadlock_in_registry_reload_2, registry_reset ]}, {feature_flags_v2, [], Groups} @@ -548,8 +544,12 @@ registry_concurrent_reloads(_Config) -> %% all added feature flags. timer:sleep(1000), + MRef = erlang:monitor(process, Spammer), unlink(Spammer), - exit(Spammer, normal). + exit(Spammer, kill), + receive {'DOWN', MRef, process, Spammer, _} -> ok end, + %% All feature flags appeared. + ?assertEqual(FinalFFList, ?list_ff(all)). registry_spammer(CurrentFeatureNames, FinalFeatureNames) -> %% Infinite loop. @@ -571,218 +571,6 @@ registry_spammer1(FeatureNames) -> ?assertEqual(FeatureNames, ?list_ff(all)), registry_spammer1(FeatureNames). -try_to_deadlock_in_registry_reload_1(_Config) -> - rabbit_ff_registry_factory:purge_old_registry(rabbit_ff_registry), - _ = code:delete(rabbit_ff_registry), - ?assertEqual(false, code:is_loaded(rabbit_ff_registry)), - - FeatureName = ?FUNCTION_NAME, - FeatureProps = #{provided_by => rabbit, - stability => stable}, - - Parent = self(), - - %% Deadlock steps: - %% * Process A acquires the lock first. - %% * Process B loads the registry stub and waits for the lock. - %% * Process A deletes the registry stub and loads the initialized - %% registry. - %% * Process B wants to purge the deleted registry stub by sending a - %% request to the Code server. - %% - %% => Process B waits forever the return from the Code server because the - %% Code server waits for process B to be runnable again to handle the - %% signal. - - ProcessA = spawn_link( - fun() -> - %% Process A acquires the lock manually first to - %% ensure the ordering of events. It can be acquired - %% recursively, so the feature flag injection can - %% "acquire" it again and proceed. - ct:pal("Process A: Acquire registry loading lock"), - Lock = - rabbit_ff_registry_factory:registry_loading_lock(), - global:set_lock(Lock, [node()]), - receive proceed -> ok end, - - ct:pal( - "Process A: " - "Inject arbitrary feature flag to reload " - "registry"), - rabbit_feature_flags:inject_test_feature_flags( - #{FeatureName => FeatureProps}), - - ct:pal("Process A: Release registry loading lock"), - global:del_lock(Lock, [node()]), - - ct:pal("Process A: Exiting..."), - erlang:unlink(Parent) - end), - timer:sleep(500), - - ProcessB = spawn_link( - fun() -> - %% Process B is the one loading the registry stub and - %% wants to initialize the real registry. 
- ct:pal( - "Process B: " - "Trigger automatic initial registry load"), - FF = rabbit_ff_registry_wrapper:get(FeatureName), - - ct:pal("Process B: Exiting..."), - erlang:unlink(Parent), - Parent ! {self(), FF} - end), - timer:sleep(500), - - begin - {_, StacktraceA} = erlang:process_info(ProcessA, current_stacktrace), - {_, StacktraceB} = erlang:process_info(ProcessB, current_stacktrace), - ct:pal( - "Process stacktraces:~n" - " Process A: ~p~n" - " Process B: ~p", - [StacktraceA, StacktraceB]) - end, - - %% Process A is resumed. Without a proper check, process B would try to - %% purge the copy of the registry it is currently using itself, causing a - %% deadlock because the Code server wants process A to handle a signal, but - %% process A is not runnable. - ProcessA ! proceed, - - ct:pal("Waiting for process B to exit"), - receive - {ProcessB, FF} -> - ?assertEqual(FeatureProps#{name => FeatureName}, FF), - ok - after 10000 -> - {_, StacktraceB} = erlang:process_info( - ProcessB, current_stacktrace), - ct:pal("Process B stuck; stacktrace: ~p", [StacktraceB]), - error(registry_reload_deadlock) - end. - -try_to_deadlock_in_registry_reload_2(_Config) -> - rabbit_ff_registry_factory:purge_old_registry(rabbit_ff_registry), - _ = code:delete(rabbit_ff_registry), - ?assertEqual(false, code:is_loaded(rabbit_ff_registry)), - - FeatureName = ?FUNCTION_NAME, - FeatureProps = #{provided_by => rabbit, - stability => stable}, - - ct:pal("Inject arbitrary feature flag to reload registry"), - rabbit_feature_flags:inject_test_feature_flags( - #{FeatureName => FeatureProps}, - false), - - _ = erlang:process_flag(trap_exit, true), - - ct:pal("Parent ~p: Acquire registry loading lock", [self()]), - Lock = rabbit_ff_registry_factory:registry_loading_lock(), - global:set_lock(Lock, [node()]), - - Parent = self(), - - %% Deadlock steps: - %% * Processes A, B1 and B2 wait for the lock. The registry stub is loaded. - %% * Process B1 acquires the lock. - %% * Process B1 deletes the registry stub and loads the initialized - %% registry. - %% * Process A acquires the lock. - %% * Process A wants to purge the deleted registry stub by sending a - %% request to the Code server. - %% - %% => Process A waits forever the return from the Code server because the - %% Code server waits for process B2 to stop lingering on the deleted - %% registry stub, but process B2 waits for the lock. - - ProcessA = spawn_link( - fun() -> - %% Process A acquires the lock automatically as part - %% of requesting an explicit initialization of the - %% registry. Process A doesn't linger on the registry - %% stub. - ct:pal( - "Process A ~p: " - "Trigger manual initial registry load", - [self()]), - rabbit_ff_registry_factory:initialize_registry(), - - ct:pal("Process A ~p: Exiting...", [self()]), - erlang:unlink(Parent), - Parent ! {self(), done} - end), - - FunB = fun() -> - %% Processes B1 and B2 acquire the lock automatically as - %% part of trying to load the registry as part of they - %% querying a feature flag. - ct:pal( - "Process B ~p: " - "Trigger automatic initial registry load", - [self()]), - _ = rabbit_ff_registry_wrapper:get(FeatureName), - - ct:pal( - "Process B ~p: Exiting...", - [self()]), - erlang:unlink(Parent), - Parent ! {self(), done} - end, - ProcessB1 = spawn_link(FunB), - ProcessB2 = spawn_link(FunB), - timer:sleep(500), - - %% We use `erlang:suspend_process/1' and `erlang:resume_process/1' to - %% ensure the order in which processes acquire the lock. 
- erlang:suspend_process(ProcessA), - erlang:suspend_process(ProcessB1), - erlang:suspend_process(ProcessB2), - timer:sleep(500), - - ct:pal("Parent ~p: Release registry loading lock", [self()]), - global:del_lock(Lock, [node()]), - - erlang:resume_process(ProcessB1), - timer:sleep(500), - erlang:resume_process(ProcessA), - timer:sleep(500), - erlang:resume_process(ProcessB2), - - ct:pal("Waiting for processes to exit"), - Procs = [ProcessA, ProcessB1, ProcessB2], - lists:foreach( - fun(Pid) -> - receive - {Pid, done} -> - ok; - {'EXIT', Pid, Reason} -> - ct:pal("Process ~p exited; reason: ~p", [Pid, Reason]), - error(test_process_killed) - after 10000 -> - lists:foreach( - fun(Pid1) -> - PI = erlang:process_info( - Pid1, current_stacktrace), - case PI of - undefined -> - ok; - {_, Stacktrace} -> - ct:pal( - "Process ~p stuck; " - "stacktrace: ~p", - [Pid1, Stacktrace]) - end - end, Procs), - error(registry_reload_deadlock) - end - end, Procs), - - ok. - registry_reset(_Config) -> %% At first, the registry must be uninitialized. ?assertNot(rabbit_ff_registry:is_registry_initialized()), @@ -924,7 +712,9 @@ do_enable_feature_flag_when_ff_file_is_unwritable(Config) -> %% the `rabbit_ff_controller' process because it was pretty fragile. %% That's why the rest of the testcase is commentted out now. We should %% revisit this at some point. - [?assertEqual(ok, rabbit_ct_broker_helpers:start_node(Config, N)) + [?assertEqual(ok, rabbit_ct_broker_helpers:async_start_node(Config, N)) + || N <- lists:reverse(Nodes)], + [?assertEqual(ok, rabbit_ct_broker_helpers:wait_for_async_start_node(N)) || N <- lists:reverse(Nodes)]. % XXX ?assertEqual( @@ -960,7 +750,7 @@ enable_feature_flag_with_a_network_partition(Config) -> block(NodePairs), %% Wait for the network partition to happen - clustering_utils:assert_cluster_status({All, All, [A, C, D]}, [A, C, D]), + clustering_utils:assert_cluster_status({All, [A, C, D]}, [A, C, D]), %% Enabling the feature flag should fail in the specific case of %% `ff_from_testsuite', if the network is broken. @@ -977,7 +767,7 @@ enable_feature_flag_with_a_network_partition(Config) -> || N <- [A, C, D]], [?assertEqual(ok, rabbit_ct_broker_helpers:start_node(Config, N)) || N <- [A, C, D]], - clustering_utils:assert_cluster_status({All, All, All}, All), + clustering_utils:assert_cluster_status({All, All}, All), declare_arbitrary_feature_flag(Config), %% Enabling the feature flag works. @@ -1013,7 +803,8 @@ mark_feature_flag_as_enabled_with_a_network_partition(Config) -> {B, D}, {B, E}], block(NodePairs), - clustering_utils:assert_cluster_status({AllNodes, AllNodes, [A, C, D, E]}, [A, C, D, E]), + clustering_utils:assert_cluster_status( + {AllNodes, [A, C, D, E]}, [A, C, D, E]), %% Mark the feature flag as enabled on all nodes from node B. This %% is expected to timeout. 
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore deleted file mode 100644 index f6d56e0687e5..000000000000 --- a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -/.erlang.mk/ -/deps/ -/ebin/ -/escript -/plugins/ -/my_plugin.d -/sbin diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl index fa0a63cdf186..f06505a38c1c 100644 --- a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl +++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(my_plugin). diff --git a/deps/rabbit/test/feature_flags_v2_SUITE.erl b/deps/rabbit/test/feature_flags_v2_SUITE.erl index 753dee1f789e..8678d7a2d877 100644 --- a/deps/rabbit/test/feature_flags_v2_SUITE.erl +++ b/deps/rabbit/test/feature_flags_v2_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(feature_flags_v2_SUITE). @@ -11,8 +11,6 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). --include_lib("rabbit_common/include/logging.hrl"). - -export([suite/0, all/0, groups/0, diff --git a/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl b/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl index 5d9928c37f10..d8b627da39d4 100644 --- a/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl +++ b/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(feature_flags_with_unpriveleged_user_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -export([suite/0, diff --git a/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl b/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl index a7675066be20..bf6eff781584 100644 --- a/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl +++ b/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -module(list_consumers_sanity_check_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl b/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl index 7db09969dec8..343f149a54f1 100644 --- a/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl +++ b/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(list_queues_online_and_offline_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). @@ -42,7 +41,7 @@ init_per_group(Group, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, Group}, - {rmq_nodes_count, 2} + {rmq_nodes_count, 3} ]), rabbit_ct_helpers:run_steps( Config1, @@ -65,7 +64,7 @@ end_per_testcase(Testcase, Config) -> %% --------------------------------------------------------------------------- list_queues_online_and_offline(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), ACh = rabbit_ct_client_helpers:open_channel(Config, A), %% Node B will be stopped BCh = rabbit_ct_client_helpers:open_channel(Config, B), @@ -78,7 +77,7 @@ list_queues_online_and_offline(Config) -> rabbit_ct_helpers:await_condition( fun() -> - [A] == rpc:call(A, rabbit_nodes, list_running, []) + lists:sort([A, C]) == lists:sort(rpc:call(A, rabbit_nodes, list_running, [])) end, 60000), GotUp = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A, diff --git a/deps/rabbit/test/logging_SUITE.erl b/deps/rabbit/test/logging_SUITE.erl index 1b023ec8e0fd..0d2ecc8db510 100644 --- a/deps/rabbit/test/logging_SUITE.erl +++ b/deps/rabbit/test/logging_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(logging_SUITE). @@ -13,6 +13,7 @@ -include_lib("kernel/include/logger.hrl"). -include_lib("rabbit_common/include/logging.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). -export([suite/0, all/0, @@ -106,9 +107,12 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config). + Config1 = rabbit_ct_helpers:run_setup_steps(Config), + meck_rabbit_logger_std_h(), + Config1. end_per_suite(Config) -> + unmeck_rabbit_logger_std_h(), Config. init_per_group(syslog_output, Config) -> @@ -185,6 +189,23 @@ end_per_testcase(Testcase, Config) -> end, rabbit_ct_helpers:testcase_finished(Config1, Testcase). 
+meck_rabbit_logger_std_h() -> + ok = meck:new(rabbit_logger_std_h, [no_link, passthrough]), + ok = meck:expect( + rabbit_logger_std_h, io_put_chars, + fun(DEVICE, DATA) -> + %% We log to Common Test log as well. + %% This is the file we use to check + %% the message made it to + %% stdout/stderr. + ct:log("~ts", [DATA]), + io:put_chars(DEVICE, DATA) + end). + +unmeck_rabbit_logger_std_h() -> + ?assert(meck:validate(rabbit_logger_std_h)), + ok = meck:unload(rabbit_logger_std_h). + remove_all_handlers() -> _ = [logger:remove_handler(Id) || #{id := Id} <- logger:get_handler_config()]. @@ -214,13 +235,17 @@ logging_with_default_config_works(Config) -> file := MainFile}}, MainFileHandler), - ?assert(ping_log(rmq_1_file_1, info)), - ?assert(ping_log(rmq_1_file_1, info, - #{domain => ?RMQLOG_DOMAIN_GLOBAL})), - ?assert(ping_log(rmq_1_file_1, info, - #{domain => ['3rd_party']})), - ?assert(ping_log(rmq_1_file_1, info, - #{domain => ?RMQLOG_DOMAIN_UPGRADE})), + ContainsLogEntryFun1 = ping_log(rmq_1_file_1, info), + rabbit_ct_helpers:await_condition(ContainsLogEntryFun1, 30_000), + ContainsLogEntryFun2 = ping_log(rmq_1_file_1, info, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + rabbit_ct_helpers:await_condition(ContainsLogEntryFun2, 30_000), + ContainsLogEntry3 = ping_log(rmq_1_file_1, info, + #{domain => ['3rd_party']}), + rabbit_ct_helpers:await_condition(ContainsLogEntry3, 30_000), + ContainsLogEntry4 = ping_log(rmq_1_file_1, info, + #{domain => ?RMQLOG_DOMAIN_UPGRADE}), + rabbit_ct_helpers:await_condition(ContainsLogEntry4, 30_000), ok. setting_log_levels_in_env_works(Config) -> @@ -252,27 +277,42 @@ setting_log_levels_in_env_works(Config) -> file := MainFile}}, MainFileHandler), - ?assertNot(ping_log(rmq_1_file_1, info)), - ?assertNot(ping_log(rmq_1_file_1, info, - #{domain => ?RMQLOG_DOMAIN_GLOBAL})), - ?assertNot(ping_log(rmq_1_file_1, info, - #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), - ?assertNot(ping_log(rmq_1_file_1, GlobalLevel, - #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), - ?assertNot(ping_log(rmq_1_file_1, info, - #{domain => ['3rd_party']})), - ?assertNot(ping_log(rmq_1_file_1, info, - #{domain => ?RMQLOG_DOMAIN_UPGRADE})), - - ?assert(ping_log(rmq_1_file_1, GlobalLevel)), - ?assert(ping_log(rmq_1_file_1, GlobalLevel, - #{domain => ?RMQLOG_DOMAIN_GLOBAL})), - ?assert(ping_log(rmq_1_file_1, PrelaunchLevel, - #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), - ?assert(ping_log(rmq_1_file_1, GlobalLevel, - #{domain => ['3rd_party']})), - ?assert(ping_log(rmq_1_file_1, GlobalLevel, - #{domain => ?RMQLOG_DOMAIN_UPGRADE})), + ContainsLogEntry1 = ping_log(rmq_1_file_1, info), + ContainsLogEntry2 = ping_log(rmq_1_file_1, info, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ContainsLogEntry3 = ping_log(rmq_1_file_1, info, + #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), + ContainsLogEntry4 = ping_log(rmq_1_file_1, GlobalLevel, + #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), + ContainsLogEntry5 = ping_log(rmq_1_file_1, info, + #{domain => ['3rd_party']}), + ContainsLogEntry6 = ping_log(rmq_1_file_1, info, + #{domain => ?RMQLOG_DOMAIN_UPGRADE}), + %% This is testing that the log entry is NOT present. Random sleeps + %% are not ideal, but in this case we can just wait a reasonable + %% amount of time and then check for absence. 
+ timer:sleep(10_000), + ?assertNot(ContainsLogEntry1()), + ?assertNot(ContainsLogEntry2()), + ?assertNot(ContainsLogEntry3()), + ?assertNot(ContainsLogEntry4()), + ?assertNot(ContainsLogEntry5()), + ?assertNot(ContainsLogEntry6()), + + ContainsLogEntry7 = ping_log(rmq_1_file_1, GlobalLevel), + rabbit_ct_helpers:await_condition(ContainsLogEntry7, 30_000), + ContainsLogEntry8 = ping_log(rmq_1_file_1, GlobalLevel, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + rabbit_ct_helpers:await_condition(ContainsLogEntry8, 30_000), + ContainsLogEntry9 = ping_log(rmq_1_file_1, PrelaunchLevel, + #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), + rabbit_ct_helpers:await_condition(ContainsLogEntry9, 30_000), + ContainsLogEntry10 = ping_log(rmq_1_file_1, GlobalLevel, + #{domain => ['3rd_party']}), + rabbit_ct_helpers:await_condition(ContainsLogEntry10, 30_000), + ContainsLogEntry11 = ping_log(rmq_1_file_1, GlobalLevel, + #{domain => ?RMQLOG_DOMAIN_UPGRADE}), + rabbit_ct_helpers:await_condition(ContainsLogEntry11, 30_000), ok. setting_log_levels_in_config_works(Config) -> @@ -306,27 +346,43 @@ setting_log_levels_in_config_works(Config) -> file := MainFile}}, MainFileHandler), - ?assertNot(ping_log(rmq_1_file_1, info)), - ?assertNot(ping_log(rmq_1_file_1, info, - #{domain => ?RMQLOG_DOMAIN_GLOBAL})), - ?assertNot(ping_log(rmq_1_file_1, info, - #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), - ?assertNot(ping_log(rmq_1_file_1, GlobalLevel, - #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), - ?assertNot(ping_log(rmq_1_file_1, info, - #{domain => ['3rd_party']})), - ?assertNot(ping_log(rmq_1_file_1, info, - #{domain => ?RMQLOG_DOMAIN_UPGRADE})), - - ?assert(ping_log(rmq_1_file_1, GlobalLevel)), - ?assert(ping_log(rmq_1_file_1, GlobalLevel, - #{domain => ?RMQLOG_DOMAIN_GLOBAL})), - ?assert(ping_log(rmq_1_file_1, PrelaunchLevel, - #{domain => ?RMQLOG_DOMAIN_PRELAUNCH})), - ?assert(ping_log(rmq_1_file_1, GlobalLevel, - #{domain => ['3rd_party']})), - ?assert(ping_log(rmq_1_file_1, GlobalLevel, - #{domain => ?RMQLOG_DOMAIN_UPGRADE})), + ContainsLogEntry1 = ping_log(rmq_1_file_1, info), + ContainsLogEntry2 = ping_log(rmq_1_file_1, info, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ContainsLogEntry3 = ping_log(rmq_1_file_1, info, + #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), + ContainsLogEntry4 = ping_log(rmq_1_file_1, GlobalLevel, + #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), + ContainsLogEntry5 = ping_log(rmq_1_file_1, info, + #{domain => ['3rd_party']}), + ContainsLogEntry6 = ping_log(rmq_1_file_1, info, + #{domain => ?RMQLOG_DOMAIN_UPGRADE}), + + %% This is testing that the log entry is NOT present. Random sleeps + %% are not ideal, but in this case we can just wait a reasonable + %% amount of time and then check for absence. 
+ timer:sleep(10_000), + ?assertNot(ContainsLogEntry1()), + ?assertNot(ContainsLogEntry2()), + ?assertNot(ContainsLogEntry3()), + ?assertNot(ContainsLogEntry4()), + ?assertNot(ContainsLogEntry5()), + ?assertNot(ContainsLogEntry6()), + + ContainsLogEntry7 = ping_log(rmq_1_file_1, GlobalLevel), + rabbit_ct_helpers:await_condition(ContainsLogEntry7, 30_000), + ContainsLogEntry8 = ping_log(rmq_1_file_1, GlobalLevel, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + rabbit_ct_helpers:await_condition(ContainsLogEntry8, 30_000), + ContainsLogEntry9 = ping_log(rmq_1_file_1, PrelaunchLevel, + #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), + rabbit_ct_helpers:await_condition(ContainsLogEntry9, 30_000), + ContainsLogEntry10 = ping_log(rmq_1_file_1, GlobalLevel, + #{domain => ['3rd_party']}), + rabbit_ct_helpers:await_condition(ContainsLogEntry10, 30_000), + ContainsLogEntry11 = ping_log(rmq_1_file_1, GlobalLevel, + #{domain => ?RMQLOG_DOMAIN_UPGRADE}), + rabbit_ct_helpers:await_condition(ContainsLogEntry11, 30_000), ok. setting_log_rotation_in_config_works(Config) -> @@ -448,13 +504,17 @@ setting_log_levels_in_config_with_output_overridden_in_env_works(Config) -> config := #{type := standard_io}}, StddevHandler), - ?assert(ping_log(rmq_1_stdout, debug, Config)), - ?assert(ping_log(rmq_1_stdout, debug, - #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config)), - ?assert(ping_log(rmq_1_stdout, debug, - #{domain => ['3rd_party']}, Config)), - ?assert(ping_log(rmq_1_stdout, debug, - #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config)), + ContainsLogEntry1 = ping_log(rmq_1_stdout, debug, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry1, 30_000), + ContainsLogEntry2 = ping_log(rmq_1_stdout, debug, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry2, 30_000), + ContainsLogEntry3 = ping_log(rmq_1_stdout, debug, + #{domain => ['3rd_party']}, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry3, 30_000), + ContainsLogEntry4 = ping_log(rmq_1_stdout, debug, + #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry4, 30_000), ok. setting_message_format_works(Config) -> @@ -661,7 +721,9 @@ formatting_as_json_works(_, Context) -> file := MainFile}}, MainFileHandler), - ?assertNot(ping_log(rmq_1_file_1, info)), + ContainsLogEntry = ping_log(rmq_1_file_1, info), + timer:sleep(10_000), + ?assertNot(ContainsLogEntry()), Metadata = #{atom => rabbit, integer => 1, @@ -848,13 +910,17 @@ logging_to_stddev_works(Stddev, Id, Config, Context) -> config := #{type := Stddev}}, StddevHandler), - ?assert(ping_log(Id, info, Config)), - ?assert(ping_log(Id, info, - #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config)), - ?assert(ping_log(Id, info, - #{domain => ['3rd_party']}, Config)), - ?assert(ping_log(Id, info, - #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config)), + ContainsLogEntry1 = ping_log(Id, info, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry1, 30_000), + ContainsLogEntry2 = ping_log(Id, info, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry2, 30_000), + ContainsLogEntry3 = ping_log(Id, info, + #{domain => ['3rd_party']}, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry3, 30_000), + ContainsLogEntry4 = ping_log(Id, info, + #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry4, 30_000), ok. 
formatting_with_colors_works(Config) -> @@ -892,14 +958,22 @@ formatting_maybe_with_colors_works(Config, Context, _EscSeqs) -> rabbit_prelaunch_logging:clear_config_run_number(), rabbit_prelaunch_logging:setup(Context), - ?assert(ping_log(rmq_1_stdout, debug, Config)), - ?assert(ping_log(rmq_1_stdout, info, Config)), - ?assert(ping_log(rmq_1_stdout, notice, Config)), - ?assert(ping_log(rmq_1_stdout, warning, Config)), - ?assert(ping_log(rmq_1_stdout, error, Config)), - ?assert(ping_log(rmq_1_stdout, critical, Config)), - ?assert(ping_log(rmq_1_stdout, alert, Config)), - ?assert(ping_log(rmq_1_stdout, emergency, Config)), + ContainsLogEntry1 = ping_log(rmq_1_stdout, debug, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry1, 30_000), + ContainsLogEntry2 = ping_log(rmq_1_stdout, info, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry2, 30_000), + ContainsLogEntry3 = ping_log(rmq_1_stdout, notice, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry3, 30_000), + ContainsLogEntry4 = ping_log(rmq_1_stdout, warning, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry4, 30_000), + ContainsLogEntry5 = ping_log(rmq_1_stdout, error, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry5, 30_000), + ContainsLogEntry6 = ping_log(rmq_1_stdout, critical, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry6, 30_000), + ContainsLogEntry7 = ping_log(rmq_1_stdout, alert, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry7, 30_000), + ContainsLogEntry8 = ping_log(rmq_1_stdout, emergency, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry8, 30_000), ok. logging_to_exchange_works(Config) -> @@ -922,18 +996,10 @@ logging_to_exchange_works(Config) -> #{exchange := #resource{name = XName} = Exchange}} = ExchangeHandler, %% Wait for the expected exchange to be automatically declared. - ?assert( - lists:any( - fun(_) -> - Ret = rabbit_ct_broker_helpers:rpc( - Config, 0, - rabbit_exchange, lookup, [Exchange]), - case Ret of - {ok, _} -> true; - _ -> timer:sleep(500), - false - end - end, lists:seq(1, 20))), + ?awaitMatch({ok, _}, rabbit_ct_broker_helpers:rpc( + Config, 0, + rabbit_exchange, lookup, [Exchange]), + 30000), %% Declare a queue to collect all logged messages. 
{Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel( @@ -951,33 +1017,43 @@ logging_to_exchange_works(Config) -> Config1 = rabbit_ct_helpers:set_config( Config, {test_channel_and_queue, {Chan, QName}}), - ?assert(ping_log(rmq_1_exchange, info, Config1)), - ?assert(ping_log(rmq_1_exchange, info, - #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config1)), - ?assert(ping_log(rmq_1_exchange, info, - #{domain => ['3rd_party']}, Config1)), - ?assert(ping_log(rmq_1_exchange, info, - #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config1)), + ContainsLogEntry1 = ping_log(rmq_1_exchange, info, Config1), + rabbit_ct_helpers:await_condition(ContainsLogEntry1, 30_000), + ContainsLogEntry2 = ping_log(rmq_1_exchange, info, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config1), + rabbit_ct_helpers:await_condition(ContainsLogEntry2, 30_000), + ContainsLogEntry3 = ping_log(rmq_1_exchange, info, + #{domain => ['3rd_party']}, Config1), + rabbit_ct_helpers:await_condition(ContainsLogEntry3, 30_000), + ContainsLogEntry4 = ping_log(rmq_1_exchange, info, + #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config1), + rabbit_ct_helpers:await_condition(ContainsLogEntry4, 30_000), %% increase log level ok = rabbit_ct_broker_helpers:rpc( Config, 0, rabbit_prelaunch_logging, set_log_level, [debug]), - ?assert(ping_log(rmq_1_exchange, debug, Config1)), - ?assert(ping_log(rmq_1_exchange, debug, - #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config1)), + ContainsLogEntry5 = ping_log(rmq_1_exchange, debug, Config1), + rabbit_ct_helpers:await_condition(ContainsLogEntry5, 30_000), + ContainsLogEntry6 = ping_log(rmq_1_exchange, debug, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config1), + rabbit_ct_helpers:await_condition(ContainsLogEntry6, 30_000), %% decrease log level ok = rabbit_ct_broker_helpers:rpc( Config, 0, rabbit_prelaunch_logging, set_log_level, [error]), - ?assert(ping_log(rmq_1_exchange, error, Config1)), - ?assert(ping_log(rmq_1_exchange, error, - #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config1)), + ContainsLogEntry7 = ping_log(rmq_1_exchange, error, Config1), + rabbit_ct_helpers:await_condition(ContainsLogEntry7, 30_000), + ContainsLogEntry8 = ping_log(rmq_1_exchange, error, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config1), + rabbit_ct_helpers:await_condition(ContainsLogEntry8, 30_000), - ?assertNot(ping_log(rmq_1_exchange, info, Config1)), + ContainsLogEntry9 = ping_log(rmq_1_exchange, info, Config1), + timer:sleep(10_000), + ?assertNot(ContainsLogEntry9()), amqp_channel:call(Chan, #'queue.delete'{queue = QName}), rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan), @@ -1044,13 +1120,17 @@ logging_to_syslog_works(Config) -> config := #{}}, SyslogHandler), - ?assert(ping_log(rmq_1_syslog, info, Config)), - ?assert(ping_log(rmq_1_syslog, info, - #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config)), - ?assert(ping_log(rmq_1_syslog, info, - #{domain => ['3rd_party']}, Config)), - ?assert(ping_log(rmq_1_syslog, info, - #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config)), + ContainsLogEntry1 = ping_log(rmq_1_syslog, info, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry1, 30_000), + ContainsLogEntry2 = ping_log(rmq_1_syslog, info, + #{domain => ?RMQLOG_DOMAIN_GLOBAL}, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry2, 30_000), + ContainsLogEntry3 = ping_log(rmq_1_syslog, info, + #{domain => ['3rd_party']}, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry3, 30_000), + ContainsLogEntry4 = ping_log(rmq_1_syslog, info, + #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config), + rabbit_ct_helpers:await_condition(ContainsLogEntry4, 
30_000), ok. %% ------------------------------------------------------------------- @@ -1081,6 +1161,11 @@ get_handler_by_id([_ | Rest], Id) -> get_handler_by_id([], _) -> undefined. +%% ping_log calls logger:log/3 and then returns a function that checks +%% the log for the given log entry, returning a boolean. +%% This returned function can be used with an await condition function +%% to ensure the log entry is eventually added to the log. +%% It can also be used to check its absence. ping_log(Id, Level) -> ping_log(Id, Level, #{}, []). @@ -1129,9 +1214,11 @@ check_log1(#{id := Id, Config, 0, rabbit_logger_std_h, filesync, [Id]) end, - {ok, Content} = file:read_file(Filename), - ReOpts = [{capture, none}, multiline], - match =:= re:run(Content, RandomMsg ++ "$", ReOpts); + fun() -> + {ok, Content} = file:read_file(Filename), + ReOpts = [{capture, none}, multiline], + match =:= re:run(Content, RandomMsg ++ "$", ReOpts) + end; check_log1(#{module := Mod, config := #{type := Stddev}} = Handler, Level, @@ -1141,57 +1228,40 @@ check_log1(#{module := Mod, Filename = html_report_filename(Config), {ColorStart, ColorEnd} = get_color_config(Handler, Level), ReOpts = [{capture, none}, multiline], - lists:any( - fun(_) -> - {ok, Content} = file:read_file(Filename), - Regex = - "^" ++ ColorStart ++ ".+" ++ RandomMsg ++ ColorEnd ++ "$", - case re:run(Content, Regex, ReOpts) of - match -> true; - _ -> timer:sleep(500), - false - end - end, lists:seq(1, 10)); + fun() -> + {ok, Content} = file:read_file(Filename), + Regex = + "^" ++ ColorStart ++ ".+" ++ RandomMsg ++ ColorEnd ++ "$", + match =:= re:run(Content, Regex, ReOpts) + end; check_log1(#{module := rabbit_logger_exchange_h}, _Level, RandomMsg, Config) -> {Chan, QName} = ?config(test_channel_and_queue, Config), ReOpts = [{capture, none}, multiline], - lists:any( - fun(_) -> - Ret = amqp_channel:call( - Chan, #'basic.get'{queue = QName, no_ack = false}), - case Ret of - {#'basic.get_ok'{}, #amqp_msg{payload = Content}} -> - case re:run(Content, RandomMsg ++ "$", ReOpts) of - match -> true; - _ -> timer:sleep(500), - false - end; - #'basic.get_empty'{} -> - timer:sleep(500), - false; - Other -> - io:format(standard_error, "OTHER -> ~tp~n", [Other]), - timer:sleep(500), - false - end - end, lists:seq(1, 10)); + fun() -> + Ret = amqp_channel:call( + Chan, #'basic.get'{queue = QName, no_ack = false}), + case Ret of + {#'basic.get_ok'{}, #amqp_msg{payload = Content}} -> + match =:= re:run(Content, RandomMsg ++ "$", ReOpts); + #'basic.get_empty'{} -> + false; + Other -> + io:format(standard_error, "OTHER -> ~tp~n", [Other]), + false + end + end; check_log1(#{module := syslog_logger_h}, _Level, RandomMsg, Config) -> ReOpts = [{capture, none}, multiline], - lists:any( - fun(_) -> - Buffer = get_syslogd_messages(Config), - case re:run(Buffer, RandomMsg ++ "$", ReOpts) of - match -> true; - _ -> timer:sleep(500), - false - end - end, lists:seq(1, 10)). + fun() -> + Buffer = get_syslogd_messages(Config), + match =:= re:run(Buffer, RandomMsg ++ "$", ReOpts) + end. get_random_string(Length, AllowedChars) -> lists:foldl(fun(_, Acc) -> diff --git a/deps/rabbit/test/maintenance_mode_SUITE.erl b/deps/rabbit/test/maintenance_mode_SUITE.erl index 5b398b34878b..f02a5878455f 100644 --- a/deps/rabbit/test/maintenance_mode_SUITE.erl +++ b/deps/rabbit/test/maintenance_mode_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(maintenance_mode_SUITE). @@ -12,6 +12,7 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-compile(nowarn_export_all). -compile(export_all). all() -> @@ -29,7 +30,8 @@ groups() -> maintenance_mode_status, listener_suspension_status, client_connection_closure, - quorum_queue_leadership_transfer + quorum_queue_leadership_transfer, + metadata_store_leadership_transfer ]} ]. @@ -67,6 +69,24 @@ init_per_testcase(quorum_queue_leadership_transfer = Testcase, Config) -> Config1, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()); +init_per_testcase(metadata_store_leadership_transfer = Testcase, Config) -> + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + mnesia -> + {skip, "Leadership transfer does not apply to mnesia"}; + _ -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodes_clustered, true}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + ]), + rabbit_ct_helpers:run_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()) + end; init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase), ClusterSize = ?config(rmq_nodes_count, Config), @@ -79,8 +99,7 @@ init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:run_steps( Config1, rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps() ++ - [fun rabbit_ct_broker_helpers:set_ha_policy_all/1]). + rabbit_ct_client_helpers:setup_steps()). end_per_testcase(Testcase, Config) -> Config1 = rabbit_ct_helpers:run_steps(Config, @@ -242,7 +261,7 @@ quorum_queue_leadership_transfer(Config) -> fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000), %% quorum queue leader election is asynchronous - AllTheSame = quorum_queue_utils:fifo_machines_use_same_version( + AllTheSame = queue_utils:fifo_machines_use_same_version( Config, Nodenames), case AllTheSame of true -> @@ -262,3 +281,31 @@ quorum_queue_leadership_transfer(Config) -> end, rabbit_ct_broker_helpers:revive_node(Config, A). 
+ +metadata_store_leadership_transfer(Config) -> + [A | _] = Nodenames = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + + {_, LeaderNode} = rabbit_ct_broker_helpers:rpc(Config, A, ra_leaderboard, lookup_leader, + [rabbit_khepri:get_ra_cluster_name()]), + rabbit_ct_helpers:await_condition( + fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, LeaderNode) end, + 10000), + rabbit_ct_broker_helpers:drain_node(Config, LeaderNode), + rabbit_ct_helpers:await_condition( + fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, LeaderNode) end, + 10000), + + %% Check it is still functional + [N | _] = Nodenames -- [LeaderNode], + Conn = rabbit_ct_client_helpers:open_connection(Config, N), + {ok, Ch} = amqp_connection:open_channel(Conn), + QName = <<"qq.1">>, + amqp_channel:call(Ch, + #'queue.declare'{queue = QName, durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}), + + {_, NewLeaderNode} = rabbit_ct_broker_helpers:rpc(Config, N, ra_leaderboard, lookup_leader, + [rabbit_khepri:get_ra_cluster_name()]), + ?assertNot(LeaderNode == NewLeaderNode), + rabbit_ct_broker_helpers:revive_node(Config, LeaderNode). diff --git a/deps/rabbit/test/many_node_ha_SUITE.erl b/deps/rabbit/test/many_node_ha_SUITE.erl deleted file mode 100644 index fbaa7f90f80b..000000000000 --- a/deps/rabbit/test/many_node_ha_SUITE.erl +++ /dev/null @@ -1,112 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(many_node_ha_SUITE). - --include_lib("common_test/include/ct.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - --compile(export_all). - -suite() -> - [ - {timetrap, {minutes, 5}} - ]. - -all() -> - [ - {group, cluster_size_6} - ]. - -groups() -> - [ - {cluster_size_6, [], [ - kill_intermediate - ]} - ]. - -%% ------------------------------------------------------------------- -%% Testsuite setup/teardown. -%% ------------------------------------------------------------------- - -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -init_per_group(cluster_size_6, Config) -> - rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 6} - ]). - -end_per_group(_, Config) -> - Config. - -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), - ClusterSize = ?config(rmq_nodes_count, Config), - TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_clustered, true}, - {rmq_nodename_suffix, Testcase}, - {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} - ]), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps() ++ [ - fun rabbit_ct_broker_helpers:set_ha_policy_all/1 - ]). - -end_per_testcase(Testcase, Config) -> - Config1 = rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). 
- -%% ------------------------------------------------------------------- -%% Test Cases -%% ------------------------------------------------------------------- - -kill_intermediate(Config) -> - [A, B, C, D, E, F] = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000), - MasterChannel = rabbit_ct_client_helpers:open_channel(Config, A), - ConsumerChannel = rabbit_ct_client_helpers:open_channel(Config, E), - ProducerChannel = rabbit_ct_client_helpers:open_channel(Config, F), - Queue = <<"test">>, - amqp_channel:call(MasterChannel, #'queue.declare'{queue = Queue, - auto_delete = false}), - - %% TODO: this seems *highly* timing dependant - the assumption being - %% that the kill will work quickly enough that there will still be - %% some messages in-flight that we *must* receive despite the intervening - %% node deaths. It would be nice if we could find a means to do this - %% in a way that is not actually timing dependent. - - %% Worse still, it assumes that killing the master will cause a - %% failover to Slave1, and so on. Nope. - - ConsumerPid = rabbit_ha_test_consumer:create(ConsumerChannel, - Queue, self(), false, Msgs), - - ProducerPid = rabbit_ha_test_producer:create(ProducerChannel, - Queue, self(), false, Msgs), - - %% create a killer for the master and the first 3 mirrors - [rabbit_ct_broker_helpers:kill_node_after(Config, Node, Time) || - {Node, Time} <- [{A, 50}, - {B, 50}, - {C, 100}, - {D, 100}]], - - %% verify that the consumer got all msgs, or die, or time out - rabbit_ha_test_producer:await_response(ProducerPid), - rabbit_ha_test_consumer:await_response(ConsumerPid), - ok. diff --git a/deps/rabbit/test/mc_SUITE.erl b/deps/rabbit/test/mc_unit_SUITE.erl similarity index 56% rename from deps/rabbit/test/mc_SUITE.erl rename to deps/rabbit/test/mc_unit_SUITE.erl index 4d60275d561e..d7fc929005f0 100644 --- a/deps/rabbit/test/mc_SUITE.erl +++ b/deps/rabbit/test/mc_unit_SUITE.erl @@ -1,4 +1,4 @@ --module(mc_SUITE). +-module(mc_unit_SUITE). -compile([export_all, nowarn_export_all]). @@ -24,14 +24,23 @@ groups() -> all_tests() -> [ + mc_util_uuid_to_urn_roundtrip, amqpl_defaults, amqpl_compat, amqpl_table_x_header, amqpl_table_x_header_array_of_tbls, - amqpl_death_records, + amqpl_death_v1_records, + amqpl_death_v2_records, + is_death_cycle, amqpl_amqp_bin_amqpl, amqpl_cc_amqp_bin_amqpl, + amqp_amqpl_amqp_uuid_correlation_id, amqp_amqpl, + amqp_amqpl_message_id_ulong, + amqp_amqpl_amqp_message_id_uuid, + amqp_amqpl_message_id_large, + amqp_amqpl_message_id_binary, + amqp_amqpl_unsupported_values_not_converted, amqp_to_amqpl_data_body, amqp_amqpl_amqp_bodies ]. 
@@ -45,9 +54,7 @@ amqpl_defaults(_Config) -> Payload = [<<"data">>], Content = #content{properties = Props, payload_fragments_rev = Payload}, - Anns = #{exchange => <<"exch">>, - routing_keys => [<<"apple">>]}, - Msg = mc:init(mc_amqpl, Content, Anns), + Msg = mc:init(mc_amqpl, Content, annotations()), ?assertEqual(undefined, mc:priority(Msg)), ?assertEqual(false, mc:is_persistent(Msg)), @@ -104,7 +111,6 @@ amqpl_compat(_Config) -> ?assertEqual({utf8, <<"apple">>}, mc:x_header(<<"x-stream-filter">>, Msg)), RoutingHeaders = mc:routing_headers(Msg, []), - ct:pal("routing headers ~p", [RoutingHeaders]), ?assertMatch(#{<<"a-binary">> := <<"data">>, <<"a-bool">> := false, <<"a-double">> := 1.0, @@ -145,9 +151,7 @@ amqpl_table_x_header(_Config) -> Payload = [<<"data">>], Content = #content{properties = Props, payload_fragments_rev = Payload}, - Anns = #{exchange => <<"exch">>, - routing_keys => [<<"apple">>]}, - Msg = mc:init(mc_amqpl, Content, Anns), + Msg = mc:init(mc_amqpl, Content, annotations()), %% x-header values come back AMQP 1.0 ish formatted ?assertMatch({map, @@ -175,9 +179,7 @@ amqpl_table_x_header_array_of_tbls(_Config) -> Payload = [<<"data">>], Content = #content{properties = Props, payload_fragments_rev = Payload}, - Anns = #{exchange => <<"exch">>, - routing_keys => [<<"apple">>]}, - Msg = mc:init(mc_amqpl, Content, Anns), + Msg = mc:init(mc_amqpl, Content, annotations()), ?assertMatch({list, [{map, [{{symbol, <<"type">>}, {utf8, <<"apple">>}}, @@ -191,29 +193,29 @@ amqpl_table_x_header_array_of_tbls(_Config) -> ok. -amqpl_death_records(_Config) -> +amqpl_death_v1_records(_Config) -> + ok = amqpl_death_records(#{?FF_MC_DEATHS_V2 => false}). + +amqpl_death_v2_records(_Config) -> + ok = amqpl_death_records(#{?FF_MC_DEATHS_V2 => true}). + +amqpl_death_records(Env) -> Content = #content{class_id = 60, properties = #'P_basic'{headers = []}, payload_fragments_rev = [<<"data">>]}, - Anns = #{exchange => <<"exch">>, - routing_keys => [<<"apple">>]}, - Msg0 = mc:prepare(store, mc:init(mc_amqpl, Content, Anns)), + Msg0 = mc:prepare(store, mc:init(mc_amqpl, Content, annotations())), - Msg1 = mc:record_death(rejected, <<"q1">>, Msg0), + Msg1 = mc:record_death(rejected, <<"q1">>, Msg0, Env), ?assertEqual([<<"q1">>], mc:death_queue_names(Msg1)), - ?assertMatch({{<<"q1">>, rejected}, - #death{exchange = <<"exch">>, - routing_keys = [<<"apple">>], - count = 1}}, mc:last_death(Msg1)), ?assertEqual(false, mc:is_death_cycle(<<"q1">>, Msg1)), #content{properties = #'P_basic'{headers = H1}} = mc:protocol_state(Msg1), ?assertMatch({_, array, [_]}, header(<<"x-death">>, H1)), ?assertMatch({_, longstr, <<"q1">>}, header(<<"x-first-death-queue">>, H1)), - ?assertMatch({_, longstr, <<"q1">>}, header(<<"x-last-death-queue">>, H1)), ?assertMatch({_, longstr, <<"exch">>}, header(<<"x-first-death-exchange">>, H1)), - ?assertMatch({_, longstr, <<"exch">>}, header(<<"x-last-death-exchange">>, H1)), ?assertMatch({_, longstr, <<"rejected">>}, header(<<"x-first-death-reason">>, H1)), + ?assertMatch({_, longstr, <<"q1">>}, header(<<"x-last-death-queue">>, H1)), + ?assertMatch({_, longstr, <<"exch">>}, header(<<"x-last-death-exchange">>, H1)), ?assertMatch({_, longstr, <<"rejected">>}, header(<<"x-last-death-reason">>, H1)), {_, array, [{table, T1}]} = header(<<"x-death">>, H1), ?assertMatch({_, long, 1}, header(<<"count">>, T1)), @@ -224,26 +226,80 @@ amqpl_death_records(_Config) -> ?assertMatch({_, array, [{longstr, <<"apple">>}]}, header(<<"routing-keys">>, T1)), - %% second dead letter, e.g. 
a ttl reason returning to source queue + %% second dead letter, e.g. an expired reason returning to source queue %% record_death uses a timestamp for death record ordering, ensure %% it is definitely higher than the last timestamp taken timer:sleep(2), - Msg2 = mc:record_death(ttl, <<"dl">>, Msg1), + Msg2 = mc:record_death(expired, <<"dl">>, Msg1, Env), #content{properties = #'P_basic'{headers = H2}} = mc:protocol_state(Msg2), {_, array, [{table, T2a}, {table, T2b}]} = header(<<"x-death">>, H2), ?assertMatch({_, longstr, <<"dl">>}, header(<<"queue">>, T2a)), ?assertMatch({_, longstr, <<"q1">>}, header(<<"queue">>, T2b)), - - ct:pal("H2 ~p", [T2a]), - ct:pal("routing headers ~p", [mc:routing_headers(Msg2, [x_headers])]), - - - - ok. +is_death_cycle(_Config) -> + Content = #content{class_id = 60, + properties = #'P_basic'{headers = []}, + payload_fragments_rev = [<<"data">>]}, + Msg0 = mc:prepare(store, mc:init(mc_amqpl, Content, annotations())), + + %% Test the following topology: + %% Q1 --rejected--> Q2 --expired--> Q3 --expired--> + %% Q1 --rejected--> Q2 --expired--> Q3 + + Msg1 = mc:record_death(rejected, <<"q1">>, Msg0, #{}), + ?assertNot(mc:is_death_cycle(<<"q1">>, Msg1), + "A queue that dead letters to itself due to rejected is not considered a cycle."), + ?assertNot(mc:is_death_cycle(<<"q2">>, Msg1)), + ?assertNot(mc:is_death_cycle(<<"q3">>, Msg1)), + + Msg2 = mc:record_death(expired, <<"q2">>, Msg1, #{}), + ?assertNot(mc:is_death_cycle(<<"q1">>, Msg2)), + ?assert(mc:is_death_cycle(<<"q2">>, Msg2), + "A queue that dead letters to itself due to expired is considered a cycle."), + ?assertNot(mc:is_death_cycle(<<"q3">>, Msg2)), + + Msg3 = mc:record_death(expired, <<"q3">>, Msg2, #{}), + ?assertNot(mc:is_death_cycle(<<"q1">>, Msg3)), + ?assert(mc:is_death_cycle(<<"q2">>, Msg3)), + ?assert(mc:is_death_cycle(<<"q3">>, Msg3)), + + Msg4 = mc:record_death(rejected, <<"q1">>, Msg3, #{}), + ?assertNot(mc:is_death_cycle(<<"q1">>, Msg4)), + ?assertNot(mc:is_death_cycle(<<"q2">>, Msg4)), + ?assertNot(mc:is_death_cycle(<<"q3">>, Msg4)), + + Msg5 = mc:record_death(expired, <<"q2">>, Msg4, #{}), + ?assertNot(mc:is_death_cycle(<<"q1">>, Msg5)), + ?assert(mc:is_death_cycle(<<"q2">>, Msg5)), + ?assertNot(mc:is_death_cycle(<<"q3">>, Msg5)), + + DeathQsOrderedByRecency = [<<"q2">>, <<"q1">>, <<"q3">>], + ?assertEqual(DeathQsOrderedByRecency, mc:death_queue_names(Msg5)), + + #content{properties = #'P_basic'{headers = H}} = mc:protocol_state(Msg5), + ?assertMatch({_, longstr, <<"q1">>}, header(<<"x-first-death-queue">>, H)), + ?assertMatch({_, longstr, <<"rejected">>}, header(<<"x-first-death-reason">>, H)), + ?assertMatch({_, longstr, <<"q2">>}, header(<<"x-last-death-queue">>, H)), + ?assertMatch({_, longstr, <<"expired">>}, header(<<"x-last-death-reason">>, H)), + + %% We expect the array to be ordered by recency. + {_, array, [{table, T1}, {table, T2}, {table, T3}]} = header(<<"x-death">>, H), + + ?assertMatch({_, longstr, <<"q2">>}, header(<<"queue">>, T1)), + ?assertMatch({_, longstr, <<"expired">>}, header(<<"reason">>, T1)), + ?assertMatch({_, long, 2}, header(<<"count">>, T1)), + + ?assertMatch({_, longstr, <<"q1">>}, header(<<"queue">>, T2)), + ?assertMatch({_, longstr, <<"rejected">>}, header(<<"reason">>, T2)), + ?assertMatch({_, long, 2}, header(<<"count">>, T2)), + + ?assertMatch({_, longstr, <<"q3">>}, header(<<"queue">>, T3)), + ?assertMatch({_, longstr, <<"expired">>}, header(<<"reason">>, T3)), + ?assertMatch({_, long, 1}, header(<<"count">>, T3)). 
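The assertions above pin down the cycle rule only indirectly. A minimal sketch of that rule, as a simplified restatement inferred from these expectations rather than the actual mc implementation (Deaths is assumed to be a most-recent-first list of {Queue, Reason} pairs, in the spirit of mc:death_queue_names/1):

    %% Sketch only: a message about to be dead-lettered to Queue is in a
    %% cycle iff Queue already appears in its death history and every death
    %% from Queue's most recent death up to now was automatic, i.e. none of
    %% them was a client rejection.
    is_cycle(Queue, Deaths) ->
        case lists:splitwith(fun({Q, _}) -> Q =/= Queue end, Deaths) of
            {_, []} ->
                %% the target queue has not seen this message before
                false;
            {MoreRecentDeaths, [{Queue, _} = LastDeathInQueue | _]} ->
                lists:all(fun({_, Reason}) -> Reason =/= rejected end,
                          [LastDeathInQueue | MoreRecentDeaths])
        end.

This reproduces the expectations above: the rejected hops through q1 never close a cycle, while the expired hops through q2 and q3 do.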
+ header(K, H) -> rabbit_basic:header(K, H). @@ -265,6 +321,7 @@ amqpl_amqp_bin_amqpl(_Config) -> {<<"a-float">>, float, 1.0}, {<<"a-void">>, void, undefined}, {<<"a-binary">>, binary, <<"data">>}, + {<<"a-array">>, array, [{long, 1}, {long, 2}]}, {<<"x-stream-filter">>, longstr, <<"apple">>} ], delivery_mode = 2, @@ -278,15 +335,12 @@ amqpl_amqp_bin_amqpl(_Config) -> user_id = <<"banana">>, app_id = <<"rmq">> }, - Payload = [<<"data">>], Content = #content{properties = Props, - payload_fragments_rev = Payload}, - Anns = #{exchange => <<"exch">>, - routing_keys => [<<"apple">>]}, - Msg = mc:init(mc_amqpl, Content, Anns), + payload_fragments_rev = [<<"data">>]}, + Msg = mc:init(mc_amqpl, Content, annotations()), - ?assertEqual(<<"exch">>, mc:get_annotation(exchange, Msg)), - ?assertEqual([<<"apple">>], mc:get_annotation(routing_keys, Msg)), + ?assertEqual(<<"exch">>, mc:exchange(Msg)), + ?assertEqual([<<"apple">>], mc:routing_keys(Msg)), ?assertEqual(98, mc:priority(Msg)), ?assertEqual(true, mc:is_persistent(Msg)), ?assertEqual(99000, mc:timestamp(Msg)), @@ -294,16 +348,17 @@ amqpl_amqp_bin_amqpl(_Config) -> ?assertEqual({utf8, <<"msg-id">>}, mc:message_id(Msg)), ?assertEqual(1, mc:ttl(Msg)), ?assertEqual({utf8, <<"apple">>}, mc:x_header(<<"x-stream-filter">>, Msg)), + ?assert(is_integer(mc:get_annotation(rts, Msg))), - RoutingHeaders = mc:routing_headers(Msg, []), + %% array type non x-headers cannot be converted into amqp + RoutingHeaders = maps:remove(<<"a-array">>, mc:routing_headers(Msg, [])), %% roundtrip to binary Msg10Pre = mc:convert(mc_amqp, Msg), - Sections = amqp10_framing:decode_bin( - iolist_to_binary(amqp_serialize(Msg10Pre))), - Msg10 = mc:init(mc_amqp, Sections, #{}), - ?assertEqual(<<"exch">>, mc:get_annotation(exchange, Msg10)), - ?assertEqual([<<"apple">>], mc:get_annotation(routing_keys, Msg10)), + Payload = iolist_to_binary(mc:protocol_state(Msg10Pre)), + Msg10 = mc:init(mc_amqp, Payload, #{}), + ?assertEqual(<<"exch">>, mc:exchange(Msg10)), + ?assertEqual([<<"apple">>], mc:routing_keys(Msg10)), ?assertEqual(98, mc:priority(Msg10)), ?assertEqual(true, mc:is_persistent(Msg10)), ?assertEqual(99000, mc:timestamp(Msg10)), @@ -311,12 +366,57 @@ amqpl_amqp_bin_amqpl(_Config) -> ?assertEqual({utf8, <<"msg-id">>}, mc:message_id(Msg10)), ?assertEqual(1, mc:ttl(Msg10)), ?assertEqual({utf8, <<"apple">>}, mc:x_header(<<"x-stream-filter">>, Msg10)), + %% at this point the type is now present as a message annotation + ?assertEqual({utf8, <<"45">>}, mc:x_header(<<"x-basic-type">>, Msg10)), ?assertEqual(RoutingHeaders, mc:routing_headers(Msg10, [])), + ?assert(is_integer(mc:get_annotation(rts, Msg10))), + + Sections = amqp10_framing:decode_bin(Payload), + [ + #'v1_0.header'{} = Hdr10, + #'v1_0.message_annotations'{}, + #'v1_0.properties'{} = Props10, + #'v1_0.application_properties'{content = AP10} + | _] = Sections, + + ?assertMatch(#'v1_0.header'{durable = true, + ttl = {uint, 1}, + priority = {ubyte, 98}}, + Hdr10), + ?assertMatch(#'v1_0.properties'{content_encoding = {symbol, <<"gzip">>}, + content_type = {symbol, <<"text/plain">>}, + reply_to = {utf8, <<"reply-to">>}, + creation_time = {timestamp, 99000}, + user_id = {binary, <<"banana">>}, + group_id = {utf8, <<"rmq">>} + }, + Props10), + + Get = fun(K, AP) -> amqp_map_get(utf8(K), AP) end, + + + ?assertEqual({long, 99}, Get(<<"a-stream-offset">>, AP10)), + ?assertEqual({utf8, <<"a string">>}, Get(<<"a-string">>, AP10)), + ?assertEqual(false, Get(<<"a-bool">>, AP10)), + ?assertEqual({ubyte, 1}, Get(<<"a-unsignedbyte">>, AP10)), 
+ ?assertEqual({ushort, 1}, Get(<<"a-unsignedshort">>, AP10)), + ?assertEqual({uint, 1}, Get(<<"a-unsignedint">>, AP10)), + ?assertEqual({int, 1}, Get(<<"a-signedint">>, AP10)), + ?assertEqual({timestamp, 1000}, Get(<<"a-timestamp">>, AP10)), + ?assertEqual({double, 1.0}, Get(<<"a-double">>, AP10)), + ?assertEqual({float, 1.0}, Get(<<"a-float">>, AP10)), + ?assertEqual(undefined, Get(<<"a-void">>, AP10)), + ?assertEqual({binary, <<"data">>}, Get(<<"a-binary">>, AP10)), + %% x-headers do not go into app props + ?assertEqual(undefined, Get(<<"x-stream-filter">>, AP10)), + %% arrays are not converted + ?assertEqual(undefined, Get(<<"a-array">>, AP10)), + %% assert properties MsgL2 = mc:convert(mc_amqpl, Msg10), - ?assertEqual(<<"exch">>, mc:get_annotation(exchange, MsgL2)), - ?assertEqual([<<"apple">>], mc:get_annotation(routing_keys, MsgL2)), + ?assertEqual(<<"exch">>, mc:exchange(MsgL2)), + ?assertEqual([<<"apple">>], mc:routing_keys(MsgL2)), ?assertEqual(98, mc:priority(MsgL2)), ?assertEqual(true, mc:is_persistent(MsgL2)), ?assertEqual(99000, mc:timestamp(MsgL2)), @@ -325,6 +425,7 @@ amqpl_amqp_bin_amqpl(_Config) -> ?assertEqual(1, mc:ttl(MsgL2)), ?assertEqual({utf8, <<"apple">>}, mc:x_header(<<"x-stream-filter">>, MsgL2)), ?assertEqual(RoutingHeaders, mc:routing_headers(MsgL2, [])), + ?assert(is_integer(mc:get_annotation(rts, MsgL2))), ok. amqpl_cc_amqp_bin_amqpl(_Config) -> @@ -334,36 +435,99 @@ amqpl_cc_amqp_bin_amqpl(_Config) -> Content = #content{properties = Props, payload_fragments_rev = [<<"data">>]}, X = rabbit_misc:r(<<"/">>, exchange, <<"exch">>), - Msg = mc_amqpl:message(X, <<"apple">>, Content, #{}, true), + {ok, Msg} = mc_amqpl:message(X, <<"apple">>, Content, #{}), RoutingKeys = [<<"apple">>, <<"q1">>, <<"q2">>], - ?assertEqual(RoutingKeys, mc:get_annotation(routing_keys, Msg)), + ?assertEqual(RoutingKeys, mc:routing_keys(Msg)), Msg10Pre = mc:convert(mc_amqp, Msg), - Sections = amqp10_framing:decode_bin( - iolist_to_binary(amqp_serialize(Msg10Pre))), + Sections = iolist_to_binary(mc:protocol_state(Msg10Pre)), Msg10 = mc:init(mc_amqp, Sections, #{}), - ?assertEqual(RoutingKeys, mc:get_annotation(routing_keys, Msg10)), + ?assertEqual(RoutingKeys, mc:routing_keys(Msg10)), MsgL2 = mc:convert(mc_amqpl, Msg10), - ?assertEqual(RoutingKeys, mc:get_annotation(routing_keys, MsgL2)), + ?assertEqual(RoutingKeys, mc:routing_keys(MsgL2)), ?assertMatch(#content{properties = #'P_basic'{headers = Headers}}, mc:protocol_state(MsgL2)). thead2(T, Value) -> {symbol(atom_to_binary(T)), {T, Value}}. +thead2(K, T, Value) -> + {symbol(atom_to_binary(K)), {T, Value}}. + thead(T, Value) -> {utf8(atom_to_binary(T)), {T, Value}}. +mc_util_uuid_to_urn_roundtrip(_Config) -> + %% roundtrip uuid test + UUID = <<88,184,103,176,129,81,31,86,27,212,115,34,152,7,253,96>>, + S = mc_util:uuid_to_urn_string(UUID), + ?assertEqual(<<"urn:uuid:58b867b0-8151-1f56-1bd4-73229807fd60">>, S), + ?assertEqual({ok, UUID}, mc_util:urn_string_to_uuid(S)), + ok. + +do_n(0, _) -> + ok; +do_n(N, Fun) -> + Fun(), + do_n(N -1, Fun). 
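The round trip above leans on amqp10_framing for serialisation and decoding. A minimal sketch of pulling one section back out of such a payload, assuming the amqp10 framing records are in scope as they are in this suite (decode_properties is a hypothetical helper, not part of the patch):

    %% Sketch only: decode a serialized AMQP 1.0 message, e.g.
    %% iolist_to_binary(mc:protocol_state(Msg)) as used above, and return
    %% its properties section if present. Decoded sections are plain tuples
    %% tagged with the record name, so lists:keyfind/3 on position 1 works.
    decode_properties(Payload) when is_binary(Payload) ->
        Sections = amqp10_framing:decode_bin(Payload),
        case lists:keyfind('v1_0.properties', 1, Sections) of
            #'v1_0.properties'{} = Props -> {ok, Props};
            false -> undefined
        end.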
+ +amqp_amqpl_unsupported_values_not_converted(_Config) -> + LongKey = binary:copy(<<"a">>, 256), + UTF8Key = <<"I am a 🐰"/utf8>>, + APC = [ + {{utf8, <<"area">>}, {utf8, <<"East Sussex">>}}, + {{utf8, LongKey}, {utf8, <<"apple">>}}, + {{utf8, UTF8Key}, {utf8, <<"dog">>}} + ], + AP = #'v1_0.application_properties'{content = APC}, + + %% invalid utf8 + UserId = <<0, "banana"/utf8>>, + ?assertEqual(false, mc_util:is_valid_shortstr(UserId)), + + P = #'v1_0.properties'{user_id = {binary, UserId}}, + D = #'v1_0.data'{content = <<"data">>}, + Payload = serialize_sections([P, AP, D]), + + Msg = mc:init(mc_amqp, Payload, annotations()), + MsgL = mc:convert(mc_amqpl, Msg), + #content{properties = #'P_basic'{user_id = undefined, + headers = HL}} = mc:protocol_state(MsgL), + ?assertMatch({_, longstr, <<"East Sussex">>}, header(<<"area">>, HL)), + ?assertMatch(undefined, header(LongKey, HL)), + %% RabbitMQ does not validate that keys are ASCII as the spec requires, + %% which is acceptable in practice. + ok. + +amqp_amqpl_amqp_uuid_correlation_id(_Config) -> + %% ensure uuid correlation ids are correctly roundtripped via urn formatting + UUID = crypto:strong_rand_bytes(16), + + P = #'v1_0.properties'{correlation_id = {uuid, UUID}, + message_id = {uuid, UUID}}, + D = #'v1_0.data'{content = <<"data">>}, + BareMsgIn = serialize_sections([P, D]), + + Msg = mc:init(mc_amqp, BareMsgIn, annotations()), + MsgL = mc:convert(mc_amqpl, Msg), + MsgOut = mc:convert(mc_amqp, MsgL), + + [_HeaderSect, _MessageAnnotationsSect | BareMsgIoList] = mc:protocol_state(MsgOut), + BareMsgOut = iolist_to_binary(BareMsgIoList), + ?assertEqual(BareMsgIn, BareMsgOut). + amqp_amqpl(_Config) -> H = #'v1_0.header'{priority = {ubyte, 3}, ttl = {uint, 20000}, durable = true}, MAC = [ {{symbol, <<"x-stream-filter">>}, {utf8, <<"apple">>}}, - thead2(list, [utf8(<<"1">>)]), - thead2(map, [{utf8(<<"k">>), utf8(<<"v">>)}]) + thead2(list, [utf8(<<"l">>)]), + thead2(map, [{utf8(<<"k">>), utf8(<<"v">>)}]), + thead2('x-list', list, [utf8(<<"l">>)]), + thead2('x-map', map, [{utf8(<<"k">>), utf8(<<"v">>)}]) ], M = #'v1_0.message_annotations'{content = MAC}, P = #'v1_0.properties'{content_type = {symbol, <<"ctype">>}, @@ -380,7 +544,8 @@ amqp_amqpl(_Config) -> thead(ulong, 5), thead(utf8, <<"a-string">>), thead(binary, <<"data">>), - thead(ubyte, 1), + thead(symbol, <<"symbol">>), + thead(ubyte, 255), thead(short, 2), thead(ushort, 3), thead(uint, 4), @@ -388,18 +553,18 @@ amqp_amqpl(_Config) -> thead(double, 5.0), thead(float, 6.0), thead(timestamp, 7000), - thead(byte, 128), + thead(byte, -128), thead(boolean, true), + {{utf8, <<"boolean2">>}, false}, {utf8(<<"null">>), null} ], A = #'v1_0.application_properties'{content = AC}, D = #'v1_0.data'{content = <<"data">>}, - Anns = #{exchange => <<"exch">>, - routing_keys => [<<"apple">>]}, - Msg = mc:init(mc_amqp, [H, M, P, A, D], Anns), + Payload = serialize_sections([H, M, P, A, D]), + Msg = mc:init(mc_amqp, Payload, annotations()), %% validate source data is serialisable - _ = amqp_serialize(Msg), + _ = mc:protocol_state(Msg), ?assertEqual(3, mc:priority(Msg)), ?assertEqual(true, mc:is_persistent(Msg)), @@ -411,8 +576,10 @@ amqp_amqpl(_Config) -> ?assertEqual(3, mc:priority(MsgL)), ?assertEqual(true, mc:is_persistent(MsgL)), ?assertEqual({utf8, <<"msg-id">>}, mc:message_id(MsgL)), - #content{properties = #'P_basic'{headers = HL} = Props} = Content = mc:protocol_state(MsgL), + #content{properties = #'P_basic'{headers = HL} = Props} = Content = + mc:protocol_state(MsgL), + %% the user id is valid utf8 
shortstr ?assertMatch(#'P_basic'{user_id = <<"user-id">>}, Props), ?assertMatch(#'P_basic'{reply_to = <<"reply-to">>}, Props), ?assertMatch(#'P_basic'{content_type = <<"ctype">>}, Props), @@ -424,12 +591,18 @@ amqp_amqpl(_Config) -> ?assertMatch(#'P_basic'{expiration = <<"20000">>}, Props), ?assertMatch({_, longstr, <<"apple">>}, header(<<"x-stream-filter">>, HL)), + %% these are not converted as they are not x-headers + ?assertEqual(undefined, header(<<"list">>, HL)), + ?assertEqual(undefined, header(<<"map">>, HL)), + ?assertMatch({_ ,array, [{longstr,<<"l">>}]}, header(<<"x-list">>, HL)), + ?assertMatch({_, table, [{<<"k">>,longstr,<<"v">>}]}, header(<<"x-map">>, HL)), ?assertMatch({_, long, 5}, header(<<"long">>, HL)), ?assertMatch({_, long, 5}, header(<<"ulong">>, HL)), ?assertMatch({_, longstr, <<"a-string">>}, header(<<"utf8">>, HL)), - ?assertMatch({_, binary, <<"data">>}, header(<<"binary">>, HL)), - ?assertMatch({_, unsignedbyte, 1}, header(<<"ubyte">>, HL)), + ?assertMatch({_, longstr, <<"data">>}, header(<<"binary">>, HL)), + ?assertMatch({_, longstr, <<"symbol">>}, header(<<"symbol">>, HL)), + ?assertMatch({_, unsignedbyte, 255}, header(<<"ubyte">>, HL)), ?assertMatch({_, short, 2}, header(<<"short">>, HL)), ?assertMatch({_, unsignedshort, 3}, header(<<"ushort">>, HL)), ?assertMatch({_, unsignedint, 4}, header(<<"uint">>, HL)), @@ -437,8 +610,9 @@ amqp_amqpl(_Config) -> ?assertMatch({_, double, 5.0}, header(<<"double">>, HL)), ?assertMatch({_, float, 6.0}, header(<<"float">>, HL)), ?assertMatch({_, timestamp, 7}, header(<<"timestamp">>, HL)), - ?assertMatch({_, byte, 128}, header(<<"byte">>, HL)), + ?assertMatch({_, byte, -128}, header(<<"byte">>, HL)), ?assertMatch({_, bool, true}, header(<<"boolean">>, HL)), + ?assertMatch({_, bool, false}, header(<<"boolean2">>, HL)), ?assertMatch({_, void, undefined}, header(<<"null">>, HL)), %% validate content is serialisable @@ -448,6 +622,74 @@ amqp_amqpl(_Config) -> ok. +amqp_amqpl_message_id_ulong(_Config) -> + Num = 9876789, + ULong = erlang:integer_to_binary(Num), + P = #'v1_0.properties'{message_id = {ulong, Num}, + correlation_id = {ulong, Num}}, + D = #'v1_0.data'{content = <<"data">>}, + Payload = serialize_sections([P, D]), + Msg = mc:init(mc_amqp, Payload, annotations()), + MsgL = mc:convert(mc_amqpl, Msg), + ?assertEqual({utf8, ULong}, mc:message_id(MsgL)), + ?assertEqual({utf8, ULong}, mc:correlation_id(MsgL)), + #content{properties = #'P_basic'{} = Props} = mc:protocol_state(MsgL), + ?assertMatch(#'P_basic'{message_id = ULong, + correlation_id = ULong}, Props), + %% NB we can't practically roundtrip ulong correlation ids + ok. 
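The closing comment deserves a word of context: AMQP 0-9-1 carries message_id and correlation_id as shortstr fields, so the only thing the conversion can keep from a ulong id is its decimal text form, and the numeric type is lost on the way back. A minimal sketch of that flattening, consistent with the assertions above but not the mc_amqpl code itself:

    %% Sketch only: a ulong id survives conversion as its decimal rendering,
    %% e.g. {ulong, 9876789} becomes <<"9876789">>.
    flatten_ulong_id({ulong, N}) when is_integer(N), N >= 0 ->
        integer_to_binary(N).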
+ +amqp_amqpl_amqp_message_id_uuid(_Config) -> + %% uuid message-ids are roundtripped using a urn uuid format + UUId = crypto:strong_rand_bytes(16), + Urn = mc_util:uuid_to_urn_string(UUId), + P = #'v1_0.properties'{message_id = {uuid, UUId}, + correlation_id = {uuid, UUId}}, + D = #'v1_0.data'{content = <<"data">>}, + BareMsgIn = serialize_sections([P, D]), + Msg = mc:init(mc_amqp, BareMsgIn, annotations()), + MsgL = mc:convert(mc_amqpl, Msg), + ?assertEqual({utf8, Urn}, mc:message_id(MsgL)), + ?assertEqual({utf8, Urn}, mc:correlation_id(MsgL)), + #content{properties = #'P_basic'{} = Props} = mc:protocol_state(MsgL), + ?assertMatch(#'P_basic'{message_id = Urn, + correlation_id = Urn}, Props), + %% check roundtrip back + Msg2 = mc:convert(mc_amqp, MsgL), + [_HeaderSect, _MessageAnnotationsSect | BareMsgIoList] = mc:protocol_state(Msg2), + BareMsgOut = iolist_to_binary(BareMsgIoList), + ?assertEqual(BareMsgIn, BareMsgOut). + +amqp_amqpl_message_id_large(_Config) -> + Orig = binary:copy(<<"hi">>, 256), + P = #'v1_0.properties'{message_id = {utf8, Orig}, + correlation_id = {utf8, Orig}}, + D = #'v1_0.data'{content = <<"data">>}, + Payload = serialize_sections([P, D]), + Msg = mc:init(mc_amqp, Payload, annotations()), + MsgL = mc:convert(mc_amqpl, Msg), + ?assertEqual(undefined, mc:message_id(MsgL)), + ?assertEqual(undefined, mc:correlation_id(MsgL)), + #content{properties = #'P_basic'{headers = Hdrs}} = mc:protocol_state(MsgL), + ?assertMatch({_, longstr, Orig}, header(<<"x-message-id">>, Hdrs)), + ?assertMatch({_, longstr, Orig}, header(<<"x-correlation-id">>, Hdrs)), + ok. + +amqp_amqpl_message_id_binary(_Config) -> + Orig = crypto:strong_rand_bytes(128), + P = #'v1_0.properties'{message_id = {binary, Orig}, + correlation_id = {binary, Orig}}, + D = #'v1_0.data'{content = <<"data">>}, + Payload = serialize_sections([P, D]), + Msg = mc:init(mc_amqp, Payload, annotations()), + MsgL = mc:convert(mc_amqpl, Msg), + ?assertEqual(undefined, mc:message_id(MsgL)), + ?assertEqual(undefined, mc:correlation_id(MsgL)), + #content{properties = #'P_basic'{headers = Hdrs}} = mc:protocol_state(MsgL), + ?assertMatch({_, binary, Orig}, header(<<"x-message-id">>, Hdrs)), + ?assertMatch({_, binary, Orig}, header(<<"x-correlation-id">>, Hdrs)), + ok. 
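The urn:uuid round trips above rely on the usual 8-4-4-4-12 hex layout of a textual UUID. A minimal sketch of that formatting, which reproduces the expected value in mc_util_uuid_to_urn_roundtrip but is not the mc_util implementation itself:

    %% Sketch only: format 16 bytes as a urn:uuid string, e.g.
    %% uuid_to_urn(<<88,184,103,176,129,81,31,86,27,212,115,34,152,7,253,96>>)
    %% yields <<"urn:uuid:58b867b0-8151-1f56-1bd4-73229807fd60">>.
    uuid_to_urn(<<A:32, B:16, C:16, D:16, E:48>>) ->
        iolist_to_binary(
          io_lib:format("urn:uuid:~8.16.0b-~4.16.0b-~4.16.0b-~4.16.0b-~12.16.0b",
                        [A, B, C, D, E])).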
+ amqp_to_amqpl_data_body(_Config) -> Cases = [#'v1_0.data'{content = <<"helloworld">>}, #'v1_0.data'{content = [<<"hello">>, <<"world">>]}], @@ -457,7 +699,8 @@ amqp_to_amqpl_data_body(_Config) -> true -> Section; false -> [Section] end, - Mc0 = mc:init(mc_amqp, Sections, #{}), + Payload = serialize_sections(Sections), + Mc0 = mc:init(mc_amqp, Payload, #{}), Mc = mc:convert(mc_amqpl, Mc0), #content{payload_fragments_rev = PayFragRev} = mc:protocol_state(Mc), PayFrag = lists:reverse(PayFragRev), @@ -480,205 +723,56 @@ amqp_amqpl_amqp_bodies(_Config) -> #'v1_0.amqp_sequence'{content = [{utf8, <<"two">>}]} ] ], - [begin - EncodedPayload = amqp10_encode_bin(Payload), - + EncodedBody = amqp10_encode_bin(Body), Ex = #resource{virtual_host = <<"/">>, kind = exchange, name = <<"ex">>}, - LegacyMsg = mc_amqpl:message(Ex, <<"rkey">>, - #content{payload_fragments_rev = - lists:reverse(EncodedPayload), - properties = Props}, - #{}, true), - + {ok, LegacyMsg} = mc_amqpl:message(Ex, + <<"rkey">>, + #content{payload_fragments_rev = + lists:reverse(EncodedBody), + properties = Props}, + #{}), AmqpMsg = mc:convert(mc_amqp, LegacyMsg), %% drop any non body sections - BodySections = lists:nthtail(3, mc:protocol_state(AmqpMsg)), - - AssertBody = case is_list(Payload) of - true -> - Payload; - false -> - [Payload] - end, - % ct:pal("ProtoState ~p", [BodySections]), - ?assertEqual(AssertBody, BodySections) - end || Payload <- Bodies], - ok. - -unsupported_091_header_is_dropped(_Config) -> - Props = #'P_basic'{ - headers = [ - {<<"x-received-from">>, array, []} - ] - }, - MsgRecord0 = rabbit_msg_record:from_amqp091(Props, <<"payload">>), - MsgRecord = rabbit_msg_record:init( - iolist_to_binary(rabbit_msg_record:to_iodata(MsgRecord0))), - % meck:unload(), - {PropsOut, <<"payload">>} = rabbit_msg_record:to_amqp091(MsgRecord), - - ?assertMatch(#'P_basic'{headers = undefined}, PropsOut), - - ok. - -message_id_ulong(_Config) -> - Num = 9876789, - ULong = erlang:integer_to_binary(Num), - P = #'v1_0.properties'{message_id = {ulong, Num}, - correlation_id = {ulong, Num}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = ULong, - correlation_id = ULong, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id-type">>, longstr, <<"ulong">>}, - {<<"x-message-id-type">>, longstr, <<"ulong">>} - ]}, - Props), + [_HeaderSect, _MessageAnnotationsSect | BodySectionsIoList] = mc:protocol_state(AmqpMsg), + BodySectionsBin = iolist_to_binary(BodySectionsIoList), + BodySections = amqp10_framing:decode_bin(BodySectionsBin), + ExpectedBodySections = case is_list(Body) of + true -> Body; + false -> [Body] + end, + ?assertEqual(ExpectedBodySections, BodySections) + end || Body <- Bodies], ok. 
-message_id_uuid(_Config) -> - %% fake a uuid - UUId = erlang:md5(term_to_binary(make_ref())), - TextUUId = rabbit_data_coercion:to_binary(rabbit_guid:to_string(UUId)), - P = #'v1_0.properties'{message_id = {uuid, UUId}, - correlation_id = {uuid, UUId}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = TextUUId, - correlation_id = TextUUId, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id-type">>, longstr, <<"uuid">>}, - {<<"x-message-id-type">>, longstr, <<"uuid">>} - ]}, - Props), - ok. - -message_id_binary(_Config) -> - %% fake a uuid - Orig = <<"asdfasdf">>, - Text = base64:encode(Orig), - P = #'v1_0.properties'{message_id = {binary, Orig}, - correlation_id = {binary, Orig}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = Text, - correlation_id = Text, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id-type">>, longstr, <<"binary">>}, - {<<"x-message-id-type">>, longstr, <<"binary">>} - ]}, - Props), - ok. - -message_id_large_binary(_Config) -> - %% cannot fit in a shortstr - Orig = crypto:strong_rand_bytes(500), - P = #'v1_0.properties'{message_id = {binary, Orig}, - correlation_id = {binary, Orig}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = undefined, - correlation_id = undefined, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id">>, longstr, Orig}, - {<<"x-message-id">>, longstr, Orig} - ]}, - Props), - ok. - -message_id_large_string(_Config) -> - %% cannot fit in a shortstr - Orig = base64:encode(crypto:strong_rand_bytes(500)), - P = #'v1_0.properties'{message_id = {utf8, Orig}, - correlation_id = {utf8, Orig}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = undefined, - correlation_id = undefined, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id">>, longstr, Orig}, - {<<"x-message-id">>, longstr, Orig} - ]}, - Props), - ok. 
- -reuse_amqp10_binary_chunks(_Config) -> - Amqp10MsgAnnotations = #'v1_0.message_annotations'{content = - [{{symbol, <<"x-route">>}, {utf8, <<"dummy">>}}]}, - Amqp10MsgAnnotationsBin = amqp10_encode_bin(Amqp10MsgAnnotations), - Amqp10Props = #'v1_0.properties'{group_id = {utf8, <<"my-group">>}, - group_sequence = {uint, 42}}, - Amqp10PropsBin = amqp10_encode_bin(Amqp10Props), - Amqp10AppProps = #'v1_0.application_properties'{content = [{{utf8, <<"foo">>}, {utf8, <<"bar">>}}]}, - Amqp10AppPropsBin = amqp10_encode_bin(Amqp10AppProps), - Amqp091Headers = [{<<"x-amqp-1.0-message-annotations">>, longstr, Amqp10MsgAnnotationsBin}, - {<<"x-amqp-1.0-properties">>, longstr, Amqp10PropsBin}, - {<<"x-amqp-1.0-app-properties">>, longstr, Amqp10AppPropsBin}], - Amqp091Props = #'P_basic'{type= <<"amqp-1.0">>, headers = Amqp091Headers}, - Body = #'v1_0.amqp_value'{content = {utf8, <<"hello world">>}}, - EncodedBody = amqp10_encode_bin(Body), - R = rabbit_msg_record:from_amqp091(Amqp091Props, EncodedBody), - RBin = rabbit_msg_record:to_iodata(R), - Amqp10DecodedMsg = amqp10_framing:decode_bin(iolist_to_binary(RBin)), - [Amqp10DecodedMsgAnnotations, Amqp10DecodedProps, - Amqp10DecodedAppProps, DecodedBody] = Amqp10DecodedMsg, - ?assertEqual(Amqp10MsgAnnotations, Amqp10DecodedMsgAnnotations), - ?assertEqual(Amqp10Props, Amqp10DecodedProps), - ?assertEqual(Amqp10AppProps, Amqp10DecodedAppProps), - ?assertEqual(Body, DecodedBody), - ok. +%% Utility amqp10_encode_bin(L) when is_list(L) -> [iolist_to_binary(amqp10_framing:encode_bin(X)) || X <- L]; amqp10_encode_bin(X) -> - [iolist_to_binary(amqp10_framing:encode_bin(X))]. - -%% Utility + amqp10_encode_bin([X]). -test_amqp091_roundtrip(Props, Payload) -> - MsgRecord0 = rabbit_msg_record:from_amqp091(Props, Payload), - MsgRecord = rabbit_msg_record:init( - iolist_to_binary(rabbit_msg_record:to_iodata(MsgRecord0))), - % meck:unload(), - {PropsOut, PayloadOut} = rabbit_msg_record:to_amqp091(MsgRecord), - ?assertEqual(Props, PropsOut), - ?assertEqual(iolist_to_binary(Payload), - iolist_to_binary(PayloadOut)), - ok. +serialize_sections(Sections) -> + iolist_to_binary([amqp10_framing:encode_bin(S) || S <- Sections]). utf8(V) -> {utf8, V}. + symbol(V) -> {symbol, V}. -amqp_serialize(Msg) -> - mc_amqp:serialize(mc:protocol_state(Msg)). +amqp_map_get(_K, []) -> + undefined; +amqp_map_get(K, Tuples) -> + case lists:keyfind(K, 1, Tuples) of + false -> + undefined; + {_, V} -> + V + end. + +annotations() -> + #{?ANN_EXCHANGE => <<"exch">>, + ?ANN_ROUTING_KEYS => [<<"apple">>]}. diff --git a/deps/rabbit/test/message_containers_SUITE.erl b/deps/rabbit/test/message_containers_SUITE.erl deleted file mode 100644 index 5747613baaad..000000000000 --- a/deps/rabbit/test/message_containers_SUITE.erl +++ /dev/null @@ -1,226 +0,0 @@ --module(message_containers_SUITE). - --compile([export_all, nowarn_export_all]). - --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - --define(FEATURE_FLAG, message_containers). - -%%%=================================================================== -%%% Common Test callbacks -%%%=================================================================== - -all() -> - [ - {group, classic}, - {group, quorum}, - {group, stream} - ]. - - -groups() -> - [ - {classic, [], all_tests()}, - {quorum, [], all_tests()}, - {stream, [], all_tests()} - ]. - -all_tests() -> - [ - enable_ff - ]. 
- -init_per_suite(Config0) -> - rabbit_ct_helpers:log_environment(), - Config = rabbit_ct_helpers:merge_app_env( - Config0, {rabbit, [{quorum_tick_interval, 1000}]}), - rabbit_ct_helpers:run_setup_steps(Config). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config), - ok. - -init_per_group(Group, Config) -> - ct:pal("init per group ~p", [Group]), - ClusterSize = 3, - Config1 = rabbit_ct_helpers:set_config(Config, - [{rmq_nodes_count, ClusterSize}, - {rmq_nodename_suffix, Group}, - {tcp_ports_base}]), - Config1b = rabbit_ct_helpers:set_config(Config1, - [{queue_type, atom_to_binary(Group, utf8)}, - {net_ticktime, 10}]), - - Config1c = rabbit_ct_helpers:merge_app_env( - Config1b, {rabbit, [{forced_feature_flags_on_init, []}]}), - Config2 = rabbit_ct_helpers:run_steps(Config1c, - [fun merge_app_env/1 ] ++ - rabbit_ct_broker_helpers:setup_steps()), - ok = rabbit_ct_broker_helpers:rpc( - Config2, 0, application, set_env, - [rabbit, channel_tick_interval, 100]), - - AllFFs = rabbit_ct_broker_helpers:rpc(Config2, rabbit_feature_flags, list, [all, stable]), - FFs = maps:keys(maps:remove(?FEATURE_FLAG, AllFFs)), - ct:pal("FFs ~p", [FFs]), - rabbit_ct_broker_helpers:set_policy(Config2, 0, - <<"ha-policy">>, <<".*">>, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}]), - Config2. - -merge_app_env(Config) -> - rabbit_ct_helpers:merge_app_env( - rabbit_ct_helpers:merge_app_env(Config, - {rabbit, - [{core_metrics_gc_interval, 100}, - {log, [{file, [{level, debug}]}]}]}), - {ra, [{min_wal_roll_over_interval, 30000}]}). - -end_per_group(_Group, Config) -> - rabbit_ct_helpers:run_steps(Config, - rabbit_ct_broker_helpers:teardown_steps()). - -init_per_testcase(Testcase, Config) -> - case rabbit_ct_broker_helpers:is_feature_flag_supported(Config, ?FEATURE_FLAG) of - false -> - {skip, "feature flag message_containers is unsupported"}; - true -> - Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase), - ?assertNot(rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, ?FEATURE_FLAG)), - Q = rabbit_data_coercion:to_binary(Testcase), - Config2 = rabbit_ct_helpers:set_config(Config1, - [{queue_name, Q}, - {alt_queue_name, <>} - ]), - rabbit_ct_helpers:run_steps(Config2, - rabbit_ct_client_helpers:setup_steps()) - end. - -end_per_testcase(Testcase, Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []), - Config1 = rabbit_ct_helpers:run_steps( - Config, - rabbit_ct_client_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). 
- -%%%=================================================================== -%%% Test cases -%%%=================================================================== - -enable_ff(Config) -> - Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QName = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QName, 0, 0}, - declare(Ch, QName, [{<<"x-queue-type">>, longstr, - ?config(queue_type, Config)}])), - #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), - amqp_channel:register_confirm_handler(Ch, self()), - - timer:sleep(100), - - ConsumerTag1 = <<"ctag1">>, - Ch2 = rabbit_ct_client_helpers:open_channel(Config, 2), - qos(Ch2, 2), - ok = subscribe(Ch2, QName, ConsumerTag1), - publish_and_confirm(Ch, QName, <<"msg1">>), - - receive_and_ack(Ch2), - %% consume - publish(Ch, QName, <<"msg2">>), - - ok = rabbit_ct_broker_helpers:enable_feature_flag(Config, ?FEATURE_FLAG), - - confirm(), - publish_and_confirm(Ch, QName, <<"msg3">>), - receive_and_ack(Ch2), - receive_and_ack(Ch2). - -receive_and_ack(Ch) -> - receive - {#'basic.deliver'{delivery_tag = DeliveryTag, - redelivered = false}, - #amqp_msg{}} -> - basic_ack(Ch, DeliveryTag) - after 5000 -> - flush(), - exit(basic_deliver_timeout) - end. - -%% Utility - -delete_queues() -> - [{ok, 0} = rabbit_amqqueue:delete(Q, false, false, <<"dummy">>) - || Q <- rabbit_amqqueue:list()]. - -declare(Ch, Q, Args) -> - amqp_channel:call(Ch, #'queue.declare'{queue = Q, - durable = true, - auto_delete = false, - arguments = Args}). - -delete(Ch, Q) -> - amqp_channel:call(Ch, #'queue.delete'{queue = Q}). - -publish(Ch, Queue, Msg) -> - ok = amqp_channel:cast(Ch, - #'basic.publish'{routing_key = Queue}, - #amqp_msg{props = #'P_basic'{delivery_mode = 2}, - payload = Msg}). - -publish_and_confirm(Ch, Queue, Msg) -> - publish(Ch, Queue, Msg), - ct:pal("waiting for ~ts message confirmation from ~ts", [Msg, Queue]), - confirm(). - -confirm() -> - ok = receive - #'basic.ack'{} -> ok; - #'basic.nack'{} -> fail - after 2500 -> - flush(), - exit(confirm_timeout) - end. - -subscribe(Ch, Queue, CTag) -> - amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue, - no_ack = false, - consumer_tag = CTag}, - self()), - receive - #'basic.consume_ok'{consumer_tag = CTag} -> - ok - after 5000 -> - exit(basic_consume_timeout) - end. - -basic_ack(Ch, DTag) -> - amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag, - multiple = false}). - -basic_cancel(Ch, CTag) -> - #'basic.cancel_ok'{} = - amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}). - -basic_nack(Ch, DTag) -> - amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag, - requeue = true, - multiple = false}). - -flush() -> - receive - Any -> - ct:pal("flush ~tp", [Any]), - flush() - after 0 -> - ok - end. - -get_global_counters(Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_global_counters, overview, []). - -qos(Ch, Prefetch) -> - ?assertMatch(#'basic.qos_ok'{}, - amqp_channel:call(Ch, #'basic.qos'{prefetch_count = Prefetch})). diff --git a/deps/rabbit/test/message_containers_deaths_v2_SUITE.erl b/deps/rabbit/test/message_containers_deaths_v2_SUITE.erl new file mode 100644 index 000000000000..93806b0978e4 --- /dev/null +++ b/deps/rabbit/test/message_containers_deaths_v2_SUITE.erl @@ -0,0 +1,124 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. 
If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +%% This SUITE should be deleted when feature flag message_containers_deaths_v2 becomes required. +-module(message_containers_deaths_v2_SUITE). + +-define(FEATURE_FLAG, message_containers_deaths_v2). + +-compile([export_all, nowarn_export_all]). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). + +all() -> + [ + {group, cluster_size_1} + ]. + +groups() -> + [ + {cluster_size_1, [], [enable_feature_flag]} + ]. + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config, []). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_Group, Config0) -> + Config = rabbit_ct_helpers:merge_app_env( + Config0, {rabbit, [{forced_feature_flags_on_init, []}]}), + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_Group, Config) -> + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +enable_feature_flag(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + Q1 = <<"q1">>, + Q2 = <<"q2">>, + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = Q1, + arguments = [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Q2}, + {<<"x-message-ttl">>, long, 3}]}), + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = Q2, + arguments = [{<<"x-dead-letter-exchange">>, longstr, <<>>}, + {<<"x-dead-letter-routing-key">>, longstr, Q1}]}), + P1 = <<"payload 1">>, + P2 = <<"payload 2">>, + amqp_channel:call(Ch, + #'basic.publish'{routing_key = Q1}, + #amqp_msg{payload = P1}), + ?assertNot(rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, ?FEATURE_FLAG)), + ?assertEqual(ok, rabbit_ct_broker_helpers:enable_feature_flag(Config, ?FEATURE_FLAG)), + amqp_channel:call(Ch, + #'basic.publish'{routing_key = Q1}, + #amqp_msg{payload = P2}), + %% We now have 2 messages in Q2 with different values for the mc deaths annotation for v1 and v2. 
+ + reject(Ch, Q2, P1), + reject(Ch, Q2, P2), + reject(Ch, Q2, P1), + reject(Ch, Q2, P2), + + {#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{headers = H1}}} = + ?awaitMatch({#'basic.get_ok'{}, + #amqp_msg{payload = P1}}, + amqp_channel:call(Ch, #'basic.get'{queue = Q2}), + 5000), + + {#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{headers = H2}}} = + ?awaitMatch({#'basic.get_ok'{}, + #amqp_msg{payload = P2}}, + amqp_channel:call(Ch, #'basic.get'{queue = Q2}), + 5000), + + lists:foreach( + fun(Headers) -> + ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)), + ?assertEqual({longstr, Q1}, rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)), + ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Headers, <<"x-last-death-reason">>)), + ?assertEqual({longstr, Q1}, rabbit_misc:table_lookup(Headers, <<"x-last-death-queue">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Headers, <<"x-last-death-exchange">>)), + + {array, [{table, Death1}, + {table, Death2}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>), + + ?assertEqual({longstr, Q1}, rabbit_misc:table_lookup(Death1, <<"queue">>)), + ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Death1, <<"reason">>)), + ?assertMatch({timestamp, _}, rabbit_misc:table_lookup(Death1, <<"time">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death1, <<"exchange">>)), + ?assertEqual({long, 3}, rabbit_misc:table_lookup(Death1, <<"count">>)), + ?assertEqual({array, [{longstr, Q1}]}, rabbit_misc:table_lookup(Death1, <<"routing-keys">>)), + + ?assertEqual({longstr, Q2}, rabbit_misc:table_lookup(Death2, <<"queue">>)), + ?assertEqual({longstr, <<"rejected">>}, rabbit_misc:table_lookup(Death2, <<"reason">>)), + ?assertMatch({timestamp, _}, rabbit_misc:table_lookup(Death2, <<"time">>)), + ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death2, <<"exchange">>)), + ?assertEqual({long, 2}, rabbit_misc:table_lookup(Death2, <<"count">>)), + ?assertEqual({array, [{longstr, Q2}]}, rabbit_misc:table_lookup(Death2, <<"routing-keys">>)) + end, [H1, H2]), + ok. + +reject(Ch, Queue, Payload) -> + {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{}} = + ?awaitMatch({#'basic.get_ok'{}, + #amqp_msg{payload = Payload}}, + amqp_channel:call(Ch, #'basic.get'{queue = Queue}), + 5000), + amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag, + requeue = false}). diff --git a/deps/rabbit/test/message_size_limit_SUITE.erl b/deps/rabbit/test/message_size_limit_SUITE.erl index e3d0e0c9c778..09aedd007ca8 100644 --- a/deps/rabbit/test/message_size_limit_SUITE.erl +++ b/deps/rabbit/test/message_size_limit_SUITE.erl @@ -2,34 +2,30 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(message_size_limit_SUITE). --include_lib("common_test/include/ct.hrl"). --include_lib("kernel/include/file.hrl"). +-compile([export_all, nowarn_export_all]). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). - --compile(export_all). - --define(TIMEOUT_LIST_OPS_PASS, 5000). --define(TIMEOUT, 30000). 
-define(TIMEOUT_CHANNEL_EXCEPTION, 5000). --define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>). - all() -> [ - {group, parallel_tests} + {group, tests}, + {group, default} ]. groups() -> [ - {parallel_tests, [parallel], [ - max_message_size - ]} + {tests, [], [ + max_message_size + ]}, + {default, [], [ + default_max_message_size + ]} ]. suite() -> @@ -78,11 +74,7 @@ max_message_size(Config) -> Binary6M = gen_binary_mb(6), Binary10M = gen_binary_mb(10), - Size2Mb = 1024 * 1024 * 2, - Size2Mb = byte_size(Binary2M), - - rabbit_ct_broker_helpers:rpc(Config, 0, - application, set_env, [rabbit, max_message_size, 1024 * 1024 * 3]), + ok = rabbit_ct_broker_helpers:rpc(Config, persistent_term, put, [max_message_size, 1024 * 1024 * 3]), {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -96,8 +88,7 @@ max_message_size(Config) -> assert_channel_fail_max_size(Ch, Monitor), %% increase the limit - rabbit_ct_broker_helpers:rpc(Config, 0, - application, set_env, [rabbit, max_message_size, 1024 * 1024 * 8]), + ok = rabbit_ct_broker_helpers:rpc(Config, persistent_term, put, [max_message_size, 1024 * 1024 * 8]), {_, Ch1} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -112,15 +103,22 @@ max_message_size(Config) -> Monitor1 = monitor(process, Ch1), amqp_channel:call(Ch1, #'basic.publish'{routing_key = <<"none">>}, #amqp_msg{payload = Binary10M}), - assert_channel_fail_max_size(Ch1, Monitor1), + assert_channel_fail_max_size(Ch1, Monitor1). + +default_max_message_size(Config) -> + Binary15M = gen_binary_mb(15), + Binary17M = gen_binary_mb(20), - %% increase beyond the hard limit - rabbit_ct_broker_helpers:rpc(Config, 0, - application, set_env, [rabbit, max_message_size, 1024 * 1024 * 600]), - Val = rabbit_ct_broker_helpers:rpc(Config, 0, - rabbit_channel, get_max_message_size, []), + {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), - ?assertEqual(?MAX_MSG_SIZE, Val). + %% Binary is within the default max size limit of 16MB + amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"none">>}, #amqp_msg{payload = Binary15M}), + %% The channel process is alive + assert_channel_alive(Ch), + + Monitor = monitor(process, Ch), + amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"none">>}, #amqp_msg{payload = Binary17M}), + assert_channel_fail_max_size(Ch, Monitor). %% ------------------------------------------------------------------- %% Implementation diff --git a/deps/rabbit/test/metadata_store_clustering_SUITE.erl b/deps/rabbit/test/metadata_store_clustering_SUITE.erl new file mode 100644 index 000000000000..e9bf9584d56b --- /dev/null +++ b/deps/rabbit/test/metadata_store_clustering_SUITE.erl @@ -0,0 +1,345 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(metadata_store_clustering_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-compile([nowarn_export_all, export_all]). + +suite() -> + [{timetrap, 5 * 60_000}]. + +all() -> + [ + {group, unclustered} + ]. + +groups() -> + [ + {unclustered, [], [{cluster_size_2, [], cluster_size_2_tests()}, + {cluster_size_3, [], cluster_size_3_tests()}]} + ]. 
+ +cluster_size_2_tests() -> + [ + join_khepri_khepri_cluster, + join_mnesia_khepri_cluster, + join_mnesia_khepri_cluster_reverse, + join_khepri_mnesia_cluster, + join_khepri_mnesia_cluster_reverse + ]. + +cluster_size_3_tests() -> + [ + join_khepri_khepri_khepri_cluster, + join_mnesia_khepri_khepri_cluster, + join_mnesia_khepri_khepri_cluster_reverse, + join_khepri_mnesia_khepri_cluster, + join_khepri_mnesia_khepri_cluster_reverse, + join_khepri_khepri_mnesia_cluster, + join_khepri_khepri_mnesia_cluster_reverse, + join_mnesia_mnesia_khepri_cluster, + join_mnesia_mnesia_khepri_cluster_reverse, + join_mnesia_khepri_mnesia_cluster, + join_mnesia_khepri_mnesia_cluster_reverse, + join_khepri_mnesia_mnesia_cluster, + join_khepri_mnesia_mnesia_cluster_reverse + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + case rabbit_ct_helpers:is_mixed_versions() of + true -> + %% Khepri is not yet compatible with mixed version testing and this + %% suite enables Khepri. + {skip, "This suite does not yet support mixed version testing"}; + false -> + rabbit_ct_helpers:run_setup_steps(Config, []) + end. + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(unclustered, Config) -> + rabbit_ct_helpers:set_config(Config, [{metadata_store, mnesia}, + {rmq_nodes_clustered, false}, + {tcp_ports_base}, + {net_ticktime, 10}]); +init_per_group(cluster_size_2, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); +init_per_group(cluster_size_3, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + Q = rabbit_data_coercion:to_binary(Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, + [{rmq_nodename_suffix, Testcase}, + {queue_name, Q} + ]), + Config2 = rabbit_ct_helpers:testcase_started(Config1, Testcase), + rabbit_ct_helpers:run_steps(Config2, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +join_khepri_khepri_cluster(Config) -> + Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag( Config, Servers, khepri_db), + case Ret of + ok -> join_size_2_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. + +join_khepri_mnesia_cluster(Config) -> + [Server0, _] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server0], khepri_db), + case Ret of + ok -> join_size_2_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. + +join_khepri_mnesia_cluster_reverse(Config) -> + [Server0, _] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server0], khepri_db), + case Ret of + ok -> join_size_2_cluster(Config, lists:reverse(Servers)); + {skip, _} = Skip -> Skip + end. 
+ +join_mnesia_khepri_cluster(Config) -> + [_, Server1] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server1], khepri_db), + case Ret of + ok -> join_size_2_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. + +join_mnesia_khepri_cluster_reverse(Config) -> + [_, Server1] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server1], khepri_db), + case Ret of + ok -> join_size_2_cluster(Config, lists:reverse(Servers)); + {skip, _} = Skip -> Skip + end. + +join_khepri_khepri_khepri_cluster(Config) -> + Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, Servers, khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. + +join_mnesia_khepri_khepri_cluster(Config) -> + [_, Server1, Server2] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server1, Server2], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. + +join_mnesia_khepri_khepri_cluster_reverse(Config) -> + [_, Server1, Server2] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server1, Server2], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, lists:reverse(Servers)); + {skip, _} = Skip -> Skip + end. + +join_khepri_mnesia_khepri_cluster(Config) -> + [Server0, _, Server2] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server0, Server2], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. + +join_khepri_mnesia_khepri_cluster_reverse(Config) -> + [Server0, _, Server2] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server0, Server2], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, lists:reverse(Servers)); + {skip, _} = Skip -> Skip + end. + +join_khepri_khepri_mnesia_cluster(Config) -> + [Server0, Server1, _] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server0, Server1], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. + +join_khepri_khepri_mnesia_cluster_reverse(Config) -> + [Server0, Server1, _] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server0, Server1], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, lists:reverse(Servers)); + {skip, _} = Skip -> Skip + end. + +join_mnesia_mnesia_khepri_cluster(Config) -> + [_, _, Server2] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server2], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. 
+ +join_mnesia_mnesia_khepri_cluster_reverse(Config) -> + [_, _, Server2] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server2], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, lists:reverse(Servers)); + {skip, _} = Skip -> Skip + end. + +join_mnesia_khepri_mnesia_cluster(Config) -> + [_, Server1, _] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server1], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. + +join_mnesia_khepri_mnesia_cluster_reverse(Config) -> + [_, Server1, _] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server1], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, lists:reverse(Servers)); + {skip, _} = Skip -> Skip + end. + +join_khepri_mnesia_mnesia_cluster(Config) -> + [Server0, _, _] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server0], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, Servers); + {skip, _} = Skip -> Skip + end. + +join_khepri_mnesia_mnesia_cluster_reverse(Config) -> + [Server0, _, _] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ret = rabbit_ct_broker_helpers:enable_feature_flag(Config, [Server0], khepri_db), + case Ret of + ok -> join_size_3_cluster(Config, lists:reverse(Servers)); + {skip, _} = Skip -> Skip + end. + +join_size_2_cluster(Config, [Server0, Server1]) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), + Q = ?config(queue_name, Config), + + ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q)), + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])), + + ok = rabbit_control_helper:command(stop_app, Server1), + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])), + + Ret = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []), + case Ret of + ok -> + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])), + + ok = rabbit_control_helper:command(start_app, Server1), + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])); + {error, 69, <<"Error:\nincompatible_feature_flags">>} -> + {skip, "'khepri_db' feature flag is unsupported"} + end. 
+ +join_size_3_cluster(Config, [Server0, Server1, Server2]) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), + Q = ?config(queue_name, Config), + + ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q)), + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])), + + ok = rabbit_control_helper:command(stop_app, Server1), + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])), + + Ret1 = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []), + case Ret1 of + ok -> + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])), + + ok = rabbit_control_helper:command(start_app, Server1), + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])), + + ok = rabbit_control_helper:command(stop_app, Server2), + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])), + + Ret2 = rabbit_control_helper:command(join_cluster, Server2, [atom_to_list(Server0)], []), + case Ret2 of + ok -> + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])), + + ok = rabbit_control_helper:command(start_app, Server2), + ?assertMatch([_], rpc:call(Server0, rabbit_amqqueue, list, [])); + {error, 69, <<"Error:\nincompatible_feature_flags">>} -> + {skip, "'khepri_db' feature flag is unsupported"} + end; + {error, 69, <<"Error:\nincompatible_feature_flags">>} -> + {skip, "'khepri_db' feature flag is unsupported"} + end. + +declare(Ch, Q) -> + amqp_channel:call(Ch, #'queue.declare'{queue = Q, + durable = true, + auto_delete = false, + arguments = []}). diff --git a/deps/rabbit/test/metadata_store_migration_SUITE.erl b/deps/rabbit/test/metadata_store_migration_SUITE.erl new file mode 100644 index 000000000000..d2ddcf84a718 --- /dev/null +++ b/deps/rabbit/test/metadata_store_migration_SUITE.erl @@ -0,0 +1,160 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(metadata_store_migration_SUITE). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-compile([nowarn_export_all, export_all]). +-compile(export_all). + +suite() -> + [{timetrap, 5 * 60000}]. + +all() -> + [ + {group, khepri_migration} + ]. + +groups() -> + [ + {khepri_migration, [], [ + from_mnesia_to_khepri + ]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config, []). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(khepri_migration = Group, Config0) -> + rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}, + {rmq_nodes_count, 1}, + {rmq_nodename_suffix, Group}, + {tcp_ports_base}]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(_Testcase, Config) -> + rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). 
+ +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_client_helpers:teardown_steps()), + Config2 = rabbit_ct_helpers:testcase_finished(Config1, Testcase), + rabbit_ct_helpers:run_steps(Config2, + rabbit_ct_broker_helpers:teardown_steps()). + + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- +from_mnesia_to_khepri(Config) -> + %% 1) Ensure there is at least one entry on each Mnesia table + %% 2) Enable the Khepri feature flag + %% 3) Check that all listings return the same values as before the migration + + %% 1) + ok = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_vhost, add, [<<"test">>, none]), + ok = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_auth_backend_internal, set_topic_permissions, + [<<"guest">>, <<"/">>, <<"amq.topic">>, "^t", "^t", <<"acting-user">>]), + ok = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_policy, set, + [<<"/">>, <<"policy">>, <<".*">>, [{<<"max-length">>, 100}], 0, <<"queues">>, none]), + ok = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_runtime_parameters, set_global, + [<<"test-global-rt">>, <<"good">>, none]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + amqp_channel:call(Ch, #'queue.declare'{queue = <<"test">>, + durable = true}), + amqp_channel:call(Ch, #'queue.declare'{queue = <<"test-transient">>, + durable = false}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.direct">>, + queue = <<"test">>, + routing_key = <<"test">>}), + #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{exchange = <<"amq.topic">>, + queue = <<"test">>, + routing_key = <<"test">>}), + rabbit_ct_client_helpers:close_channel(Ch), + + VHosts = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, list, [])), + ?assertMatch(VHosts, lists:sort([<<"/">>, <<"test">>])), + Users = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_auth_backend_internal, list_users, []), + ?assertMatch([_], Users), + UserPermissions = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_auth_backend_internal, + list_user_permissions, [<<"guest">>]), + ?assertMatch([_], UserPermissions), + TopicPermissions = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_auth_backend_internal, + list_user_topic_permissions, [<<"guest">>]), + ?assertMatch([_], TopicPermissions), + Policies = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_policy, list, []), + ?assertMatch([_], Policies), + GlobalRuntimeParameters = lists:sort(rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_runtime_parameters, list_global, [])), + GRPNames = [proplists:get_value(name, RT) || RT <- GlobalRuntimeParameters], + ?assert(lists:member('test-global-rt', GRPNames)), + ?assert(lists:member('internal_cluster_id', GRPNames)), + Queues = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, list, [])), + ?assertMatch([_, _], Queues), + Exchanges = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list, [])), + ?assertEqual(14, length(Exchanges)), + Bindings = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), + ?assertEqual(4, length(Bindings)), + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Maintenance = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_db_maintenance, get, [Server]), + ?assertNot(undefined == Maintenance), + + %% 2) + Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ok = 
rabbit_ct_broker_helpers:enable_feature_flag(Config, Servers, khepri_db), + + %% 3) + VHostsK = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, list, [])), + ?assertEqual(VHosts, VHostsK), + UsersK = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_auth_backend_internal, list_users, []), + ?assertEqual(Users, UsersK), + UserPermissionsK = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_auth_backend_internal, + list_user_permissions, [<<"guest">>]), + ?assertEqual(UserPermissions, UserPermissionsK), + TopicPermissionsK = rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_auth_backend_internal, + list_user_topic_permissions, [<<"guest">>]), + ?assertEqual(TopicPermissions, TopicPermissionsK), + PoliciesK = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_policy, list, []), + ?assertEqual(Policies, PoliciesK), + GlobalRuntimeParametersK = lists:sort(rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_runtime_parameters, list_global, [])), + ?assertMatch(GlobalRuntimeParametersK, GlobalRuntimeParameters), + QueuesK = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, list, [])), + ?assertEqual(Queues, QueuesK), + ExchangesK = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list, [])), + ?assertEqual(Exchanges, ExchangesK), + BindingsK = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_binding, list, [<<"/">>])), + ?assertEqual(Bindings, BindingsK), + MaintenanceK = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_db_maintenance, get, [Server]), + ?assertEqual(MaintenanceK, Maintenance), + + ok. + diff --git a/deps/rabbit/test/metadata_store_phase1_SUITE.erl b/deps/rabbit/test/metadata_store_phase1_SUITE.erl new file mode 100644 index 000000000000..af5b8aca6ebe --- /dev/null +++ b/deps/rabbit/test/metadata_store_phase1_SUITE.erl @@ -0,0 +1,2775 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(metadata_store_phase1_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-include_lib("khepri/include/khepri.hrl"). + +-include_lib("rabbit_common/include/rabbit.hrl"). 
+ +-export([suite/0, + all/0, + groups/0, + init_per_suite/1, end_per_suite/1, + init_per_group/2, end_per_group/2, + init_per_testcase/2, end_per_testcase/2, + + write_non_existing_vhost/1, + write_existing_vhost/1, + check_vhost_exists/1, + list_vhost_names/1, + list_vhost_objects/1, + update_non_existing_vhost/1, + update_existing_vhost/1, + update_non_existing_vhost_desc_and_tags/1, + update_existing_vhost_desc_and_tags/1, + delete_non_existing_vhost/1, + delete_existing_vhost/1, + + write_non_existing_user/1, + write_existing_user/1, + list_users/1, + update_non_existing_user/1, + update_existing_user/1, + delete_non_existing_user/1, + delete_existing_user/1, + + write_user_permission_for_non_existing_vhost/1, + write_user_permission_for_non_existing_user/1, + write_user_permission_for_existing_user/1, + check_resource_access/1, + list_user_permissions_on_non_existing_vhost/1, + list_user_permissions_for_non_existing_user/1, + list_user_permissions/1, + clear_user_permission_for_non_existing_vhost/1, + clear_user_permission_for_non_existing_user/1, + clear_user_permission/1, + delete_user_and_check_resource_access/1, + delete_vhost_and_check_resource_access/1, + + write_topic_permission_for_non_existing_vhost/1, + write_topic_permission_for_non_existing_user/1, + write_topic_permission_for_existing_user/1, + list_topic_permissions_on_non_existing_vhost/1, + list_topic_permissions_for_non_existing_user/1, + list_topic_permissions/1, + clear_specific_topic_permission_for_non_existing_vhost/1, + clear_specific_topic_permission_for_non_existing_user/1, + clear_specific_topic_permission/1, + clear_all_topic_permission_for_non_existing_vhost/1, + clear_all_topic_permission_for_non_existing_user/1, + clear_all_topic_permissions/1, + delete_user_and_check_topic_access/1, + delete_vhost_and_check_topic_access/1 + ]). + +suite() -> + [{timetrap, {minutes, 1}}]. + +all() -> + [ + {group, vhosts}, + {group, internal_users} + ]. 
+ +groups() -> + [ + {vhosts, [], + [ + write_non_existing_vhost, + write_existing_vhost, + check_vhost_exists, + list_vhost_names, + list_vhost_objects, + update_non_existing_vhost, + update_existing_vhost, + update_non_existing_vhost_desc_and_tags, + update_existing_vhost_desc_and_tags, + delete_non_existing_vhost, + delete_existing_vhost + ] + }, + {internal_users, [], + [ + {users, [], + [ + write_non_existing_user, + write_existing_user, + list_users, + update_non_existing_user, + update_existing_user, + delete_non_existing_user, + delete_existing_user + ] + }, + {user_permissions, [], + [ + write_user_permission_for_non_existing_vhost, + write_user_permission_for_non_existing_user, + write_user_permission_for_existing_user, + check_resource_access, + list_user_permissions_on_non_existing_vhost, + list_user_permissions_for_non_existing_user, + list_user_permissions, + clear_user_permission_for_non_existing_vhost, + clear_user_permission_for_non_existing_user, + clear_user_permission, + delete_user_and_check_resource_access, + delete_vhost_and_check_resource_access + ] + }, + {topic_permissions, [], + [ + write_topic_permission_for_non_existing_vhost, + write_topic_permission_for_non_existing_user, + write_topic_permission_for_existing_user, + list_topic_permissions_on_non_existing_vhost, + list_topic_permissions_for_non_existing_user, + list_topic_permissions, + clear_specific_topic_permission_for_non_existing_vhost, + clear_specific_topic_permission_for_non_existing_user, + clear_specific_topic_permission, + clear_all_topic_permission_for_non_existing_vhost, + clear_all_topic_permission_for_non_existing_user, + clear_all_topic_permissions, + delete_user_and_check_topic_access, + delete_vhost_and_check_topic_access + ] + } + ] + } + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:run_setup_steps( + Config, + [ + fun init_feature_flags/1, + fun setup_code_mocking/1, + fun setup_mnesia/1, + fun setup_khepri/1 + ]). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps( + Config, + [ + fun remove_code_mocking/1 + ]). + +setup_mnesia(Config) -> + %% Configure Mnesia directory in the common_test priv_dir and start it. + MnesiaDir = filename:join( + ?config(priv_dir, Config), + "mnesia"), + ct:pal("Mnesia directory: ~ts", [MnesiaDir]), + ok = file:make_dir(MnesiaDir), + ok = application:load(mnesia), + ok = application:set_env(mnesia, dir, MnesiaDir), + ok = application:set_env(rabbit, data_dir, MnesiaDir), + ok = mnesia:create_schema([node()]), + {ok, _} = application:ensure_all_started(mnesia), + + ct:pal("Mnesia info below:"), + mnesia:info(), + Config. + +setup_khepri(Config) -> + %% Start Khepri. + {ok, _} = application:ensure_all_started(khepri), + + %% Configure Khepri. It takes care of configuring Ra system & cluster. It + %% uses the Mnesia directory to store files. + ok = rabbit_khepri:setup(undefined), + + ct:pal("Khepri info below:"), + rabbit_khepri:info(), + Config. + +setup_code_mocking(Config) -> + %% Bypass rabbit_mnesia:execute_mnesia_transaction/1 (no worker_pool + %% configured in particular) but keep the behavior of throwing the error. 
+ meck:new(rabbit_mnesia, [passthrough, no_link]), + meck:expect( + rabbit_mnesia, execute_mnesia_transaction, + fun(Fun) -> + case mnesia:sync_transaction(Fun) of + {atomic, Result} -> Result; + {aborted, Reason} -> throw({error, Reason}) + end + end), + ?assert(meck:validate(rabbit_mnesia)), + + %% Bypass calls inside rabbit_vhost:vhost_cluster_state/1 because these + %% are unit testcases without any sort of clustering. + meck:new(rabbit_nodes, [passthrough, no_link]), + meck:expect( + rabbit_nodes, all_running, + fun() -> [node()] end), + + meck:new(rabbit_vhost_sup_sup, [passthrough, no_link]), + meck:expect( + rabbit_vhost_sup_sup, is_vhost_alive, + fun(_) -> true end), + + %% We ensure that we use the `vhost_v2` #vhost{} record so we can play + %% with the description and tags. + meck:new(rabbit_feature_flags, [passthrough, no_link]), + meck:expect( + rabbit_feature_flags, is_enabled, + fun + (virtual_host_metadata) -> true; + (FeatureNames) -> meck:passthrough([FeatureNames]) + end), + + ct:pal("Mocked: ~p", [meck:mocked()]), + Config. + +remove_code_mocking(Config) -> + lists:foreach( + fun(Mod) -> meck:unload(Mod) end, + meck:mocked()), + Config. + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + + rabbit_khepri:clear_forced_metadata_store(), + + %% Create Mnesia tables. + TableDefs = rabbit_table:pre_khepri_definitions(), + lists:foreach( + fun ({Table, Def}) -> ok = rabbit_table:create(Table, Def) end, + TableDefs), + + Config. + +end_per_testcase(Testcase, Config) -> + rabbit_khepri:clear_forced_metadata_store(), + + %% Delete Mnesia tables to clear any data. + TableDefs = rabbit_table:pre_khepri_definitions(), + lists:foreach( + fun ({Table, _}) -> {atomic, ok} = mnesia:delete_table(Table) end, + TableDefs), + + %% Clear all data in Khepri. + ok = rabbit_khepri:clear_store(), + + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +init_feature_flags(Config) -> + FFFile = filename:join( + ?config(priv_dir, Config), + "feature_flags"), + ct:pal("Feature flags file: ~ts", [FFFile]), + ok = application:load(rabbit), + ok = application:set_env(rabbit, feature_flags_file, FFFile), + Config. + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +%% We use `_With' (with the leading underscore) on purpose: we don't know if +%% the code in `T' will use it. That code can still use `_With' of course. +%% This simply avoids compiler warnings. +-define(with(T), fun(_With) -> T end). + +-define(vhost_path(V), + [rabbit_db_vhost, V]). +-define(user_path(U), + [rabbit_db_user, users, U]). +-define(user_perm_path(U, V), + [rabbit_db_user, users, U, user_permissions, V]). +-define(topic_perm_path(U, V, E), + [rabbit_db_user, users, U, topic_permissions, V, E]). + +%% +%% Virtual hosts. 
+%% + +write_non_existing_vhost(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + VHost, + lookup_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +write_existing_vhost(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + VHost, + lookup_vhost(_With, VHostName))), + ?with(?assertEqual( + {existing, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + VHost, + lookup_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +check_vhost_exists(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assert( + vhost_exists(_With, VHostName))), + ?with(?assertNot( + vhost_exists(_With, <<"non-existing-vhost">>))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +list_vhost_names(_) -> + VHostNameA = <<"vhost-a">>, + VHostDescA = <<>>, + VHostTagsA = [], + VHostA = vhost:new( + VHostNameA, + VHostTagsA, + #{description => VHostDescA, + tags => VHostTagsA}), + VHostNameB = <<"vhost-b">>, + VHostDescB = <<>>, + VHostTagsB = [], + VHostB = vhost:new( + VHostNameB, + VHostTagsB, + #{description => VHostDescB, + tags => VHostTagsB}), + + Tests = + [ + ?with(?assertEqual( + {new, VHostA}, + add_vhost(_With, VHostNameA, VHostA))), + ?with(?assertEqual( + {new, VHostB}, + add_vhost(_With, VHostNameB, VHostB))), + ?with(?assertEqual( + [VHostNameA, VHostNameB], + list_vhosts(_With))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHostA, VHostB]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostNameA) => VHostA, + ?vhost_path(VHostNameB) => VHostB}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +list_vhost_objects(_) -> + VHostNameA = <<"vhost-a">>, + VHostDescA = <<>>, + VHostTagsA = [], + VHostA = vhost:new( + VHostNameA, + VHostTagsA, + #{description => VHostDescA, + tags => VHostTagsA}), + VHostNameB = <<"vhost-b">>, + VHostDescB = <<>>, + VHostTagsB = [], + VHostB = vhost:new( + VHostNameB, + VHostTagsB, + #{description => VHostDescB, + tags => VHostTagsB}), + + Tests = + [ + ?with(?assertEqual( + {new, VHostA}, + add_vhost(_With, VHostNameA, VHostA))), + ?with(?assertEqual( + {new, VHostB}, + add_vhost(_With, VHostNameB, VHostB))), + ?with(?assertEqual( + [VHostA, VHostB], + list_vhost_records(_With))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHostA, VHostB]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostNameA) => VHostA, + ?vhost_path(VHostNameB) => VHostB}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +update_non_existing_vhost(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + UpdatedVHost = vhost:set_limits(VHost, [limits]), + Fun = fun(_) -> UpdatedVHost end, + ?assertNotEqual(VHost, UpdatedVHost), + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(?assertThrow( + {error, {no_such_vhost, VHostName}}, + update_vhost(_With, VHostName, Fun))), + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {khepri, [rabbit_db_vhost], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +update_existing_vhost(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + UpdatedVHost = vhost:set_limits(VHost, [limits]), + Fun = fun(_) -> UpdatedVHost end, + ?assertNotEqual(VHost, UpdatedVHost), + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + UpdatedVHost, + update_vhost(_With, VHostName, Fun))), + ?with(?assertEqual( + UpdatedVHost, + lookup_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [UpdatedVHost]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => UpdatedVHost}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +update_non_existing_vhost_desc_and_tags(_) -> + VHostName = <<"vhost">>, + NewVHostDesc = <<"New desc">>, + NewVHostTags = [new_tag], + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(?assertEqual( + {error, {no_such_vhost, VHostName}}, + update_vhost(_With, VHostName, NewVHostDesc, NewVHostTags))), + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {khepri, [rabbit_db_vhost], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +update_existing_vhost_desc_and_tags(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + NewVHostDesc = <<"New desc">>, + NewVHostTags = [new_tag], + UpdatedVHost = vhost:set_metadata( + VHost, + #{description => NewVHostDesc, + tags => NewVHostTags}), + ct:pal("VHost: ~p~nUpdatedVHost: ~p", [VHost, UpdatedVHost]), + ?assertNotEqual(VHost, UpdatedVHost), + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + {ok, UpdatedVHost}, + update_vhost(_With, VHostName, NewVHostDesc, NewVHostTags))), + ?with(?assertEqual( + UpdatedVHost, + lookup_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [UpdatedVHost]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => UpdatedVHost}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +delete_non_existing_vhost(_) -> + VHostName = <<"vhost">>, + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(?assertEqual( + false, + delete_vhost(_With, VHostName))), + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {khepri, [rabbit_db_vhost], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +delete_existing_vhost(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + VHost, + lookup_vhost(_With, VHostName))), + ?with(?assertEqual( + true, + delete_vhost(_With, VHostName))), + ?with(?assertEqual( + undefined, + lookup_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {khepri, [rabbit_db_vhost], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +%% +%% Users. +%% + +write_non_existing_user(_) -> + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_user(_With, Username))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + User, + lookup_user(_With, Username))), + ?with(check_storage( + _With, + [{mnesia, rabbit_user, [User]}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +write_existing_user(_) -> + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_user(_With, Username))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + User, + lookup_user(_With, Username))), + ?with(?assertThrow( + {error, {user_already_exists, Username}}, + add_user(_With, Username, User))), + ?with(?assertEqual( + User, + lookup_user(_With, Username))), + ?with(check_storage( + _With, + [{mnesia, rabbit_user, [User]}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +list_users(_) -> + UsernameA = <<"alice">>, + UserA = internal_user:create_user(UsernameA, <<"password">>, undefined), + UsernameB = <<"bob">>, + UserB = internal_user:create_user(UsernameB, <<"password">>, undefined), + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, UsernameA, UserA))), + ?with(?assertEqual( + ok, + add_user(_With, UsernameB, UserB))), + ?with(?assertEqual( + [UserA, UserB], + list_user_records(_With))), + ?with(check_storage( + _With, + [{mnesia, rabbit_user, [UserA, UserB]}, + {khepri, [rabbit_db_user], + #{?user_path(UsernameA) => UserA, + ?user_path(UsernameB) => UserB}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +update_non_existing_user(_) -> + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + UpdatedUser = internal_user:set_password_hash( + User, <<"updated-pw">>, undefined), + Fun = fun(_) -> UpdatedUser end, + ?assertNotEqual(User, UpdatedUser), + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_user(_With, Username))), + ?with(?assertThrow( + {error, {no_such_user, Username}}, + update_user(_With, Username, Fun))), + ?with(?assertEqual( + undefined, + lookup_user(_With, Username))), + ?with(check_storage( + _With, + [{mnesia, rabbit_user, []}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +update_existing_user(_) -> + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + UpdatedUser = internal_user:set_password_hash( + User, <<"updated-pw">>, undefined), + Fun = fun(_) -> UpdatedUser end, + ?assertNotEqual(User, UpdatedUser), + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + User, + lookup_user(_With, Username))), + ?with(?assertEqual( + ok, + update_user(_With, Username, Fun))), + ?with(?assertEqual( + UpdatedUser, + lookup_user(_With, Username))), + ?with(check_storage( + _With, + [{mnesia, rabbit_user, [UpdatedUser]}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => UpdatedUser}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +delete_non_existing_user(_) -> + Username = <<"alice">>, + + Tests = + [ + ?with(?assertEqual( + undefined, + lookup_user(_With, Username))), + ?with(?assertEqual( + false, + delete_user(_With, Username))), + ?with(?assertEqual( + undefined, + lookup_user(_With, Username))), + ?with(check_storage( + _With, + [{mnesia, rabbit_user, []}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +delete_existing_user(_) -> + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + User, + lookup_user(_With, Username))), + ?with(?assertEqual( + true, + delete_user(_With, Username))), + ?with(?assertEqual( + undefined, + lookup_user(_With, Username))), + ?with(check_storage( + _With, + [{mnesia, rabbit_user, []}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +%% +%% User permissions. +%% + +write_user_permission_for_non_existing_vhost(_) -> + VHostName = <<"vhost">>, + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + UserPermission = #user_permission{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + permission = #permission{ + configure = <<>>, + write = <<>>, + read = <<>>}}, + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(?assertThrow( + {error, {no_such_vhost, VHostName}}, + set_permissions(_With, Username, VHostName, UserPermission))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +write_user_permission_for_non_existing_user(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + UserPermission = #user_permission{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + permission = #permission{ + configure = <<>>, + write = <<>>, + read = <<>>}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(?assertThrow( + {error, {no_such_user, Username}}, + set_permissions(_With, Username, VHostName, UserPermission))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, []}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +write_user_permission_for_existing_user(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + UserPermission = #user_permission{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + permission = #permission{ + configure = <<>>, + write = <<>>, + read = <<>>}}, + + Tests = + [ + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(?assertEqual( + ok, + set_permissions(_With, Username, VHostName, UserPermission))), + ?with(?assert( + check_vhost_access(_With, Username, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_user_permission, [UserPermission]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User, + ?user_perm_path(Username, VHostName) => UserPermission}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +check_resource_access(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + UserPermission = #user_permission{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + permission = #permission{ + configure = <<"my-resource">>, + write = <<>>, + read = <<>>}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + ok, + set_permissions(_With, Username, VHostName, UserPermission))), + ?with(?assert( + check_resource_access( + _With, Username, VHostName, "my-resource", configure))), + ?with(?assertNot( + check_resource_access( + _With, Username, VHostName, "my-resource", write))), + ?with(?assertNot( + check_resource_access( + _With, Username, VHostName, "other-resource", configure))) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +list_user_permissions_on_non_existing_vhost(_) -> + VHostName = <<"non-existing-vhost">>, + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertThrow( + {error, {no_such_vhost, VHostName}}, + list_user_vhost_permissions(_With, '_', VHostName))), + ?with(?assertThrow( + {error, {no_such_vhost, VHostName}}, + list_user_vhost_permissions(_With, Username, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], #{}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +list_user_permissions_for_non_existing_user(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"non-existing-user">>, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + [], + list_user_vhost_permissions(_With, '_', VHostName))), + ?with(?assertThrow( + {error, {no_such_user, Username}}, + list_user_vhost_permissions(_With, Username, '_'))), + ?with(?assertThrow( + {error, {no_such_user, Username}}, + list_user_vhost_permissions(_With, Username, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, []}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +list_user_permissions(_) -> + VHostNameA = <<"vhost-a">>, + VHostDescA = <<>>, + VHostTagsA = [], + VHostA = vhost:new( + VHostNameA, + VHostTagsA, + #{description => VHostDescA, + tags => VHostTagsA}), + VHostNameB = <<"vhost-b">>, + VHostDescB = <<>>, + VHostTagsB = [], + VHostB = vhost:new( + VHostNameB, + VHostTagsB, + #{description => VHostDescB, + tags => VHostTagsB}), + UsernameA = <<"alice">>, + UserA = internal_user:create_user(UsernameA, <<"password">>, undefined), + UsernameB = <<"bob">>, + UserB = internal_user:create_user(UsernameB, <<"password">>, undefined), + + UserPermissionA1 = #user_permission{ + user_vhost = #user_vhost{ + username = UsernameA, + virtual_host = VHostNameA}, + permission = #permission{ + configure = <<"my-resource">>, + write = <<>>, + read = <<>>}}, + UserPermissionA2 = #user_permission{ + user_vhost = #user_vhost{ + username = UsernameA, + virtual_host = VHostNameB}, + permission = #permission{ + configure = <<"my-resource">>, + write = <<>>, + read = <<>>}}, + UserPermissionB1 = #user_permission{ + user_vhost = #user_vhost{ + username = UsernameB, + virtual_host = VHostNameA}, + permission = #permission{ + configure = <<"my-resource">>, + write = <<>>, + read = <<>>}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHostA}, + add_vhost(_With, VHostNameA, VHostA))), + ?with(?assertEqual( + {new, VHostB}, + add_vhost(_With, VHostNameB, VHostB))), + ?with(?assertEqual( + ok, + add_user(_With, UsernameA, UserA))), + ?with(?assertEqual( + ok, + set_permissions( + _With, UsernameA, VHostNameA, UserPermissionA1))), + ?with(?assertEqual( + ok, + set_permissions( + _With, UsernameA, VHostNameB, UserPermissionA2))), + ?with(?assertEqual( + ok, + add_user(_With, UsernameB, UserB))), + ?with(?assertEqual( + ok, + set_permissions( + _With, UsernameB, VHostNameA, UserPermissionB1))), + ?with(?assertEqual( + [UserPermissionA1, UserPermissionA2, UserPermissionB1], + list_user_vhost_permissions(_With, '_', '_'))), + ?with(?assertEqual( + [UserPermissionA1, UserPermissionB1], + list_user_vhost_permissions(_With, '_', VHostNameA))), + ?with(?assertEqual( + [UserPermissionA1, UserPermissionA2], + list_user_vhost_permissions(_With, UsernameA, '_'))), + ?with(?assertEqual( + [UserPermissionA1], + list_user_vhost_permissions(_With, UsernameA, VHostNameA))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHostA, VHostB]}, + {mnesia, rabbit_user, [UserA, UserB]}, + {mnesia, rabbit_user_permission, [UserPermissionA1, + UserPermissionA2, + UserPermissionB1]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostNameA) => VHostA, + ?vhost_path(VHostNameB) => VHostB}}, + {khepri, [rabbit_db_user], + #{?user_path(UsernameA) => UserA, + ?user_path(UsernameB) => UserB, + ?user_perm_path(UsernameA, VHostNameA) => + UserPermissionA1, + ?user_perm_path(UsernameA, VHostNameB) => + UserPermissionA2, + ?user_perm_path(UsernameB, VHostNameA) => + UserPermissionB1}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +clear_user_permission_for_non_existing_vhost(_) -> + VHostName = <<"vhost">>, + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(?assertEqual( + ok, + clear_permissions(_With, Username, VHostName))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +clear_user_permission_for_non_existing_user(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(?assertEqual( + ok, + clear_permissions(_With, Username, VHostName))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, []}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +clear_user_permission(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + UserPermission = #user_permission{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + permission = #permission{ + configure = <<"my-resource">>, + write = <<>>, + read = <<>>}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + ok, + set_permissions(_With, Username, VHostName, UserPermission))), + ?with(?assert( + check_resource_access( + _With, Username, VHostName, "my-resource", configure))), + ?with(?assertEqual( + ok, + clear_permissions(_With, Username, VHostName))), + ?with(?assertNot( + check_resource_access( + _With, Username, VHostName, "my-resource", configure))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +delete_user_and_check_resource_access(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + UserPermission = #user_permission{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + permission = #permission{ + configure = <<"my-resource">>, + write = <<>>, + read = <<>>}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + ok, + set_permissions(_With, Username, VHostName, UserPermission))), + ?with(?assert( + check_vhost_access(_With, Username, VHostName))), + ?with(?assert( + check_resource_access( + _With, Username, VHostName, "my-resource", configure))), + ?with(?assertEqual( + true, + delete_user(_With, Username))), + ?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(?assertNot( + check_resource_access( + _With, Username, VHostName, "my-resource", configure))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, []}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +delete_vhost_and_check_resource_access(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + UserPermission = #user_permission{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + permission = #permission{ + configure = <<"my-resource">>, + write = <<>>, + read = <<>>}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + ok, + set_permissions(_With, Username, VHostName, UserPermission))), + ?with(?assert( + check_vhost_access(_With, Username, VHostName))), + ?with(?assert( + check_resource_access( + _With, Username, VHostName, "my-resource", configure))), + ?with(?assertEqual( + true, + delete_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_user_permission, [UserPermission]}, + {khepri, [rabbit_db_vhost], + #{}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + %% In mnesia the permissions have to be deleted explicitly + %% Khepri permissions have a condition to automatically delete them + %% when the vhost is deleted + MnesiaTests = + [?with(?assert( + check_vhost_access(_With, Username, VHostName))), + ?with(?assert( + check_resource_access( + _With, Username, VHostName, "my-resource", configure)))], + + KhepriTests = + [?with(?assertNot( + check_vhost_access(_With, Username, VHostName))), + ?with(?assertNot( + check_resource_access( + _With, Username, VHostName, "my-resource", configure)))], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests ++ MnesiaTests}]}, + {setup, 
fun force_khepri_use/0, [{with, khepri, Tests ++ KhepriTests}]}], + [verbose])). + +%% +%% Topic permissions. +%% + +write_topic_permission_for_non_existing_vhost(_) -> + VHostName = <<"vhost">>, + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + Exchange = <<"exchange">>, + TopicPermission = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + exchange = Exchange}, + permission = #permission{ + write = <<>>, + read = <<>>} + }, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertThrow( + {error, {no_such_vhost, VHostName}}, + set_topic_permissions( + _With, Username, VHostName, TopicPermission))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_topic_permission, []}, + {khepri, [rabbit_db_vhost], + #{}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +write_topic_permission_for_non_existing_user(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + Exchange = <<"exchange">>, + TopicPermission = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + exchange = Exchange}, + permission = #permission{ + write = <<>>, + read = <<>>} + }, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertThrow( + {error, {no_such_user, Username}}, + set_topic_permissions( + _With, Username, VHostName, TopicPermission))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, []}, + {mnesia, rabbit_topic_permission, []}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
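The comment in delete_vhost_and_check_resource_access above points out that Mnesia permission rows must be deleted explicitly, while Khepri entries carry a condition that removes them together with their virtual host. A minimal, hedged sketch of that Khepri mechanism follows: a keep_while put option ties the permission tree node to the vhost tree node, so deleting the vhost makes the permission disappear. The store id, the paths and the exact option shape are illustrative assumptions only; the suite's real paths come from the ?vhost_path/1 and ?user_perm_path/2 macros, and keep_while handling can vary between Khepri versions.

%% Hedged sketch, not the suite's implementation.
-module(khepri_keep_while_sketch).
-include_lib("khepri/include/khepri.hrl").
-export([store_permission/3]).

%% Assumptions: `my_store' and both paths below are placeholders.
store_permission(VHostName, Username, UserPermission) ->
    VHostPath = [rabbit_db_vhost, VHostName],
    PermPath  = [rabbit_db_user, Username, user_permissions, VHostName],
    %% `keep_while' asks Khepri to drop PermPath automatically once
    %% VHostPath stops existing, which is why the Khepri branch of the
    %% test expects permissions to vanish right after delete_vhost/2.
    Options = #{keep_while => #{VHostPath => #if_node_exists{exists = true}}},
    khepri:put(my_store, PermPath, UserPermission, Options).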
+ +write_topic_permission_for_existing_user(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + Exchange = <<"exchange">>, + TopicPermission = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + exchange = Exchange}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, Username, VHostName, TopicPermission))), + ?with(?assert( + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertNot( + check_topic_access( + _With, Username, VHostName, Exchange, read, + Context#{routing_key => <<"something-else">>}))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_topic_permission, [TopicPermission]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User, + ?topic_perm_path(Username, VHostName, Exchange) => + TopicPermission}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +list_topic_permissions_on_non_existing_vhost(_) -> + VHostName = <<"non-existing-vhost">>, + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertThrow( + {error, {no_such_vhost, VHostName}}, + list_topic_permissions(_With, '_', VHostName, '_'))), + ?with(?assertThrow( + {error, {no_such_vhost, VHostName}}, + list_topic_permissions( + _With, Username, VHostName, '_'))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_topic_permission, []}, + {khepri, [rabbit_db_vhost], + #{}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +list_topic_permissions_for_non_existing_user(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"non-existing-user">>, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + [], + list_topic_permissions(_With, '_', VHostName, '_'))), + ?with(?assertThrow( + {error, {no_such_user, Username}}, + list_topic_permissions(_With, Username, '_', '_'))), + ?with(?assertThrow( + {error, {no_such_user, Username}}, + list_topic_permissions(_With, Username, VHostName, '_'))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, []}, + {mnesia, rabbit_topic_permission, []}, + {khepri, [rabbit_db_vhost], #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +list_topic_permissions(_) -> + VHostNameA = <<"vhost-a">>, + VHostDescA = <<>>, + VHostTagsA = [], + VHostA = vhost:new( + VHostNameA, + VHostTagsA, + #{description => VHostDescA, + tags => VHostTagsA}), + VHostNameB = <<"vhost-b">>, + VHostDescB = <<>>, + VHostTagsB = [], + VHostB = vhost:new( + VHostNameB, + VHostTagsB, + #{description => VHostDescB, + tags => VHostTagsB}), + UsernameA = <<"alice">>, + UserA = internal_user:create_user(UsernameA, <<"password">>, undefined), + UsernameB = <<"bob">>, + UserB = internal_user:create_user(UsernameB, <<"password">>, undefined), + + ExchangeA = <<"exchange-a">>, + ExchangeB = <<"exchange-b">>, + TopicPermissionA1 = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = UsernameA, + virtual_host = VHostNameA}, + exchange = ExchangeA}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + TopicPermissionA2 = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = UsernameA, + virtual_host = VHostNameB}, + exchange = ExchangeB}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + TopicPermissionB1 = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = UsernameB, + virtual_host = VHostNameA}, + exchange = ExchangeA}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + + Tests = + [ + ?with(?assertEqual( + {new, VHostA}, + add_vhost(_With, VHostNameA, VHostA))), + ?with(?assertEqual( + {new, VHostB}, + add_vhost(_With, VHostNameB, VHostB))), + ?with(?assertEqual( + ok, + add_user(_With, UsernameA, UserA))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, UsernameA, VHostNameA, TopicPermissionA1))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, UsernameA, VHostNameB, TopicPermissionA2))), + ?with(?assertEqual( + ok, + add_user(_With, UsernameB, UserB))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, UsernameB, VHostNameA, TopicPermissionB1))), + ?with(?assertEqual( + [TopicPermissionA1, TopicPermissionA2, TopicPermissionB1], + list_topic_permissions(_With, '_', '_', '_'))), + ?with(?assertEqual( + [TopicPermissionA1, TopicPermissionB1], + list_topic_permissions(_With, '_', VHostNameA, '_'))), + ?with(?assertEqual( + [TopicPermissionA1, TopicPermissionA2], + list_topic_permissions(_With, UsernameA, '_', '_'))), 
+ ?with(?assertEqual( + [TopicPermissionA1], + list_topic_permissions(_With, UsernameA, VHostNameA, '_'))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHostA, VHostB]}, + {mnesia, rabbit_user, [UserA, UserB]}, + {mnesia, rabbit_topic_permission, [TopicPermissionA1, + TopicPermissionA2, + TopicPermissionB1]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostNameA) => VHostA, + ?vhost_path(VHostNameB) => VHostB}}, + {khepri, [rabbit_db_user], + #{?user_path(UsernameA) => UserA, + ?user_path(UsernameB) => UserB, + ?topic_perm_path(UsernameA, VHostNameA, ExchangeA) => + TopicPermissionA1, + ?topic_perm_path(UsernameA, VHostNameB, ExchangeB) => + TopicPermissionA2, + ?topic_perm_path(UsernameB, VHostNameA, ExchangeA) => + TopicPermissionB1}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +clear_specific_topic_permission_for_non_existing_vhost(_) -> + VHostName = <<"vhost">>, + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + Exchange = <<"exchange">>, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertEqual( + ok, + clear_topic_permissions(_With, Username, VHostName, Exchange))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +clear_specific_topic_permission_for_non_existing_user(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + Exchange = <<"exchange">>, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertEqual( + ok, + clear_topic_permissions(_With, Username, VHostName, Exchange))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, []}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +clear_specific_topic_permission(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + ExchangeA = <<"exchange-a">>, + ExchangeB = <<"exchange-b">>, + TopicPermissionA = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + exchange = ExchangeA}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + TopicPermissionB = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + exchange = ExchangeB}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, Username, VHostName, TopicPermissionA))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, Username, VHostName, TopicPermissionB))), + ?with(?assert( + check_topic_access( + _With, Username, VHostName, ExchangeA, read, Context))), + ?with(?assertNot( + check_topic_access( + _With, Username, VHostName, ExchangeA, read, + Context#{routing_key => <<"something-else">>}))), + ?with(?assert( + check_topic_access( + _With, Username, VHostName, ExchangeB, read, Context))), + ?with(?assertNot( + check_topic_access( + _With, Username, VHostName, ExchangeB, read, + Context#{routing_key => <<"something-else">>}))), + ?with(?assertEqual( + ok, + clear_topic_permissions(_With, Username, VHostName, ExchangeA))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, ExchangeA, read, Context))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, ExchangeA, read, + Context#{routing_key => <<"something-else">>}))), + ?with(?assert( + check_topic_access( + _With, Username, VHostName, ExchangeB, read, Context))), + ?with(?assertNot( + check_topic_access( + _With, Username, VHostName, ExchangeB, read, + Context#{routing_key => <<"something-else">>}))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_topic_permission, [TopicPermissionB]}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User, + ?topic_perm_path(Username, VHostName, ExchangeB) => + TopicPermissionB}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +clear_all_topic_permission_for_non_existing_vhost(_) -> + VHostName = <<"vhost">>, + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + Exchange = <<"exchange">>, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertEqual( + ok, + clear_topic_permissions(_With, Username, VHostName, '_'))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). + +clear_all_topic_permission_for_non_existing_user(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + Exchange = <<"exchange">>, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertEqual( + ok, + clear_topic_permissions(_With, Username, VHostName, '_'))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, []}, + {mnesia, rabbit_user_permission, []}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +clear_all_topic_permissions(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + ExchangeA = <<"exchange-a">>, + ExchangeB = <<"exchange-b">>, + TopicPermissionA = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + exchange = ExchangeA}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + TopicPermissionB = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + exchange = ExchangeB}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, Username, VHostName, TopicPermissionA))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, Username, VHostName, TopicPermissionB))), + ?with(?assert( + check_topic_access( + _With, Username, VHostName, ExchangeA, read, Context))), + ?with(?assertNot( + check_topic_access( + _With, Username, VHostName, ExchangeA, read, + Context#{routing_key => <<"something-else">>}))), + ?with(?assert( + check_topic_access( + _With, Username, VHostName, ExchangeB, read, Context))), + ?with(?assertNot( + check_topic_access( + _With, Username, VHostName, ExchangeB, read, + Context#{routing_key => <<"something-else">>}))), + ?with(?assertEqual( + ok, + clear_topic_permissions(_With, Username, VHostName, '_'))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, ExchangeA, read, Context))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, ExchangeA, read, + Context#{routing_key => <<"something-else">>}))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, ExchangeB, read, Context))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, ExchangeB, read, + Context#{routing_key => <<"something-else">>}))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_topic_permission, []}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +delete_user_and_check_topic_access(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + Exchange = <<"exchange">>, + TopicPermission = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + exchange = Exchange}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, Username, VHostName, TopicPermission))), + ?with(?assert( + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertNot( + check_topic_access( + _With, Username, VHostName, Exchange, read, + Context#{routing_key => <<"something-else">>}))), + ?with(?assertEqual( + true, + delete_user(_With, Username))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, + Context#{routing_key => <<"something-else">>}))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, [VHost]}, + {mnesia, rabbit_user, []}, + {mnesia, rabbit_topic_permission, []}, + {khepri, [rabbit_db_vhost], + #{?vhost_path(VHostName) => VHost}}, + {khepri, [rabbit_db_user], + #{}}])) + ], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests}]}], + [verbose])). 
+ +delete_vhost_and_check_topic_access(_) -> + VHostName = <<"vhost">>, + VHostDesc = <<>>, + VHostTags = [], + VHost = vhost:new( + VHostName, + VHostTags, + #{description => VHostDesc, + tags => VHostTags}), + Username = <<"alice">>, + User = internal_user:create_user(Username, <<"password">>, undefined), + Exchange = <<"exchange">>, + TopicPermission = #topic_permission{ + topic_permission_key = + #topic_permission_key{ + user_vhost = #user_vhost{ + username = Username, + virtual_host = VHostName}, + exchange = Exchange}, + permission = #permission{ + write = <<>>, + read = <<"^key$">>} + }, + Context = #{routing_key => <<"key">>, + variable_map => #{<<"vhost">> => VHostName, + <<"username">> => Username}}, + + Tests = + [ + ?with(?assertEqual( + {new, VHost}, + add_vhost(_With, VHostName, VHost))), + ?with(?assertEqual( + ok, + add_user(_With, Username, User))), + ?with(?assertEqual( + ok, + set_topic_permissions( + _With, Username, VHostName, TopicPermission))), + ?with(?assert( + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertNot( + check_topic_access( + _With, Username, VHostName, Exchange, read, + Context#{routing_key => <<"something-else">>}))), + ?with(?assert( + delete_vhost(_With, VHostName))), + ?with(check_storage( + _With, + [{mnesia, rabbit_vhost, []}, + {mnesia, rabbit_user, [User]}, + {mnesia, rabbit_topic_permission, [TopicPermission]}, + {khepri, [rabbit_db_vhost], + #{}}, + {khepri, [rabbit_db_user], + #{?user_path(Username) => User}}])) + ], + + %% In mnesia the permissions have to be deleted explicitly + %% Khepri permissions have a condition to automatically delete them + %% when the vhost is deleted + MnesiaTests = + [?with(?assert( + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertNot( + check_topic_access( + _With, Username, VHostName, Exchange, read, + Context#{routing_key => <<"something-else">>})))], + + KhepriTests = + [?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, Context))), + ?with(?assertEqual( + undefined, + check_topic_access( + _With, Username, VHostName, Exchange, read, + Context#{routing_key => <<"something-else">>})))], + + ?assertEqual( + ok, + eunit:test( + [{setup, fun force_mnesia_use/0, [{with, mnesia, Tests ++ MnesiaTests}]}, + {setup, fun force_khepri_use/0, [{with, khepri, Tests ++ KhepriTests}]}], + [verbose])). + +%% ------------------------------------------------------------------- +%% Helpers. +%% ------------------------------------------------------------------- + +force_mnesia_use() -> + ct:pal(?LOW_IMPORTANCE, "Using Mnesia (disabling feature flag)", []), + rabbit_khepri:force_metadata_store(mnesia). + +force_khepri_use() -> + ct:pal(?LOW_IMPORTANCE, "Using Khepri (enabling feature flag)", []), + rabbit_khepri:force_metadata_store(khepri). + +add_vhost(mnesia, VHostName, VHost) -> + rabbit_db_vhost:create_or_get_in_mnesia(VHostName, VHost); +add_vhost(khepri, VHostName, VHost) -> + rabbit_db_vhost:create_or_get_in_khepri(VHostName, VHost). + +lookup_vhost(mnesia, VHostName) -> + rabbit_db_vhost:get_in_mnesia(VHostName); +lookup_vhost(khepri, VHostName) -> + rabbit_db_vhost:get_in_khepri(VHostName). + +vhost_exists(mnesia, VHostName) -> + rabbit_db_vhost:exists_in_mnesia(VHostName); +vhost_exists(khepri, VHostName) -> + rabbit_db_vhost:exists_in_khepri(VHostName). 
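The MnesiaTests/KhepriTests split above relies on the return convention of the check_topic_access/6 helper defined further down: undefined when no topic permission record can be found (which, under Khepri, includes the case where the vhost was just deleted), and true/false when a record exists and its read regexp does or does not match the routing key, with an empty permission treated as the regexp "^$". Below is a small sketch of that decision logic with the database lookup replaced by a plain argument, for readability only.

%% Sketch of the tri-state convention; `ReadPerm' stands in for whatever
%% the database lookup returns (`undefined' or the stored read regexp).
topic_access_sketch(undefined, _RoutingKey) ->
    undefined;
topic_access_sketch(ReadPerm, RoutingKey) ->
    Regexp = case ReadPerm of
                 <<"">> -> <<"^$">>;   %% empty permission only matches ""
                 RE     -> RE
             end,
    case re:run(RoutingKey, Regexp, [{capture, none}]) of
        match   -> true;
        nomatch -> false
    end.
%% e.g. topic_access_sketch(<<"^key$">>, <<"key">>)   =:= true,
%%      topic_access_sketch(<<"^key$">>, <<"other">>) =:= false,
%%      topic_access_sketch(undefined,  <<"key">>)    =:= undefined.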
+ +list_vhosts(mnesia) -> + lists:sort(rabbit_db_vhost:list_in_mnesia()); +list_vhosts(khepri) -> + lists:sort(rabbit_db_vhost:list_in_khepri()). + +list_vhost_records(mnesia) -> + lists:sort(rabbit_db_vhost:get_all_in_mnesia()); +list_vhost_records(khepri) -> + lists:sort(rabbit_db_vhost:get_all_in_khepri()). + +update_vhost(mnesia, VHostName, Fun) -> + rabbit_db_vhost:update_in_mnesia(VHostName, Fun); +update_vhost(khepri, VHostName, Fun) -> + rabbit_db_vhost:update_in_khepri(VHostName, Fun). + +update_vhost(mnesia, VHostName, Description, Tags) -> + rabbit_db_vhost:merge_metadata_in_mnesia(VHostName, + #{description => Description, + tags => Tags}); +update_vhost(khepri, VHostName, Description, Tags) -> + rabbit_db_vhost:merge_metadata_in_khepri(VHostName, + #{description => Description, + tags => Tags}). + +delete_vhost(mnesia, VHostName) -> + rabbit_db_vhost:delete_in_mnesia(VHostName); +delete_vhost(khepri, VHostName) -> + rabbit_db_vhost:delete_in_khepri(VHostName). + +add_user(mnesia, Username, User) -> + rabbit_db_user:create_in_mnesia(Username, User); +add_user(khepri, Username, User) -> + rabbit_db_user:create_in_khepri(Username, User). + +lookup_user(mnesia, Username) -> + rabbit_db_user:get_in_mnesia(Username); +lookup_user(khepri, Username) -> + rabbit_db_user:get_in_khepri(Username). + +list_user_records(mnesia) -> + lists:sort(rabbit_db_user:get_all_in_mnesia()); +list_user_records(khepri) -> + lists:sort(rabbit_db_user:get_all_in_khepri()). + +update_user(mnesia, Username, Fun) -> + rabbit_db_user:update_in_mnesia(Username, Fun); +update_user(khepri, Username, Fun) -> + rabbit_db_user:update_in_khepri(Username, Fun). + +delete_user(mnesia, Username) -> + rabbit_db_user:delete_in_mnesia(Username); +delete_user(khepri, Username) -> + rabbit_db_user:delete_in_khepri(Username). + +set_permissions(mnesia, Username, VHostName, UserPermission) -> + rabbit_db_user:set_user_permissions_in_mnesia( + Username, VHostName, UserPermission); +set_permissions(khepri, Username, VHostName, UserPermission) -> + rabbit_db_user:set_user_permissions_in_khepri( + Username, VHostName, UserPermission). + +list_user_vhost_permissions(mnesia, Username, VHostName) -> + lists:sort( + rabbit_db_user:match_user_permissions_in_mnesia(Username, VHostName)); +list_user_vhost_permissions(khepri, Username, VHostName) -> + lists:sort( + rabbit_db_user:match_user_permissions_in_khepri(Username, VHostName)). + +list_topic_permissions(mnesia, Username, VHostName, ExchangeName) -> + lists:sort( + rabbit_db_user:match_topic_permissions_in_mnesia(Username, VHostName, ExchangeName)); +list_topic_permissions(khepri, Username, VHostName, ExchangeName) -> + lists:sort( + rabbit_db_user:match_topic_permissions_in_khepri(Username, VHostName, ExchangeName)). + +check_vhost_access(mnesia, Username, VHostName) -> + rabbit_db_user:get_user_permissions_in_mnesia( + Username, VHostName) =/= undefined; +check_vhost_access(khepri, Username, VHostName) -> + rabbit_db_user:get_user_permissions_in_khepri( + Username, VHostName) =/= undefined. + +set_topic_permissions( + mnesia, Username, VHostName, TopicPermission) -> + rabbit_db_user:set_topic_permissions_in_mnesia( + Username, VHostName, TopicPermission); +set_topic_permissions( + khepri, Username, VHostName, TopicPermission) -> + rabbit_db_user:set_topic_permissions_in_khepri( + Username, VHostName, TopicPermission). 
+ +check_topic_access(mnesia, Username, VHostName, Exchange, Perm, Context) -> + case rabbit_db_user:get_topic_permissions_in_mnesia( + Username, VHostName, Exchange) of + undefined -> undefined; + #topic_permission{permission = P} -> + PermRegexp = case element(permission_index(Perm), P) of + <<"">> -> <<$^, $$>>; + RE -> RE + end, + case re:run(maps:get(routing_key, Context), PermRegexp, [{capture, none}]) of + match -> true; + nomatch -> false + end + end; +check_topic_access(khepri, Username, VHostName, Exchange, Perm, Context) -> + case rabbit_db_user:get_topic_permissions_in_khepri( + Username, VHostName, Exchange) of + undefined -> undefined; + #topic_permission{permission = P} -> + PermRegexp = case element(permission_index(Perm), P) of + <<"">> -> <<$^, $$>>; + RE -> RE + end, + case re:run(maps:get(routing_key, Context), PermRegexp, [{capture, none}]) of + match -> true; + nomatch -> false + end + end. + +clear_permissions(mnesia, Username, VHostName) -> + rabbit_db_user:clear_user_permissions_in_mnesia( + Username, VHostName); +clear_permissions(khepri, Username, VHostName) -> + rabbit_db_user:clear_user_permissions_in_khepri( + Username, VHostName). + +check_resource_access(mnesia, Username, VHostName, Resource, Perm) -> + case rabbit_db_user:get_user_permissions_in_mnesia(Username, VHostName) of + undefined -> false; + #user_permission{permission = P} -> + PermRegexp = case element(permission_index(Perm), P) of + <<"">> -> <<$^, $$>>; + RE -> RE + end, + case re:run(Resource, PermRegexp, [{capture, none}]) of + match -> true; + nomatch -> false + end + end; +check_resource_access(khepri, Username, VHostName, Resource, Perm) -> + case rabbit_db_user:get_user_permissions_in_khepri(Username, VHostName) of + undefined -> false; + #user_permission{permission = P} -> + PermRegexp = case element(permission_index(Perm), P) of + <<"">> -> <<$^, $$>>; + RE -> RE + end, + case re:run(Resource, PermRegexp, [{capture, none}]) of + match -> true; + nomatch -> false + end + end. + +permission_index(configure) -> #permission.configure; +permission_index(write) -> #permission.write; +permission_index(read) -> #permission.read. + +clear_topic_permissions(mnesia, Username, VHostName, Exchange) -> + rabbit_db_user:clear_topic_permissions_in_mnesia( + Username, VHostName, Exchange); +clear_topic_permissions(khepri, Username, VHostName, Exchange) -> + rabbit_db_user:clear_topic_permissions_in_khepri( + Username, VHostName, Exchange). + +check_storage(With, [{With, Source, Content} | Rest]) -> + check_storage(With, Source, Content), + check_storage(With, Rest); +check_storage(With, [_ | Rest]) -> + check_storage(With, Rest); +check_storage(_, []) -> + ok. + +check_storage(mnesia, Table, Content) -> + ?assertEqual(Content, lists:sort(ets:tab2list(Table))); +check_storage(khepri, Path, Content) -> + rabbit_khepri:info(), + Path1 = Path ++ [#if_all{conditions = [?KHEPRI_WILDCARD_STAR_STAR, + #if_has_data{has_data = true}]}], + ?assertEqual({ok, Content}, rabbit_khepri:match(Path1)). diff --git a/deps/rabbit/test/metrics_SUITE.erl b/deps/rabbit/test/metrics_SUITE.erl index f9cd01a82c1c..4f83f0959f5a 100644 --- a/deps/rabbit/test/metrics_SUITE.erl +++ b/deps/rabbit/test/metrics_SUITE.erl @@ -2,12 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(metrics_SUITE). +-compile(nowarn_export_all). -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("proper/include/proper.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). @@ -142,7 +142,12 @@ connection_metric_idemp(Config, {N, R}) -> 5000), Table2 = [ Pid || {Pid, _} <- read_table_rpc(Config, connection_coarse_metrics)], % refresh stats 'R' times - [[Pid ! emit_stats || Pid <- Table] || _ <- lists:seq(1, R)], + [[begin + Pid ! emit_stats + end|| Pid <- Table] || _ <- lists:seq(1, R)], + [begin + _ = gen_server:call(Pid, {info, [pid]}) + end|| Pid <- Table], force_metric_gc(Config), TableAfter = [ Pid || {Pid, _} <- read_table_rpc(Config, connection_metrics)], TableAfter2 = [ Pid || {Pid, _} <- read_table_rpc(Config, connection_coarse_metrics)], @@ -159,6 +164,9 @@ channel_metric_idemp(Config, {N, R}) -> Table2 = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_process_metrics)], % refresh stats 'R' times [[Pid ! emit_stats || Pid <- Table] || _ <- lists:seq(1, R)], + [begin + _ = gen_server:call(Pid, {info, [pid]}) + end|| Pid <- Table], force_metric_gc(Config), TableAfter = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_metrics)], TableAfter2 = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_process_metrics)], @@ -182,7 +190,10 @@ queue_metric_idemp(Config, {N, R}) -> Table2 = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_coarse_metrics)], % refresh stats 'R' times ChanTable = read_table_rpc(Config, channel_created), - [[Pid ! emit_stats || {Pid, _, _} <- ChanTable ] || _ <- lists:seq(1, R)], + [[begin + Pid ! emit_stats, + gen_server2:call(Pid, flush) + end|| {Pid, _, _} <- ChanTable ] || _ <- lists:seq(1, R)], force_metric_gc(Config), TableAfter = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_metrics)], TableAfter2 = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_coarse_metrics)], @@ -383,8 +394,13 @@ ensure_channel_queue_metrics_populated(Chan, Queue) -> {#'basic.get_ok'{}, #amqp_msg{}} = amqp_channel:call(Chan, Get). force_channel_stats(Config) -> - [ Pid ! emit_stats || {Pid, _} <- read_table_rpc(Config, channel_created) ], - timer:sleep(100). + [begin + Pid ! emit_stats, + gen_server2:call(Pid, flush) + end + || {Pid, _} <- read_table_rpc(Config, channel_created) + ], + ok. read_table_rpc(Config, Table) -> rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, read_table, [Table]). @@ -397,7 +413,6 @@ read_table(Table) -> ets:tab2list(Table). force_metric_gc(Config) -> - timer:sleep(300), rabbit_ct_broker_helpers:rpc(Config, 0, erlang, send, [rabbit_core_metrics_gc, start_gc]), rabbit_ct_broker_helpers:rpc(Config, 0, gen_server, call, diff --git a/deps/rabbit/test/mirrored_supervisor_SUITE.erl b/deps/rabbit/test/mirrored_supervisor_SUITE.erl index 8afb3a6b1aeb..7ce527a684fc 100644 --- a/deps/rabbit/test/mirrored_supervisor_SUITE.erl +++ b/deps/rabbit/test/mirrored_supervisor_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(mirrored_supervisor_SUITE). 
@@ -331,7 +331,7 @@ childspec(Id) -> {id(Id), {?SERVER, start_link, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}. id(Id) -> - {[Id], Id}. + Id. pid_of(Id) -> {received, Pid, ping} = call(Id, ping), diff --git a/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl b/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl index aca5581cb273..6a704858268d 100644 --- a/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl +++ b/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(mirrored_supervisor_SUITE_gs). diff --git a/deps/rabbit/test/msg_store_SUITE.erl b/deps/rabbit/test/msg_store_SUITE.erl index c47b70bf6861..117c9896f8ff 100644 --- a/deps/rabbit/test/msg_store_SUITE.erl +++ b/deps/rabbit/test/msg_store_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(msg_store_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl b/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl index df5751b2b890..b8c7597eeb22 100644 --- a/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl +++ b/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(peer_discovery_classic_config_SUITE). -include_lib("common_test/include/ct.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). @@ -16,20 +15,34 @@ cluster_members_online/2 ]). +-compile(nowarn_export_all). -compile(export_all). all() -> [ - {group, non_parallel} + {group, non_parallel}, + {group, cluster_size_3}, + {group, cluster_size_5}, + {group, cluster_size_7} ]. groups() -> [ {non_parallel, [], [ - successful_discovery, - successful_discovery_with_a_subset_of_nodes_coming_online, no_nodes_configured - ]} + ]}, + {cluster_size_3, [], [ + successful_discovery, + successful_discovery_with_a_subset_of_nodes_coming_online + ]}, + {cluster_size_5, [], [ + successful_discovery, + successful_discovery_with_a_subset_of_nodes_coming_online + ]}, + {cluster_size_7, [], [ + successful_discovery, + successful_discovery_with_a_subset_of_nodes_coming_online + ]} ]. suite() -> @@ -50,12 +63,23 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). 
+init_per_group(cluster_size_3 = Group, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}, {group, Group}]); +init_per_group(cluster_size_5 = Group, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}, {group, Group}]); +init_per_group(cluster_size_7 = Group, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 7}, {group, Group}]); +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + init_per_testcase(successful_discovery = Testcase, Config) -> Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase), - - N = 3, + N = ?config(rmq_nodes_count, Config), NodeNames = [ - list_to_atom(rabbit_misc:format("~ts-~b", [Testcase, I])) + list_to_atom(rabbit_misc:format("~ts-~ts-~b", [Testcase, ?config(group, Config), I])) || I <- lists:seq(1, N) ], Config2 = rabbit_ct_helpers:set_config(Config1, [ @@ -78,26 +102,30 @@ init_per_testcase(successful_discovery = Testcase, Config) -> init_per_testcase(successful_discovery_with_a_subset_of_nodes_coming_online = Testcase, Config) -> Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase), - N = 2, + N = ?config(rmq_nodes_count, Config), NodeNames = [ - list_to_atom(rabbit_misc:format("~ts-~b", [Testcase, I])) + list_to_atom(rabbit_misc:format("~ts-~ts-~b", [Testcase, ?config(group, Config), I])) || I <- lists:seq(1, N) ], Config2 = rabbit_ct_helpers:set_config(Config1, [ {rmq_nodename_suffix, Testcase}, + %% We remove the first node in the list: it will be considered by peer + %% discovery (see `cluster_nodes' below), but it won't be started. %% note: this must not include the host part - {rmq_nodes_count, NodeNames}, + {rmq_nodes_count, tl(NodeNames)}, {rmq_nodes_clustered, false} ]), - NodeNamesWithHostname = [rabbit_nodes:make({Name, "localhost"}) || Name <- [nonexistent | NodeNames]], + NodeNamesWithHostname = [rabbit_nodes:make({Name, "localhost"}) || Name <- NodeNames], %% reduce retry time since we know one node on the list does %% not exist and not just unreachable + %% We no longer test non-existing nodes, it just times out + %% constantly in CI + %% To compare, this suite takes ~23min in my machine with + %% unreachable nodes vs ~6min without them Config3 = rabbit_ct_helpers:merge_app_env(Config2, {rabbit, [ {cluster_nodes, {NodeNamesWithHostname, disc}}, {cluster_formation, [ - {discovery_retry_limit, 2}, - {discovery_retry_interval, 100}, {internal_lock_retries, 10} ]} ]}), @@ -139,8 +167,9 @@ end_per_testcase(Testcase, Config) -> %% Test cases %% successful_discovery(Config) -> + N = length(?config(rmq_nodes_count, Config)), ?awaitMatch( - {M1, M2} when length(M1) =:= 3; length(M2) =:= 3, + {M1, M2} when length(M1) =:= N; length(M2) =:= N, {cluster_members_online(Config, 0), cluster_members_online(Config, 1)}, ?TIMEOUT). @@ -149,8 +178,9 @@ successful_discovery_with_a_subset_of_nodes_coming_online() -> [{timetrap, {minutes, 15}}]. successful_discovery_with_a_subset_of_nodes_coming_online(Config) -> + N = length(?config(rmq_nodes_count, Config)), ?awaitMatch( - {M1, M2} when length(M1) =:= 2; length(M2) =:= 2, + {M1, M2} when length(M1) =:= N; length(M2) =:= N, {cluster_members_online(Config, 0), cluster_members_online(Config, 1)}, ?TIMEOUT). diff --git a/deps/rabbit/test/peer_discovery_dns_SUITE.erl b/deps/rabbit/test/peer_discovery_dns_SUITE.erl index 059039785a43..27279c4bcb61 100644 --- a/deps/rabbit/test/peer_discovery_dns_SUITE.erl +++ b/deps/rabbit/test/peer_discovery_dns_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(peer_discovery_dns_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). diff --git a/deps/rabbit/test/peer_discovery_tmp_hidden_node_SUITE.erl b/deps/rabbit/test/peer_discovery_tmp_hidden_node_SUITE.erl new file mode 100644 index 000000000000..61951a1fcd43 --- /dev/null +++ b/deps/rabbit/test/peer_discovery_tmp_hidden_node_SUITE.erl @@ -0,0 +1,285 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(peer_discovery_tmp_hidden_node_SUITE). + +-include_lib("kernel/include/inet.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("public_key/include/public_key.hrl"). + +-include_lib("rabbit_common/include/logging.hrl"). + +-export([suite/0, + all/0, + init_per_suite/1, + end_per_suite/1, + init_per_group/2, + end_per_group/2, + init_per_testcase/2, + end_per_testcase/2, + + do_setup_test_node/1, + + no_connection_between_peers_is_opened/1, + long_names_work/1, + ipv6_works/1, + inetrc_file_as_atom_works/1, + tls_dist_works/1 + ]). + +suite() -> + [{timetrap, {minutes, 15}}]. + +all() -> + [no_connection_between_peers_is_opened, + long_names_work, + ipv6_works, + inetrc_file_as_atom_works, + tls_dist_works]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + Config. + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +no_connection_between_peers_is_opened(_Config) -> + PeerOptions = #{longnames => false}, + test_query_node_props(?FUNCTION_NAME, 2, PeerOptions). + +long_names_work(_Config) -> + PeerOptions = #{longnames => true}, + test_query_node_props(?FUNCTION_NAME, 2, PeerOptions). 
+ +ipv6_works(Config) -> + PrivDir = ?config(priv_dir, Config), + InetrcFilename = filename:join(PrivDir, "inetrc-ipv6.erl"), + ct:pal("Inetrc filename:~n~0p", [InetrcFilename]), + Inetrc = [{inet6, true}], + InetrcContent = [io_lib:format("~p.~n", [Param]) || Param <- Inetrc], + ct:pal("Inetrc file content:~n---8<---~n~s---8<---", [InetrcContent]), + ok = file:write_file(InetrcFilename, InetrcContent), + InetrcArg = rabbit_misc:format("~0p", [InetrcFilename]), + + PeerOptions = #{host => "::1", + args => ["-proto_dist", "inet6_tcp", + "-kernel", "inetrc", InetrcArg]}, + test_query_node_props(?FUNCTION_NAME, 2, PeerOptions). + +inetrc_file_as_atom_works(_Config) -> + %% We can't write the inetrc file in `privdir' like we did in + %% `ipv6_works/1' because here we convert the filename to an atom and an + %% atom can't be more than 255 characters. It happens that in the + %% Buildbuddy CI worker, we reach a filename of 340+ characters. + %% + %% Instead, we write the file in the temporary directory. + %% + %% TEMP and TMP are used on Microsoft Windows, TMPDIR on Unix (but TMPDIR + %% might not be defined). + TmpDir = os:getenv("TEMP", os:getenv("TMP", os:getenv("TMPDIR", "/tmp"))), + InetrcFilename = filename:join(TmpDir, "inetrc-ipv6.erl"), + ct:pal("Inetrc filename:~n~0p", [InetrcFilename]), + Inetrc = [{inet6, true}], + InetrcContent = [io_lib:format("~p.~n", [Param]) || Param <- Inetrc], + ct:pal("Inetrc file content:~n---8<---~n~s---8<---", [InetrcContent]), + ok = file:write_file(InetrcFilename, InetrcContent), + InetrcArg = rabbit_misc:format("~0p", [list_to_atom(InetrcFilename)]), + + PeerOptions = #{host => "::1", + args => ["-proto_dist", "inet6_tcp", + "-kernel", "inetrc", InetrcArg]}, + test_query_node_props(?FUNCTION_NAME, 2, PeerOptions). + +tls_dist_works(Config) -> + CertsDir = ?config(rmq_certsdir, Config), + Password = ?config(rmq_certspwd, Config), + CACert = filename:join([CertsDir, "testca", "cacert.pem"]), + ServerCert = filename:join([CertsDir, "server", "cert.pem"]), + ServerKey = filename:join([CertsDir, "server", "key.pem"]), + SslOptions = [{server, + [{cacertfile, CACert}, + {certfile, ServerCert}, + {keyfile, ServerKey}, + {password, Password}, + {secure_renegotiate, true}, + {verify, verify_none}, + {fail_if_no_peer_cert, false}]}, + {client, + [{cacertfile, CACert}, + {secure_renegotiate, true}]}], + + PrivDir = ?config(priv_dir, Config), + SslOptFilename = filename:join(PrivDir, "ssl-options.erl"), + ct:pal("SSL options filename:~n~0p", [SslOptFilename]), + SslOptContent = rabbit_misc:format("~p.~n", [SslOptions]), + ct:pal("SSL options file content:~n---8<---~n~s---8<---", [SslOptContent]), + ok = file:write_file(SslOptFilename, SslOptContent), + + %% We need to read the certificate's Subject ID to see what hostname is + %% used in the certificate and use the same to start the test Erlang nodes. + %% We also need to pay attention if the name is short or long. 
+ {ok, ServerCertBin} = file:read_file(ServerCert), + ct:pal("ServerCertBin = ~p", [ServerCertBin]), + [DecodedCert] = public_key:pem_decode(ServerCertBin), + ct:pal("DecodedCert = ~p", [DecodedCert]), + DecodedCert1 = element(2, DecodedCert), + {_SerialNr, {rdnSequence, IssuerAttrs}} = public_key:pkix_subject_id( + DecodedCert1), + ct:pal("IssuerAttrs = ~p", [IssuerAttrs]), + [ServerName] = [Value + || [#'AttributeTypeAndValue'{type = {2, 5, 4, 3}, + value = {utf8String, Value}}] + <- IssuerAttrs], + ct:pal("ServerName = ~p", [ServerName]), + UseLongnames = re:run(ServerName, "\\.", [{capture, none}]) =:= match, + + PeerOptions = #{host => binary_to_list(ServerName), + longnames => UseLongnames, + args => ["-proto_dist", "inet_tls", + "-ssl_dist_optfile", SslOptFilename]}, + test_query_node_props(?FUNCTION_NAME, 2, PeerOptions). + +test_query_node_props(Testcase, NodeCount, PeerOptions) -> + Peers = start_test_nodes(Testcase, NodeCount, PeerOptions), + try + do_test_query_node_props(Peers) + after + stop_test_nodes(Peers) + end. + +do_test_query_node_props(Peers) -> + %% Ensure no connection exists at the beginning. + ensure_no_connections_between_test_nodes(Peers), + + %% Query the remote node's properties. The return value should have the + %% properties of the peer node, otherwise it means that we failed to + %% contact it. + [NodeA, NodeB] = lists:sort(maps:keys(Peers)), + NodeAPid = maps:get(NodeA, Peers), + Ret = peer:call( + NodeAPid, + rabbit_peer_discovery, query_node_props, [[NodeB]], + infinity), + ct:pal("Discovered nodes properties:~n~p", [Ret]), + ?assertMatch([{NodeB, [NodeB], _, false}], Ret), + + %% Ensure no connection exists after the query. + ensure_no_connections_between_test_nodes(Peers). + +%% ------------------------------------------------------------------- +%% Helpers. +%% ------------------------------------------------------------------- + +start_test_nodes(Testcase, NodeCount, PeerOptions) -> + PeerOptions1 = PeerOptions#{ + %% We use an alternative connection channel, not the + %% regular Erlang distribution, because we want to test + %% the behavior of the temporary hidden node and + %% especially that it doesn't rely on or create a connection + %% between the two nodes. + connection => standard_io, + wait_boot => infinity}, + TestEbin = filename:dirname(code:which(?MODULE)), + Args0 = maps:get(args, PeerOptions1, []), + Args1 = ["-pa", TestEbin | Args0], + Env0 = maps:get(env, PeerOptions1, []), + Env1 = [{"ERL_LIBS", os:getenv("ERL_LIBS")} | Env0], + PeerOptions2 = PeerOptions1#{args => Args1, + env => Env1}, + start_test_nodes(Testcase, 1, NodeCount, PeerOptions2, #{}). + +start_test_nodes(Testcase, NodeNumber, NodeCount, PeerOptions, Peers) + when NodeNumber =< NodeCount -> + PeerName0 = rabbit_misc:format("~s-~b", [Testcase, NodeNumber]), + PeerOptions1 = PeerOptions#{name => PeerName0}, + PeerOptions2 = case PeerOptions1 of + #{host := _} -> + PeerOptions1; + #{longnames := true} -> + %% To simulate Erlang long node names, we use a + %% hard-coded IP address that is likely to exist. + %% + %% We can't rely on the host's network + %% configuration being correct because it appears that several + %% hosts are half-configured (at least some random + %% GitHub workers and Broadcom-managed OSX laptops + %% in the team).
+ PeerOptions1#{host => "127.0.0.1"}; + _ -> + PeerOptions1 + end, + ct:pal("Starting peer with options: ~p", [PeerOptions2]), + case catch peer:start(PeerOptions2) of + {ok, PeerPid, PeerName} -> + ct:pal("Configuring peer '~ts'", [PeerName]), + setup_test_node(PeerPid, PeerOptions2), + Peers1 = Peers#{PeerName => PeerPid}, + start_test_nodes( + Testcase, NodeNumber + 1, NodeCount, PeerOptions, Peers1); + Error -> + ct:pal("Failed to start peer node:~n" + "Options: ~p~n" + "Error: ~p", [PeerOptions2, Error]), + stop_test_nodes(Peers), + erlang:throw(Error) + end; +start_test_nodes(_Testcase, _NodeNumber, _Count, _PeerOptions, Peers) -> + ct:pal("Peers: ~p", [Peers]), + Peers. + +setup_test_node(PeerPid, PeerOptions) -> + peer:call(PeerPid, ?MODULE, do_setup_test_node, [PeerOptions]). + +do_setup_test_node(PeerOptions) -> + Context = case maps:get(longnames, PeerOptions, false) of + true -> #{nodename_type => longnames}; + false -> #{} + end, + logger:set_primary_config(level, debug), + meck:new(rabbit_prelaunch, [unstick, passthrough, no_link]), + meck:expect(rabbit_prelaunch, get_context, fun() -> Context end), + meck:new(rabbit_nodes, [unstick, passthrough, no_link]), + Nodes = [node()], + meck:expect(rabbit_nodes, all, fun() -> Nodes end), + meck:expect(rabbit_nodes, list_members, fun() -> Nodes end), + ok. + +stop_test_nodes(Peers) -> + maps:foreach( + fun(_PeerName, PeerPid) -> + peer:stop(PeerPid) + end, Peers). + +ensure_no_connections_between_test_nodes(Peers) -> + maps:foreach( + fun(_PeerName, PeerPid) -> + ?assertEqual([], peer:call(PeerPid, erlang, nodes, [])) + end, Peers). diff --git a/deps/rabbit/test/per_node_limit_SUITE.erl b/deps/rabbit/test/per_node_limit_SUITE.erl index 21ce19bea034..98990c8dc364 100644 --- a/deps/rabbit/test/per_node_limit_SUITE.erl +++ b/deps/rabbit/test/per_node_limit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(per_node_limit_SUITE). @@ -15,15 +15,17 @@ all() -> [ - {group, parallel_tests} + {group, limit_tests} ]. groups() -> [ - {parallel_tests, [parallel], [ - node_connection_limit, - vhost_limit - ]} + {limit_tests, [], [ + node_connection_limit, + vhost_limit, + channel_consumers_limit, + node_channel_limit + ]} ]. suite() -> @@ -60,9 +62,17 @@ init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(vhost_limit = Testcase, Config) -> + set_node_limit(Config, vhost_max, infinity), + set_node_limit(Config, channel_max_per_node, infinity), + set_node_limit(Config, consumer_max_per_channel, infinity), + set_node_limit(Config, connection_max, infinity), [rabbit_ct_broker_helpers:delete_vhost(Config, integer_to_binary(I)) || I <- lists:seq(1,4)], rabbit_ct_helpers:testcase_finished(Config, Testcase); end_per_testcase(Testcase, Config) -> + set_node_limit(Config, vhost_max, infinity), + set_node_limit(Config, channel_max_per_node, infinity), + set_node_limit(Config, consumer_max_per_channel, infinity), + set_node_limit(Config, connection_max, infinity), rabbit_ct_helpers:testcase_finished(Config, Testcase).
%% ------------------------------------------------------------------- @@ -71,7 +81,7 @@ end_per_testcase(Testcase, Config) -> node_connection_limit(Config) -> %% Set limit to 0, don't accept any connections - set_node_limit(Config, 0), + set_node_limit(Config, connection_max, 0), {error, not_allowed} = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), %% Set limit to 5, accept 5 connections @@ -80,34 +90,89 @@ node_connection_limit(Config) -> {error, not_allowed} = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), close_all_connections(Connections), - set_node_limit(Config, infinity), + set_node_limit(Config, connection_max, infinity), C = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), true = is_pid(C), close_all_connections([C]), ok. vhost_limit(Config) -> - set_vhost_limit(Config, 0), + set_node_limit(Config, vhost_max, 0), {'EXIT',{vhost_limit_exceeded, _}} = rabbit_ct_broker_helpers:add_vhost(Config, <<"foo">>), - set_vhost_limit(Config, 5), + set_node_limit(Config, vhost_max, 5), [ok = rabbit_ct_broker_helpers:add_vhost(Config, integer_to_binary(I)) || I <- lists:seq(1,4)], {'EXIT',{vhost_limit_exceeded, _}} = rabbit_ct_broker_helpers:add_vhost(Config, <<"5">>), [rabbit_ct_broker_helpers:delete_vhost(Config, integer_to_binary(I)) || I <- lists:seq(1,4)], - set_vhost_limit(Config, infinity), + set_node_limit(Config, vhost_max, infinity), [ok = rabbit_ct_broker_helpers:add_vhost(Config, integer_to_binary(I)) || I <- lists:seq(1,4)], ok = rabbit_ct_broker_helpers:add_vhost(Config, <<"5">>), [rabbit_ct_broker_helpers:delete_vhost(Config, integer_to_binary(I)) || I <- lists:seq(1,5)], ok. +node_channel_limit(Config) -> + set_node_limit(Config, channel_max_per_node, 5), + + VHost = <<"node_channel_limit">>, + User = <<"guest">>, + ok = rabbit_ct_broker_helpers:add_vhost(Config, VHost), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, VHost), + Conn1 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VHost), + Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VHost), + 0 = count_channels_per_node(Config), + + lists:foreach(fun(N) when (N band 1) == 1 -> {ok, _} = open_channel(Conn1); + (_) -> {ok,_ } = open_channel(Conn2) + end, lists:seq(1, 5)), + + 5 = count_channels_per_node(Config), + %% In total 5 channels are open on this node, so a new one, regardless of + %% connection, will not be allowed. It will terminate the connection along + %% with its channels too. + {error, not_allowed_crash} = open_channel(Conn2), + 3 = count_channels_per_node(Config), + %% As the connection is dead, so are the 2 channels, so we should be able to + %% create 2 more on Conn1 + {ok , _} = open_channel(Conn1), + {ok , _} = open_channel(Conn1), + %% But not a third + {error, not_allowed_crash} = open_channel(Conn1), + + %% Now both connections are closed, so there should be 0 open channels + 0 = count_channels_per_node(Config), + close_all_connections([Conn1, Conn2]), + + rabbit_ct_broker_helpers:delete_vhost(Config, VHost), + + ok.
+ +channel_consumers_limit(Config) -> + set_node_limit(Config, consumer_max_per_channel, 2), + + VHost = <<"channel_consumers_limit">>, + User = <<"guest">>, + ok = rabbit_ct_broker_helpers:add_vhost(Config, VHost), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, VHost), + Conn1 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VHost), + {ok, Ch} = open_channel(Conn1), + Q = <<"Q">>, + + {ok, _} = consume(Ch, Q, <<"Tag1">>), + {ok, _} = consume(Ch, Q, <<"Tag2">>), + {error, not_allowed_crash} = consume(Ch, Q, <<"Tag3">>), % Third consumer should fail + + close_all_connections([Conn1]), + rabbit_ct_broker_helpers:delete_vhost(Config, VHost), + + ok. %% ------------------------------------------------------------------- %% Implementation %% ------------------------------------------------------------------- open_connections_to_limit(Config, Limit) -> - set_node_limit(Config, Limit), + set_node_limit(Config, connection_max, Limit), Connections = [rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0) || _ <- lists:seq(1,Limit)], true = lists:all(fun(E) -> is_pid(E) end, Connections), Connections. @@ -115,12 +180,32 @@ open_connections_to_limit(Config, Limit) -> close_all_connections(Connections) -> [rabbit_ct_client_helpers:close_connection(C) || C <- Connections]. -set_node_limit(Config, Limit) -> +set_node_limit(Config, Type, Limit) -> rabbit_ct_broker_helpers:rpc(Config, 0, application, - set_env, [rabbit, connection_max, Limit]). - -set_vhost_limit(Config, Limit) -> + set_env, [rabbit, Type, Limit]). + +consume(Ch, Q, Tag) -> + #'queue.declare_ok'{queue = Q} = amqp_channel:call(Ch, #'queue.declare'{queue = Q}), + try amqp_channel:call(Ch, #'basic.consume'{queue = Q, consumer_tag = Tag}) of + #'basic.consume_ok'{} = OK -> {ok, OK}; + NotOk -> {error, NotOk} + catch + _:_Error -> {error, not_allowed_crash} + end. + +open_channel(Conn) when is_pid(Conn) -> + try amqp_connection:open_channel(Conn) of + {ok, Ch} -> {ok, Ch}; + {error, _} -> + {error, not_allowed} + catch + _:_Error -> {error, not_allowed_crash} + end. + +count_channels_per_node(Config) -> + NodeConfig = rabbit_ct_broker_helpers:get_node_config(Config, 0), rabbit_ct_broker_helpers:rpc(Config, 0, - application, - set_env, [rabbit, vhost_max, Limit]). + rabbit_channel_tracking, + channel_count_on_node, + [?config(nodename, NodeConfig)]). diff --git a/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl b/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl index 85a8606b5ba2..1a039329b38c 100644 --- a/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl +++ b/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(per_user_connection_channel_limit_SUITE). @@ -12,13 +12,13 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-compile(nowarn_export_all). -compile(export_all). all() -> [ - {group, cluster_size_1_network}, - {group, cluster_size_2_network}, - {group, cluster_size_2_direct} + {group, tests}, + {group, khepri_migration} ]. 
groups() -> @@ -35,7 +35,7 @@ groups() -> single_node_multiple_users_zero_limit ], - ClusterSize2Tests = [ + ClusterSize3Tests = [ most_basic_cluster_connection_and_channel_count, cluster_single_user_connection_and_channel_count, cluster_multiple_users_connection_and_channel_count, @@ -49,9 +49,12 @@ groups() -> cluster_multiple_users_zero_limit ], [ - {cluster_size_1_network, [], ClusterSize1Tests}, - {cluster_size_2_network, [], ClusterSize2Tests}, - {cluster_size_2_direct, [], ClusterSize2Tests} + {tests, [], [ + {cluster_size_1_network, [], ClusterSize1Tests}, + {cluster_size_3_network, [], ClusterSize3Tests}, + {cluster_size_3_direct, [], ClusterSize3Tests} + ]}, + {khepri_migration, [], [from_mnesia_to_khepri]} ]. suite() -> @@ -71,36 +74,33 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). +init_per_group(khepri_migration, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}, + {metadata_store, mnesia}]), + init_per_multinode_group(cluster_size_1_network, Config1, 1); init_per_group(cluster_size_1_network, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]), init_per_multinode_group(cluster_size_1_network, Config1, 1); -init_per_group(cluster_size_2_network, Config) -> +init_per_group(cluster_size_3_network, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]), - init_per_multinode_group(cluster_size_2_network, Config1, 2); -init_per_group(cluster_size_2_direct, Config) -> + init_per_multinode_group(cluster_size_3_network, Config1, 3); +init_per_group(cluster_size_3_direct, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]), - init_per_multinode_group(cluster_size_2_direct, Config1, 2); - -init_per_group(cluster_rename, Config) -> - init_per_multinode_group(cluster_rename, Config, 2). + init_per_multinode_group(cluster_size_3_direct, Config1, 3); +init_per_group(tests, Config) -> + Config. -init_per_multinode_group(Group, Config, NodeCount) -> +init_per_multinode_group(_Group, Config, NodeCount) -> Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodes_count, NodeCount}, {rmq_nodename_suffix, Suffix} ]), - case Group of - cluster_rename -> - % The broker is managed by {init,end}_per_testcase(). - Config1; - _ -> rabbit_ct_helpers:run_steps( Config1, rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()) - end. + rabbit_ct_client_helpers:setup_steps()). -end_per_group(cluster_rename, Config) -> +end_per_group(tests, Config) -> % The broker is managed by {init,end}_per_testcase(). Config; end_per_group(_Group, Config) -> @@ -670,7 +670,7 @@ cluster_node_restart_connection_and_channel_count(Config) -> end). cluster_node_list_on_node(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), rabbit_ct_helpers:await_condition( fun () -> @@ -1459,6 +1459,33 @@ cluster_multiple_users_zero_limit(Config) -> set_user_connection_and_channel_limit(Config, Username1, -1, -1), set_user_connection_and_channel_limit(Config, Username2, -1, -1). 
+from_mnesia_to_khepri(Config) -> + Username = proplists:get_value(rmq_username, Config), + rabbit_ct_helpers:await_condition( + fun () -> + count_connections_of_user(Config, Username) =:= 0 andalso + count_channels_of_user(Config, Username) =:= 0 + end), + + [Conn] = open_connections(Config, [0]), + [_Chan] = open_channels(Conn, 1), + + rabbit_ct_helpers:await_condition( + fun () -> + count_connections_of_user(Config, Username) =:= 1 andalso + count_channels_of_user(Config, Username) =:= 1 + end), + case rabbit_ct_broker_helpers:enable_feature_flag(Config, khepri_db) of + ok -> + rabbit_ct_helpers:await_condition( + fun () -> + count_connections_of_user(Config, Username) =:= 1 andalso + count_channels_of_user(Config, Username) =:= 1 + end); + Skip -> + Skip + end. + %% ------------------------------------------------------------------- %% Helpers %% ------------------------------------------------------------------- diff --git a/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl b/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl index f542f0cc50d7..8c9ec8f4baed 100644 --- a/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl +++ b/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(per_user_connection_channel_limit_partitions_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). @@ -124,8 +123,15 @@ cluster_full_partition_with_autoheal(Config) -> ?awaitMatch(All, list_running(Config, B), 60000, 3000), ?awaitMatch(All, list_running(Config, C), 60000, 3000), - %% during autoheal B's connections were dropped - ?awaitMatch({4, 10}, + %% During autoheal B's connections were dropped. Autoheal is not running + %% when Khepri is used. + KhepriEnabled = rabbit_ct_broker_helpers:is_feature_flag_enabled( + Config, khepri_db), + ExpectedCount = case KhepriEnabled of + true -> {6, 15}; + false -> {4, 10} + end, + ?awaitMatch(ExpectedCount, {count_connections_in(Config, Username), count_channels_in(Config, Username)}, 60000, 3000), diff --git a/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl b/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl index 4c4b1f7af7f8..d403edc1f1ab 100644 --- a/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl +++ b/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(per_user_connection_channel_tracking_SUITE). @@ -12,16 +12,14 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-compile(nowarn_export_all). -compile(export_all). -define(A_TOUT, 20000). 
all() -> [ - {group, cluster_size_1_network}, - {group, cluster_size_2_network}, - {group, cluster_size_1_direct}, - {group, cluster_size_2_direct} + {group, tests} ]. groups() -> @@ -31,17 +29,19 @@ groups() -> single_node_vhost_down_mimic, single_node_vhost_deletion ], - ClusterSize2Tests = [ + ClusterSize3Tests = [ cluster_user_deletion, cluster_vhost_down_mimic, cluster_vhost_deletion, cluster_node_removed ], [ - {cluster_size_1_network, [], ClusterSize1Tests}, - {cluster_size_2_network, [], ClusterSize2Tests}, - {cluster_size_1_direct, [], ClusterSize1Tests}, - {cluster_size_2_direct, [], ClusterSize2Tests} + {tests, [], [ + {cluster_size_1_network, [], ClusterSize1Tests}, + {cluster_size_3_network, [], ClusterSize3Tests}, + {cluster_size_1_direct, [], ClusterSize1Tests}, + {cluster_size_3_direct, [], ClusterSize3Tests} + ]} ]. suite() -> @@ -64,15 +64,17 @@ end_per_suite(Config) -> init_per_group(cluster_size_1_network, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]), init_per_multinode_group(cluster_size_1_network, Config1, 1); -init_per_group(cluster_size_2_network, Config) -> +init_per_group(cluster_size_3_network, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]), - init_per_multinode_group(cluster_size_2_network, Config1, 2); + init_per_multinode_group(cluster_size_3_network, Config1, 3); init_per_group(cluster_size_1_direct, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]), init_per_multinode_group(cluster_size_1_direct, Config1, 1); -init_per_group(cluster_size_2_direct, Config) -> +init_per_group(cluster_size_3_direct, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]), - init_per_multinode_group(cluster_size_2_direct, Config1, 2). + init_per_multinode_group(cluster_size_3_direct, Config1, 3); +init_per_group(_Group, Config) -> + Config. init_per_multinode_group(_Group, Config, NodeCount) -> Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), @@ -84,6 +86,9 @@ init_per_multinode_group(_Group, Config, NodeCount) -> Config1, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). +end_per_group(tests, Config) -> + % The broker is managed by {init,end}_per_testcase(). + Config; end_per_group(_Group, Config) -> rabbit_ct_helpers:run_steps(Config, rabbit_ct_client_helpers:teardown_steps() ++ diff --git a/deps/rabbit/test/per_user_connection_tracking_SUITE.erl b/deps/rabbit/test/per_user_connection_tracking_SUITE.erl index e62ce95b837b..2bded6b85ecc 100644 --- a/deps/rabbit/test/per_user_connection_tracking_SUITE.erl +++ b/deps/rabbit/test/per_user_connection_tracking_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(per_user_connection_tracking_SUITE). @@ -12,16 +12,14 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). +-compile(nowarn_export_all). -compile(export_all). -define(AWAIT_TIMEOUT, 30000). all() -> [ - {group, cluster_size_1_network}, - {group, cluster_size_2_network}, - {group, cluster_size_1_direct}, - {group, cluster_size_2_direct} + {group, tests} ]. 
groups() -> @@ -33,10 +31,12 @@ groups() -> cluster_user_deletion_forces_connection_closure ], [ - {cluster_size_1_network, [], ClusterSize1Tests}, - {cluster_size_2_network, [], ClusterSize2Tests}, - {cluster_size_1_direct, [], ClusterSize1Tests}, - {cluster_size_2_direct, [], ClusterSize2Tests} + {tests, [], [ + {cluster_size_1_network, [], ClusterSize1Tests}, + {cluster_size_2_network, [], ClusterSize2Tests}, + {cluster_size_1_direct, [], ClusterSize1Tests}, + {cluster_size_2_direct, [], ClusterSize2Tests} + ]} ]. suite() -> @@ -67,7 +67,9 @@ init_per_group(cluster_size_1_direct, Config) -> init_per_multinode_group(cluster_size_1_direct, Config1, 1); init_per_group(cluster_size_2_direct, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]), - init_per_multinode_group(cluster_size_2_direct, Config1, 2). + init_per_multinode_group(cluster_size_2_direct, Config1, 2); +init_per_group(tests, Config) -> + Config. init_per_multinode_group(_Group, Config, NodeCount) -> Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), @@ -79,6 +81,9 @@ init_per_multinode_group(_Group, Config, NodeCount) -> rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). +end_per_group(tests, Config) -> + % The broker is managed by {init,end}_per_testcase(). + Config; end_per_group(_Group, Config) -> rabbit_ct_helpers:run_steps(Config, rabbit_ct_client_helpers:teardown_steps() ++ diff --git a/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl index 89dd01fc0cf8..1e18f808ceef 100644 --- a/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl +++ b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(per_vhost_connection_limit_SUITE). @@ -12,14 +12,13 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-compile(nowarn_export_all). -compile(export_all). all() -> [ - {group, cluster_size_1_network}, - {group, cluster_size_2_network}, - {group, cluster_size_1_direct}, - {group, cluster_size_2_direct} + {group, tests}, + {group, khepri_migration} ]. groups() -> @@ -33,7 +32,9 @@ groups() -> single_node_multiple_vhosts_limit, single_node_multiple_vhosts_zero_limit ], - ClusterSize2Tests = [ + %% Use a cluster size of 3 so the khepri metadata store can keep + %% making progress even if one node is down/stopped + ClusterSize3Tests = [ most_basic_cluster_connection_count, cluster_single_vhost_connection_count, cluster_multiple_vhosts_connection_count, @@ -45,13 +46,13 @@ groups() -> cluster_multiple_vhosts_zero_limit ], [ - {cluster_size_1_network, [], ClusterSize1Tests}, - {cluster_size_2_network, [], ClusterSize2Tests}, - {cluster_size_1_direct, [], ClusterSize1Tests}, - {cluster_size_2_direct, [], ClusterSize2Tests}, - {cluster_rename, [], [ - vhost_limit_after_node_renamed - ]} + {tests, [], [ + {cluster_size_1_network, [], ClusterSize1Tests}, + {cluster_size_3_network, [], ClusterSize3Tests}, + {cluster_size_1_direct, [], ClusterSize1Tests}, + {cluster_size_3_direct, [], ClusterSize3Tests} + ]}, + {khepri_migration, [], [from_mnesia_to_khepri]} ]. 
suite() -> @@ -61,7 +62,6 @@ suite() -> ]. %% see partitions_SUITE --define(DELAY, 9000). -define(AWAIT, 1000). -define(INTERVAL, 250). @@ -76,39 +76,37 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). +init_per_group(khepri_migration, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}, + {metadata_store, mnesia}]), + init_per_multinode_group(cluster_size_1_network, Config1, 1); init_per_group(cluster_size_1_network, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]), init_per_multinode_group(cluster_size_1_network, Config1, 1); -init_per_group(cluster_size_2_network, Config) -> +init_per_group(cluster_size_3_network, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]), - init_per_multinode_group(cluster_size_2_network, Config1, 2); + init_per_multinode_group(cluster_size_3_network, Config1, 3); init_per_group(cluster_size_1_direct, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]), init_per_multinode_group(cluster_size_1_direct, Config1, 1); -init_per_group(cluster_size_2_direct, Config) -> +init_per_group(cluster_size_3_direct, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]), - init_per_multinode_group(cluster_size_2_direct, Config1, 2); - -init_per_group(cluster_rename, Config) -> - init_per_multinode_group(cluster_rename, Config, 2). + init_per_multinode_group(cluster_size_3_direct, Config1, 3); +init_per_group(tests, Config) -> + Config. -init_per_multinode_group(Group, Config, NodeCount) -> +init_per_multinode_group(_Group, Config, NodeCount) -> Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodes_count, NodeCount}, {rmq_nodename_suffix, Suffix} ]), - case Group of - cluster_rename -> - % The broker is managed by {init,end}_per_testcase(). - Config1; - _ -> - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()) - end. + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). -end_per_group(cluster_rename, Config) -> +end_per_group(Group, Config) when Group == tests; + Group == khepri_migration -> % The broker is managed by {init,end}_per_testcase(). Config; end_per_group(_Group, Config) -> @@ -116,21 +114,10 @@ end_per_group(_Group, Config) -> rabbit_ct_client_helpers:teardown_steps() ++ rabbit_ct_broker_helpers:teardown_steps()). -init_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), - rabbit_ct_helpers:run_steps(Config, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()); init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase), Config. -end_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) -> - Config1 = ?config(save_config, Config), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase); end_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_finished(Config, Testcase). @@ -376,7 +363,7 @@ cluster_node_restart_connection_count(Config) -> ?awaitMatch(0, count_connections_in(Config, VHost), ?AWAIT, ?INTERVAL). 
cluster_node_list_on_node(Config) -> - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [A, B, _C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), ?assertEqual(0, length(all_connections(Config))), ?assertEqual(0, length(connections_on_node(Config, 0))), @@ -654,45 +641,18 @@ cluster_multiple_vhosts_zero_limit(Config) -> set_vhost_connection_limit(Config, VHost1, -1), set_vhost_connection_limit(Config, VHost2, -1). -vhost_limit_after_node_renamed(Config) -> - A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - - %% Make sure the maintenance mode states Mnesia table is replicated - %% everywhere. We do this here, just in case mixed-version testing is - %% against a version of RabbitMQ that doesn't have the fix yet. - %% - %% See https://github.com/rabbitmq/rabbitmq-server/pull/9005. - B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), - rabbit_ct_broker_helpers:rpc( - Config, B, - rabbit_table, ensure_table_copy, - [rabbit_node_maintenance_states, B, ram_copies]), - - VHost = <<"/renaming_node">>, - set_up_vhost(Config, VHost), - set_vhost_connection_limit(Config, VHost, 2), - +from_mnesia_to_khepri(Config) -> + VHost = <<"/">>, ?assertEqual(0, count_connections_in(Config, VHost)), + [_Conn] = open_connections(Config, [{0, VHost}]), + ?awaitMatch(1, count_connections_in(Config, VHost), ?AWAIT, ?INTERVAL), + case rabbit_ct_broker_helpers:enable_feature_flag(Config, khepri_db) of + ok -> + ?awaitMatch(1, count_connections_in(Config, VHost), ?AWAIT, ?INTERVAL); + Skip -> + Skip + end. - [Conn1, Conn2, {error, not_allowed}] = open_connections(Config, - [{0, VHost}, {1, VHost}, {0, VHost}]), - ?awaitMatch(2, count_connections_in(Config, VHost), ?AWAIT, ?INTERVAL), - close_connections([Conn1, Conn2]), - - Config1 = cluster_rename_SUITE:stop_rename_start(Config, A, [A, 'new-A']), - - ?awaitMatch(0, count_connections_in(Config1, VHost), ?AWAIT, ?INTERVAL), - - [Conn3, Conn4, {error, not_allowed}] = open_connections(Config1, - [{0, VHost}, {1, VHost}, {0, VHost}]), - ?awaitMatch(2, count_connections_in(Config1, VHost), ?AWAIT, ?INTERVAL), - close_connections([Conn3, Conn4]), - - set_vhost_connection_limit(Config1, VHost, -1), - {save_config, Config1}. - -%% ------------------------------------------------------------------- -%% Helpers %% ------------------------------------------------------------------- open_connections(Config, NodesAndVHosts) -> diff --git a/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl b/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl index 8d9e66b34792..9bcdedd0466a 100644 --- a/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl +++ b/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl @@ -2,16 +2,15 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(per_vhost_connection_limit_partitions_SUITE). --include_lib("common_test/include/ct.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-compile(nowarn_export_all). -compile(export_all). 
-import(rabbit_ct_client_helpers, [open_unmanaged_connection/2, @@ -113,8 +112,15 @@ cluster_full_partition_with_autoheal(Config) -> rabbit_ct_broker_helpers:allow_traffic_between(B, C), timer:sleep(?DELAY), - %% during autoheal B's connections were dropped - ?awaitMatch(Connections when length(Connections) == 4, + %% During autoheal B's connections were dropped. Autoheal is not running + %% when Khepri is used. + KhepriEnabled = rabbit_ct_broker_helpers:is_feature_flag_enabled( + Config, khepri_db), + ExpectedCount = case KhepriEnabled of + true -> 6; + false -> 4 + end, + ?awaitMatch(Connections when length(Connections) == ExpectedCount, connections_in(Config, VHost), 60000, 3000), diff --git a/deps/rabbit/test/per_vhost_msg_store_SUITE.erl b/deps/rabbit/test/per_vhost_msg_store_SUITE.erl index 44f00bf4cc39..4d035be27995 100644 --- a/deps/rabbit/test/per_vhost_msg_store_SUITE.erl +++ b/deps/rabbit/test/per_vhost_msg_store_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(per_vhost_msg_store_SUITE). @@ -11,37 +11,51 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-compile(nowarn_export_all). -compile(export_all). -define(MSGS_COUNT, 100). all() -> [ - publish_to_different_dirs, - storage_deleted_on_vhost_delete, - single_vhost_storage_delete_is_safe + {group, tests} ]. +groups() -> + [ + {tests, [], all_tests()} + ]. +all_tests() -> + [publish_to_different_dirs, + storage_deleted_on_vhost_delete, + single_vhost_storage_delete_is_safe]. init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config, []). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(tests, Config) -> + init_per_group_common(Config). + +init_per_group_common(Config) -> Config1 = rabbit_ct_helpers:set_config( Config, [{rmq_nodename_suffix, ?MODULE}]), Config2 = rabbit_ct_helpers:merge_app_env( Config1, {rabbit, [{queue_index_embed_msgs_below, 100}]}), - rabbit_ct_helpers:run_setup_steps( - Config2, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps( - Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()). + rabbit_ct_helpers:run_steps(Config2, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_, Config) -> + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). init_per_testcase(_, Config) -> Vhost1 = <<"vhost1">>, diff --git a/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl b/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl index 13c51a220bda..171995a94ca9 100644 --- a/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl +++ b/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. 
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(per_vhost_queue_limit_SUITE). @@ -12,6 +12,7 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). +-compile(nowarn_export_all). -compile(export_all). -import(rabbit_ct_client_helpers, [open_unmanaged_connection/3, @@ -21,32 +22,39 @@ all() -> [ - {group, cluster_size_1} - , {group, cluster_size_2} + {group, tests} ]. groups() -> [ - {cluster_size_1, [], [ - most_basic_single_node_queue_count, - single_node_single_vhost_queue_count, - single_node_multiple_vhosts_queue_count, - single_node_single_vhost_limit, - single_node_single_vhost_zero_limit, - single_node_single_vhost_limit_with_durable_named_queue, - single_node_single_vhost_zero_limit_with_durable_named_queue, - single_node_single_vhost_limit_with_queue_ttl, - single_node_single_vhost_limit_with_redeclaration - ]}, - {cluster_size_2, [], [ - most_basic_cluster_queue_count, - cluster_multiple_vhosts_queue_count, - cluster_multiple_vhosts_limit, - cluster_multiple_vhosts_zero_limit, - cluster_multiple_vhosts_limit_with_durable_named_queue, - cluster_multiple_vhosts_zero_limit_with_durable_named_queue, - cluster_node_restart_queue_count - ]} + {tests, [], [ + {cluster_size_1, [], cluster_size_1_tests()}, + {cluster_size_2, [], cluster_size_2_tests()} + ]} + ]. + +cluster_size_1_tests() -> + [ + most_basic_single_node_queue_count, + single_node_single_vhost_queue_count, + single_node_multiple_vhosts_queue_count, + single_node_single_vhost_limit, + single_node_single_vhost_zero_limit, + single_node_single_vhost_limit_with_durable_named_queue, + single_node_single_vhost_zero_limit_with_durable_named_queue, + single_node_single_vhost_limit_with_queue_ttl, + single_node_single_vhost_limit_with_redeclaration + ]. + +cluster_size_2_tests() -> + [ + most_basic_cluster_queue_count, + cluster_multiple_vhosts_queue_count, + cluster_multiple_vhosts_limit, + cluster_multiple_vhosts_zero_limit, + cluster_multiple_vhosts_limit_with_durable_named_queue, + cluster_multiple_vhosts_zero_limit_with_durable_named_queue, + cluster_node_restart_queue_count ]. suite() -> @@ -66,31 +74,24 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(cluster_size_1, Config) -> - init_per_multinode_group(cluster_size_1, Config, 1); -init_per_group(cluster_size_2, Config) -> - init_per_multinode_group(cluster_size_2, Config, 2); -init_per_group(cluster_rename, Config) -> - init_per_multinode_group(cluster_rename, Config, 2). +init_per_group(tests, Config) -> + Config; +init_per_group(cluster_size_1 = Group, Config) -> + init_per_multinode_group(Group, Config, 1); +init_per_group(cluster_size_2 = Group, Config) -> + init_per_multinode_group(Group, Config, 2). -init_per_multinode_group(Group, Config, NodeCount) -> +init_per_multinode_group(_Group, Config, NodeCount) -> Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodes_count, NodeCount}, {rmq_nodename_suffix, Suffix} ]), - case Group of - cluster_rename -> - % The broker is managed by {init,end}_per_testcase(). - Config1; - _ -> - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()) - end. + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). 
-end_per_group(cluster_rename, Config) -> - % The broker is managed by {init,end}_per_testcase(). +end_per_group(tests, Config) -> Config; end_per_group(_Group, Config) -> rabbit_ct_helpers:run_steps(Config, diff --git a/deps/rabbit/test/policy_SUITE.erl b/deps/rabbit/test/policy_SUITE.erl index 822b925c58cd..c95175b377a1 100644 --- a/deps/rabbit/test/policy_SUITE.erl +++ b/deps/rabbit/test/policy_SUITE.erl @@ -2,32 +2,52 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(policy_SUITE). -include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("stdlib/include/assert.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-compile(nowarn_export_all). -compile(export_all). all() -> [ - {group, cluster_size_2} + {group, tests}, + {group, khepri_migration} ]. groups() -> [ - {cluster_size_2, [], [ - target_count_policy, - policy_ttl, - operator_policy_ttl, - operator_retroactive_policy_ttl, - operator_retroactive_policy_publish_ttl, - queue_type_specific_policies - ]} + {tests, [], all_tests()}, + {khepri_migration, [], [ + from_mnesia_to_khepri + ]} + ]. + +all_tests() -> + [ + policy_ttl, + operator_policy_ttl, + operator_retroactive_policy_ttl, + operator_retroactive_policy_publish_ttl, + queue_type_specific_policies, + classic_queue_version_policies, + overflow_policies, + is_supported_operator_policy_expires, + is_supported_operator_policy_message_ttl, + is_supported_operator_policy_max_length, + is_supported_operator_policy_max_length, + is_supported_operator_policy_max_in_memory_length, + is_supported_operator_policy_max_in_memory_bytes, + is_supported_operator_policy_delivery_limit, + is_supported_operator_policy_target_group_size, + is_supported_operator_policy_overflow ]. %% ------------------------------------------------------------------- @@ -41,29 +61,42 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(cluster_size_2, Config) -> - Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 2}, - {rmq_nodename_suffix, Suffix} - ]), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). - -end_per_group(_Group, Config) -> +init_per_group(tests = Group, Config) -> + init_per_group_common(Group, Config, 2); +init_per_group(khepri_migration = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), + init_per_group_common(Group, Config, 1). + +init_per_group_common(Group, Config, Size) -> + Config1 = rabbit_ct_helpers:set_config(Config, + [{rmq_nodes_count, Size}, + {rmq_nodename_suffix, Group}, + {tcp_ports_base}]), + rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps()). + +end_per_group(_, Config) -> rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()). + rabbit_ct_broker_helpers:teardown_steps()). 
init_per_testcase(Testcase, Config) -> - rabbit_ct_client_helpers:setup_steps(), - rabbit_ct_helpers:testcase_started(Config, Testcase). + Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase), + Name = rabbit_data_coercion:to_binary(Testcase), + Group = proplists:get_value(name, ?config(tc_group_properties, Config)), + Policy = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_policy", [Group, Testcase])), + OpPolicy = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_op_policy", [Group, Testcase])), + Config2 = rabbit_ct_helpers:set_config(Config1, + [{queue_name, Name}, + {policy, Policy}, + {op_policy, OpPolicy} + ]), + rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()). end_per_testcase(Testcase, Config) -> - rabbit_ct_client_helpers:teardown_steps(), - rabbit_ct_helpers:testcase_finished(Config, Testcase). - + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []), + _ = rabbit_ct_broker_helpers:clear_policy(Config, 0, ?config(policy, Config)), + _ = rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, ?config(op_policy, Config)), + Config1 = rabbit_ct_helpers:run_steps(Config, rabbit_ct_client_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). %% ------------------------------------------------------------------- %% Test cases. %% ------------------------------------------------------------------- @@ -152,63 +185,6 @@ operator_retroactive_policy_publish_ttl(Config) -> rabbit_ct_client_helpers:close_connection(Conn), passed. -target_count_policy(Config) -> - [Server | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), - QName = <<"policy_ha">>, - declare(Ch, QName), - BNodes = [atom_to_binary(N) || N <- Nodes], - - AllPolicy = [{<<"ha-mode">>, <<"all">>}], - ExactlyPolicyOne = [{<<"ha-mode">>, <<"exactly">>}, - {<<"ha-params">>, 1}], - ExactlyPolicyTwo = [{<<"ha-mode">>, <<"exactly">>}, - {<<"ha-params">>, 2}], - NodesPolicyAll = [{<<"ha-mode">>, <<"nodes">>}, - {<<"ha-params">>, BNodes}], - NodesPolicyOne = [{<<"ha-mode">>, <<"nodes">>}, - {<<"ha-params">>, [hd(BNodes)]}], - SyncModePolicyAuto = [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"automatic">>}], - SyncModePolicyMan = [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"manual">>}], - - %% ALL has precedence - Opts = #{config => Config, - server => Server, - qname => QName}, - verify_policies(AllPolicy, ExactlyPolicyTwo, [{<<"ha-mode">>, <<"all">>}], Opts), - - verify_policies(ExactlyPolicyTwo, AllPolicy, [{<<"ha-mode">>, <<"all">>}], Opts), - - verify_policies(AllPolicy, NodesPolicyAll, [{<<"ha-mode">>, <<"all">>}], Opts), - - verify_policies(NodesPolicyAll, AllPolicy, [{<<"ha-mode">>, <<"all">>}], Opts), - - %% %% Sync mode OperPolicy has precedence - verify_policies(SyncModePolicyMan, SyncModePolicyAuto, [{<<"ha-sync-mode">>, <<"automatic">>}], Opts), - verify_policies(SyncModePolicyAuto, SyncModePolicyMan, [{<<"ha-sync-mode">>, <<"manual">>}], Opts), - - %% exactly has precedence over nodes - verify_policies(ExactlyPolicyTwo, NodesPolicyAll,[{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}], Opts), - - verify_policies(NodesPolicyAll, ExactlyPolicyTwo, [{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}], Opts), - - %% Highest exactly value has precedence - verify_policies(ExactlyPolicyTwo, ExactlyPolicyOne, [{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}], Opts), - - verify_policies(ExactlyPolicyOne, 
ExactlyPolicyTwo, [{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}], Opts), - - %% Longest node count has precedence - SortedNodes = lists:sort(BNodes), - verify_policies(NodesPolicyAll, NodesPolicyOne, [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, SortedNodes}], Opts), - verify_policies(NodesPolicyOne, NodesPolicyAll, [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, SortedNodes}], Opts), - - delete(Ch, QName), - rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"policy">>), - rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"op_policy">>), - rabbit_ct_client_helpers:close_channel(Ch), - rabbit_ct_client_helpers:close_connection(Conn), - passed. - queue_type_specific_policies(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -246,18 +222,186 @@ queue_type_specific_policies(Config) -> rabbit_ct_client_helpers:close_connection(Conn), passed. +classic_queue_version_policies(Config) -> + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = <<"policy_queue_version">>, + declare(Ch, QName), + QueueVersionOnePolicy = [{<<"queue-version">>, 1}], + QueueVersionTwoPolicy = [{<<"queue-version">>, 2}], + + Opts = #{config => Config, + server => Server, + qname => QName}, + + %% Queue version OperPolicy has precedence always + verify_policies(QueueVersionOnePolicy, QueueVersionTwoPolicy, QueueVersionTwoPolicy, Opts), + verify_policies(QueueVersionTwoPolicy, QueueVersionOnePolicy, QueueVersionOnePolicy, Opts), + + delete(Ch, QName), + rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"policy">>), + rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"op_policy">>), + rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), + passed. + +overflow_policies(Config) -> + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + QName = <<"policy_overflow">>, + declare(Ch, QName), + DropHead = [{<<"overflow">>, <<"drop-head">>}], + RejectPub = [{<<"overflow">>, <<"reject-publish">>}], + + Opts = #{config => Config, + server => Server, + qname => QName}, + + %% OperPolicy has precedence always + verify_policies(DropHead, RejectPub, RejectPub, Opts), + + delete(Ch, QName), + rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"policy">>), + rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"op_policy">>), + rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), + passed. + + +%% See supported policies in https://www.rabbitmq.com/parameters.html#operator-policies +%% This test applies all supported operator policies to all queue types, +%% and later verifies the effective policy definitions. +%% Just those supported by each queue type should be present. + +is_supported_operator_policy_expires(Config) -> + Value = 6000000, + effective_operator_policy_per_queue_type( + Config, <<"expires">>, Value, Value, Value, undefined). + +is_supported_operator_policy_message_ttl(Config) -> + Value = 1000, + effective_operator_policy_per_queue_type( + Config, <<"message-ttl">>, Value, Value, Value, undefined). + +is_supported_operator_policy_max_length(Config) -> + Value = 500, + effective_operator_policy_per_queue_type( + Config, <<"max-length">>, Value, Value, Value, undefined). 
+ +is_supported_operator_policy_max_length_bytes(Config) -> + Value = 1500, + effective_operator_policy_per_queue_type( + Config, <<"max-length-bytes">>, Value, Value, Value, Value). + +is_supported_operator_policy_max_in_memory_length(Config) -> + Value = 30, + effective_operator_policy_per_queue_type( + Config, <<"max-in-memory-length">>, Value, undefined, Value, undefined). + +is_supported_operator_policy_max_in_memory_bytes(Config) -> + Value = 50000, + effective_operator_policy_per_queue_type( + Config, <<"max-in-memory-bytes">>, Value, undefined, Value, undefined). + +is_supported_operator_policy_delivery_limit(Config) -> + Value = 3, + effective_operator_policy_per_queue_type( + Config, <<"delivery-limit">>, Value, undefined, Value, undefined). + +is_supported_operator_policy_target_group_size(Config) -> + Value = 5, + effective_operator_policy_per_queue_type( + Config, <<"target-group-size">>, Value, undefined, Value, undefined). + +is_supported_operator_policy_overflow(Config) -> + Value = <<"drop-head">>, + effective_operator_policy_per_queue_type( + Config, <<"overflow">>, Value, Value, Value, undefined). + +effective_operator_policy_per_queue_type(Config, Name, Value, ClassicValue, QuorumValue, StreamValue) -> + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + ClassicQ = <<"classic_queue">>, + QuorumQ = <<"quorum_queue">>, + StreamQ = <<"stream_queue">>, + + declare(Ch, ClassicQ, [{<<"x-queue-type">>, longstr, <<"classic">>}]), + declare(Ch, QuorumQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + declare(Ch, StreamQ, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + + rabbit_ct_broker_helpers:set_operator_policy( + Config, 0, <<"operator-policy">>, <<".*">>, <<"all">>, + [{Name, Value}]), + + ?awaitMatch(ClassicValue, check_policy_value(Server, ClassicQ, Name), 30_000), + ?awaitMatch(QuorumValue, check_policy_value(Server, QuorumQ, Name), 30_000), + ?awaitMatch(StreamValue, check_policy_value(Server, StreamQ, Name), 30_000), + + rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"operator-policy">>), + + delete(Ch, ClassicQ), + delete(Ch, QuorumQ), + delete(Ch, StreamQ), + + rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), + passed. 
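For quick reference, the expectations encoded by the is_supported_operator_policy_* cases above can be read as a per-queue-type support table: a key counts as "supported" here when the test expects it to appear in the queue's effective policy definition rather than come back undefined. The sketch below is only an illustrative restatement of those Value/undefined arguments; operator_policy_support/0 is a hypothetical helper and not part of this suite.

%% Illustrative only; not part of the suite. Derived from the
%% is_supported_operator_policy_* cases above: 'true' means the test
%% expects the key in that queue type's effective policy definition.
operator_policy_support() ->
    #{<<"expires">>              => #{classic => true,  quorum => true,  stream => false},
      <<"message-ttl">>          => #{classic => true,  quorum => true,  stream => false},
      <<"max-length">>           => #{classic => true,  quorum => true,  stream => false},
      <<"max-length-bytes">>     => #{classic => true,  quorum => true,  stream => true},
      <<"max-in-memory-length">> => #{classic => false, quorum => true,  stream => false},
      <<"max-in-memory-bytes">>  => #{classic => false, quorum => true,  stream => false},
      <<"delivery-limit">>       => #{classic => false, quorum => true,  stream => false},
      <<"target-group-size">>    => #{classic => false, quorum => true,  stream => false},
      <<"overflow">>             => #{classic => true,  quorum => true,  stream => false}}.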
%%---------------------------------------------------------------------------- +from_mnesia_to_khepri(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + Q = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q)), + + Policy = ?config(policy, Config), + ok = rabbit_ct_broker_helpers:set_policy(Config, 0, Policy, Q, + <<"queues">>, + [{<<"dead-letter-exchange">>, <<>>}, + {<<"dead-letter-routing-key">>, Q}]), + OpPolicy = ?config(op_policy, Config), + ok = rabbit_ct_broker_helpers:set_operator_policy(Config, 0, OpPolicy, Q, + <<"queues">>, + [{<<"max-length">>, 10000}]), + + Policies0 = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_policy, list, [])), + Names0 = lists:sort([proplists:get_value(name, Props) || Props <- Policies0]), + + ?assertEqual([Policy], Names0), + + OpPolicies0 = lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_policy, list_op, [])), + OpNames0 = lists:sort([proplists:get_value(name, Props) || Props <- OpPolicies0]), + + ?assertEqual([OpPolicy], OpNames0), + + case rabbit_ct_broker_helpers:enable_feature_flag(Config, khepri_db) of + ok -> + rabbit_ct_helpers:await_condition( + fun() -> + (Policies0 == + lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_policy, list, []))) + andalso + (OpPolicies0 == + lists:sort(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_policy, list_op, []))) + end); + Skip -> + Skip + end. +%%---------------------------------------------------------------------------- +delete_queues() -> + [{ok, _} = rabbit_amqqueue:delete(Q, false, false, <<"dummy">>) + || Q <- rabbit_amqqueue:list()]. declare(Ch, Q) -> amqp_channel:call(Ch, #'queue.declare'{queue = Q, - durable = true}). + durable = true}). declare(Ch, Q, Args) -> amqp_channel:call(Ch, #'queue.declare'{queue = Q, - durable = true, - arguments = Args}). + durable = true, + arguments = Args}). delete(Ch, Q) -> amqp_channel:call(Ch, #'queue.delete'{queue = Q}). @@ -305,17 +449,22 @@ get_messages(Number, Ch, Q) -> end. check_policy_value(Server, QName, Value) -> + ct:pal("QUEUES ~p", + [rpc:call(Server, rabbit_amqqueue, list, [])]), {ok, Q} = rpc:call(Server, rabbit_amqqueue, lookup, [rabbit_misc:r(<<"/">>, queue, QName)]), - proplists:get_value(Value, rpc:call(Server, rabbit_policy, effective_definition, [Q])). + case rpc:call(Server, rabbit_policy, effective_definition, [Q]) of + List when is_list(List) -> proplists:get_value(Value, List); + Any -> Any + end. verify_policies(Policy, OperPolicy, VerifyFuns, #{config := Config, server := Server, qname := QName}) -> rabbit_ct_broker_helpers:set_policy(Config, 0, <<"policy">>, - <<"policy_ha">>, <<"queues">>, + QName, <<"queues">>, Policy), rabbit_ct_broker_helpers:set_operator_policy(Config, 0, <<"op_policy">>, - <<"policy_ha">>, <<"queues">>, + QName, <<"queues">>, OperPolicy), verify_policy(VerifyFuns, Server, QName). diff --git a/deps/rabbit/test/priority_queue_SUITE.erl b/deps/rabbit/test/priority_queue_SUITE.erl index 64fb7b5246ef..0b5b331f9a71 100644 --- a/deps/rabbit/test/priority_queue_SUITE.erl +++ b/deps/rabbit/test/priority_queue_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -module(priority_queue_SUITE). @@ -29,6 +29,7 @@ groups() -> dropwhile_fetchwhile, info_head_message_timestamp, info_backing_queue_version, + info_oldest_message_received_timestamp, unknown_info_key, matching, purge, @@ -39,7 +40,8 @@ groups() -> invoke, gen_server2_stats, negative_max_priorities, - max_priorities_above_hard_limit + max_priorities_above_hard_limit, + update_rates ]} ]. @@ -56,16 +58,13 @@ end_per_suite(Config) -> init_per_group(single_node, Config) -> Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 1}, - {rmq_nodename_suffix, Suffix} - ]), - Config2 = rabbit_ct_helpers:run_steps( - Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - _ = rabbit_ct_broker_helpers:enable_feature_flag(Config2, message_containers), - Config2; + Config1 = rabbit_ct_helpers:set_config( + Config, [{rmq_nodes_count, 1}, + {rmq_nodename_suffix, Suffix}]), + rabbit_ct_helpers:run_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()); init_per_group(overflow_reject_publish, Config) -> rabbit_ct_helpers:set_config(Config, [ {overflow, <<"reject-publish">>} @@ -122,7 +121,7 @@ end_per_testcase(Testcase, Config) -> %% * len/1, is_empty/1 - info items %% * handle_pre_hibernate/1 - hibernation %% -%% * set_ram_duration_target/2, ram_duration/1, status/1 +%% * status/1 %% - maybe need unit testing? %% %% [0] publish enough to get credit flow from msg store @@ -368,16 +367,14 @@ info_head_message_timestamp1(_Config) -> Content1 = #content{properties = #'P_basic'{priority = 1, timestamp = 1000}, payload_fragments_rev = []}, - Msg1 = mc_amqpl:message(ExName, <<>>, Content1, #{id => <<"msg1">>}), - BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(), - noflow, BQS1), + {ok, Msg1} = mc_amqpl:message(ExName, <<>>, Content1, #{id => <<"msg1">>}), + BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(), BQS1), 1000 = PQ:info(head_message_timestamp, BQS2), %% Publish a higher priority message with no timestamp. Content2 = #content{properties = #'P_basic'{priority = 2}, payload_fragments_rev = []}, - Msg2 = mc_amqpl:message(ExName, <<>>, Content2, #{id => <<"msg2">>}), - BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(), - noflow, BQS2), + {ok, Msg2} = mc_amqpl:message(ExName, <<>>, Content2, #{id => <<"msg2">>}), + BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(), BQS2), '' = PQ:info(head_message_timestamp, BQS3), %% Consume message with no timestamp. {{Msg2, _, _}, BQS4} = PQ:fetch(false, BQS3), @@ -394,6 +391,8 @@ info_head_message_timestamp1(_Config) -> PQ:delete_and_terminate(a_whim, BQS6), passed. +%% Because queue version is now ignored, this test is expected +%% to always get a queue version 2. info_backing_queue_version(Config) -> {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q1 = <<"info-priority-queue-v1">>, @@ -404,7 +403,7 @@ info_backing_queue_version(Config) -> {<<"x-queue-version">>, byte, 2}]), try {ok, [{backing_queue_status, BQS1}]} = info(Config, Q1, [backing_queue_status]), - 1 = proplists:get_value(version, BQS1), + 2 = proplists:get_value(version, BQS1), {ok, [{backing_queue_status, BQS2}]} = info(Config, Q2, [backing_queue_status]), 2 = proplists:get_value(version, BQS2) after @@ -415,6 +414,53 @@ info_backing_queue_version(Config) -> passed end. 
+info_oldest_message_received_timestamp(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, info_oldest_message_received_timestamp1, [Config]). + +info_oldest_message_received_timestamp1(_Config) -> + QName = rabbit_misc:r(<<"/">>, queue, + <<"info_oldest_message_received_timestamp-queue">>), + ExName = rabbit_misc:r(<<"/">>, exchange, <<>>), + Q0 = rabbit_amqqueue:pseudo_queue(QName, self()), + Q1 = amqqueue:set_arguments(Q0, [{<<"x-max-priority">>, long, 2}]), + PQ = rabbit_priority_queue, + BQS1 = PQ:init(Q1, new, fun(_, _) -> ok end), + %% The queue is empty: no timestamp. + true = PQ:is_empty(BQS1), + '' = PQ:info(oldest_message_received_timestamp, BQS1), + %% Publish one message. + Content1 = #content{properties = #'P_basic'{priority = 1}, + payload_fragments_rev = []}, + {ok, Msg1} = mc_amqpl:message(ExName, <<>>, Content1, #{id => <<"msg1">>}), + BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(), + BQS1), + Ts1 = PQ:info(oldest_message_received_timestamp, BQS2), + ?assert(is_integer(Ts1)), + %% Publish a higher priority message. + Content2 = #content{properties = #'P_basic'{priority = 2}, + payload_fragments_rev = []}, + {ok, Msg2} = mc_amqpl:message(ExName, <<>>, Content2, #{id => <<"msg2">>}), + BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(), + BQS2), + %% Even though it has the highest priority, the lower priority message is older. + %% Timestamp hasn't changed. + ?assertEqual(Ts1, PQ:info(oldest_message_received_timestamp, BQS3)), + %% Consume message. + {{Msg2, _, _}, BQS4} = PQ:fetch(false, BQS3), + ?assertEqual(Ts1, PQ:info(oldest_message_received_timestamp, BQS4)), + %% Consume the first message, but do not acknowledge it + %% yet. The goal is to verify that the unacknowledged message's + %% timestamp is returned. + {{Msg1, _, AckTag}, BQS5} = PQ:fetch(true, BQS4), + ?assertEqual(Ts1, PQ:info(oldest_message_received_timestamp, BQS5)), + %% Ack message. The queue is empty now. + {[<<"msg1">>], BQS6} = PQ:ack([AckTag], BQS5), + true = PQ:is_empty(BQS6), + ?assertEqual('', PQ:info(oldest_message_received_timestamp, BQS6)), + PQ:delete_and_terminate(a_whim, BQS6), + passed. + unknown_info_key(Config) -> {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Q = <<"info-priority-queue">>, @@ -428,18 +474,23 @@ unknown_info_key(Config) -> rabbit_ct_client_helpers:close_connection(Conn), passed. -ram_duration(_Config) -> - QName = rabbit_misc:r(<<"/">>, queue, <<"ram_duration-queue">>), - Q0 = rabbit_amqqueue:pseudo_queue(QName, self()), - Q1 = amqqueue:set_arguments(Q0, [{<<"x-max-priority">>, long, 5}]), - PQ = rabbit_priority_queue, - BQS1 = PQ:init(Q1, new, fun(_, _) -> ok end), - {_Duration1, BQS2} = PQ:ram_duration(BQS1), - BQS3 = PQ:set_ram_duration_target(infinity, BQS2), - BQS4 = PQ:set_ram_duration_target(1, BQS3), - {_Duration2, BQS5} = PQ:ram_duration(BQS4), - PQ:delete_and_terminate(a_whim, BQS5), - passed. +update_rates(Config) -> + Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + Q = <<"update_rates-queue">>, + declare(Ch, Q, [{<<"x-max-priority">>, byte, 3}]), + QPid = queue_pid(Config, Node, rabbit_misc:r(<<"/">>, queue, Q)), + try + publish1(Ch, Q, 1), + QPid ! update_rates, + State = get_state(Config, Q), + ?assertEqual(live, State), + delete(Ch, Q) + after + rabbit_ct_client_helpers:close_channel(Ch), + rabbit_ct_client_helpers:close_connection(Conn), + passed + end.
%%---------------------------------------------------------------------------- @@ -558,4 +609,14 @@ info(Config, Q, InfoKeys) -> Config, Nodename, rabbit_classic_queue, info, [Amq, InfoKeys]), {ok, Info}. + +get_state(Config, Q) -> + Nodename = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + {ok, Amq} = rabbit_ct_broker_helpers:rpc( + Config, Nodename, + rabbit_amqqueue, lookup, [rabbit_misc:r(<<"/">>, queue, Q)]), + rabbit_ct_broker_helpers:rpc( + Config, Nodename, + amqqueue, get_state, [Amq]). + %%---------------------------------------------------------------------------- diff --git a/deps/rabbit/test/priority_queue_recovery_SUITE.erl b/deps/rabbit/test/priority_queue_recovery_SUITE.erl index cad466498a06..5099cda128dd 100644 --- a/deps/rabbit/test/priority_queue_recovery_SUITE.erl +++ b/deps/rabbit/test/priority_queue_recovery_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(priority_queue_recovery_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/product_info_SUITE.erl b/deps/rabbit/test/product_info_SUITE.erl index 5bd6d601a479..b99caa7e652e 100644 --- a/deps/rabbit/test/product_info_SUITE.erl +++ b/deps/rabbit/test/product_info_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(product_info_SUITE). @@ -55,53 +55,43 @@ init_per_suite(Config) -> end. end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -init_per_group(_, Config) -> - Config. - -end_per_group(_, Config) -> Config. 
-init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), +init_per_group(Group, Config0) -> ClusterSize = 1, - TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + PrivDir = ?config(priv_dir, Config0), + MotdFile = filename:join(PrivDir, "motd.txt"), + ok = file:write_file(MotdFile, <<"My MOTD\n">>), Config1 = rabbit_ct_helpers:set_config( - Config, + Config0, [ - {rmq_nodename_suffix, Testcase}, - {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + {rmq_nodename_suffix, Group}, + {tcp_ports_base, {skip_n_nodes, ClusterSize}}, + {motd_file, MotdFile} ]), - Config2 = case Testcase of - override_product_name_in_conf -> - rabbit_ct_helpers:merge_app_env( - Config1, - {rabbit, [{product_name, "MyProduct"}]}); - override_product_version_in_conf -> - rabbit_ct_helpers:merge_app_env( - Config1, - {rabbit, [{product_version, "MyVersion"}]}); - set_motd_in_conf -> - PrivDir = ?config(priv_dir, Config), - MotdFile = filename:join(PrivDir, "motd.txt"), - ok = file:write_file(MotdFile, <<"My MOTD\n">>), - C2 = rabbit_ct_helpers:set_config( - Config1, - {motd_file, MotdFile}), - rabbit_ct_helpers:merge_app_env( - C2, - {rabbit, [{motd_file, MotdFile}]}) - end, - rabbit_ct_helpers:run_steps(Config2, + + Config = rabbit_ct_helpers:merge_app_env( + Config1, + {rabbit, [ + {product_name, "MyProduct"}, + {product_version, "MyVersion"}, + {motd_file, MotdFile}]}), + rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). +end_per_group(_, Config) -> + rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + Config. + end_per_testcase(Testcase, Config) -> - Config1 = rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). + rabbit_ct_helpers:testcase_finished(Config, Testcase), + Config. %% ------------------------------------------------------------------- %% Testcases. diff --git a/deps/rabbit/test/proxy_protocol_SUITE.erl b/deps/rabbit/test/proxy_protocol_SUITE.erl index be16f356dde6..cd15c1d96b5c 100644 --- a/deps/rabbit/test/proxy_protocol_SUITE.erl +++ b/deps/rabbit/test/proxy_protocol_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(proxy_protocol_SUITE). diff --git a/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl b/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl index 4bfe2c7845f5..f2e2c3370ebb 100644 --- a/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl +++ b/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl @@ -2,24 +2,24 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -module(publisher_confirms_parallel_SUITE). -include_lib("common_test/include/ct.hrl"). --include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). +-compile(nowarn_export_all). -compile(export_all). -define(TIMEOUT, 60000). --import(quorum_queue_utils, [wait_for_messages/2]). +-import(queue_utils, [wait_for_messages/2]). all() -> [ - {group, publisher_confirm_tests} + {group, tests} ]. groups() -> @@ -33,15 +33,10 @@ groups() -> confirm_mandatory_unroutable, confirm_unroutable_message], [ - {publisher_confirm_tests, [], + {tests, [], [ {classic_queue, [parallel], PublisherConfirmTests ++ [confirm_nack]}, - {mirrored_queue, [parallel], PublisherConfirmTests ++ [confirm_nack]}, - {quorum_queue, [], - [ - {parllel_tests, [parallel], PublisherConfirmTests}, - confirm_minority - ]} + {quorum_queue, [parallel], PublisherConfirmTests} ]} ]. @@ -71,28 +66,20 @@ init_per_group(quorum_queue, Config) -> Config, [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]}, {queue_durable, true}]); -init_per_group(mirrored_queue, Config) -> - rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>, - <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]), - Config1 = rabbit_ct_helpers:set_config( - Config, [{is_mirrored, true}, - {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]}, - {queue_durable, true}]), - rabbit_ct_helpers:run_steps(Config1, []); -init_per_group(Group, Config) -> - case lists:member({group, Group}, all()) of - true -> - ClusterSize = 3, - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, Group}, - {rmq_nodes_count, ClusterSize} - ]), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()); - false -> - Config - end. +init_per_group(Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), + init_per_group0(Group, Config). + +init_per_group0(Group, Config) -> + ClusterSize = 3, + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Group}, + {rmq_nodes_count, ClusterSize} + ]), + Config2 = rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + Config2. end_per_group(Group, Config) -> case lists:member({group, Group}, all()) of @@ -284,17 +271,14 @@ confirm_nack1(Config) -> #'confirm.select_ok'{} -> ok after ?TIMEOUT -> throw(failed_to_enable_confirms) end, + %% stop the queue + ok = gen_server:stop(QPid1, shutdown, 5000), %% Publish a message rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>, routing_key = <<"confirms-magic">> }, rabbit_basic:build_content( #'P_basic'{delivery_mode = 2}, <<"">>)), - %% We must not kill the queue before the channel has processed the - %% 'publish'. - ok = rabbit_channel:flush(Ch), - %% Crash the queue - QPid1 ! boom, %% Wait for a nack receive #'basic.nack'{} -> ok; @@ -310,35 +294,6 @@ confirm_nack1(Config) -> ok = rabbit_channel:shutdown(Ch), passed. -%% The closest to a nack behaviour that we can get on quorum queues is not answering while -%% the cluster is in minority. Once the cluster recovers, a 'basic.ack' will be issued. 
-confirm_minority(Config) -> - [_A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), - QName = ?config(queue_name, Config), - declare_queue(Ch, Config, QName), - ok = rabbit_ct_broker_helpers:stop_node(Config, B), - ok = rabbit_ct_broker_helpers:stop_node(Config, C), - amqp_channel:call(Ch, #'confirm.select'{}), - amqp_channel:register_confirm_handler(Ch, self()), - publish(Ch, QName, [<<"msg1">>]), - receive - #'basic.nack'{} -> ok; - #'basic.ack'{} -> throw(unexpected_ack) - after 120000 -> - ok - end, - ok = rabbit_ct_broker_helpers:start_node(Config, B), - publish(Ch, QName, [<<"msg2">>]), - receive - #'basic.nack'{} -> throw(unexpected_nack); - #'basic.ack'{} -> - ok - after 60000 -> - throw(missing_ack) - end, - ok = rabbit_ct_broker_helpers:start_node(Config, C), - ok. %%%%%%%%%%%%%%%%%%%%%%%% %% Test helpers @@ -370,13 +325,6 @@ consume(Ch, QName, Payloads) -> consume_empty(Ch, QName) -> #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}). -sync_mirrors(QName, Config) -> - case ?config(is_mirrored, Config) of - true -> - rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]); - _ -> ok - end. - receive_many([]) -> ok; receive_many(DTags) -> diff --git a/deps/rabbit/test/queue_length_limits_SUITE.erl b/deps/rabbit/test/queue_length_limits_SUITE.erl index 881a5046be9d..b40cab4aa993 100644 --- a/deps/rabbit/test/queue_length_limits_SUITE.erl +++ b/deps/rabbit/test/queue_length_limits_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(queue_length_limits_SUITE). -include_lib("common_test/include/ct.hrl"). --include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -22,29 +21,37 @@ all() -> [ - {group, parallel_tests} + {group, mnesia_parallel_tests}, + {group, khepri_parallel_tests} ]. groups() -> - MaxLengthTests = [max_length_default, - max_length_bytes_default, - max_length_drop_head, - max_length_bytes_drop_head, - max_length_reject_confirm, - max_length_bytes_reject_confirm, - max_length_drop_publish, - max_length_drop_publish_requeue, - max_length_bytes_drop_publish], [ - {parallel_tests, [parallel], [ - {max_length_classic, [], MaxLengthTests}, - {max_length_quorum, [], [max_length_default, - max_length_bytes_default] - }, - {max_length_mirrored, [], MaxLengthTests} + {mnesia_parallel_tests, [parallel], [ + {max_length_classic, [], max_length_tests()}, + {max_length_quorum, [], max_length_quorum_tests()} + ]}, + {khepri_parallel_tests, [parallel], [ + {max_length_classic, [], max_length_tests()}, + {max_length_quorum, [], max_length_quorum_tests()} ]} ]. +max_length_tests() -> + [max_length_default, + max_length_bytes_default, + max_length_drop_head, + max_length_bytes_drop_head, + max_length_reject_confirm, + max_length_bytes_reject_confirm, + max_length_drop_publish, + max_length_drop_publish_requeue, + max_length_bytes_drop_publish]. + +max_length_quorum_tests() -> + [max_length_default, + max_length_bytes_default]. 
+ suite() -> [ {timetrap, {minutes, 3}} @@ -71,15 +78,14 @@ init_per_group(max_length_quorum, Config) -> Config, [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]}, {queue_durable, true}]); -init_per_group(max_length_mirrored, Config) -> - rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>, - <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]), - Config1 = rabbit_ct_helpers:set_config( - Config, [{is_mirrored, true}, - {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]}, - {queue_durable, false}]), - rabbit_ct_helpers:run_steps(Config1, []); -init_per_group(Group, Config) -> +init_per_group(mnesia_parallel_tests = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), + init_per_group0(Group, Config); +init_per_group(khepri_parallel_tests = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, khepri}]), + init_per_group0(Group, Config). + +init_per_group0(Group, Config) -> case lists:member({group, Group}, all()) of true -> ClusterSize = 3, @@ -88,16 +94,12 @@ init_per_group(Group, Config) -> {rmq_nodes_count, ClusterSize} ]), rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()); + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()); false -> rabbit_ct_helpers:run_steps(Config, []) end. -end_per_group(max_length_mirrored, Config) -> - rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"^max_length.*queue">>), - Config1 = rabbit_ct_helpers:set_config(Config, [{is_mirrored, false}]), - Config1; end_per_group(queue_max_length, Config) -> Config; end_per_group(Group, Config) -> @@ -154,7 +156,7 @@ max_length_bytes_drop_head(Config, ExtraArgs) -> Payload1 = << <<"1">> || _ <- lists:seq(1, 80) >>, Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>, Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>, - check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3). + check_max_length_drops_head(QName, Ch, Payload1, Payload2, Payload3). max_length_drop_head(Config) -> max_length_drop_head(Config, [{<<"x-overflow">>, longstr, <<"drop-head">>}]). @@ -172,7 +174,7 @@ max_length_drop_head(Config, ExtraArgs) -> MaxLengthArgs = [{<<"x-max-length">>, long, 1}], #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ Args ++ ExtraArgs, durable = Durable}), - check_max_length_drops_head(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>). + check_max_length_drops_head(QName, Ch, <<"1">>, <<"2">>, <<"3">>). max_length_reject_confirm(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -183,8 +185,8 @@ max_length_reject_confirm(Config) -> OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}], #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}), #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), - check_max_length_drops_publish(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>), - check_max_length_rejects(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>). + check_max_length_drops_publish(QName, Ch, <<"1">>, <<"2">>, <<"3">>), + check_max_length_rejects(QName, Ch, <<"1">>, <<"2">>, <<"3">>). 
max_length_bytes_reject_confirm(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -201,8 +203,8 @@ max_length_bytes_reject_confirm(Config) -> Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>, Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>, - check_max_length_drops_publish(Config, QNameBytes, Ch, Payload1, Payload2, Payload3), - check_max_length_rejects(Config, QNameBytes, Ch, Payload1, Payload2, Payload3). + check_max_length_drops_publish(QNameBytes, Ch, Payload1, Payload2, Payload3), + check_max_length_rejects(QNameBytes, Ch, Payload1, Payload2, Payload3). max_length_drop_publish(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -213,7 +215,7 @@ max_length_drop_publish(Config) -> OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}], #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}), %% If confirms are not enable, publishes will still be dropped in reject-publish mode. - check_max_length_drops_publish(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>). + check_max_length_drops_publish(QName, Ch, <<"1">>, <<"2">>, <<"3">>). max_length_drop_publish_requeue(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -224,7 +226,7 @@ max_length_drop_publish_requeue(Config) -> OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}], #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}), %% If confirms are not enable, publishes will still be dropped in reject-publish mode. - check_max_length_requeue(Config, QName, Ch, <<"1">>, <<"2">>). + check_max_length_requeue(QName, Ch, <<"1">>, <<"2">>). max_length_bytes_drop_publish(Config) -> {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), @@ -240,15 +242,13 @@ max_length_bytes_drop_publish(Config) -> Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>, Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>, - check_max_length_drops_publish(Config, QNameBytes, Ch, Payload1, Payload2, Payload3). + check_max_length_drops_publish(QNameBytes, Ch, Payload1, Payload2, Payload3). %% ------------------------------------------------------------------- %% Implementation %% ------------------------------------------------------------------- -check_max_length_requeue(Config, QName, Ch, Payload1, Payload2) -> - sync_mirrors(QName, Config), - +check_max_length_requeue(QName, Ch, Payload1, Payload2) -> #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), amqp_channel:register_confirm_handler(Ch, self()), @@ -272,9 +272,7 @@ check_max_length_requeue(Config, QName, Ch, Payload1, Payload2) -> {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}), #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}). 
-check_max_length_drops_publish(Config, QName, Ch, Payload1, Payload2, Payload3) -> - sync_mirrors(QName, Config), - +check_max_length_drops_publish(QName, Ch, Payload1, Payload2, Payload3) -> #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), amqp_channel:register_confirm_handler(Ch, self()), @@ -301,8 +299,7 @@ check_max_length_drops_publish(Config, QName, Ch, Payload1, Payload2, Payload3) {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}), #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}). -check_max_length_rejects(Config, QName, Ch, Payload1, Payload2, Payload3) -> - sync_mirrors(QName, Config), +check_max_length_rejects(QName, Ch, Payload1, Payload2, Payload3) -> amqp_channel:register_confirm_handler(Ch, self()), flush(), #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}), @@ -334,9 +331,7 @@ check_max_length_rejects(Config, QName, Ch, Payload1, Payload2, Payload3) -> {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}). -check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3) -> - sync_mirrors(QName, Config), - +check_max_length_drops_head(QName, Ch, Payload1, Payload2, Payload3) -> #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), amqp_channel:register_confirm_handler(Ch, self()), @@ -364,13 +359,6 @@ check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3) -> {#'basic.get_ok'{}, #amqp_msg{payload = Payload3}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}), #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}). -sync_mirrors(QName, Config) -> - case rabbit_ct_helpers:get_config(Config, is_mirrored) of - true -> - rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]); - _ -> ok - end. - flush() -> receive _ -> flush() after 10 -> ok diff --git a/deps/rabbit/test/queue_master_location_SUITE.erl b/deps/rabbit/test/queue_master_location_SUITE.erl deleted file mode 100644 index f80de76baca5..000000000000 --- a/deps/rabbit/test/queue_master_location_SUITE.erl +++ /dev/null @@ -1,468 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(queue_master_location_SUITE). - -%% These tests use an ABC cluster with each node initialised with -%% a different number of queues. When a queue is declared, different -%% strategies can be applied to determine the queue's master node. Queue -%% location strategies can be applied in the following ways; -%% 1. As policy, -%% 2. As config (in rabbitmq.config), -%% 3. or as part of the queue's declare arguments. -%% -%% Currently supported strategies are; -%% min-masters : The queue master node is calculated as the one with the -%% least bound queues in the cluster. -%% client-local: The queue master node is the local node from which -%% the declaration is being carried out from -%% random : The queue master node is randomly selected. -%% - --include_lib("common_test/include/ct.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). - --compile(export_all). - --define(DEFAULT_VHOST_PATH, (<<"/">>)). 
--define(POLICY, <<"^qm.location$">>). - -all() -> - [ - {group, cluster_size_3}, - {group, maintenance_mode} - ]. - -groups() -> - [ - {cluster_size_3, [], [ - declare_args, - declare_policy, - declare_invalid_policy, - declare_policy_nodes, - declare_policy_all, - declare_policy_exactly, - declare_config, - calculate_min_master, - calculate_min_master_with_bindings, - calculate_random, - calculate_client_local - ]}, - - {maintenance_mode, [], [ - declare_with_min_masters_and_some_nodes_under_maintenance, - declare_with_min_masters_and_all_nodes_under_maintenance, - - declare_with_random_and_some_nodes_under_maintenance, - declare_with_random_and_all_nodes_under_maintenance - ]} - ]. - -%% ------------------------------------------------------------------- -%% Test suite setup/teardown -%% ------------------------------------------------------------------- - -merge_app_env(Config) -> - rabbit_ct_helpers:merge_app_env(Config, - {rabbit, [ - {collect_statistics, fine}, - {collect_statistics_interval, 500} - ]}). -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config, - [ fun merge_app_env/1 ] ++ - rabbit_ct_broker_helpers:setup_steps()). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -init_per_group(cluster_size_3, Config) -> - rabbit_ct_helpers:set_config(Config, [ - %% Replaced with a list of node names later - {rmq_nodes_count, 3} - ]); -init_per_group(maintenance_mode, Config) -> - rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 3} - ]). - -end_per_group(_, Config) -> - Config. - -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), - ClusterSize = ?config(rmq_nodes_count, Config), - Nodenames = [ - list_to_atom(rabbit_misc:format("~ts-~b", [Testcase, I])) - || I <- lists:seq(1, ClusterSize) - ], - TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, Nodenames}, - {rmq_nodes_clustered, true}, - {rmq_nodename_suffix, Testcase}, - {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} - ]), - rabbit_ct_helpers:run_steps( - Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). - -end_per_testcase(Testcase, Config) -> - Config1 = rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). - -%% ------------------------------------------------------------------- -%% Test cases -%% ------------------------------------------------------------------- - -%% -%% Queue 'declarations' -%% - -declare_args(Config) -> - setup_test_environment(Config), - unset_location_config(Config), - QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>), - Args = [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}], - declare(Config, QueueName, false, false, Args, none), - verify_min_master(Config, Q). - -declare_policy(Config) -> - setup_test_environment(Config), - unset_location_config(Config), - set_location_policy(Config, ?POLICY, <<"min-masters">>), - QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>), - declare(Config, QueueName, false, false, _Args=[], none), - verify_min_master(Config, Q). - -declare_invalid_policy(Config) -> - %% Tests that queue masters location returns 'ok', otherwise the validation of - %% any other parameter might be skipped and invalid policy accepted. 
- setup_test_environment(Config), - unset_location_config(Config), - Policy = [{<<"queue-master-locator">>, <<"min-masters">>}, - {<<"ha-mode">>, <<"exactly">>}, - %% this field is expected to be an integer - {<<"ha-params">>, <<"2">>}], - {error_string, _} = rabbit_ct_broker_helpers:rpc( - Config, 0, rabbit_policy, set, - [<<"/">>, ?POLICY, <<".*">>, Policy, 0, <<"queues">>, <<"acting-user">>]). - -declare_policy_nodes(Config) -> - setup_test_environment(Config), - unset_location_config(Config), - % Note: - % Node0 has 15 queues, Node1 has 8 and Node2 has 1 - Node0Name = rabbit_data_coercion:to_binary( - rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)), - Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), - Node1Name = rabbit_data_coercion:to_binary(Node1), - Nodes = [Node1Name, Node0Name], - Policy = [{<<"queue-master-locator">>, <<"min-masters">>}, - {<<"ha-mode">>, <<"nodes">>}, - {<<"ha-params">>, Nodes}], - ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY, - <<".*">>, <<"queues">>, Policy), - QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>), - declare(Config, QueueName, false, false, _Args=[], none), - verify_min_master(Config, Q, Node1). - -declare_policy_all(Config) -> - setup_test_environment(Config), - unset_location_config(Config), - % Note: - % Node0 has 15 queues, Node1 has 8 and Node2 has 1 - Policy = [{<<"queue-master-locator">>, <<"min-masters">>}, - {<<"ha-mode">>, <<"all">>}], - ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY, - <<".*">>, <<"queues">>, Policy), - QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>), - declare(Config, QueueName, false, false, _Args=[], none), - verify_min_master(Config, Q). - -declare_policy_exactly(Config) -> - setup_test_environment(Config), - unset_location_config(Config), - Policy = [{<<"queue-master-locator">>, <<"min-masters">>}, - {<<"ha-mode">>, <<"exactly">>}, - {<<"ha-params">>, 2}], - ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY, - <<".*">>, <<"queues">>, Policy), - QueueRes = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>), - declare(Config, QueueRes, false, false, _Args=[], none), - - Node0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - rabbit_ct_broker_helpers:control_action(sync_queue, Node0, - [binary_to_list(Q)], [{"-p", "/"}]), - ?awaitMatch(true, synced(Config, Node0, QueueRes, 1), 60000), - - {ok, Queue} = rabbit_ct_broker_helpers:rpc(Config, Node0, - rabbit_amqqueue, lookup, [QueueRes]), - {MNode0, [SNode], [SSNode]} = rabbit_ct_broker_helpers:rpc(Config, Node0, - rabbit_mirror_queue_misc, - actual_queue_nodes, [Queue]), - ?assertEqual(SNode, SSNode), - {ok, MNode1} = rabbit_ct_broker_helpers:rpc(Config, 0, - rabbit_queue_master_location_misc, - lookup_master, [Q, ?DEFAULT_VHOST_PATH]), - ?assertEqual(MNode0, MNode1), - Node2 = rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename), - ?assertEqual(MNode1, Node2). - -declare_config(Config) -> - setup_test_environment(Config), - set_location_config(Config, <<"min-masters">>), - QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>), - declare(Config, QueueName, false, false, _Args = [], none), - verify_min_master(Config, Q), - unset_location_config(Config), - ok. 
- -%% -%% Maintenance mode effects -%% - -declare_with_min_masters_and_some_nodes_under_maintenance(Config) -> - set_location_policy(Config, ?POLICY, <<"min-masters">>), - rabbit_ct_broker_helpers:mark_as_being_drained(Config, 0), - rabbit_ct_broker_helpers:mark_as_being_drained(Config, 1), - - QName = <<"qm.tests.min_masters.maintenance.case1">>, - Resource = rabbit_misc:r(<<"/">>, queue, QName), - Record = declare(Config, Resource, false, false, _Args = [], none), - %% the only node that's not being drained - ?assertEqual(rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename), - node(amqqueue:get_pid(Record))), - - rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 0), - rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 1). - -declare_with_min_masters_and_all_nodes_under_maintenance(Config) -> - declare_with_all_nodes_under_maintenance(Config, <<"min-masters">>). - -declare_with_random_and_some_nodes_under_maintenance(Config) -> - set_location_policy(Config, ?POLICY, <<"random">>), - rabbit_ct_broker_helpers:mark_as_being_drained(Config, 0), - rabbit_ct_broker_helpers:mark_as_being_drained(Config, 2), - - QName = <<"qm.tests.random.maintenance.case1">>, - Resource = rabbit_misc:r(<<"/">>, queue, QName), - Record = declare(Config, Resource, false, false, _Args = [], none), - %% the only node that's not being drained - ?assertEqual(rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), - node(amqqueue:get_pid(Record))), - - rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 0), - rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 2). - -declare_with_random_and_all_nodes_under_maintenance(Config) -> - declare_with_all_nodes_under_maintenance(Config, <<"random">>). - -declare_with_all_nodes_under_maintenance(Config, Locator) -> - set_location_policy(Config, ?POLICY, Locator), - rabbit_ct_broker_helpers:mark_as_being_drained(Config, 0), - rabbit_ct_broker_helpers:mark_as_being_drained(Config, 1), - rabbit_ct_broker_helpers:mark_as_being_drained(Config, 2), - - QName = rabbit_data_coercion:to_binary( - rabbit_misc:format("qm.tests.~ts.maintenance.case2", [Locator])), - Resource = rabbit_misc:r(<<"/">>, queue, QName), - Record = declare(Config, Resource, false, false, _Args = [], none), - %% when queue master locator returns no node, the node that handles - %% the declaration method will be used as a fallback - ?assertEqual(rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - node(amqqueue:get_pid(Record))), - - rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 0), - rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 1), - rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 2). - -%% -%% Test 'calculations' -%% - -calculate_min_master(Config) -> - setup_test_environment(Config), - QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>), - Args = [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}], - declare(Config, QueueName, false, false, Args, none), - verify_min_master(Config, Q), - ok. 
- -calculate_min_master_with_bindings(Config) -> - setup_test_environment(Config), - QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test_bound">>), - Args = [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}], - declare(Config, QueueName, false, false, Args, none), - verify_min_master(Config, Q), - %% Add 20 bindings to this queue - [ bind(Config, QueueName, integer_to_binary(N)) || N <- lists:seq(1, 20) ], - - QueueName1 = rabbit_misc:r(<<"/">>, queue, Q1 = <<"qm.test_unbound">>), - declare(Config, QueueName1, false, false, Args, none), - % Another queue should still be on the same node, bindings should - % not account for min-masters counting - verify_min_master(Config, Q1), - ok. - -calculate_random(Config) -> - setup_test_environment(Config), - QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>), - Args = [{<<"x-queue-master-locator">>, longstr, <<"random">>}], - declare(Config, QueueName, false, false, Args, none), - verify_random(Config, Q), - ok. - -calculate_client_local(Config) -> - setup_test_environment(Config), - QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>), - Args = [{<<"x-queue-master-locator">>, longstr, <<"client-local">>}], - declare(Config, QueueName, false, false, Args, none), - verify_client_local(Config, Q), - ok. - -%% -%% Setup environment -%% - -setup_test_environment(Config) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - [distribute_queues(Config, Node) || Node <- Nodes], - ok. - -distribute_queues(Config, Node) -> - ok = rpc:call(Node, application, unset_env, [rabbit, queue_master_location]), - Count = case rabbit_ct_broker_helpers:nodename_to_index(Config, Node) of - 0 -> 15; - 1 -> 8; - 2 -> 1 - end, - - Channel = rabbit_ct_client_helpers:open_channel(Config, Node), - ok = declare_queues(Channel, declare_fun(), Count), - ok = create_e2e_binding(Channel, [<< "ex_1" >>, << "ex_2" >>]), - {ok, Channel}. - -%% -%% Internal queue handling -%% - -declare_queues(Channel, DeclareFun, 1) -> DeclareFun(Channel); -declare_queues(Channel, DeclareFun, N) -> - DeclareFun(Channel), - declare_queues(Channel, DeclareFun, N-1). - -declare_exchange(Channel, Ex) -> - #'exchange.declare_ok'{} = - amqp_channel:call(Channel, #'exchange.declare'{exchange = Ex}), - {ok, Ex}. - -declare_binding(Channel, Binding) -> - #'exchange.bind_ok'{} = amqp_channel:call(Channel, Binding), - ok. - -declare_fun() -> - fun(Channel) -> - #'queue.declare_ok'{} = amqp_channel:call(Channel, get_random_queue_declare()), - ok - end. - -create_e2e_binding(Channel, ExNamesBin) -> - [{ok, Ex1}, {ok, Ex2}] = [declare_exchange(Channel, Ex) || Ex <- ExNamesBin], - Binding = #'exchange.bind'{source = Ex1, destination = Ex2}, - ok = declare_binding(Channel, Binding). - -get_random_queue_declare() -> - #'queue.declare'{passive = false, - durable = false, - exclusive = true, - auto_delete = false, - nowait = false, - arguments = []}. - -%% -%% Internal helper functions -%% - -get_cluster() -> [node()|nodes()]. - -min_master_node(Config) -> - hd(lists:reverse( - rabbit_ct_broker_helpers:get_node_configs(Config, nodename))). - -set_location_config(Config, Strategy) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - [ok = rabbit_ct_broker_helpers:rpc(Config, Node, - application, set_env, - [rabbit, queue_master_locator, Strategy]) || Node <- Nodes], - ok. 
- -unset_location_config(Config) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - [ok = rabbit_ct_broker_helpers:rpc(Config, Node, - application, unset_env, - [rabbit, queue_master_locator]) || Node <- Nodes], - ok. - -declare(Config, QueueName, Durable, AutoDelete, Args0, Owner) -> - Args1 = [QueueName, Durable, AutoDelete, Args0, Owner, <<"acting-user">>], - case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, declare, Args1) of - {new, Queue} -> Queue; - Other -> Other - end. - -bind(Config, QueueName, RoutingKey) -> - ExchangeName = rabbit_misc:r(QueueName, exchange, <<"amq.direct">>), - - ok = rabbit_ct_broker_helpers:rpc( - Config, 0, rabbit_binding, add, - [#binding{source = ExchangeName, - destination = QueueName, - key = RoutingKey, - args = []}, - <<"acting-user">>]). - -verify_min_master(Config, Q, MinMasterNode) -> - Rpc = rabbit_ct_broker_helpers:rpc(Config, 0, - rabbit_queue_master_location_misc, - lookup_master, [Q, ?DEFAULT_VHOST_PATH]), - ?assertEqual({ok, MinMasterNode}, Rpc). - -verify_min_master(Config, Q) -> - MinMaster = min_master_node(Config), - verify_min_master(Config, Q, MinMaster). - -verify_random(Config, Q) -> - [Node | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - {ok, Master} = rabbit_ct_broker_helpers:rpc(Config, Node, - rabbit_queue_master_location_misc, - lookup_master, [Q, ?DEFAULT_VHOST_PATH]), - ?assert(lists:member(Master, Nodes)). - -verify_client_local(Config, Q) -> - Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - Rpc = rabbit_ct_broker_helpers:rpc(Config, Node, - rabbit_queue_master_location_misc, - lookup_master, [Q, ?DEFAULT_VHOST_PATH]), - ?assertEqual({ok, Node}, Rpc). - -set_location_policy(Config, Name, Strategy) -> - ok = rabbit_ct_broker_helpers:set_policy(Config, 0, - Name, <<".*">>, <<"queues">>, [{<<"queue-master-locator">>, Strategy}]). - -synced(Config, Nodename, Q, ExpectedSSPidLen) -> - Args = [<<"/">>, [name, synchronised_slave_pids]], - Info = rabbit_ct_broker_helpers:rpc(Config, Nodename, - rabbit_amqqueue, info_all, Args), - [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info, Q =:= Q1], - length(SSPids) =:= ExpectedSSPidLen. diff --git a/deps/rabbit/test/queue_parallel_SUITE.erl b/deps/rabbit/test/queue_parallel_SUITE.erl index 9b53c778c9f9..2b4c4735bcd6 100644 --- a/deps/rabbit/test/queue_parallel_SUITE.erl +++ b/deps/rabbit/test/queue_parallel_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% -module(queue_parallel_SUITE). 
@@ -56,7 +56,8 @@ groups() -> purge, purge_no_consumer, basic_recover, - delete_immediately_by_resource + delete_immediately_by_resource, + cc_header_non_array_should_close_channel ], ExtraBccTests = [extra_bcc_option, extra_bcc_option_multiple_1, @@ -66,11 +67,7 @@ groups() -> {parallel_tests, [], [ {classic_queue, GroupOptions, AllTests ++ [delete_immediately_by_pid_succeeds, trigger_message_store_compaction]}, - {mirrored_queue, GroupOptions, AllTests ++ [delete_immediately_by_pid_succeeds, - trigger_message_store_compaction]}, {quorum_queue, GroupOptions, AllTests ++ ExtraBccTests ++ [delete_immediately_by_pid_fails]}, - {quorum_queue_in_memory_limit, GroupOptions, AllTests ++ [delete_immediately_by_pid_fails]}, - {quorum_queue_in_memory_bytes, GroupOptions, AllTests ++ [delete_immediately_by_pid_fails]}, {stream_queue, GroupOptions, ExtraBccTests ++ [publish, subscribe]} ]} ]. @@ -103,29 +100,6 @@ init_per_group(quorum_queue, Config) -> [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]}, {consumer_args, []}, {queue_durable, true}]); -init_per_group(quorum_queue_in_memory_limit, Config) -> - rabbit_ct_helpers:set_config( - Config, - [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-length">>, long, 1}]}, - {consumer_args, []}, - {queue_durable, true}]); -init_per_group(quorum_queue_in_memory_bytes, Config) -> - rabbit_ct_helpers:set_config( - Config, - [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-max-in-memory-bytes">>, long, 1}]}, - {consumer_args, []}, - {queue_durable, true}]); -init_per_group(mirrored_queue, Config) -> - rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>, - <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]), - Config1 = rabbit_ct_helpers:set_config( - Config, [{is_mirrored, true}, - {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]}, - {consumer_args, []}, - {queue_durable, true}]), - rabbit_ct_helpers:run_steps(Config1, []); init_per_group(stream_queue, Config) -> rabbit_ct_helpers:set_config( Config, @@ -136,10 +110,11 @@ init_per_group(Group, Config0) -> case lists:member({group, Group}, all()) of true -> ClusterSize = 3, + Tick = 256, Config = rabbit_ct_helpers:merge_app_env( - Config0, {rabbit, [{channel_tick_interval, 1000}, - {quorum_tick_interval, 1000}, - {stream_tick_interval, 1000}]}), + Config0, {rabbit, [{channel_tick_interval, Tick}, + {quorum_tick_interval, Tick}, + {stream_tick_interval, Tick}]}), Config1 = rabbit_ct_helpers:set_config( Config, [ {rmq_nodename_suffix, Group}, {rmq_nodes_count, ClusterSize} @@ -670,6 +645,30 @@ delete_immediately_by_resource(Config) -> rabbit_ct_client_helpers:close_channel(Ch), ok. +cc_header_non_array_should_close_channel(Config) -> + {C, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + Name0 = ?FUNCTION_NAME, + Name = atom_to_binary(Name0), + QName = <<"queue_cc_header_non_array", Name/binary>>, + delete_queue(Ch, QName), + declare_queue(Ch, Config, QName), + amqp_channel:call(Ch, + #'basic.publish'{exchange = <<"">>, + routing_key = QName}, + #amqp_msg{ + props = #'P_basic'{headers = [{<<"CC">>, long, 99}]}, + payload = <<"foo">>}), + + Ref = erlang:monitor(process, Ch), + receive + {'DOWN', Ref, process, Ch, {shutdown, {server_initiated_close, 406, _}}} -> + ok + after 5000 -> + exit(channel_closed_timeout) + end, + + ok = rabbit_ct_client_helpers:close_connection(C). 
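The cc_header_non_array_should_close_channel case above publishes a CC header of type long and expects the broker to close the channel with a 406 (precondition failed). For contrast, a well-formed CC header (sender-selected distribution) is an array of longstr routing keys; the snippet below is a hedged sketch only, reusing Ch and QName from the test above, and the <<"some-other-queue">> routing key is made up — it is not part of the suite.

%% Sketch of a valid CC header: an array of longstr routing keys.
%% The extra routing key below is hypothetical.
amqp_channel:call(Ch,
                  #'basic.publish'{exchange = <<"">>, routing_key = QName},
                  #amqp_msg{
                     props = #'P_basic'{headers = [{<<"CC">>, array,
                                                    [{longstr, <<"some-other-queue">>}]}]},
                     payload = <<"foo">>}).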
+ extra_bcc_option(Config) -> {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), Name0 = ?FUNCTION_NAME, diff --git a/deps/rabbit/test/queue_type_SUITE.erl b/deps/rabbit/test/queue_type_SUITE.erl index d787ea4a8ab8..d89859e4703b 100644 --- a/deps/rabbit/test/queue_type_SUITE.erl +++ b/deps/rabbit/test/queue_type_SUITE.erl @@ -35,9 +35,13 @@ groups() -> ]. init_per_suite(Config0) -> + Tick = 256, rabbit_ct_helpers:log_environment(), Config = rabbit_ct_helpers:merge_app_env( - Config0, {rabbit, [{quorum_tick_interval, 1000}]}), + Config0, {rabbit, [ + {quorum_tick_interval, Tick}, + {stream_tick_interval, Tick} + ]}), rabbit_ct_helpers:run_setup_steps(Config). end_per_suite(Config) -> @@ -45,39 +49,30 @@ end_per_suite(Config) -> ok. init_per_group(Group, Config) -> - ct:pal("init per group ~p", [Group]), ClusterSize = 3, Config1 = rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, ClusterSize}, {rmq_nodename_suffix, Group}, - {tcp_ports_base}]), + {tcp_ports_base, {skip_n_nodes, ClusterSize}} + ]), Config1b = rabbit_ct_helpers:set_config(Config1, [{queue_type, atom_to_binary(Group, utf8)}, - {net_ticktime, 10}]), + {net_ticktime, 5} + ]), Config2 = rabbit_ct_helpers:run_steps(Config1b, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()), - ok = rabbit_ct_broker_helpers:rpc( - Config2, 0, application, set_env, - [rabbit, channel_tick_interval, 100]), - %% HACK: the larger cluster sizes benefit for a bit more time - %% after clustering before running the tests. - Config3 = case Group of - cluster_size_5 -> - timer:sleep(5000), - Config2; - _ -> - Config2 - end, - EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(Config3, - message_containers), - ct:pal("message_containers ff ~p", [EnableFF]), - - rabbit_ct_broker_helpers:set_policy( - Config3, 0, - <<"ha-policy">>, <<".*">>, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}]), - Config3. + case Config2 of + {skip, _Reason} = Skip -> + %% To support mixed-version clusters, + %% Khepri feature flag is unsupported + Skip; + _ -> + ok = rabbit_ct_broker_helpers:rpc( + Config2, 0, application, set_env, + [rabbit, channel_tick_interval, 100]), + Config2 + end. merge_app_env(Config) -> rabbit_ct_helpers:merge_app_env( @@ -244,6 +239,12 @@ stream(Config) -> SubCh = rabbit_ct_client_helpers:open_channel(Config, 2), qos(SubCh, 10, false), + %% wait for local replica + rabbit_ct_helpers:await_condition( + fun() -> + queue_utils:has_local_stream_member(Config, 2, QName, <<"/">>) + end, 60000), + try amqp_channel:subscribe( SubCh, #'basic.consume'{queue = QName, diff --git a/deps/rabbit/test/quorum_queue_utils.erl b/deps/rabbit/test/queue_utils.erl similarity index 77% rename from deps/rabbit/test/quorum_queue_utils.erl rename to deps/rabbit/test/queue_utils.erl index 55355b43ee1d..3fbf143aeceb 100644 --- a/deps/rabbit/test/quorum_queue_utils.erl +++ b/deps/rabbit/test/queue_utils.erl @@ -1,4 +1,4 @@ --module(quorum_queue_utils). +-module(queue_utils). -include_lib("eunit/include/eunit.hrl"). @@ -13,20 +13,28 @@ dirty_query/3, ra_name/1, fifo_machines_use_same_version/1, - fifo_machines_use_same_version/2 + fifo_machines_use_same_version/2, + has_local_stream_member/4, + has_local_stream_member_rpc/1 ]). +-define(WFM_SLEEP, 256). +-define(WFM_DEFAULT_NUMS, 30_000 div ?WFM_SLEEP). %% ~30s + wait_for_messages_ready(Servers, QName, Ready) -> wait_for_messages(Servers, QName, Ready, - fun rabbit_fifo:query_messages_ready/1, 60). + fun rabbit_fifo:query_messages_ready/1, + ?WFM_DEFAULT_NUMS). 
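As a sanity check of the "~30s" note on ?WFM_DEFAULT_NUMS above, the retry budget works out as follows (plain arithmetic, not part of the suite):

%% ?WFM_SLEEP = 256 ms and ?WFM_DEFAULT_NUMS = 30_000 div 256 = 117 retries;
%% 117 retries * 256 ms sleep = 29_952 ms, i.e. just under 30 seconds per wait loop.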
wait_for_messages_pending_ack(Servers, QName, Ready) -> wait_for_messages(Servers, QName, Ready, - fun rabbit_fifo:query_messages_checked_out/1, 60). + fun rabbit_fifo:query_messages_checked_out/1, + ?WFM_DEFAULT_NUMS). wait_for_messages_total(Servers, QName, Total) -> wait_for_messages(Servers, QName, Total, - fun rabbit_fifo:query_messages_total/1, 60). + fun rabbit_fifo:query_messages_total/1, + ?WFM_DEFAULT_NUMS). wait_for_messages(Servers, QName, Number, Fun, 0) -> Msgs = dirty_query(Servers, QName, Fun), @@ -50,12 +58,12 @@ wait_for_messages(Servers, QName, Number, Fun, N) -> true -> ok; _ -> - timer:sleep(500), + timer:sleep(?WFM_SLEEP), wait_for_messages(Servers, QName, Number, Fun, N - 1) end. wait_for_messages(Config, Stats) -> - wait_for_messages(Config, lists:sort(Stats), 60). + wait_for_messages(Config, lists:sort(Stats), ?WFM_DEFAULT_NUMS). wait_for_messages(Config, Stats, 0) -> ?assertEqual(Stats, @@ -73,12 +81,12 @@ wait_for_messages(Config, Stats, N) -> Stats0 when Stats0 == Stats -> ok; _ -> - timer:sleep(500), + timer:sleep(?WFM_SLEEP), wait_for_messages(Config, Stats, N - 1) end. wait_for_min_messages(Config, Queue, Msgs) -> - wait_for_min_messages(Config, Queue, Msgs, 60). + wait_for_min_messages(Config, Queue, Msgs, ?WFM_DEFAULT_NUMS). wait_for_min_messages(Config, Queue, Msgs, 0) -> [[_, Got]] = filter_queues([[Queue, Msgs]], @@ -95,16 +103,16 @@ wait_for_min_messages(Config, Queue, Msgs, N) -> true -> ok; false -> - timer:sleep(500), + timer:sleep(?WFM_SLEEP), wait_for_min_messages(Config, Queue, Msgs, N - 1) end; _ -> - timer:sleep(500), + timer:sleep(?WFM_SLEEP), wait_for_min_messages(Config, Queue, Msgs, N - 1) end. wait_for_max_messages(Config, Queue, Msgs) -> - wait_for_max_messages(Config, Queue, Msgs, 60). + wait_for_max_messages(Config, Queue, Msgs, ?WFM_DEFAULT_NUMS). wait_for_max_messages(Config, Queue, Msgs, 0) -> [[_, Got]] = filter_queues([[Queue, Msgs]], @@ -121,11 +129,11 @@ wait_for_max_messages(Config, Queue, Msgs, N) -> true -> ok; false -> - timer:sleep(500), + timer:sleep(?WFM_SLEEP), wait_for_max_messages(Config, Queue, Msgs, N - 1) end; _ -> - timer:sleep(500), + timer:sleep(?WFM_SLEEP), wait_for_max_messages(Config, Queue, Msgs, N - 1) end. @@ -161,3 +169,23 @@ fifo_machines_use_same_version(Config, Nodenames) rabbit_fifo, version, [])) || Nodename <- Nodenames], lists:all(fun(V) -> V =:= MachineAVersion end, OtherMachinesVersions). + +has_local_stream_member(Config, Node, QName, VHost) -> + QRes = rabbit_misc:r(VHost, queue, QName), + rabbit_ct_broker_helpers:rpc(Config, Node, ?MODULE, + has_local_stream_member_rpc, + [QRes]). + +has_local_stream_member_rpc(QName) -> + case rabbit_amqqueue:lookup(QName) of + {ok, Q} -> + #{name := StreamId} = amqqueue:get_type_state(Q), + case rabbit_stream_coordinator:local_pid(StreamId) of + {ok, Pid} -> + is_process_alive(Pid); + _ -> + false + end; + _Err -> + false + end. diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index 4fe63b6fcf25..d34253beb793 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(quorum_queue_SUITE). 
@@ -11,15 +11,22 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). --import(quorum_queue_utils, [wait_for_messages_ready/3, - wait_for_messages_pending_ack/3, - wait_for_messages_total/3, - wait_for_messages/2, - dirty_query/3, - ra_name/1]). +-import(queue_utils, [wait_for_messages_ready/3, + wait_for_messages_pending_ack/3, + wait_for_messages_total/3, + wait_for_messages/2, + dirty_query/3, + ra_name/1]). + +-import(clustering_utils, [ + assert_cluster_status/2, + assert_clustered/1 + ]). -compile([nowarn_export_all, export_all]). + +-define(NET_TICKTIME_S, 5). -define(DEFAULT_AWAIT, 10_000). suite() -> @@ -34,14 +41,16 @@ all() -> groups() -> [ - {single_node, [], all_tests() - ++ memory_tests() - ++ [node_removal_is_quorum_critical]}, + {single_node, [], all_tests() ++ + memory_tests() ++ + [node_removal_is_quorum_critical, + format]}, {unclustered, [], [ {uncluster_size_2, [], [add_member]} ]}, {clustered, [], [ - {cluster_size_2, [], [add_member_not_running, + {cluster_size_2, [], [add_member_2, + add_member_not_running, add_member_classic, add_member_wrong_type, add_member_already_a_member, @@ -52,10 +61,11 @@ groups() -> delete_member_queue_not_found, delete_member, delete_member_not_a_member, - node_removal_is_quorum_critical, - cleanup_data_dir] + delete_member_member_already_deleted, + node_removal_is_quorum_critical] ++ memory_tests()}, {cluster_size_3, [], [ + cleanup_data_dir, channel_handles_ra_event, declare_during_node_down, simple_confirm_availability_on_leader_change, @@ -72,14 +82,17 @@ groups() -> reject_after_leader_transfer, shrink_all, rebalance, - file_handle_reservations, - file_handle_reservations_above_limit, node_removal_is_not_quorum_critical, leader_locator_client_local, leader_locator_balanced, leader_locator_balanced_maintenance, leader_locator_balanced_random_maintenance, - leader_locator_policy + leader_locator_policy, + status, + format, + add_member_2, + single_active_consumer_priority_take_over, + single_active_consumer_priority ] ++ all_tests()}, {cluster_size_5, [], [start_queue, @@ -128,6 +141,7 @@ all_tests() -> sync_queue, cancel_sync_queue, idempotent_recover, + server_system_recover, vhost_with_quorum_queue_is_deleted, vhost_with_default_queue_type_declares_quorum_queue, delete_immediately_by_resource, @@ -148,6 +162,7 @@ all_tests() -> delete_if_unused, queue_ttl, peek, + oldest_entry_timestamp, peek_with_wrong_queue_type, message_ttl, message_ttl_policy, @@ -157,8 +172,12 @@ all_tests() -> consumer_priorities, cancel_consumer_gh_3729, cancel_and_consume_with_same_tag, - validate_messages_on_queue - + validate_messages_on_queue, + amqpl_headers, + priority_queue_fifo, + priority_queue_2_1_ratio, + requeue_multiple_true, + requeue_multiple_false ]. memory_tests() -> @@ -175,7 +194,7 @@ memory_tests() -> init_per_suite(Config0) -> rabbit_ct_helpers:log_environment(), Config1 = rabbit_ct_helpers:merge_app_env( - Config0, {rabbit, [{quorum_tick_interval, 1000}]}), + Config0, {rabbit, [{quorum_tick_interval, 256}]}), rabbit_ct_helpers:run_setup_steps(Config1, []). 
end_per_suite(Config) -> @@ -193,7 +212,8 @@ init_per_group(clustered_with_partitions, Config0) -> Config1 = rabbit_ct_helpers:run_setup_steps( Config0, [fun rabbit_ct_broker_helpers:configure_dist_proxy/1]), - Config2 = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]), + Config2 = rabbit_ct_helpers:set_config(Config1, + [{net_ticktime, ?NET_TICKTIME_S}]), Config2 end; init_per_group(Group, Config) -> @@ -212,23 +232,22 @@ init_per_group(Group, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, ClusterSize}, {rmq_nodename_suffix, Group}, - {tcp_ports_base}]), - Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]), - Ret = rabbit_ct_helpers:run_steps(Config1b, + {tcp_ports_base, {skip_n_nodes, ClusterSize}}, + {net_ticktime, ?NET_TICKTIME_S} + ]), + Ret = rabbit_ct_helpers:run_steps(Config1, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()), case Ret of {skip, _} -> Ret; Config2 -> - _ = rabbit_ct_broker_helpers:enable_feature_flag(Config2, message_containers), + Res = rabbit_ct_broker_helpers:enable_feature_flag( + Config2, 'rabbitmq_4.0.0'), + ct:pal("rabbitmq_4.0.0 enable result ~p", [Res]), ok = rabbit_ct_broker_helpers:rpc( Config2, 0, application, set_env, [rabbit, channel_tick_interval, 100]), - %% HACK: the larger cluster sizes benefit for a bit - %% more time after clustering before running the - %% tests. - timer:sleep(ClusterSize * 1000), Config2 end end. @@ -251,7 +270,7 @@ init_per_testcase(Testcase, Config) when Testcase == reconnect_consumer_and_publ Config2 = rabbit_ct_helpers:set_config(Config1, [{rmq_nodes_count, 3}, {rmq_nodename_suffix, Testcase}, - {tcp_ports_base}, + {tcp_ports_base, {skip_n_nodes, 3}}, {queue_name, Q}, {alt_queue_name, <>} ]), @@ -567,7 +586,7 @@ start_queue_concurrent(Config) -> [{<<"x-queue-type">>, longstr, <<"quorum">>}])), - timer:sleep(500), + timer:sleep(100), rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch), Self ! {done, Server} end) @@ -661,8 +680,11 @@ restart_queue(Config) -> ok = rabbit_ct_broker_helpers:start_node(Config, Server), %% Check that the application and one ra node are up - ?assertMatch({ra, _, _}, lists:keyfind(ra, 1, - rpc:call(Server, application, which_applications, []))), + %% The node has just been restarted, let's give it a bit of time to be ready if needed + ?awaitMatch({ra, _, _}, + lists:keyfind(ra, 1, + rpc:call(Server, application, which_applications, [])), + ?DEFAULT_AWAIT), Expected = Children + 1, ?assertMatch(Expected, length(rpc:call(Server, supervisor, which_children, [?SUPNAME]))), @@ -706,6 +728,31 @@ idempotent_recover(Config) -> end, ?DEFAULT_AWAIT), ok. +server_system_recover(Config) -> + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + LQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', LQ, 0, 0}, + declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + + RaName = ra_name(LQ), + _ = ra:members({RaName, Server}), + EtsPid = ct_rpc:call(Server, erlang, whereis, [ra_log_ets]), + ?assert(is_pid(EtsPid)), + + true = ct_rpc:call(Server, erlang, exit, [EtsPid, kill]), + + %% validate quorum queue is still functional + ?awaitMatch({ok, _, _}, + begin + %% there is a small chance that a quorum queue process will crash + %% due to missing ETS table, in this case we need to keep + %% retrying awaiting the restart + catch ra:members({RaName, Server}) + end, ?DEFAULT_AWAIT), + ok. 
+ vhost_with_quorum_queue_is_deleted(Config) -> Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), VHost = <<"vhost2">>, @@ -866,7 +913,7 @@ stop_start_rabbit_app(Config) -> false -> true; {ra, _, _} -> false end - end), + end, 30000), ?assertEqual(ok, rabbit_control_helper:command(start_app, Server)), @@ -878,7 +925,7 @@ stop_start_rabbit_app(Config) -> false -> false; {ra, _, _} -> true end - end), + end, 30000), Expected = Children + 2, ?assertMatch(Expected, length(rpc:call(Server, supervisor, which_children, [?SUPNAME]))), @@ -905,6 +952,7 @@ publish_confirm(Ch, QName, Timeout) -> ct:pal("NOT CONFIRMED! ~ts", [QName]), fail after Timeout -> + flush(1), exit(confirm_timeout) end. @@ -937,17 +985,198 @@ consume_in_minority(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), QQ = ?config(queue_name, Config), + RaName = binary_to_atom(<<"%2F_", QQ/binary>>, utf8), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), + rabbit_quorum_queue:stop_server({RaName, Server1}), + rabbit_quorum_queue:stop_server({RaName, Server2}), ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, amqp_channel:call(Ch, #'basic.get'{queue = QQ, no_ack = false})), - ok = rabbit_ct_broker_helpers:start_node(Config, Server1), - ok = rabbit_ct_broker_helpers:start_node(Config, Server2), + + rabbit_quorum_queue:restart_server({RaName, Server1}), + rabbit_quorum_queue:restart_server({RaName, Server2}), + ok. + +single_active_consumer_priority_take_over(Config) -> + check_quorum_queues_v4_compat(Config), + + [Server0, Server1, _Server2] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server0), + Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server1), + QName = ?config(queue_name, Config), + Q1 = <>, + RaNameQ1 = binary_to_atom(<<"%2F", "_", Q1/binary>>, utf8), + QueryFun = fun rabbit_fifo:query_single_active_consumer/1, + Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-single-active-consumer">>, bool, true}], + ?assertEqual({'queue.declare_ok', Q1, 0, 0}, declare(Ch1, Q1, Args)), + ok = subscribe(Ch1, Q1, false, <<"ch1-ctag1">>, [{"x-priority", byte, 1}]), + ?assertMatch({ok, {_, {value, {<<"ch1-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ1, QueryFun])), + #'confirm.select_ok'{} = amqp_channel:call(Ch2, #'confirm.select'{}), + publish_confirm(Ch2, Q1), + %% higher priority consumer attaches + ok = subscribe(Ch2, Q1, false, <<"ch2-ctag1">>, [{"x-priority", byte, 3}]), + + %% Q1 should still have Ch1 as consumer as it has pending messages + ?assertMatch({ok, {_, {value, {<<"ch1-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, + [RaNameQ1, QueryFun])), + + %% ack the message + receive + {#'basic.deliver'{consumer_tag = <<"ch1-ctag1">>, + delivery_tag = DeliveryTag}, _} -> + amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag, + multiple = false}) + after 5000 -> + flush(1), + exit(basic_deliver_timeout) + end, + + ?awaitMatch({ok, {_, {value, {<<"ch2-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ1, QueryFun]), + ?DEFAULT_AWAIT), + ok. 
+ +single_active_consumer_priority(Config) -> + check_quorum_queues_v4_compat(Config), + [Server0, Server1, Server2] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server0), + Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server1), + Ch3 = rabbit_ct_client_helpers:open_channel(Config, Server2), + QName = ?config(queue_name, Config), + Q1 = <>, + Q2 = <>, + Q3 = <>, + Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-single-active-consumer">>, bool, true}], + ?assertEqual({'queue.declare_ok', Q1, 0, 0}, declare(Ch1, Q1, Args)), + ?assertEqual({'queue.declare_ok', Q2, 0, 0}, declare(Ch2, Q2, Args)), + ?assertEqual({'queue.declare_ok', Q3, 0, 0}, declare(Ch3, Q3, Args)), + + ok = subscribe(Ch1, Q1, false, <<"ch1-ctag1">>, [{"x-priority", byte, 3}]), + ok = subscribe(Ch1, Q2, false, <<"ch1-ctag2">>, [{"x-priority", byte, 2}]), + ok = subscribe(Ch1, Q3, false, <<"ch1-ctag3">>, [{"x-priority", byte, 1}]), + + + ok = subscribe(Ch2, Q1, false, <<"ch2-ctag1">>, [{"x-priority", byte, 1}]), + ok = subscribe(Ch2, Q2, false, <<"ch2-ctag2">>, [{"x-priority", byte, 3}]), + ok = subscribe(Ch2, Q3, false, <<"ch2-ctag3">>, [{"x-priority", byte, 2}]), + + ok = subscribe(Ch3, Q1, false, <<"ch3-ctag1">>, [{"x-priority", byte, 2}]), + ok = subscribe(Ch3, Q2, false, <<"ch3-ctag2">>, [{"x-priority", byte, 1}]), + ok = subscribe(Ch3, Q3, false, <<"ch3-ctag3">>, [{"x-priority", byte, 3}]), + + + RaNameQ1 = binary_to_atom(<<"%2F", "_", Q1/binary>>, utf8), + RaNameQ2 = binary_to_atom(<<"%2F", "_", Q2/binary>>, utf8), + RaNameQ3 = binary_to_atom(<<"%2F", "_", Q3/binary>>, utf8), + %% assert each queue has a different consumer + QueryFun = fun rabbit_fifo:query_single_active_consumer/1, + + %% Q1 should have the consumer on Ch1 + ?assertMatch({ok, {_, {value, {<<"ch1-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ1, QueryFun])), + + %% Q2 Ch2 + ?assertMatch({ok, {_, {value, {<<"ch2-ctag2">>, _}}}, _}, + rpc:call(Server1, ra, local_query, [RaNameQ2, QueryFun])), + + %% Q3 Ch3 + ?assertMatch({ok, {_, {value, {<<"ch3-ctag3">>, _}}}, _}, + rpc:call(Server2, ra, local_query, [RaNameQ3, QueryFun])), + + %% close Ch3 + _ = rabbit_ct_client_helpers:close_channel(Ch3), + flush(100), + + %% assert Q3 has Ch2 (priority 2) as consumer + ?assertMatch({ok, {_, {value, {<<"ch2-ctag3">>, _}}}, _}, + rpc:call(Server2, ra, local_query, [RaNameQ3, QueryFun])), + + %% close Ch2 + _ = rabbit_ct_client_helpers:close_channel(Ch2), + flush(100), + + %% assert all queues as has Ch1 as consumer + ?assertMatch({ok, {_, {value, {<<"ch1-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ1, QueryFun])), + ?assertMatch({ok, {_, {value, {<<"ch1-ctag2">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ2, QueryFun])), + ?assertMatch({ok, {_, {value, {<<"ch1-ctag3">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ3, QueryFun])), + ok. 
+ +priority_queue_fifo(Config) -> + %% testing: if hi priority messages are published before lo priority + %% messages they are always consumed first (fifo) + check_quorum_queues_v4_compat(Config), + [Server0 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), + Queue = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Queue, 0, 0}, + declare(Ch, Queue, + [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ExpectedHi = + [begin + MsgP5 = integer_to_binary(P), + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = Queue}, + #amqp_msg{props = #'P_basic'{priority = P}, + payload = MsgP5}), + MsgP5 + %% high priority is > 4 + end || P <- lists:seq(5, 10)], + + ExpectedLo = + [begin + MsgP1 = integer_to_binary(P), + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = Queue}, + #amqp_msg{props = #'P_basic'{priority = P}, + payload = MsgP1}), + MsgP1 + end || P <- lists:seq(0, 4)], + + validate_queue(Ch, Queue, ExpectedHi ++ ExpectedLo), + ok. + +priority_queue_2_1_ratio(Config) -> + %% testing: if lo priority messages are published before hi priority + %% messages are consumed in a 2:1 hi to lo ratio + check_quorum_queues_v4_compat(Config), + [Server0 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), + Queue = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Queue, 0, 0}, + declare(Ch, Queue, + [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ExpectedLo = + [begin + MsgP1 = integer_to_binary(P), + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = Queue}, + #amqp_msg{props = #'P_basic'{priority = P}, + payload = MsgP1}), + MsgP1 + end || P <- lists:seq(0, 4)], + ExpectedHi = + [begin + MsgP5 = integer_to_binary(P), + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = Queue}, + #amqp_msg{props = #'P_basic'{priority = P}, + payload = MsgP5}), + MsgP5 + %% high priority is > 4 + end || P <- lists:seq(5, 14)], + + Expected = lists_interleave(ExpectedLo, ExpectedHi), + + validate_queue(Ch, Queue, Expected), ok. reject_after_leader_transfer(Config) -> @@ -1117,7 +1346,7 @@ test_dead_lettering(PolicySet, Config, Ch, Servers, RaName, Source, Destination) wait_for_messages_ready(Servers, RaName, 1), wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]), - DeliveryTag = consume(Ch, Source, false), + DeliveryTag = basic_get_tag(Ch, Source, false), wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 1), wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]), @@ -1129,7 +1358,7 @@ test_dead_lettering(PolicySet, Config, Ch, Servers, RaName, Source, Destination) case PolicySet of true -> wait_for_messages(Config, [[Destination, <<"1">>, <<"1">>, <<"0">>]]), - _ = consume(Ch, Destination, true); + _ = basic_get_tag(Ch, Destination, true); false -> wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]) end. 
@@ -1160,12 +1389,12 @@ invalid_policy(Config) -> ?assertEqual({'queue.declare_ok', QQ, 0, 0}, declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), ok = rabbit_ct_broker_helpers:set_policy( - Config, 0, <<"ha">>, <<"invalid_policy.*">>, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}]), + Config, 0, <<"max-age">>, <<"invalid_policy.*">>, <<"queues">>, + [{<<"max-age">>, <<"5s">>}]), Info = rpc:call(Server, rabbit_quorum_queue, infos, [rabbit_misc:r(<<"/">>, queue, QQ)]), ?assertEqual('', proplists:get_value(policy, Info)), - ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>). + ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"max-age">>). pre_existing_invalid_policy(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -1173,14 +1402,14 @@ pre_existing_invalid_policy(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config, Server), QQ = ?config(queue_name, Config), ok = rabbit_ct_broker_helpers:set_policy( - Config, 0, <<"ha">>, <<"invalid_policy.*">>, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}]), + Config, 0, <<"max-age">>, <<"invalid_policy.*">>, <<"queues">>, + [{<<"max-age">>, <<"5s">>}]), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), Info = rpc:call(Server, rabbit_quorum_queue, infos, [rabbit_misc:r(<<"/">>, queue, QQ)]), ?assertEqual('', proplists:get_value(policy, Info)), - ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>), + ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"max-age">>), ok. dead_letter_to_quorum_queue(Config) -> @@ -1203,7 +1432,7 @@ dead_letter_to_quorum_queue(Config) -> wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages_ready(Servers, RaName2, 0), wait_for_messages_pending_ack(Servers, RaName2, 0), - DeliveryTag = consume(Ch, QQ, false), + DeliveryTag = basic_get_tag(Ch, QQ, false), wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 1), wait_for_messages_ready(Servers, RaName2, 0), @@ -1215,7 +1444,12 @@ dead_letter_to_quorum_queue(Config) -> wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages_ready(Servers, RaName2, 1), wait_for_messages_pending_ack(Servers, RaName2, 0), - _ = consume(Ch, QQ2, false). + + {#'basic.get_ok'{delivery_tag = _Tag}, + #amqp_msg{} = Msg} = basic_get(Ch, QQ2, false, 1), + ct:pal("Msg ~p", [Msg]), + flush(1000), + ok. dead_letter_from_classic_to_quorum_queue(Config) -> [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -1234,7 +1468,7 @@ dead_letter_from_classic_to_quorum_queue(Config) -> wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages(Config, [[CQ, <<"1">>, <<"1">>, <<"0">>]]), - DeliveryTag = consume(Ch, CQ, false), + DeliveryTag = basic_get_tag(Ch, CQ, false), wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages(Config, [[CQ, <<"1">>, <<"0">>, <<"1">>]]), @@ -1244,7 +1478,7 @@ dead_letter_from_classic_to_quorum_queue(Config) -> wait_for_messages_ready(Servers, RaName, 1), wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages(Config, [[CQ, <<"0">>, <<"0">>, <<"0">>]]), - _ = consume(Ch, QQ, false), + _ = basic_get_tag(Ch, QQ, false), rabbit_ct_client_helpers:close_channel(Ch). 
cleanup_queue_state_on_channel_after_publish(Config) -> @@ -1275,10 +1509,11 @@ cleanup_queue_state_on_channel_after_publish(Config) -> %% then delete the queue and wait for the process to terminate ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch1, #'queue.delete'{queue = QQ})), - wait_until(fun() -> - Children == length(rpc:call(Server, supervisor, which_children, - [?SUPNAME])) - end), + rabbit_ct_helpers:await_condition( + fun() -> + Children == length(rpc:call(Server, supervisor, which_children, + [?SUPNAME])) + end, 30000), %% Check that all queue states have been cleaned wait_for_cleanup(Server, NCh2, 0), wait_for_cleanup(Server, NCh1, 0). @@ -1317,9 +1552,10 @@ cleanup_queue_state_on_channel_after_subscribe(Config) -> wait_for_cleanup(Server, NCh1, 1), wait_for_cleanup(Server, NCh2, 1), ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch1, #'queue.delete'{queue = QQ})), - wait_until(fun() -> - Children == length(rpc:call(Server, supervisor, which_children, [?SUPNAME])) - end), + rabbit_ct_helpers:await_condition( + fun() -> + Children == length(rpc:call(Server, supervisor, which_children, [?SUPNAME])) + end, 30000), %% Check that all queue states have been cleaned wait_for_cleanup(Server, NCh1, 0), wait_for_cleanup(Server, NCh2, 0). @@ -1333,6 +1569,8 @@ recover_from_single_failure(Config) -> declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), + Running = Servers -- [Server2], + assert_cluster_status({Servers, Servers, Running}, Running), RaName = ra_name(QQ), publish(Ch, QQ), @@ -1354,6 +1592,9 @@ recover_from_multiple_failures(Config) -> declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), + Running = Servers -- [Server1], + assert_cluster_status({Servers, Servers, Running}, Running), + RaName = ra_name(QQ), publish(Ch, QQ), @@ -1361,6 +1602,7 @@ recover_from_multiple_failures(Config) -> publish(Ch, QQ), ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), + assert_cluster_status({Servers, Servers, [Server]}, [Server]), publish(Ch, QQ), publish(Ch, QQ), @@ -1378,17 +1620,20 @@ recover_from_multiple_failures(Config) -> wait_for_messages_pending_ack(Servers, RaName, 0). 
publishing_to_unavailable_queue(Config) -> - %% publishing to an unavialable queue but with a reachable member should result - %% in the initial enqueuer session timing out and the message being nacked - [Server, Server1, Server2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + %% publishing to an unavailable queue but with a reachable member should result + %% in the initial enqueuer command that is sent synchronously to set up + %% the enqueuer session timing out and the message being nacked + [Server, Server1, Server2] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), TCh = rabbit_ct_client_helpers:open_channel(Config, Server), QQ = ?config(queue_name, Config), + RaName = binary_to_atom(<<"%2F_", QQ/binary>>, utf8), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, declare(TCh, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), + rabbit_quorum_queue:stop_server({RaName, Server1}), + rabbit_quorum_queue:stop_server({RaName, Server2}), ct:pal("opening channel to ~w", [Server]), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), @@ -1400,33 +1645,39 @@ publishing_to_unavailable_queue(Config) -> #'basic.ack'{} -> fail; #'basic.nack'{} -> ok after 90000 -> + flush(1), exit(confirm_timeout) end, - ok = rabbit_ct_broker_helpers:start_node(Config, Server1), - ?awaitMatch(2, count_online_nodes(Server, <<"/">>, QQ), ?DEFAULT_AWAIT), + rabbit_quorum_queue:restart_server({RaName, Server1}), publish_many(Ch, QQ, 1), %% this should now be acked + %% check we get at least one ack ok = receive #'basic.ack'{} -> ok; #'basic.nack'{} -> fail after 90000 -> + flush(1), exit(confirm_timeout) end, - %% check we get at least on ack - ok = rabbit_ct_broker_helpers:start_node(Config, Server2), + flush(1), + rabbit_quorum_queue:restart_server({RaName, Server2}), ok. leadership_takeover(Config) -> %% Kill nodes in succession forcing the takeover of leadership, and all messages that %% are in the queue.
- [Server, Server1, Server2] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [Server, Server1, Server2] = Servers = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), QQ = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), + ok = rabbit_control_helper:command(stop_app, Server1), + Running = Servers -- [Server1], + assert_cluster_status({Servers, Servers, Running}, Running), + RaName = ra_name(QQ), publish(Ch, QQ), @@ -1436,18 +1687,17 @@ leadership_takeover(Config) -> wait_for_messages_ready([Server], RaName, 3), wait_for_messages_pending_ack([Server], RaName, 0), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), - - ok = rabbit_ct_broker_helpers:start_node(Config, Server1), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server), - ok = rabbit_ct_broker_helpers:start_node(Config, Server2), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), - ok = rabbit_ct_broker_helpers:start_node(Config, Server), + ok = rabbit_control_helper:command(stop_app, Server2), + ok = rabbit_control_helper:command(start_app, Server1), + ok = rabbit_control_helper:command(stop_app, Server), + ok = rabbit_control_helper:command(start_app, Server2), + ok = rabbit_control_helper:command(stop_app, Server1), + ok = rabbit_control_helper:command(start_app, Server), wait_for_messages_ready([Server2, Server], RaName, 3), wait_for_messages_pending_ack([Server2, Server], RaName, 0), - ok = rabbit_ct_broker_helpers:start_node(Config, Server1), + ok = rabbit_control_helper:command(start_app, Server1), wait_for_messages_ready(Servers, RaName, 3), wait_for_messages_pending_ack(Servers, RaName, 0). @@ -1478,18 +1728,19 @@ metrics_cleanup_on_leadership_takeover0(Config) -> wait_for_messages_pending_ack([Server], RaName, 0), {ok, _, {_, Leader}} = ra:members({RaName, Server}), QRes = rabbit_misc:r(<<"/">>, queue, QQ), - wait_until( + rabbit_ct_helpers:await_condition( fun() -> case rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) of [{QRes, 3, 0, 3, _}] -> true; _ -> false end - end), + end, 30000), force_leader_change(Servers, QQ), - wait_until(fun () -> - [] =:= rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) andalso - [] =:= rpc:call(Leader, ets, lookup, [queue_metrics, QRes]) - end), + rabbit_ct_helpers:await_condition( + fun () -> + [] =:= rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) andalso + [] =:= rpc:call(Leader, ets, lookup, [queue_metrics, QRes]) + end, 30000), ok. metrics_cleanup_on_leader_crash(Config) -> @@ -1512,13 +1763,13 @@ metrics_cleanup_on_leader_crash(Config) -> wait_for_messages_pending_ack([Server], RaName, 0), {ok, _, {Name, Leader}} = ra:members({RaName, Server}), QRes = rabbit_misc:r(<<"/">>, queue, QQ), - wait_until( + rabbit_ct_helpers:await_condition( fun() -> case rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) of [{QRes, 3, 0, 3, _}] -> true; _ -> false end - end), + end, 30000), Pid = rpc:call(Leader, erlang, whereis, [Name]), rpc:call(Leader, erlang, exit, [Pid, kill]), [Other | _] = lists:delete(Leader, Servers), @@ -1528,10 +1779,10 @@ metrics_cleanup_on_leader_crash(Config) -> %% this isn't a reliable test as the leader can be restarted so quickly %% after a crash it is elected leader of the next term as well. 
- wait_until( + rabbit_ct_helpers:await_condition( fun() -> [] == rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) - end), + end, 30000), ok. @@ -1626,34 +1877,54 @@ channel_handles_ra_event(Config) -> publish(Ch1, Q2), wait_for_messages(Config, [[Q1, <<"1">>, <<"1">>, <<"0">>]]), wait_for_messages(Config, [[Q2, <<"1">>, <<"1">>, <<"0">>]]), - ?assertEqual(1, consume(Ch1, Q1, false)), - ?assertEqual(2, consume(Ch1, Q2, false)). + ?assertEqual(1, basic_get_tag(Ch1, Q1, false)), + ?assertEqual(2, basic_get_tag(Ch1, Q2, false)). declare_during_node_down(Config) -> [Server, DownServer, _] = Servers = rabbit_ct_broker_helpers:get_node_configs( - Config, nodename), + Config, nodename), stop_node(Config, DownServer), - % rabbit_ct_broker_helpers:stop_node(Config, DownServer), + Running = Servers -- [DownServer], + assert_cluster_status({Servers, Servers, Running}, Running), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), QQ = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), RaName = ra_name(QQ), - {ok, Members0, _} = ra:members({RaName, Server}), %% Since there are not sufficient running nodes, we expect that %% also stopped nodes are selected as replicas. - Members = lists:map(fun({_, N}) -> N end, Members0), - ?assert(same_elements(Members, Servers)), - timer:sleep(2000), + UniqueMembers = lists:usort(Servers), + ?awaitMatch(UniqueMembers, + begin + {ok, Members0, _} = ra:members({RaName, Server}), + Members = lists:map(fun({_, N}) -> N end, Members0), + lists:usort(Members) + end, 30_000), rabbit_ct_broker_helpers:start_node(Config, DownServer), + assert_clustered(Servers), + publish(Ch, QQ), wait_for_messages_ready(Servers, RaName, 1), - ok. + + case rabbit_ct_helpers:is_mixed_versions() of + true -> + %% stop here if mixexd + ok; + false -> + %% further assertions that we can consume from the newly + %% started member + SubCh = rabbit_ct_client_helpers:open_channel(Config, DownServer), + subscribe(SubCh, QQ, false), + receive_and_ack(Ch), + wait_for_messages_ready(Servers, RaName, 0), + ok + end. simple_confirm_availability_on_leader_change(Config) -> - [Node1, Node2, _Node3] = + [Node1, Node2, _Node3] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), %% declare a queue on node2 - this _should_ host the leader on node 2 @@ -1670,6 +1941,9 @@ simple_confirm_availability_on_leader_change(Config) -> %% stop the node hosting the leader ok = rabbit_ct_broker_helpers:stop_node(Config, Node2), + Running = Servers -- [Node2], + assert_cluster_status({Servers, Servers, Running}, Running), + %% this should not fail as the channel should detect the new leader and %% resend to that ok = publish_confirm(Ch, QQ), @@ -1677,7 +1951,7 @@ simple_confirm_availability_on_leader_change(Config) -> ok. confirm_availability_on_leader_change(Config) -> - [Node1, Node2, _Node3] = + [Node1, Node2, _Node3] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), %% declare a queue on node2 - this _should_ host the leader on node 2 @@ -1705,12 +1979,17 @@ confirm_availability_on_leader_change(Config) -> ConfirmLoop() end), - timer:sleep(500), + %% Instead of waiting a random amount of time, let's wait + %% until (at least) 100 new messages are published. 
+ wait_for_new_messages(Config, Node1, QQ, 100), %% stop the node hosting the leader stop_node(Config, Node2), + Running = Servers -- [Node2], + assert_cluster_status({Servers, Servers, Running}, Running), + %% this should not fail as the channel should detect the new leader and %% resend to that - timer:sleep(500), + wait_for_new_messages(Config, Node1, QQ, 100), Publisher ! {done, self()}, receive publisher_done -> @@ -1726,6 +2005,13 @@ confirm_availability_on_leader_change(Config) -> ok = rabbit_ct_broker_helpers:start_node(Config, Node2), ok. +wait_for_new_messages(Config, Node, Name, Increase) -> + Infos = rabbit_ct_broker_helpers:rabbitmqctl_list( + Config, Node, ["list_queues", "name", "messages"]), + [[Name, Msgs0]] = [Props || Props <- Infos, hd(Props) == Name], + Msgs = binary_to_integer(Msgs0), + queue_utils:wait_for_min_messages(Config, Name, Msgs + Increase). + flush(T) -> receive X -> ct:pal("flushed ~p", [X]), @@ -1745,7 +2031,7 @@ add_member_not_running(Config) -> declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), ?assertEqual({error, node_not_running}, rpc:call(Server, rabbit_quorum_queue, add_member, - [<<"/">>, QQ, 'rabbit@burrow', 5000])). + [<<"/">>, QQ, 'rabbit@burrow', voter, 5000])). add_member_classic(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -1754,7 +2040,7 @@ add_member_classic(Config) -> ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])), ?assertEqual({error, classic_queue_not_supported}, rpc:call(Server, rabbit_quorum_queue, add_member, - [<<"/">>, CQ, Server, 5000])). + [<<"/">>, CQ, Server, voter, 5000])). add_member_wrong_type(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -1764,7 +2050,7 @@ add_member_wrong_type(Config) -> declare(Ch, SQ, [{<<"x-queue-type">>, longstr, <<"stream">>}])), ?assertEqual({error, not_quorum_queue}, rpc:call(Server, rabbit_quorum_queue, add_member, - [<<"/">>, SQ, Server, 5000])). + [<<"/">>, SQ, Server, voter, 5000])). add_member_already_a_member(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -1775,14 +2061,14 @@ add_member_already_a_member(Config) -> %% idempotent by design ?assertEqual(ok, rpc:call(Server, rabbit_quorum_queue, add_member, - [<<"/">>, QQ, Server, 5000])). + [<<"/">>, QQ, Server, voter, 5000])). add_member_not_found(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), QQ = ?config(queue_name, Config), ?assertEqual({error, not_found}, rpc:call(Server, rabbit_quorum_queue, add_member, - [<<"/">>, QQ, Server, 5000])). + [<<"/">>, QQ, Server, voter, 5000])). add_member(Config) -> [Server0, Server1] = Servers0 = @@ -1793,17 +2079,37 @@ add_member(Config) -> declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), ?assertEqual({error, node_not_running}, rpc:call(Server0, rabbit_quorum_queue, add_member, - [<<"/">>, QQ, Server1, 5000])), + [<<"/">>, QQ, Server1, voter, 5000])), ok = rabbit_control_helper:command(stop_app, Server1), ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []), rabbit_control_helper:command(start_app, Server1), ?assertEqual(ok, rpc:call(Server1, rabbit_quorum_queue, add_member, - [<<"/">>, QQ, Server1, 5000])), + [<<"/">>, QQ, Server1, voter, 5000])), Info = rpc:call(Server0, rabbit_quorum_queue, infos, [rabbit_misc:r(<<"/">>, queue, QQ)]), Servers = lists:sort(Servers0), ?assertEqual(Servers, lists:sort(proplists:get_value(online, Info, []))). 
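Note the signature change running through these hunks: rabbit_quorum_queue:add_member is now invoked with an explicit membership argument between the node and the timeout, i.e.

ok = rpc:call(Server, rabbit_quorum_queue, add_member,
              [<<"/">>, QQ, Server, voter, 5000]),

Only add_member_2 in the next hunk appears to keep the older four-argument call, since it exercises adding a member when one node may predate the quorum_queue_non_voters feature flag.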
+add_member_2(Config) -> + %% this tests a scenario where an older node version is running a QQ + %% and a member is added on a newer node version (for mixed-version testing) + + %% we don't validate that the ff was enabled as this test should pass either way + _ = rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue_non_voters), + [Server0, Server1 | _] = _Servers0 = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 1}])), + ?assertEqual(ok, rpc:call(Server0, rabbit_quorum_queue, add_member, + [<<"/">>, QQ, Server0, 5000])), + Info = rpc:call(Server0, rabbit_quorum_queue, infos, + [rabbit_misc:r(<<"/">>, queue, QQ)]), + Servers = lists:sort([Server0, Server1]), + ?assertEqual(Servers, lists:sort(proplists:get_value(online, Info, []))). + delete_member_not_running(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -1870,11 +2176,40 @@ delete_member_not_a_member(Config) -> rpc:call(Server, rabbit_quorum_queue, delete_member, [<<"/">>, QQ, Server])). +delete_member_member_already_deleted(Config) -> + [Server, Server2] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + NServers = length(Servers), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + QQ = ?config(queue_name, Config), + RaName = ra_name(QQ), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ?awaitMatch(NServers, count_online_nodes(Server, <<"/">>, QQ), ?DEFAULT_AWAIT), + ServerId = {RaName, Server}, + ServerId2 = {RaName, Server2}, + %% use the ra API directly to simulate a situation where the ra:remove_server/2 + %% call timed out but later succeeded + ?assertMatch(ok, + rpc:call(Server2, ra, leave_and_terminate, + [quorum_queues, ServerId, ServerId2])), + + %% idempotent by design + ?assertEqual(ok, + rpc:call(Server, rabbit_quorum_queue, delete_member, + [<<"/">>, QQ, Server2])), + {ok, Q} = rpc:call(Server, rabbit_amqqueue, lookup, [QQ, <<"/">>]), + #{nodes := Nodes} = amqqueue:get_type_state(Q), + ?assertEqual(1, length(Nodes)), + ok. + delete_member_during_node_down(Config) -> - [Server, DownServer, Remove] = rabbit_ct_broker_helpers:get_node_configs( - Config, nodename), + [Server, DownServer, Remove] = Servers = rabbit_ct_broker_helpers:get_node_configs( - Config, nodename), stop_node(Config, DownServer), + Running = Servers -- [DownServer], + assert_cluster_status({Servers, Servers, Running}, Running), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), QQ = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, @@ -1915,72 +2250,20 @@ node_removal_is_not_quorum_critical(Config) -> ?assertEqual([], Qs). -file_handle_reservations(Config) -> - case rabbit_ct_helpers:is_mixed_versions() of - true -> - {skip, "file_handle_reservations tests isn't mixed version compatible"}; - false -> - file_handle_reservations0(Config) - end.
- -file_handle_reservations0(Config) -> - Servers = [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), - QQ = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), - RaName = ra_name(QQ), - {ok, _, {_, Leader}} = ra:members({RaName, Server1}), - [Follower1, Follower2] = Servers -- [Leader], - ?assertEqual([{files_reserved, 5}], - rpc:call(Leader, file_handle_cache, info, [[files_reserved]])), - ?assertEqual([{files_reserved, 2}], - rpc:call(Follower1, file_handle_cache, info, [[files_reserved]])), - ?assertEqual([{files_reserved, 2}], - rpc:call(Follower2, file_handle_cache, info, [[files_reserved]])), - force_leader_change(Servers, QQ), - {ok, _, {_, Leader0}} = ra:members({RaName, Server1}), - [Follower01, Follower02] = Servers -- [Leader0], - ?assertEqual([{files_reserved, 5}], - rpc:call(Leader0, file_handle_cache, info, [[files_reserved]])), - ?assertEqual([{files_reserved, 2}], - rpc:call(Follower01, file_handle_cache, info, [[files_reserved]])), - ?assertEqual([{files_reserved, 2}], - rpc:call(Follower02, file_handle_cache, info, [[files_reserved]])). - -file_handle_reservations_above_limit(Config) -> - [S1, S2, S3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, S1), - QQ = ?config(queue_name, Config), - QQ2 = ?config(alt_queue_name, Config), - - Limit = rpc:call(S1, file_handle_cache, get_limit, []), - - ok = rpc:call(S1, file_handle_cache, set_limit, [3]), - ok = rpc:call(S2, file_handle_cache, set_limit, [3]), - ok = rpc:call(S3, file_handle_cache, set_limit, [3]), - - ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), - ?assertEqual({'queue.declare_ok', QQ2, 0, 0}, - declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), - - ok = rpc:call(S1, file_handle_cache, set_limit, [Limit]), - ok = rpc:call(S2, file_handle_cache, set_limit, [Limit]), - ok = rpc:call(S3, file_handle_cache, set_limit, [Limit]). - cleanup_data_dir(Config) -> + %% With Khepri this test needs to run in a 3-node cluster, otherwise the queue can't + %% be deleted in minority + %% %% This test is slow, but also checks that we handle properly errors when %% trying to delete a queue in minority. A case clause there had gone %% previously unnoticed. 
- [Server1, Server2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [Server1, Server2, Server3] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), QQ = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', QQ, 0, 0}, declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), - ?awaitMatch(2, count_online_nodes(Server1, <<"/">>, QQ), ?DEFAULT_AWAIT), + ?awaitMatch(3, count_online_nodes(Server1, <<"/">>, QQ), ?DEFAULT_AWAIT), UId1 = proplists:get_value(ra_name(QQ), rpc:call(Server1, ra_directory, list_registered, [quorum_queues])), UId2 = proplists:get_value(ra_name(QQ), rpc:call(Server2, ra_directory, list_registered, [quorum_queues])), @@ -1990,19 +2273,19 @@ cleanup_data_dir(Config) -> ?assert(filelib:is_dir(DataDir2)), ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), + assert_cluster_status({Servers, Servers, [Server1, Server3]}, [Server1]), ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch, #'queue.delete'{queue = QQ})), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), %% data dir 1 should be force deleted at this point ?assert(not filelib:is_dir(DataDir1)), ?assert(filelib:is_dir(DataDir2)), ok = rabbit_ct_broker_helpers:start_node(Config, Server2), - timer:sleep(2000), + assert_clustered(Servers), ?assertEqual(ok, rpc:call(Server2, rabbit_quorum_queue, cleanup_data_dir, [])), - ?assert(not filelib:is_dir(DataDir2)), + ?awaitMatch(false, filelib:is_dir(DataDir2), 30000), ok. reconnect_consumer_and_publish(Config) -> @@ -2127,7 +2410,6 @@ reconnect_consumer_and_wait_channel_down(Config) -> %% Let's give it a few seconds to ensure it doesn't attempt to %% deliver to the down channel - it shouldn't be monitored %% at this time! - timer:sleep(5000), wait_for_messages_ready(Servers, RaName, 1), wait_for_messages_pending_ack(Servers, RaName, 0). @@ -2449,17 +2731,19 @@ message_bytes_metrics(Config) -> wait_for_messages_ready(Servers, RaName, 1), wait_for_messages_pending_ack(Servers, RaName, 0), - wait_until(fun() -> - {3, 3, 0} == get_message_bytes(Leader, QRes) - end), + rabbit_ct_helpers:await_condition( + fun() -> + {3, 3, 0} == get_message_bytes(Leader, QRes) + end, 30000), subscribe(Ch, QQ, false), wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 1), - wait_until(fun() -> - {3, 0, 3} == get_message_bytes(Leader, QRes) - end), + rabbit_ct_helpers:await_condition( + fun() -> + {3, 0, 3} == get_message_bytes(Leader, QRes) + end, 30000), receive {#'basic.deliver'{delivery_tag = DeliveryTag, @@ -2469,9 +2753,10 @@ message_bytes_metrics(Config) -> requeue = false}), wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 0), - wait_until(fun() -> - {0, 0, 0} == get_message_bytes(Leader, QRes) - end) + rabbit_ct_helpers:await_condition( + fun() -> + {0, 0, 0} == get_message_bytes(Leader, QRes) + end, 30000) end, %% Let's publish and then close the consumer channel. 
Messages must be @@ -2480,17 +2765,19 @@ message_bytes_metrics(Config) -> wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 1), - wait_until(fun() -> - {3, 0, 3} == get_message_bytes(Leader, QRes) - end), + rabbit_ct_helpers:await_condition( + fun() -> + {3, 0, 3} == get_message_bytes(Leader, QRes) + end, 30000), rabbit_ct_client_helpers:close_channel(Ch), wait_for_messages_ready(Servers, RaName, 1), wait_for_messages_pending_ack(Servers, RaName, 0), - wait_until(fun() -> - {3, 3, 0} == get_message_bytes(Leader, QRes) - end), + rabbit_ct_helpers:await_condition( + fun() -> + {3, 3, 0} == get_message_bytes(Leader, QRes) + end, 30000), ok. memory_alarm_rolls_wal(Config) -> @@ -2499,8 +2786,9 @@ memory_alarm_rolls_wal(Config) -> [Wal0] = filelib:wildcard(WalDataDir ++ "/*.wal"), rabbit_ct_broker_helpers:set_alarm(Config, Server, memory), rabbit_ct_helpers:await_condition( - fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end - ), + fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end, + 30000 + ), rabbit_ct_helpers:await_condition( fun() -> List = filelib:wildcard(WalDataDir ++ "/*.wal"), @@ -2515,8 +2803,9 @@ memory_alarm_rolls_wal(Config) -> %% min_wal_roll_over_interval rabbit_ct_broker_helpers:set_alarm(Config, Server, memory), rabbit_ct_helpers:await_condition( - fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end - ), + fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end, + 30000 + ), timer:sleep(1000), Wal2 = lists:last(lists:sort(filelib:wildcard(WalDataDir ++ "/*.wal"))), ?assert(Wal1 == Wal2), @@ -2654,6 +2943,143 @@ peek(Config) -> wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), ok. +oldest_entry_timestamp(Config) -> + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-max-in-memory-length">>, long, 2}])), + + Msg1 = <<"msg1">>, + VHost = <<"%2F">>, + ServerId = binary_to_atom(<>, utf8), + + ?assertMatch({ok, Ts} when is_integer(Ts), + rabbit_ct_broker_helpers:rpc(Config, 0, ra, + aux_command, + [ServerId, oldest_entry_timestamp])), + publish(Ch, QQ, Msg1), + wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]), + + ?assertMatch({ok, Ts} when is_integer(Ts), + rabbit_ct_broker_helpers:rpc(Config, 0, ra, + aux_command, + [ServerId, oldest_entry_timestamp])), + ?assertMatch({ok, Ts} when is_integer(Ts), + rabbit_ct_broker_helpers:rpc(Config, 0, ra, + aux_command, + [ServerId, oldest_entry_timestamp])), + + {'queue.purge_ok', 1} = amqp_channel:call(Ch, #'queue.purge'{queue = QQ}), + Now = erlang:system_time(millisecond), + timer:sleep(100), + ?assertMatch({ok, Ts2} when Ts2 > Now, + rabbit_ct_broker_helpers:rpc(Config, 0, ra, + aux_command, + [ServerId, oldest_entry_timestamp])), + + ok. + +-define(STATUS_MATCH(N, T), + [{<<"Node Name">>, N}, + {<<"Raft State">>, _}, + {<<"Membership">>, _}, + {<<"Last Log Index">>, _}, + {<<"Last Written">>, _}, + {<<"Last Applied">>, _}, + {<<"Commit Index">>, _}, + {<<"Snapshot Index">>, _}, + {<<"Term">>, T}, + {<<"Machine Version">>, _} + ]). 
+ +status(Config) -> + [Server | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-max-in-memory-length">>, long, 2}])), + + Msg1 = <<"msg1">>, + Msg2 = <<"msg11">>, + + publish(Ch, QQ, Msg1), + publish(Ch, QQ, Msg2), + wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), + + [N1, N2, N3] = lists:sort(Nodes), + + %% check that nodes are returned and that at least the term isn't + %% defaulted (i.e. there was an error) + ?assertMatch([?STATUS_MATCH(N1, T1), + ?STATUS_MATCH(N2, T2), + ?STATUS_MATCH(N3, T3) + ] when T1 /= <<>> andalso + T2 /= <<>> andalso + T3 /= <<>>, + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue, + status, [<<"/">>, QQ])), + wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), + ok. + +format(Config) -> + %% tests rabbit_quorum_queue:format/2 + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Server = hd(Nodes), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + Q = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Q, 0, 0}, + declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + + Vhost = ?config(rmq_vhost, Config), + QName = #resource{virtual_host = Vhost, + kind = queue, + name = Q}, + {ok, QRecord} = rabbit_ct_broker_helpers:rpc(Config, Server, + rabbit_amqqueue, + lookup, [QName]), + %% restart the quorum + Fmt = rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_quorum_queue, + ?FUNCTION_NAME, [QRecord, #{}]), + + %% test all up case + ?assertEqual(quorum, proplists:get_value(type, Fmt)), + ?assertEqual(running, proplists:get_value(state, Fmt)), + ?assertEqual(Server, proplists:get_value(leader, Fmt)), + ?assertEqual(Server, proplists:get_value(node, Fmt)), + ?assertEqual(Nodes, proplists:get_value(online, Fmt)), + ?assertEqual(Nodes, proplists:get_value(members, Fmt)), + + case length(Nodes) of + 3 -> + [_, Server2, Server3] = Nodes, + ok = rabbit_control_helper:command(stop_app, Server2), + ok = rabbit_control_helper:command(stop_app, Server3), + + Fmt2 = rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_quorum_queue, + ?FUNCTION_NAME, [QRecord, #{}]), + ok = rabbit_control_helper:command(start_app, Server2), + ok = rabbit_control_helper:command(start_app, Server3), + ?assertEqual(quorum, proplists:get_value(type, Fmt2)), + ?assertEqual(minority, proplists:get_value(state, Fmt2)), + ?assertEqual(Server, proplists:get_value(leader, Fmt2)), + ?assertEqual(Server, proplists:get_value(node, Fmt2)), + ?assertEqual([Server], proplists:get_value(online, Fmt2)), + ?assertEqual(Nodes, proplists:get_value(members, Fmt2)), + ok; + 1 -> + ok + end, + ?assertMatch(#'queue.delete_ok'{}, + amqp_channel:call(Ch, #'queue.delete'{queue = Q})), + ok. + peek_with_wrong_queue_type(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -2721,6 +3147,8 @@ receive_and_ack(Ch) -> redelivered = false}, _} -> amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag, multiple = false}) + after 5000 -> + ct:fail("receive_and_ack timed out", []) end. 
message_ttl_policy(Config) -> @@ -2757,9 +3185,9 @@ message_ttl_policy(Config) -> ok = rabbit_ct_broker_helpers:set_policy(Config, 0, <<"msg-ttl">>, QQ, <<"queues">>, - [{<<"message-ttl">>, 10000}]), + [{<<"message-ttl">>, 1000}]), {ok, {_, Overview2}, _} = rpc:call(Server, ra, local_query, [RaName, QueryFun]), - ?assertMatch(#{config := #{msg_ttl := 10000}}, Overview2), + ?assertMatch(#{config := #{msg_ttl := 1000}}, Overview2), publish(Ch, QQ, Msg1), wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]), wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), @@ -2775,14 +3203,15 @@ per_message_ttl(Config) -> Msg1 = <<"msg1">>, + #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), + amqp_channel:register_confirm_handler(Ch, self()), ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = QQ}, #amqp_msg{props = #'P_basic'{delivery_mode = 2, - expiration = <<"2000">>}, + expiration = <<"1000">>}, payload = Msg1}), - - wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]), - timer:sleep(2000), + amqp_channel:wait_for_confirms(Ch, 5), + %% we know the message got to the queue in 2s it should be gone wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), ok. @@ -2806,13 +3235,13 @@ per_message_ttl_mixed_expiry(Config) -> ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = QQ}, #amqp_msg{props = #'P_basic'{delivery_mode = 2, - expiration = <<"500">>}, + expiration = <<"100">>}, payload = Msg2}), wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), - timer:sleep(1000), - wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]), + %% twice the expiry interval + timer:sleep(100 * 2), subscribe(Ch, QQ, false), receive {#'basic.deliver'{delivery_tag = DeliveryTag}, @@ -2824,6 +3253,7 @@ per_message_ttl_mixed_expiry(Config) -> ct:fail("basic deliver timeout") end, + %% the second message should NOT be received as it has expired receive {#'basic.deliver'{}, #amqp_msg{payload = Msg2}} -> @@ -2903,26 +3333,33 @@ delete_if_unused(Config) -> queue_ttl(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), QQ = ?config(queue_name, Config), + + %% Set policy to 10 seconds. + PolicyName = <<"my-queue-ttl-policy">>, + ok = rabbit_ct_broker_helpers:set_policy( + Config, 0, PolicyName, QQ, <<"queues">>, + [{<<"expires">>, 10000}]), + %% Set queue arg to 1 second. + QArgs = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-expires">>, long, 1000}], ?assertEqual({'queue.declare_ok', QQ, 0, 0}, - declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-expires">>, long, 1000}])), - timer:sleep(5500), - %% check queue no longer exists - ?assertExit( - {{shutdown, - {server_initiated_close,404, - <<"NOT_FOUND - no queue 'queue_ttl' in vhost '/'">>}}, - _}, - amqp_channel:call(Ch, #'queue.declare'{queue = QQ, - passive = true, - durable = true, - auto_delete = false, - arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}, - {<<"x-expires">>, long, 1000}]})), - ok. + declare(Ch, QQ, QArgs)), + %% The minimum should take effect. + ?awaitMatch( + {'EXIT', {{shutdown, + {server_initiated_close,404, + <<"NOT_FOUND - no queue 'queue_ttl' in vhost '/'">>}}, + _}}, + catch amqp_channel:call(Ch, #'queue.declare'{ + queue = QQ, + passive = true, + durable = true, + auto_delete = false, + arguments = QArgs}), + 5_000), + ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, PolicyName). 
consumer_priorities(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), @@ -3037,15 +3474,17 @@ cancel_consumer_gh_3729(Config) -> ct:fail("basic.cancel_ok timeout") end, - D = #'queue.declare'{queue = QQ, passive = true, arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}, + D = #'queue.declare'{queue = QQ, passive = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}, F = fun() -> #'queue.declare_ok'{queue = QQ, message_count = MC, consumer_count = CC} = amqp_channel:call(Ch, D), + ct:pal("Mc ~b CC ~b", [MC, CC]), MC =:= 1 andalso CC =:= 0 end, - wait_until(F), + rabbit_ct_helpers:await_condition(F, 30000), ok = rabbit_ct_client_helpers:close_channel(Ch). @@ -3115,6 +3554,38 @@ validate_messages_on_queue(Config) -> ok. +amqpl_headers(Config) -> + [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + + Headers1Sent = undefined, + Headers2Sent = [], + [ok = amqp_channel:cast( + Ch, + #'basic.publish'{routing_key = QQ}, + #amqp_msg{props = #'P_basic'{headers = HeadersSent, + delivery_mode = 2}}) || + HeadersSent <- [Headers1Sent, Headers2Sent]], + RaName = ra_name(QQ), + wait_for_messages_ready(Servers, RaName, 2), + + {#'basic.get_ok'{}, + #amqp_msg{props = #'P_basic'{headers = Headers1Received}} + } = amqp_channel:call(Ch, #'basic.get'{queue = QQ}), + + {#'basic.get_ok'{delivery_tag = DeliveryTag}, + #amqp_msg{props = #'P_basic'{headers = Headers2Received}} + } = amqp_channel:call(Ch, #'basic.get'{queue = QQ}), + + ?assertEqual(Headers1Sent, Headers1Received), + ?assertEqual(Headers2Sent, Headers2Received), + + ok = amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag, + multiple = true}). + leader_locator_client_local(Config) -> [Server1 | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Q = ?config(queue_name, Config), @@ -3177,7 +3648,7 @@ leader_locator_balanced_maintenance(Config) -> || Q <- Qs]. leader_locator_balanced_random_maintenance(Config) -> - [S1, S2, S3] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [S1, S2, _S3] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, S1), Q = ?config(queue_name, Config), @@ -3199,15 +3670,15 @@ leader_locator_balanced_random_maintenance(Config) -> amqp_channel:call(Ch, #'queue.delete'{queue = Q})), Leader end || _ <- lists:seq(1, 10)], - ?assert(lists:member(S1, Leaders)), - ?assertNot(lists:member(S2, Leaders)), - ?assert(lists:member(S3, Leaders)), ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, [rabbit, queue_leader_locator]), ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, [rabbit, queue_count_start_random_selection]), - true = rabbit_ct_broker_helpers:unmark_as_being_drained(Config, S2). + true = rabbit_ct_broker_helpers:unmark_as_being_drained(Config, S2), + %% assert after resetting maintenance mode else other tests may also fail + ?assertNot(lists:member(S2, Leaders)), + ok. 
leader_locator_policy(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), @@ -3215,8 +3686,9 @@ leader_locator_policy(Config) -> Qs = [?config(queue_name, Config), ?config(alt_queue_name, Config), ?config(alt_2_queue_name, Config)], + PolicyName = <<"my-leader-locator">>, ok = rabbit_ct_broker_helpers:set_policy( - Config, 0, <<"my-leader-locator">>, <<"leader_locator_policy_.*">>, <<"queues">>, + Config, 0, PolicyName, <<"leader_locator_policy_.*">>, <<"queues">>, [{<<"queue-leader-locator">>, <<"balanced">>}]), Leaders = [begin @@ -3226,12 +3698,14 @@ leader_locator_policy(Config) -> {ok, _, {_, Leader}} = ra:members({ra_name(Q), Server}), Leader end || Q <- Qs], - ?assertEqual(3, sets:size(sets:from_list(Leaders))), [?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch, #'queue.delete'{queue = Q})) || Q <- Qs], - ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"my-leader-locator">>). + ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, PolicyName), + + ?assertEqual(3, length(lists:usort(Leaders))), + ok. select_nodes_with_least_replicas(Config) -> Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), @@ -3262,7 +3736,6 @@ select_nodes_with_least_replicas_node_down(Config) -> Qs = [?config(queue_name, Config), ?config(alt_queue_name, Config)], - timer:sleep(1000), Members = [begin ?assertMatch({'queue.declare_ok', Q, 0, 0}, declare(Ch, Q, @@ -3282,6 +3755,88 @@ select_nodes_with_least_replicas_node_down(Config) -> amqp_channel:call(Ch, #'queue.delete'{queue = Q})) || Q <- Qs]. +requeue_multiple_true(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-delivery-limit">>, long, 3}])), + Num = 100, + Payloads = [integer_to_binary(N) || N <- lists:seq(1, Num)], + [publish(Ch, QQ, P) || P <- Payloads], + + amqp_channel:subscribe(Ch, #'basic.consume'{queue = QQ}, self()), + receive #'basic.consume_ok'{} -> ok + end, + + DTags = [receive {#'basic.deliver'{redelivered = false, + delivery_tag = D}, + #amqp_msg{payload = P0}} -> + ?assertEqual(P, P0), + D + after 5000 -> ct:fail({basic_deliver_timeout, P, ?LINE}) + end || P <- Payloads], + + %% Requeue all messages. + ok = amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = lists:last(DTags), + multiple = true, + requeue = true}), + + %% We expect to get all messages re-delivered in the order in which we requeued + %% (which is the same order as messages were sent to us previously). + [receive {#'basic.deliver'{redelivered = true}, + #amqp_msg{payload = P1}} -> + ?assertEqual(P, P1) + after 5000 -> ct:fail({basic_deliver_timeout, P, ?LINE}) + end || P <- Payloads], + + ?assertEqual(#'queue.delete_ok'{message_count = 0}, + amqp_channel:call(Ch, #'queue.delete'{queue = QQ})). 
+ +requeue_multiple_false(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-delivery-limit">>, long, 3}])), + Num = 100, + Payloads = [integer_to_binary(N) || N <- lists:seq(1, Num)], + [publish(Ch, QQ, P) || P <- Payloads], + + amqp_channel:subscribe(Ch, #'basic.consume'{queue = QQ}, self()), + receive #'basic.consume_ok'{} -> ok + end, + + DTags = [receive {#'basic.deliver'{redelivered = false, + delivery_tag = D}, + #amqp_msg{payload = P0}} -> + ?assertEqual(P, P0), + D + after 5000 -> ct:fail({basic_deliver_timeout, P, ?LINE}) + end || P <- Payloads], + + %% The delivery tags we received via AMQP 0.9.1 are ordered from 1-100. + %% Sanity check: + ?assertEqual(lists:seq(1, Num), DTags), + + %% Requeue each message individually in random order. + Tuples = [{rand:uniform(), D} || D <- DTags], + DTagsShuffled = [D || {_, D} <- lists:sort(Tuples)], + [ok = amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = D, + multiple = false, + requeue = true}) + || D <- DTagsShuffled], + + %% We expect to get all messages re-delivered in the order in which we requeued. + [receive {#'basic.deliver'{redelivered = true}, + #amqp_msg{payload = P1}} -> + ?assertEqual(integer_to_binary(D), P1) + after 5000 -> ct:fail({basic_deliver_timeout, ?LINE}) + end || D <- DTagsShuffled], + + ?assertEqual(#'queue.delete_ok'{message_count = 0}, + amqp_channel:call(Ch, #'queue.delete'{queue = QQ})). + %%---------------------------------------------------------------------------- same_elements(L1, L2) @@ -3317,7 +3872,7 @@ get_queue_type(Server, VHost, Q0) -> count_online_nodes(Server, VHost, Q0) -> QNameRes = rabbit_misc:r(VHost, queue, Q0), - Info = rpc:call(Server, rabbit_quorum_queue, infos, [QNameRes]), + Info = rpc:call(Server, rabbit_quorum_queue, infos, [QNameRes, [online]]), length(proplists:get_value(online, Info, [])). publish_many(Ch, Queue, Count) -> @@ -3332,7 +3887,7 @@ publish(Ch, Queue, Msg) -> #amqp_msg{props = #'P_basic'{delivery_mode = 2}, payload = Msg}). -consume(Ch, Queue, NoAck) -> +basic_get_tag(Ch, Queue, NoAck) -> {GetOk, _} = Reply = amqp_channel:call(Ch, #'basic.get'{queue = Queue, no_ack = NoAck}), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, Reply), @@ -3344,13 +3899,20 @@ consume_empty(Ch, Queue, NoAck) -> no_ack = NoAck})). subscribe(Ch, Queue, NoAck) -> + subscribe(Ch, Queue, NoAck, <<"ctag">>, []). + +subscribe(Ch, Queue, NoAck, Tag, Args) -> amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue, no_ack = NoAck, - consumer_tag = <<"ctag">>}, + arguments = Args, + consumer_tag = Tag}, self()), receive - #'basic.consume_ok'{consumer_tag = <<"ctag">>} -> + #'basic.consume_ok'{consumer_tag = Tag} -> ok + after 30000 -> + flush(100), + exit(subscribe_timeout) end. qos(Ch, Prefetch, Global) -> @@ -3379,7 +3941,7 @@ nack(Ch, Multiple, Requeue) -> end. wait_for_cleanup(Server, Channel, Number) -> - wait_for_cleanup(Server, Channel, Number, 60). + wait_for_cleanup(Server, Channel, Number, 120). wait_for_cleanup(Server, Channel, Number, 0) -> ?assertEqual(length(rpc:call(Server, rabbit_channel, list_queue_states, [Channel])), @@ -3389,25 +3951,10 @@ wait_for_cleanup(Server, Channel, Number, N) -> Length when Number == Length -> ok; _ -> - timer:sleep(500), + timer:sleep(250), wait_for_cleanup(Server, Channel, Number, N - 1) end. -wait_until(Condition) -> - wait_until(Condition, 60). 
- -wait_until(Condition, 0) -> - ?assertEqual(true, Condition()); -wait_until(Condition, N) -> - case Condition() of - true -> - ok; - _ -> - timer:sleep(500), - wait_until(Condition, N - 1) - end. - - force_leader_change([Server | _] = Servers, Q) -> RaName = ra_name(Q), {ok, _, {_, Leader}} = ra:members({RaName, Server}), @@ -3478,3 +4025,19 @@ basic_get(Ch, Q, NoAck, Attempt) -> timer:sleep(100), basic_get(Ch, Q, NoAck, Attempt - 1) end. + +check_quorum_queues_v4_compat(Config) -> + case rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, 'rabbitmq_4.0.0') of + true -> + ok; + false -> + throw({skip, "test needs feature flag rabbitmq_4.0.0"}) + end. + +lists_interleave([], _List) -> + []; +lists_interleave([Item | Items], List) + when is_list(List) -> + {Left, Right} = lists:split(2, List), + Left ++ [Item | lists_interleave(Items, Right)]. + diff --git a/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl b/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl index a2d08de42c40..00ccb34402fe 100644 --- a/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(quorum_queue_member_reconciliation_SUITE). @@ -10,8 +10,6 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). --include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). - -compile([nowarn_export_all, export_all]). @@ -114,7 +112,7 @@ auto_grow(Config) -> declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), %% There is only one node in the cluster at the moment - {ok, Members, _} = ra:members({quorum_queue_utils:ra_name(QQ), Server0}), + {ok, Members, _} = ra:members({queue_utils:ra_name(QQ), Server0}), ?assertEqual(1, length(Members)), add_server_to_cluster(Server1, Server0), @@ -122,14 +120,14 @@ auto_grow(Config) -> %% new members should be available. We sleep a while so the periodic check %% runs timer:sleep(4000), - {ok, Members, _} = ra:members({quorum_queue_utils:ra_name(QQ), Server0}), + {ok, Members, _} = ra:members({queue_utils:ra_name(QQ), Server0}), ?assertEqual(1, length(Members)), add_server_to_cluster(Server2, Server0), %% With 3 nodes in the cluster, target size is met so eventually it should %% be 3 members wait_until(fun() -> - {ok, M, _} = ra:members({quorum_queue_utils:ra_name(QQ), Server0}), + {ok, M, _} = ra:members({queue_utils:ra_name(QQ), Server0}), 3 =:= length(M) end). @@ -143,7 +141,7 @@ auto_grow_drained_node(Config) -> declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), %% There is only one node in the cluster at the moment - {ok, Members, _} = ra:members({quorum_queue_utils:ra_name(QQ), Server0}), + {ok, Members, _} = ra:members({queue_utils:ra_name(QQ), Server0}), ?assertEqual(1, length(Members)), add_server_to_cluster(Server1, Server0), @@ -156,7 +154,7 @@ auto_grow_drained_node(Config) -> add_server_to_cluster(Server2, Server0), timer:sleep(5000), %% We have 3 nodes, but one is drained, so it will not be concidered. 
- {ok, Members1, _} = ra:members({quorum_queue_utils:ra_name(QQ), Server0}), + {ok, Members1, _} = ra:members({queue_utils:ra_name(QQ), Server0}), ?assertEqual(1, length(Members1)), rabbit_ct_broker_helpers:unmark_as_being_drained(Config, Server1), @@ -165,7 +163,7 @@ auto_grow_drained_node(Config) -> 10000), %% We have 3 nodes, none is being drained, so we should grow membership to 3 wait_until(fun() -> - {ok, M, _} = ra:members({quorum_queue_utils:ra_name(QQ), Server0}), + {ok, M, _} = ra:members({queue_utils:ra_name(QQ), Server0}), 3 =:= length(M) end). @@ -182,7 +180,7 @@ auto_shrink(Config) -> declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), wait_until(fun() -> - {ok, M, _} = ra:members({quorum_queue_utils:ra_name(QQ), + {ok, M, _} = ra:members({queue_utils:ra_name(QQ), Server0}), 3 =:= length(M) end), @@ -191,7 +189,7 @@ auto_shrink(Config) -> [Server2, false]), %% with one node 'forgotten', eventually the membership will shrink to 2 wait_until(fun() -> - {ok, M, _} = ra:members({quorum_queue_utils:ra_name(QQ), + {ok, M, _} = ra:members({queue_utils:ra_name(QQ), Server0}), 2 =:= length(M) end). diff --git a/deps/rabbit/test/rabbit_access_control_SUITE.erl b/deps/rabbit/test/rabbit_access_control_SUITE.erl new file mode 100644 index 000000000000..a2e4660ffa93 --- /dev/null +++ b/deps/rabbit/test/rabbit_access_control_SUITE.erl @@ -0,0 +1,104 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2024 Broadcom. All Rights Reserved. +%% The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + + +-module(rabbit_access_control_SUITE). + +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +%%%=================================================================== +%%% Common Test callbacks +%%%=================================================================== + + +all() -> + [{group, tests}]. + +%% replicate eunit like test resolution +all_tests() -> + [F + || {F, _} <- ?MODULE:module_info(functions), + re:run(atom_to_list(F), "_test$") /= nomatch]. + +groups() -> + [{tests, [], all_tests()}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(_TestCase, _Config) -> + meck:unload(), + ok. 
+ +expiry_timestamp_test(_) -> + %% test rabbit_access_control:expiry_timestamp/1 returns the earliest expiry time + Now = os:system_time(seconds), + BeforeNow = Now - 60, + %% returns now + ok = meck:new(rabbit_expiry_backend, [non_strict]), + meck:expect(rabbit_expiry_backend, expiry_timestamp, fun (_) -> Now end), + %% return a bit before now (so the earliest expiry time) + ok = meck:new(rabbit_earlier_expiry_backend, [non_strict]), + meck:expect(rabbit_earlier_expiry_backend, expiry_timestamp, fun (_) -> BeforeNow end), + %% return 'never' (no expiry) + ok = meck:new(rabbit_no_expiry_backend, [non_strict]), + meck:expect(rabbit_no_expiry_backend, expiry_timestamp, fun (_) -> never end), + + %% never expires + User1 = #user{authz_backends = [{rabbit_no_expiry_backend, unused}]}, + ?assertEqual(never, rabbit_access_control:expiry_timestamp(User1)), + + %% returns the result from the backend that expires + User2 = #user{authz_backends = [{rabbit_expiry_backend, unused}, + {rabbit_no_expiry_backend, unused}]}, + ?assertEqual(Now, rabbit_access_control:expiry_timestamp(User2)), + + %% returns earliest expiry time + User3 = #user{authz_backends = [{rabbit_expiry_backend, unused}, + {rabbit_earlier_expiry_backend, unused}, + {rabbit_no_expiry_backend, unused}]}, + ?assertEqual(BeforeNow, rabbit_access_control:expiry_timestamp(User3)), + + %% returns earliest expiry time + User4 = #user{authz_backends = [{rabbit_earlier_expiry_backend, unused}, + {rabbit_expiry_backend, unused}, + {rabbit_no_expiry_backend, unused}]}, + ?assertEqual(BeforeNow, rabbit_access_control:expiry_timestamp(User4)), + + %% returns earliest expiry time + User5 = #user{authz_backends = [{rabbit_no_expiry_backend, unused}, + {rabbit_earlier_expiry_backend, unused}, + {rabbit_expiry_backend, unused}]}, + ?assertEqual(BeforeNow, rabbit_access_control:expiry_timestamp(User5)), + + %% returns earliest expiry time + User6 = #user{authz_backends = [{rabbit_no_expiry_backend, unused}, + {rabbit_expiry_backend, unused}, + {rabbit_earlier_expiry_backend, unused}]}, + ?assertEqual(BeforeNow, rabbit_access_control:expiry_timestamp(User6)), + + %% returns the result from the backend that expires + User7 = #user{authz_backends = [{rabbit_no_expiry_backend, unused}, + {rabbit_expiry_backend, unused}]}, + ?assertEqual(Now, rabbit_access_control:expiry_timestamp(User7)), + ok. diff --git a/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl b/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl index 8e842b613a95..01a0510db6e7 100644 --- a/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl +++ b/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% A mock authn/authz that records information during calls. For testing purposes only. @@ -15,7 +15,7 @@ -export([user_login_authentication/2, user_login_authorization/2, check_vhost_access/3, check_resource_access/4, check_topic_access/4, - state_can_expire/0, + expiry_timestamp/1, get/1, init/0]). init() -> @@ -40,7 +40,8 @@ check_topic_access(#auth_user{}, #resource{}, _Permission, TopicContext) -> ets:insert(?MODULE, {topic_access, TopicContext}), true. 
-state_can_expire() -> false. +expiry_timestamp(_) -> + never. get(K) -> ets:lookup(?MODULE, K). diff --git a/deps/rabbit/test/rabbit_confirms_SUITE.erl b/deps/rabbit/test/rabbit_confirms_SUITE.erl index 7252a9102a17..0bd3ef00cee8 100644 --- a/deps/rabbit/test/rabbit_confirms_SUITE.erl +++ b/deps/rabbit/test/rabbit_confirms_SUITE.erl @@ -64,8 +64,8 @@ confirm(_Config) -> ?assertEqual(undefined, rabbit_confirms:smallest(U7)), U8 = rabbit_confirms:insert(2, [QName], XName, U1), - {[{1, XName}, {2, XName}], _U9} = rabbit_confirms:confirm([1, 2], QName, U8), - ok. + {[{Seq1, XName}, {Seq2, XName}], _U9} = rabbit_confirms:confirm([1, 2], QName, U8), + ?assertEqual([1, 2], lists:sort([Seq1, Seq2])). reject(_Config) -> @@ -94,8 +94,7 @@ reject(_Config) -> {ok, {2, XName}, U5} = rabbit_confirms:reject(2, U3), {error, not_found} = rabbit_confirms:reject(2, U5), ?assertEqual(1, rabbit_confirms:size(U5)), - ?assertEqual(1, rabbit_confirms:smallest(U5)), - ok. + ?assertEqual(1, rabbit_confirms:smallest(U5)). remove_queue(_Config) -> XName = rabbit_misc:r(<<"/">>, exchange, <<"X">>), @@ -114,5 +113,5 @@ remove_queue(_Config) -> U5 = rabbit_confirms:insert(1, [QName], XName, U0), U6 = rabbit_confirms:insert(2, [QName], XName, U5), - {[{1, XName}, {2, XName}], _U} = rabbit_confirms:remove_queue(QName, U6), - ok. + {[{Seq1, XName}, {Seq2, XName}], _U} = rabbit_confirms:remove_queue(QName, U6), + ?assertEqual([1, 2], lists:sort([Seq1, Seq2])). diff --git a/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl b/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl index 8060845c457e..4153a7b0a849 100644 --- a/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl +++ b/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_core_metrics_gc_SUITE). @@ -15,8 +15,7 @@ all() -> [ - {group, non_parallel_tests}, - {group, cluster_tests} + {group, non_parallel_tests} ]. groups() -> @@ -29,8 +28,7 @@ groups() -> gen_server2_metrics, consumer_metrics ] - }, - {cluster_tests, [], [cluster_queue_metrics]} + } ]. %% ------------------------------------------------------------------- @@ -43,11 +41,6 @@ merge_app_env(Config) -> {collect_statistics, fine}]}, rabbit_ct_helpers:merge_app_env(Config, AppEnv). 
-init_per_group(cluster_tests, Config) -> - rabbit_ct_helpers:log_environment(), - Conf = [{rmq_nodename_suffix, cluster_tests}, {rmq_nodes_count, 2}], - Config1 = rabbit_ct_helpers:set_config(Config, Conf), - rabbit_ct_helpers:run_setup_steps(Config1, setup_steps()); init_per_group(non_parallel_tests, Config) -> rabbit_ct_helpers:log_environment(), Conf = [{rmq_nodename_suffix, non_parallel_tests}], @@ -127,10 +120,13 @@ connection_metrics(Config) -> DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []), + Infos = [{info0, foo}, {info1, bar}, {info2, baz}, + {authz_backends, [rabbit_auth_backend_oauth2,rabbit_auth_backend_http]}], + rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, - connection_created, [DeadPid, infos]), + connection_created, [DeadPid, Infos]), rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, - connection_stats, [DeadPid, infos]), + connection_stats, [DeadPid, Infos]), rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, connection_stats, [DeadPid, 1, 1, 1]), @@ -321,72 +317,3 @@ x(Name) -> #resource{ virtual_host = <<"/">>, kind = exchange, name = Name }. - -%% ------------------------------------------------------------------- -%% Cluster Testcases. -%% ------------------------------------------------------------------- - -cluster_queue_metrics(Config) -> - VHost = <<"/">>, - QueueName = <<"cluster_queue_metrics">>, - PolicyName = <<"ha-policy-1">>, - PolicyPattern = <<".*">>, - PolicyAppliesTo = <<"queues">>, - - Node0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Node0), - - Node0Name = rabbit_data_coercion:to_binary(Node0), - Definition0 = [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [Node0Name]}], - ok = rabbit_ct_broker_helpers:set_policy(Config, 0, - PolicyName, PolicyPattern, - PolicyAppliesTo, Definition0), - - amqp_channel:call(Ch, #'queue.declare'{queue = QueueName}), - amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName}, - #amqp_msg{payload = <<"hello">>}), - - % Update policy to point to other node - Node1Name = rabbit_data_coercion:to_binary(Node1), - Definition1 = [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [Node1Name]}], - ok = rabbit_ct_broker_helpers:set_policy(Config, 0, - PolicyName, PolicyPattern, - PolicyAppliesTo, Definition1), - - % Synchronize - Name = rabbit_misc:r(VHost, queue, QueueName), - [Q] = rabbit_ct_broker_helpers:rpc(Config, Node0, ets, lookup, [rabbit_queue, Name]), - QPid = amqqueue:get_pid(Q), - ok = rabbit_ct_broker_helpers:rpc(Config, Node0, rabbit_amqqueue, sync_mirrors, [QPid]), - - % Check ETS table for data - wait_for(fun () -> - [] =:= rabbit_ct_broker_helpers:rpc( - Config, Node0, ets, tab2list, - [queue_coarse_metrics]) - end, 60), - - wait_for(fun () -> - Ret = rabbit_ct_broker_helpers:rpc( - Config, Node1, ets, tab2list, - [queue_coarse_metrics]), - case Ret of - [{Name, 1, 0, 1, _}] -> true; - _ -> false - end - end, 60), - - amqp_channel:call(Ch, #'queue.delete'{queue=QueueName}), - rabbit_ct_client_helpers:close_channel(Ch), - Config. - -wait_for(_Fun, 0) -> false; -wait_for(Fun, Seconds) -> - case Fun() of - true -> ok; - false -> - timer:sleep(1000), - wait_for(Fun, Seconds - 1) - end. 
diff --git a/deps/rabbit/test/rabbit_cuttlefish_SUITE.erl b/deps/rabbit/test/rabbit_cuttlefish_SUITE.erl index bdd1d5ba0272..338dfcf95f18 100644 --- a/deps/rabbit/test/rabbit_cuttlefish_SUITE.erl +++ b/deps/rabbit/test/rabbit_cuttlefish_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_cuttlefish_SUITE). diff --git a/deps/rabbit/test/rabbit_db_binding_SUITE.erl b/deps/rabbit/test/rabbit_db_binding_SUITE.erl index ed5376e3ea09..9055e4ff1ddb 100644 --- a/deps/rabbit/test/rabbit_db_binding_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_binding_SUITE.erl @@ -2,15 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_binding_SUITE). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). - -compile(export_all). -define(VHOST, <<"/">>). @@ -85,8 +83,8 @@ create(Config) -> create1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertMatch({error, {resources_missing, [_, _]}}, rabbit_db_binding:create(Binding, fun(_, _) -> ok end)), @@ -105,12 +103,14 @@ exists(Config) -> exists1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, - ?assertEqual(false, rabbit_db_exchange:exists(Binding)), + ?assertMatch({error, {resources_missing, [{not_found, _}, {not_found, _}]}}, + rabbit_db_binding:exists(Binding)), ?assertMatch({new, #exchange{}}, rabbit_db_exchange:create_or_get(Exchange1)), ?assertMatch({new, #exchange{}}, rabbit_db_exchange:create_or_get(Exchange2)), + ?assertEqual(false, rabbit_db_binding:exists(Binding)), ?assertMatch(ok, rabbit_db_binding:create(Binding, fun(_, _) -> ok end)), ?assertEqual(true, rabbit_db_binding:exists(Binding)), passed. 
@@ -121,8 +121,8 @@ delete(Config) -> delete1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true, auto_delete = false}, - Exchange2 = #exchange{name = XName2, durable = true, auto_delete = false}, + Exchange1 = #exchange{name = XName1, durable = true, auto_delete = false, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, auto_delete = false, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertEqual(ok, rabbit_db_binding:delete(Binding, fun(_, _) -> ok end)), ?assertMatch({new, #exchange{}}, rabbit_db_exchange:create_or_get(Exchange1)), @@ -142,8 +142,8 @@ auto_delete(Config) -> auto_delete1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true, auto_delete = true}, - Exchange2 = #exchange{name = XName2, durable = true, auto_delete = false}, + Exchange1 = #exchange{name = XName1, durable = true, auto_delete = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, auto_delete = false, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertEqual(ok, rabbit_db_binding:delete(Binding, fun(_, _) -> ok end)), ?assertMatch({new, #exchange{}}, rabbit_db_exchange:create_or_get(Exchange1)), @@ -163,8 +163,8 @@ get_all(Config) -> get_all1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertEqual([], rabbit_db_binding:get_all()), ?assertMatch({new, #exchange{}}, rabbit_db_exchange:create_or_get(Exchange1)), @@ -179,8 +179,8 @@ get_all_by_vhost(Config) -> get_all_by_vhost1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertEqual([], rabbit_db_binding:get_all(?VHOST)), ?assertMatch({new, #exchange{}}, rabbit_db_exchange:create_or_get(Exchange1)), @@ -197,8 +197,8 @@ get_all_for_source(Config) -> get_all_for_source1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertEqual([], rabbit_db_binding:get_all_for_source(XName1)), 
?assertEqual([], rabbit_db_binding:get_all_for_source(XName2)), @@ -216,8 +216,8 @@ get_all_for_destination(Config) -> get_all_for_destination1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertEqual([], rabbit_db_binding:get_all_for_destination(XName1)), ?assertEqual([], rabbit_db_binding:get_all_for_destination(XName2)), @@ -235,8 +235,8 @@ get_all_for_source_and_destination(Config) -> get_all_for_source_and_destination1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertEqual([], rabbit_db_binding:get_all(XName1, XName2, false)), ?assertEqual([], rabbit_db_binding:get_all(XName2, XName1, false)), @@ -256,8 +256,8 @@ get_all_for_source_and_destination_reverse(Config) -> get_all_for_source_and_destination_reverse1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertEqual([], rabbit_db_binding:get_all(XName1, XName2, true)), ?assertEqual([], rabbit_db_binding:get_all(XName2, XName1, true)), @@ -276,8 +276,8 @@ fold(Config) -> fold1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertEqual([], rabbit_db_binding:fold(fun(B, Acc) -> [B | Acc] end, [])), ?assertMatch({new, #exchange{}}, rabbit_db_exchange:create_or_get(Exchange1)), @@ -292,8 +292,8 @@ match(Config) -> match1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{foo => bar}}, ?assertEqual([], rabbit_db_binding:match(XName1, 
fun(#binding{args = Args}) -> @@ -318,8 +318,8 @@ match_routing_key(Config) -> match_routing_key1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = #exchange{name = XName1, durable = true, decorators = {[], []}}, + Exchange2 = #exchange{name = XName2, durable = true, decorators = {[], []}}, Binding = #binding{source = XName1, key = <<"*.*">>, destination = XName2, args = #{foo => bar}}, ?assertEqual([], rabbit_db_binding:match_routing_key(XName1, [<<"a.b.c">>], false)), diff --git a/deps/rabbit/test/rabbit_db_exchange_SUITE.erl b/deps/rabbit/test/rabbit_db_exchange_SUITE.erl index 33982e8b34e0..4bfb504eddef 100644 --- a/deps/rabbit/test/rabbit_db_exchange_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_exchange_SUITE.erl @@ -2,15 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_exchange_SUITE). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). - -compile(export_all). -define(VHOST, <<"/">>). @@ -92,8 +90,8 @@ create_or_get1(_Config) -> XName = rabbit_misc:r(?VHOST, exchange, <<"test-exchange">>), Exchange0 = #exchange{name = XName, durable = true}, Exchange = rabbit_exchange_decorator:set(Exchange0), - ?assertMatch({new, Exchange}, rabbit_db_exchange:create_or_get(Exchange0)), - ?assertEqual({existing, Exchange}, rabbit_db_exchange:create_or_get(Exchange0)), + ?assertMatch({new, Exchange}, rabbit_db_exchange:create_or_get(Exchange)), + ?assertEqual({existing, Exchange}, rabbit_db_exchange:create_or_get(Exchange)), passed. get(Config) -> @@ -104,7 +102,7 @@ get1(_Config) -> Exchange0 = #exchange{name = XName, durable = true}, Exchange = rabbit_exchange_decorator:set(Exchange0), ?assertEqual({error, not_found}, rabbit_db_exchange:get(XName)), - ?assertEqual({new, Exchange}, rabbit_db_exchange:create_or_get(Exchange0)), + ?assertEqual({new, Exchange}, rabbit_db_exchange:create_or_get(Exchange)), ?assertEqual({ok, Exchange}, rabbit_db_exchange:get(XName)), passed. @@ -116,7 +114,7 @@ get_many1(_Config) -> Exchange0 = #exchange{name = XName, durable = true}, Exchange = rabbit_exchange_decorator:set(Exchange0), ?assertEqual([], rabbit_db_exchange:get_many([XName])), - ?assertEqual({new, Exchange}, rabbit_db_exchange:create_or_get(Exchange0)), + ?assertEqual({new, Exchange}, rabbit_db_exchange:create_or_get(Exchange)), ?assertEqual([Exchange], rabbit_db_exchange:get_many([XName])), passed. @@ -132,7 +130,7 @@ get_all1(_Config) -> Exchange2 = rabbit_exchange_decorator:set(Exchange2_0), All = lists:sort([Exchange1, Exchange2]), ?assertEqual([], rabbit_db_exchange:get_all()), - create([Exchange1_0, Exchange2_0]), + create([Exchange1, Exchange2]), ?assertEqual(All, lists:sort(rabbit_db_exchange:get_all())), passed. 
@@ -148,7 +146,7 @@ get_all_by_vhost1(_Config) -> Exchange2 = rabbit_exchange_decorator:set(Exchange2_0), All = lists:sort([Exchange1, Exchange2]), ?assertEqual([], rabbit_db_exchange:get_all(?VHOST)), - create([Exchange1_0, Exchange2_0]), + create([Exchange1, Exchange2]), ?assertEqual(All, lists:sort(rabbit_db_exchange:get_all(?VHOST))), ?assertEqual([], lists:sort(rabbit_db_exchange:get_all(<<"other-vhost">>))), passed. @@ -216,7 +214,6 @@ set1(_Config) -> XName = rabbit_misc:r(?VHOST, exchange, <<"test-exchange">>), Exchange = #exchange{name = XName, durable = true}, ?assertEqual(ok, rabbit_db_exchange:set([Exchange])), - ?assertEqual({error, not_found}, rabbit_db_exchange:get(XName)), ?assertEqual([Exchange], rabbit_db_exchange:get_all_durable()), passed. @@ -275,8 +272,8 @@ delete_if_unused(Config) -> delete_if_unused1(_Config) -> XName1 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange1">>), XName2 = rabbit_misc:r(?VHOST, exchange, <<"test-exchange2">>), - Exchange1 = #exchange{name = XName1, durable = true}, - Exchange2 = #exchange{name = XName2, durable = true}, + Exchange1 = rabbit_exchange_decorator:set(#exchange{name = XName1, durable = true}), + Exchange2 = rabbit_exchange_decorator:set(#exchange{name = XName2, durable = true}), Binding = #binding{source = XName1, key = <<"">>, destination = XName2, args = #{}}, ?assertMatch({error, not_found}, rabbit_db_exchange:delete(XName1, true)), create([Exchange1, Exchange2]), @@ -316,9 +313,8 @@ recover(Config) -> recover1(_Config) -> XName = rabbit_misc:r(?VHOST, exchange, <<"test-exchange">>), - Exchange = #exchange{name = XName, durable = true}, + Exchange = rabbit_exchange_decorator:set(#exchange{name = XName, durable = true}), ?assertEqual(ok, rabbit_db_exchange:set([Exchange])), - ?assertEqual({error, not_found}, rabbit_db_exchange:get(XName)), ?assertEqual([Exchange], rabbit_db_exchange:get_all_durable()), ?assertMatch([Exchange], rabbit_db_exchange:recover(?VHOST)), ?assertMatch({ok, #exchange{name = XName}}, rabbit_db_exchange:get(XName)), diff --git a/deps/rabbit/test/rabbit_db_maintenance_SUITE.erl b/deps/rabbit/test/rabbit_db_maintenance_SUITE.erl index 491cdfb9a34a..336ca681bf99 100644 --- a/deps/rabbit/test/rabbit_db_maintenance_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_maintenance_SUITE.erl @@ -2,14 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_maintenance_SUITE). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). - -compile(export_all). all() -> @@ -24,7 +22,6 @@ groups() -> all_tests() -> [ - setup_schema, set_and_get, set_and_get_consistent ]. @@ -64,14 +61,6 @@ end_per_testcase(Testcase, Config) -> %% Test Cases %% --------------------------------------------------------------------------- -setup_schema(Config) -> - passed = rabbit_ct_broker_helpers:rpc( - Config, 0, ?MODULE, setup_schema1, [Config]). - -setup_schema1(_Config) -> - ?assertEqual(ok, rabbit_db_maintenance:setup_schema()), - passed. - set_and_get(Config) -> passed = rabbit_ct_broker_helpers:rpc( Config, 0, ?MODULE, set_and_get1, [Config]). 
diff --git a/deps/rabbit/test/rabbit_db_msup_SUITE.erl b/deps/rabbit/test/rabbit_db_msup_SUITE.erl index 20ca9f1fbfd9..5fb9994d95e5 100644 --- a/deps/rabbit/test/rabbit_db_msup_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_msup_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_msup_SUITE). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -83,9 +82,12 @@ create_or_update1(_Config) -> Overall = spawn(fun() -> ok end), Spec = #{id => id, start => {m, f, args}}, ?assertEqual(start, - rabbit_db_msup:create_or_update(group, Overall, undefined, Spec, id)), + rabbit_db_msup:create_or_update(group, Overall, undefined, Spec, id(id))), passed. +id(Id) -> + Id. + find_mirror(Config) -> passed = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, find_mirror1, [Config]). @@ -93,8 +95,8 @@ find_mirror1(_Config) -> Overall = spawn(fun() -> ok end), Spec = #{id => id, start => {m, f, args}}, ?assertEqual(start, rabbit_db_msup:create_or_update(group, Overall, undefined, - Spec, id)), - ?assertEqual({ok, Overall}, rabbit_db_msup:find_mirror(group, id)), + Spec, id(id))), + ?assertEqual({ok, Overall}, rabbit_db_msup:find_mirror(group, id(id))), passed. delete(Config) -> @@ -104,9 +106,9 @@ delete1(_Config) -> Overall = spawn(fun() -> ok end), Spec = #{id => id, start => {m, f, args}}, ?assertEqual(start, rabbit_db_msup:create_or_update(group, Overall, undefined, - Spec, id)), - ?assertEqual(ok, rabbit_db_msup:delete(group, id)), - ?assertEqual({error, not_found}, rabbit_db_msup:find_mirror(group, id)), + Spec, id(id))), + ?assertEqual(ok, rabbit_db_msup:delete(group, id(id))), + ?assertEqual({error, not_found}, rabbit_db_msup:find_mirror(group, id(id))), passed. delete_all(Config) -> @@ -116,9 +118,9 @@ delete_all1(_Config) -> Overall = spawn(fun() -> ok end), Spec = #{id => id, start => {m, f, args}}, ?assertEqual(start, rabbit_db_msup:create_or_update(group, Overall, undefined, - Spec, id)), + Spec, id(id))), ?assertEqual(ok, rabbit_db_msup:delete_all(group)), - ?assertEqual({error, not_found}, rabbit_db_msup:find_mirror(group, id)), + ?assertEqual({error, not_found}, rabbit_db_msup:find_mirror(group, id(id))), passed. update_all(Config) -> @@ -129,8 +131,8 @@ update_all1(_Config) -> Overall = spawn(fun() -> ok end), Spec = #{id => id, start => {m, f, args}}, ?assertEqual(start, rabbit_db_msup:create_or_update(group, OldOverall, undefined, - Spec, id)), - ?assertEqual({ok, OldOverall}, rabbit_db_msup:find_mirror(group, id)), + Spec, id(id))), + ?assertEqual({ok, OldOverall}, rabbit_db_msup:find_mirror(group, id(id))), ?assertEqual([Spec], rabbit_db_msup:update_all(Overall, OldOverall)), - ?assertEqual({ok, Overall}, rabbit_db_msup:find_mirror(group, id)), + ?assertEqual({ok, Overall}, rabbit_db_msup:find_mirror(group, id(id))), passed. diff --git a/deps/rabbit/test/rabbit_db_policy_SUITE.erl b/deps/rabbit/test/rabbit_db_policy_SUITE.erl index 2afa3acd5d9f..38068c7bdde4 100644 --- a/deps/rabbit/test/rabbit_db_policy_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_policy_SUITE.erl @@ -2,15 +2,13 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_policy_SUITE). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). - -compile(export_all). -define(VHOST, <<"/">>). diff --git a/deps/rabbit/test/rabbit_db_queue_SUITE.erl b/deps/rabbit/test/rabbit_db_queue_SUITE.erl index 3cafb91443d0..f66e8fd236c9 100644 --- a/deps/rabbit/test/rabbit_db_queue_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_queue_SUITE.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_queue_SUITE). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). -include("amqqueue.hrl"). -compile(export_all). @@ -41,23 +40,24 @@ all_tests() -> count, count_by_vhost, set, - set_many, delete, update, + update_decorators, exists, get_all_durable, get_all_durable_by_type, filter_all_durable, get_durable, get_many_durable, - set_dirty, - internal_delete, - update_durable + update_durable, + mark_local_durable_queues_stopped, + foreach_durable, + internal_delete ]. mnesia_tests() -> [ - foreach_durable, + set_dirty, foreach_transient, delete_transient, update_in_mnesia_tx, @@ -75,7 +75,13 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). +init_per_group(mnesia_store = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), + init_per_group_common(Group, Config); init_per_group(Group, Config) -> + init_per_group_common(Group, Config). + +init_per_group_common(Group, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, Group}, {rmq_nodes_count, 1} @@ -137,7 +143,7 @@ get_many1(_Config) -> ?assertEqual([Q], rabbit_db_queue:get_many([QName, QName2])), ?assertEqual([], rabbit_db_queue:get_many([QName2])), ok = rabbit_db_queue:set(Q2), - ?assertEqual([Q, Q2], rabbit_db_queue:get_many([QName, QName2])), + ?assertEqual(lists:sort([Q, Q2]), lists:sort(rabbit_db_queue:get_many([QName, QName2]))), passed. get_all(Config) -> @@ -276,23 +282,6 @@ set1(_Config) -> ?assertEqual({ok, Q}, rabbit_db_queue:get(QName)), passed. -set_many(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, set_many1, [Config]). 
- -set_many1(_Config) -> - QName1 = rabbit_misc:r(?VHOST, queue, <<"test-queue1">>), - QName2 = rabbit_misc:r(?VHOST, queue, <<"test-queue2">>), - QName3 = rabbit_misc:r(?VHOST, queue, <<"test-queue3">>), - Q1 = new_queue(QName1, rabbit_classic_queue), - Q2 = new_queue(QName2, rabbit_classic_queue), - Q3 = new_queue(QName3, rabbit_classic_queue), - ?assertEqual(ok, rabbit_db_queue:set_many([])), - ?assertEqual(ok, rabbit_db_queue:set_many([Q1, Q2, Q3])), - ?assertEqual({ok, Q1}, rabbit_db_queue:get_durable(QName1)), - ?assertEqual({ok, Q2}, rabbit_db_queue:get_durable(QName2)), - ?assertEqual({ok, Q3}, rabbit_db_queue:get_durable(QName3)), - passed. - delete(Config) -> passed = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete1, [Config]). @@ -335,7 +324,7 @@ update_decorators1(_Config) -> ?assertEqual({ok, Q}, rabbit_db_queue:get(QName)), ?assertEqual(undefined, amqqueue:get_decorators(Q)), %% Not really testing we set a decorator, but at least the field is being updated - ?assertEqual(ok, rabbit_db_queue:update_decorators(QName)), + ?assertEqual(ok, rabbit_db_queue:update_decorators(QName, [])), {ok, Q1} = rabbit_db_queue:get(QName), ?assertEqual([], amqqueue:get_decorators(Q1)), passed. @@ -455,8 +444,24 @@ update_durable1(_Config) -> fun(Q0) when ?is_amqqueue(Q0) -> true end)), {ok, Q0} = rabbit_db_queue:get_durable(QName1), ?assertMatch(my_policy, amqqueue:get_policy(Q0)), - {ok, Q00} = rabbit_db_queue:get(QName1), - ?assertMatch(undefined, amqqueue:get_policy(Q00)), + passed. + +mark_local_durable_queues_stopped(Config) -> + passed = rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, mark_local_durable_queues_stopped1, [Config]). + +mark_local_durable_queues_stopped1(_Config) -> + DurableQName = rabbit_misc:r(?VHOST, queue, <<"test-queue1">>), + TransientQName = rabbit_misc:r(?VHOST, queue, <<"test-queue2">>), + DurableQ = new_queue(DurableQName, rabbit_classic_queue), + TransientQ = new_queue(TransientQName, rabbit_classic_queue), + %% Set the durable queue's pid to a dead process + RecoverableQ = amqqueue:set_pid(DurableQ, spawn(fun() -> ok end)), + ?assertEqual(ok, rabbit_db_queue:set(RecoverableQ)), + ?assertEqual(ok, rabbit_db_queue:set_dirty(TransientQ)), + ?assertEqual(ok, rabbit_amqqueue:mark_local_durable_queues_stopped(?VHOST)), + {ok, StoppedQ} = rabbit_db_queue:get_durable(DurableQName), + ?assertEqual(stopped, amqqueue:get_state(StoppedQ)), passed. foreach_durable(Config) -> @@ -464,11 +469,8 @@ foreach_durable(Config) -> foreach_durable1(_Config) -> QName1 = rabbit_misc:r(?VHOST, queue, <<"test-queue1">>), - QName2 = rabbit_misc:r(?VHOST, queue, <<"test-queue2">>), Q1 = new_queue(QName1, rabbit_classic_queue), - Q2 = new_queue(QName2, rabbit_classic_queue), ?assertEqual(ok, rabbit_db_queue:set(Q1)), - ?assertEqual(ok, rabbit_db_queue:set_dirty(Q2)), ?assertEqual(ok, rabbit_db_queue:foreach_durable( fun(Q0) -> rabbit_db_queue:internal_delete(amqqueue:get_name(Q0), true, normal) @@ -476,7 +478,6 @@ foreach_durable1(_Config) -> fun(Q0) when ?is_amqqueue(Q0) -> true end)), ?assertEqual({error, not_found}, rabbit_db_queue:get(QName1)), ?assertEqual({error, not_found}, rabbit_db_queue:get_durable(QName1)), - ?assertMatch({ok, _}, rabbit_db_queue:get(QName2)), passed.
foreach_transient(Config) -> diff --git a/deps/rabbit/test/rabbit_db_topic_exchange_SUITE.erl b/deps/rabbit/test/rabbit_db_topic_exchange_SUITE.erl index 1bde941840f2..ec907a84118c 100644 --- a/deps/rabbit/test/rabbit_db_topic_exchange_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_topic_exchange_SUITE.erl @@ -2,12 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_topic_exchange_SUITE). -include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile([nowarn_export_all, export_all]). @@ -16,27 +17,33 @@ all() -> [ - {group, tests} + {group, mnesia_store} ]. groups() -> [ - {tests, [shuffle], all_tests()} + {mnesia_store, [], mnesia_tests()}, + {benchmarks, [], benchmarks()} ]. -all_tests() -> +mnesia_tests() -> [ set, delete, delete_all_for_exchange, match, match_return_binding_keys_many_destinations, - match_return_binding_keys_single_destination + match_return_binding_keys_single_destination, + build_key_from_topic_trie_binding_record, + build_key_from_deletion_events, + build_key_from_binding_deletion_event, + build_multiple_key_from_deletion_events ]. -%% ------------------------------------------------------------------- -%% Test suite setup/teardown. -%% ------------------------------------------------------------------- +benchmarks() -> + [ + match_benchmark + ]. init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), @@ -45,7 +52,13 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). +init_per_group(mnesia_store = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), + init_per_group_common(Group, Config); init_per_group(Group, Config) -> + init_per_group_common(Group, Config). + +init_per_group_common(Group, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, Group}, {rmq_nodes_count, 1} @@ -60,7 +73,11 @@ end_per_group(_Group, Config) -> rabbit_ct_broker_helpers:teardown_steps()). init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase). + XName = rabbit_misc:r(<<"/">>, exchange, <<"amq.topic">>), + {ok, X} = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, lookup, [XName]), + Config1 = rabbit_ct_helpers:set_config(Config, [{exchange_name, XName}, + {exchange, X}]), + rabbit_ct_helpers:testcase_started(Config1, Testcase). end_per_testcase(Testcase, Config) -> rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_db_topic_exchange, clear, []), @@ -247,3 +264,224 @@ set(Src, BindingKey, Dst, Args) when is_list(Args) -> Binding = #binding{source = Src, key = BindingKey, destination = Dst, args = Args}, ok = rabbit_db_topic_exchange:set(Binding). + +%% --------------------------------------------------------------------------- +%% Functional tests +%% --------------------------------------------------------------------------- + +build_key_from_topic_trie_binding_record(Config) -> + passed = rabbit_ct_broker_helpers:rpc( + Config, 0, ?MODULE, build_key_from_topic_trie_binding_record1, [Config]). 
+ +build_key_from_topic_trie_binding_record1(Config) -> + XName = ?config(exchange_name, Config), + X = ?config(exchange, Config), + QName = rabbit_misc:r(<<"/">>, queue, <<"q1">>), + RK = <<"a.b.c.d.e.f">>, + ok = rabbit_exchange_type_topic:add_binding(none, X, #binding{source = XName, + destination = QName, + key = RK, + args = []}), + SplitRK = rabbit_db_topic_exchange:split_topic_key(RK), + [TopicTrieBinding] = ets:tab2list(rabbit_topic_trie_binding), + ?assertEqual(SplitRK, rabbit_db_topic_exchange:trie_binding_to_key(TopicTrieBinding)), + passed. + +build_key_from_deletion_events(Config) -> + passed = rabbit_ct_broker_helpers:rpc( + Config, 0, ?MODULE, build_key_from_deletion_events1, [Config]). + +build_key_from_deletion_events1(Config) -> + XName = ?config(exchange_name, Config), + X = ?config(exchange, Config), + QName = rabbit_misc:r(<<"/">>, queue, <<"q1">>), + RK = <<"a.b.c.d.e.f">>, + Binding = #binding{source = XName, + destination = QName, + key = RK, + args = []}, + ok = rabbit_exchange_type_topic:add_binding(none, X, Binding), + SplitRK = rabbit_db_topic_exchange:split_topic_key(RK), + Tables = [rabbit_topic_trie_binding, rabbit_topic_trie_edge], + subscribe_to_mnesia_changes(Tables), + rabbit_exchange_type_topic:remove_bindings(none, X, [Binding]), + Records = receive_delete_events(7), + unsubscribe_to_mnesia_changes(Tables), + ?assertMatch([{_, SplitRK}], + rabbit_db_topic_exchange:trie_records_to_key(Records)), + passed. + +build_key_from_binding_deletion_event(Config) -> + passed = rabbit_ct_broker_helpers:rpc( + Config, 0, ?MODULE, build_key_from_binding_deletion_event1, [Config]). + +build_key_from_binding_deletion_event1(Config) -> + XName = ?config(exchange_name, Config), + X = ?config(exchange, Config), + QName = rabbit_misc:r(<<"/">>, queue, <<"q1">>), + RK = <<"a.b.c.d.e.f">>, + Binding0 = #binding{source = XName, + destination = QName, + key = RK, + args = [some_args]}, + Binding = #binding{source = XName, + destination = QName, + key = RK, + args = []}, + ok = rabbit_exchange_type_topic:add_binding(none, X, Binding0), + ok = rabbit_exchange_type_topic:add_binding(none, X, Binding), + SplitRK = rabbit_db_topic_exchange:split_topic_key(RK), + Tables = [rabbit_topic_trie_binding, rabbit_topic_trie_edge], + subscribe_to_mnesia_changes(Tables), + rabbit_exchange_type_topic:remove_bindings(none, X, [Binding]), + Records = receive_delete_events(7), + unsubscribe_to_mnesia_changes(Tables), + ?assertMatch([{_, SplitRK}], + rabbit_db_topic_exchange:trie_records_to_key(Records)), + passed. + +build_multiple_key_from_deletion_events(Config) -> + passed = rabbit_ct_broker_helpers:rpc( + Config, 0, ?MODULE, build_multiple_key_from_deletion_events1, [Config]). 
+ +build_multiple_key_from_deletion_events1(Config) -> + XName = ?config(exchange_name, Config), + X = ?config(exchange, Config), + QName = rabbit_misc:r(<<"/">>, queue, <<"q1">>), + RK0 = <<"a.b.c.d.e.f">>, + RK1 = <<"a.b.c.d">>, + RK2 = <<"a.b.c.g.e.f">>, + RK3 = <<"hare.rabbit.ho">>, + Binding0 = #binding{source = XName, destination = QName, key = RK0, args = []}, + Binding1 = #binding{source = XName, destination = QName, key = RK1, args = []}, + Binding2 = #binding{source = XName, destination = QName, key = RK2, args = []}, + Binding3 = #binding{source = XName, destination = QName, key = RK3, args = []}, + ok = rabbit_exchange_type_topic:add_binding(none, X, Binding0), + ok = rabbit_exchange_type_topic:add_binding(none, X, Binding1), + ok = rabbit_exchange_type_topic:add_binding(none, X, Binding2), + ok = rabbit_exchange_type_topic:add_binding(none, X, Binding3), + SplitRK0 = rabbit_db_topic_exchange:split_topic_key(RK0), + SplitRK1 = rabbit_db_topic_exchange:split_topic_key(RK1), + SplitRK2 = rabbit_db_topic_exchange:split_topic_key(RK2), + SplitRK3 = rabbit_db_topic_exchange:split_topic_key(RK3), + Tables = [rabbit_topic_trie_binding, rabbit_topic_trie_edge], + subscribe_to_mnesia_changes(Tables), + rabbit_exchange_type_topic:delete(none, X), + Records = receive_delete_events(7), + unsubscribe_to_mnesia_changes(Tables), + RKs = lists:sort([SplitRK0, SplitRK1, SplitRK2, SplitRK3]), + ?assertMatch( + RKs, + lists:sort([RK || {_, RK} <- rabbit_db_topic_exchange:trie_records_to_key(Records)])), + passed. + +%% --------------------------------------------------------------------------- +%% Benchmarks +%% --------------------------------------------------------------------------- + +match_benchmark(Config) -> + %% run the benchmark with Mnesia first + MnesiaResults = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, match_benchmark1, [Config]), + + %% migrate to Khepri + Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + {T, ok} = timer:tc(fun() -> + rabbit_ct_broker_helpers:enable_feature_flag(Config, Servers, khepri_db) + end), + ct:pal("~p: time to migrate to Khepri: ~.2fs", [?FUNCTION_NAME, T/1000000]), + + %% run the same benchmark with Khepri enabled + KhepriResults = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, match_benchmark1, [Config]), + + %% print all the results first + maps:foreach(fun(Test, KhepriResult) -> + MnesiaResult = maps:get(Test, MnesiaResults), + ct:pal("~p: Test: ~p, Mnesia: ~.2fus, Khepri: ~.2fus", [?FUNCTION_NAME, Test, MnesiaResult, KhepriResult]) + end, KhepriResults), + + %% fail the test if needed + maps:foreach(fun(Test, KhepriResult) -> + MnesiaResult = maps:get(Test, MnesiaResults), + ?assert(KhepriResult < MnesiaResult * 1.5, "Khepri can't be significantly slower than Mnesia") + end, KhepriResults). + +match_benchmark1(_Config) -> + Src = rabbit_misc:r(?VHOST, exchange, <<"test-exchange">>), + Dst1 = rabbit_misc:r(?VHOST, queue, <<"test-queue1">>), + Dst2 = rabbit_misc:r(?VHOST, queue, <<"test-queue2">>), + Dst3 = rabbit_misc:r(?VHOST, queue, <<"test-queue3">>), + Dst4 = rabbit_misc:r(?VHOST, queue, <<"test-queue4">>), + Dst5 = rabbit_misc:r(?VHOST, queue, <<"test-queue5">>), + Dst6 = rabbit_misc:r(?VHOST, queue, <<"test-queue6">>), + + SimpleTopics = [list_to_binary("a.b."
++ integer_to_list(N)) || N <- lists:seq(1,1000)], + Bindings = [#binding{source = Src, key = RoutingKey, destination = Dst1, args = #{}} || RoutingKey <- SimpleTopics], + BindingRes = [rabbit_db_topic_exchange:set(Binding) || Binding <- Bindings], + ?assertMatch([ok], lists:uniq(BindingRes)), + ok = rabbit_db_topic_exchange:set(#binding{source = Src, key = <<"a.b.*">>, destination = Dst2, args = #{}}), + ok = rabbit_db_topic_exchange:set(#binding{source = Src, key = <<"a.b.*">>, destination = Dst3, args = #{}}), + ok = rabbit_db_topic_exchange:set(#binding{source = Src, key = <<"a.#">>, destination = Dst4, args = #{}}), + ok = rabbit_db_topic_exchange:set(#binding{source = Src, key = <<"*.b.42">>, destination = Dst5, args = #{}}), + ok = rabbit_db_topic_exchange:set(#binding{source = Src, key = <<"#">>, destination = Dst6, args = #{}}), + + {Tany, _} = timer:tc(fun() -> + [rabbit_db_topic_exchange:match(Src, <<"foo">>) || _ <- lists:seq(1, 100)] + end), + ?assertMatch([Dst6], rabbit_db_topic_exchange:match(Src, <<"foo">>)), + + {Tbar, _} = timer:tc(fun() -> + [rabbit_db_topic_exchange:match(Src, <<"a.b.bar">>) || _ <- lists:seq(1, 100)] + end), + ?assertEqual(lists:sort([Dst2,Dst3,Dst4,Dst6]), lists:sort(rabbit_db_topic_exchange:match(Src, <<"a.b.bar">>))), + + {Tbaz, _} = timer:tc(fun() -> + [rabbit_db_topic_exchange:match(Src, <<"baz.b.42">>) || _ <- lists:seq(1, 100)] + end), + ?assertEqual(lists:sort([Dst5,Dst6]), lists:sort(rabbit_db_topic_exchange:match(Src, <<"baz.b.42">>))), + + {Tsimple, Rsimple} = timer:tc(fun() -> + [rabbit_db_topic_exchange:match(Src, RoutingKey) + || RoutingKey <- SimpleTopics, RoutingKey =/= <<"a.b.123">>] + end), + ?assertEqual([Dst1,Dst2,Dst3,Dst4,Dst6], lists:sort(lists:uniq(hd(Rsimple)))), + + #{ + "average time to match `foo`" => Tany/100, + "average time to match `a.b.bar`" => Tbar/100, + "average time to match `baz.b.42`" => Tbaz/100, + "average time to match a simple topic" => Tsimple/length(SimpleTopics) + }. + +subscribe_to_mnesia_changes([Table | Rest]) -> + case mnesia:subscribe({table, Table, detailed}) of + {ok, _} -> subscribe_to_mnesia_changes(Rest); + Error -> Error + end; +subscribe_to_mnesia_changes([]) -> + ok. + +unsubscribe_to_mnesia_changes([Table | Rest]) -> + case mnesia:unsubscribe({table, Table, detailed}) of + {ok, _} -> unsubscribe_to_mnesia_changes(Rest); + Error -> Error + end; +unsubscribe_to_mnesia_changes([]) -> + ok. + +receive_delete_events(Num) -> + receive_delete_events(Num, []). + +receive_delete_events(0, Evts) -> + receive + {mnesia_table_event, {delete, _, Record, _, _}} -> + receive_delete_events(0, [Record | Evts]) + after 0 -> + Evts + end; +receive_delete_events(N, Evts) -> + receive + {mnesia_table_event, {delete, _, Record, _, _}} -> + receive_delete_events(N - 1, [Record | Evts]) + after 10000 -> + Evts + end. diff --git a/deps/rabbit/test/rabbit_direct_reply_to_prop_SUITE.erl b/deps/rabbit/test/rabbit_direct_reply_to_prop_SUITE.erl index 177847583a5e..59451186ce94 100644 --- a/deps/rabbit/test/rabbit_direct_reply_to_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_direct_reply_to_prop_SUITE.erl @@ -2,7 +2,6 @@ -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("proper/include/proper.hrl"). -define(ITERATIONS_TO_RUN_UNTIL_CONFIDENT, 10000). 
diff --git a/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl b/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl index 589b058cb178..0be88a464a41 100644 --- a/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl +++ b/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Dummy module to test rabbit_direct:extract_extra_auth_props diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 8362833b336e..a3608f26ef46 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -14,15 +14,16 @@ -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). +-include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). +% -define(PROTOMOD, rabbit_framing_amqp_0_9_1). %%%=================================================================== %%% Common Test callbacks %%%=================================================================== all() -> [ - {group, machine_version_2}, - {group, machine_version_3}, + {group, tests}, {group, machine_version_conversion} ]. @@ -34,31 +35,28 @@ all_tests() -> groups() -> [ - {machine_version_2, [], all_tests()}, - {machine_version_3, [], all_tests()}, - {machine_version_conversion, [], [convert_v2_to_v3]} + {tests, [shuffle], all_tests()}, + {machine_version_conversion, [shuffle], + [convert_v2_to_v3, + convert_v3_to_v4]} ]. -init_per_suite(Config) -> - Config. - -end_per_suite(_Config) -> - ok. - -init_per_group(machine_version_2, Config) -> - [{machine_version, 2} | Config]; -init_per_group(machine_version_3, Config) -> - [{machine_version, 3} | Config]; +init_per_group(tests, Config) -> + [{machine_version, 4} | Config]; init_per_group(machine_version_conversion, Config) -> Config. -end_per_group(_Group, _Config) -> - ok. +init_per_testcase(_Testcase, Config) -> + FF = ?config(machine_version, Config) == 4, + ok = meck:new(rabbit_feature_flags, [passthrough]), + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> FF end), + Config. -init_per_testcase(_TestCase, Config) -> +end_per_group(_, Config) -> Config. -end_per_testcase(_TestCase, _Config) -> +end_per_testcase(_Group, _Config) -> + meck:unload(), ok. %%%=================================================================== @@ -71,7 +69,10 @@ end_per_testcase(_TestCase, _Config) -> -define(ASSERT_EFF(EfxPat, Guard, Effects), ?assert(lists:any(fun (EfxPat) when Guard -> true; (_) -> false - end, Effects))). + end, Effects), + lists:flatten(io_lib:format("Expected to find effect matching " + "pattern '~s' in effect list '~0p'", + [??EfxPat, Effects])))). -define(ASSERT_NO_EFF(EfxPat, Effects), ?assert(not lists:any(fun (EfxPat) -> true; @@ -88,100 +89,257 @@ end_per_testcase(_TestCase, _Config) -> (_) -> false end, Effects))). +-define(ASSERT(Guard, Fun), + {assert, fun (S) -> ?assertMatch(Guard, S), _ = Fun(S) end}). +-define(ASSERT(Guard), + ?ASSERT(Guard, fun (_) -> true end)). 
+ test_init(Name) -> init(#{name => Name, - max_in_memory_length => 0, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(Name, utf8)), + queue_resource => rabbit_misc:r("/", queue, atom_to_binary(Name)), release_cursor_interval => 0}). -enq_enq_checkout_test(C) -> - Cid = {<<"enq_enq_checkout_test">>, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), +-define(FUNCTION_NAME_B, atom_to_binary(?FUNCTION_NAME)). +-define(LINE_B, integer_to_binary(?LINE)). + +enq_enq_checkout_compat_test(C) -> + enq_enq_checkout_test(C, {auto, 2, simple_prefetch}). + +enq_enq_checkout_v4_test(C) -> + enq_enq_checkout_test(C, {auto, {simple_prefetch, 2}}). + +enq_enq_checkout_test(Config, Spec) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State1, _} = enq(Config, 1, 1, first, test_init(?FUNCTION_NAME)), + {State2, _} = enq(Config, 2, 2, second, State1), ?assertEqual(2, rabbit_fifo:query_messages_total(State2)), - {_State3, _, Effects} = - apply(meta(C, 3), - rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch}, #{}), - State2), - ct:pal("~tp", [Effects]), + {State3, #{key := CKey, + next_msg_id := NextMsgId}, Effects} = + checkout(Config, ?LINE, Cid, Spec, State2), ?ASSERT_EFF({monitor, _, _}, Effects), - ?ASSERT_EFF({log, [1,2], _Fun, _Local}, Effects), + ?ASSERT_EFF({log, [1, 2], _Fun, _Local}, Effects), + + {State4, _} = settle(Config, CKey, ?LINE, + [NextMsgId, NextMsgId+1], State3), + ?assertMatch(#{num_messages := 0, + num_ready_messages := 0, + num_checked_out := 0, + num_consumers := 1}, + rabbit_fifo:overview(State4)), ok. -credit_enq_enq_checkout_settled_credit_test(C) -> +credit_enq_enq_checkout_settled_credit_v1_test(Config) -> Cid = {?FUNCTION_NAME, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), - {State3, _, Effects} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {auto, 1, credited}, #{}), State2), - ?ASSERT_EFF({monitor, _, _}, Effects), - ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects), + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), + {State3, #{key := CKey, + next_msg_id := NextMsgId}, Effects3} = + checkout(Config, ?LINE, Cid, {auto, 0, credited}, State2), + ?ASSERT_EFF({monitor, _, _}, Effects3), + {State4, Effects4} = credit(Config, CKey, ?LINE, 1, 0, false, State3), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects4), %% settle the delivery this should _not_ result in further messages being %% delivered - {State4, SettledEffects} = settle(C, Cid, 4, 1, State3), + {State5, SettledEffects} = settle(Config, CKey, ?LINE, NextMsgId, State4), ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> true; - (_) -> false + (_) -> + false end, SettledEffects)), %% granting credit (3) should deliver the second msg if the receivers %% delivery count is (1) - {State5, CreditEffects} = credit(C, Cid, 5, 1, 1, false, State4), - % ?debugFmt("CreditEffects ~tp ~n~tp", [CreditEffects, State4]), + {State6, CreditEffects} = credit(Config, CKey, ?LINE, 1, 1, false, State5), ?ASSERT_EFF({log, [2], _, _}, CreditEffects), - {_State6, FinalEffects} = enq(C, 6, 3, third, State5), + {_State, FinalEffects} = enq(Config, 6, 3, third, State6), ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> true; (_) -> false end, FinalEffects)), ok. 
-credit_with_drained_test(C) -> - Cid = {?FUNCTION_NAME, self()}, +credit_enq_enq_checkout_settled_credit_v2_test(Config) -> + InitDelCnt = 16#ff_ff_ff_ff, + Ctag = ?FUNCTION_NAME, + Cid = {Ctag, self()}, + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), + {State3, #{key := CKey, + next_msg_id := NextMsgId}, Effects3} = + checkout(Config, ?LINE, Cid, {auto, {credited, InitDelCnt}}, State2), + ?ASSERT_EFF({monitor, _, _}, Effects3), + {State4, Effects4} = credit(Config, CKey, ?LINE, 1, InitDelCnt, false, State3), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects4), + %% Settling the delivery should not grant new credit. + {State5, SettledEffects} = settle(Config, CKey, 4, NextMsgId, State4), + ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> + true; + (_) -> + false + end, SettledEffects)), + {State6, CreditEffects} = credit(Config, CKey, ?LINE, 1, 0, false, State5), + ?ASSERT_EFF({log, [2], _, _}, CreditEffects), + %% The credit_reply should be sent **after** the delivery. + ?assertEqual({send_msg, self(), + {credit_reply, Ctag, _DeliveryCount = 1, _Credit = 0, _Available = 0, _Drain = false}, + ?DELIVERY_SEND_MSG_OPTS}, + lists:last(CreditEffects)), + {_State, FinalEffects} = enq(Config, 6, 3, third, State6), + ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> + true; + (_) -> false + end, FinalEffects)). + +credit_with_drained_v1_test(Config) -> + Ctag = ?FUNCTION_NAME_B, + Cid = {Ctag, self()}, State0 = test_init(test), %% checkout with a single credit - {State1, _, _} = - apply(meta(C, 1), rabbit_fifo:make_checkout(Cid, {auto, 1, credited},#{}), - State0), - ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 1, - delivery_count = 0}}}, + {State1, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, {auto, 0, credited}, State0), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, + delivery_count = 0}}}, State1), + {State2, _Effects2} = credit(Config, CKey, ?LINE, 1, 0, false, State1), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 1, + delivery_count = 0}}}, + State2), {State, Result, _} = - apply(meta(C, 3), rabbit_fifo:make_credit(Cid, 0, 5, true), State1), - ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0, - delivery_count = 5}}}, + apply(meta(Config, ?LINE), rabbit_fifo:make_credit(Cid, 5, 0, true), State2), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, + delivery_count = 5}}}, State), ?assertEqual({multi, [{send_credit_reply, 0}, - {send_drained, {?FUNCTION_NAME, 5}}]}, - Result), + {send_drained, {Ctag, 5}}]}, + Result), ok. 
-credit_and_drain_test(C) -> - Cid = {?FUNCTION_NAME, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), +credit_with_drained_v2_test(Config) -> + Ctag = ?FUNCTION_NAME, + Cid = {Ctag, self()}, + State0 = test_init(test), + %% checkout with a single credit + {State1, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, {auto, {credited, 0}}, State0), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, + delivery_count = 0}}}, + State1), + {State2, _Effects2} = credit(Config, CKey, ?LINE, 1, 0, false, State1), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 1, + delivery_count = 0}}}, + State2), + {State, _, Effects} = + apply(meta(Config, ?LINE), rabbit_fifo:make_credit(CKey, 5, 0, true), State2), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, + delivery_count = 5}}}, + State), + ?assertEqual([{send_msg, self(), + {credit_reply, Ctag, _DeliveryCount = 5, + _Credit = 0, _Available = 0, _Drain = true}, + ?DELIVERY_SEND_MSG_OPTS}], + Effects). + +credit_and_drain_v1_test(Config) -> + Ctag = ?FUNCTION_NAME, + Cid = {Ctag, self()}, + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), %% checkout without any initial credit (like AMQP 1.0 would) {State3, _, CheckEffs} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {auto, 0, credited}, #{}), + apply(meta(Config, 3), make_checkout(Cid, {auto, 0, credited}, #{}), State2), ?ASSERT_NO_EFF({log, _, _, _}, CheckEffs), {State4, {multi, [{send_credit_reply, 0}, - {send_drained, {?FUNCTION_NAME, 2}}]}, - Effects} = apply(meta(C, 4), rabbit_fifo:make_credit(Cid, 4, 0, true), State3), + {send_drained, {Ctag, 2}}]}, + Effects} = apply(meta(Config, 4), rabbit_fifo:make_credit(Cid, 4, 0, true), State3), ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0, delivery_count = 4}}}, State4), ?ASSERT_EFF({log, [1, 2], _, _}, Effects), - {_State5, EnqEffs} = enq(C, 5, 2, third, State4), + {_State5, EnqEffs} = enq(Config, 5, 2, third, State4), ?ASSERT_NO_EFF({log, _, _, _}, EnqEffs), ok. +credit_and_drain_v2_test(Config) -> + Ctag = ?FUNCTION_NAME_B, + Cid = {Ctag, self()}, + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), + {State3, #{key := CKey}, CheckEffs} = checkout(Config, ?LINE, Cid, + {auto, {credited, 16#ff_ff_ff_ff - 1}}, + State2), + ?ASSERT_NO_EFF({log, _, _, _}, CheckEffs), + {State4, Effects} = credit(Config, CKey, ?LINE, 4, 16#ff_ff_ff_ff - 1, + true, State3), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, + delivery_count = 2}}}, + State4), + ?ASSERT_EFF({log, [1, 2], _, _}, Effects), + %% The credit_reply should be sent **after** the deliveries. + ?assertEqual({send_msg, self(), + {credit_reply, Ctag, _DeliveryCount = 2, _Credit = 0, + _Available = 0, _Drain = true}, + ?DELIVERY_SEND_MSG_OPTS}, + lists:last(Effects)), + + {_State5, EnqEffs} = enq(Config, 5, 2, third, State4), + ?ASSERT_NO_EFF({log, _, _, _}, EnqEffs), + ok. + +credit_and_drain_single_active_consumer_v2_test(Config) -> + State0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r( + "/", queue, atom_to_binary(?FUNCTION_NAME)), + release_cursor_interval => 0, + single_active_consumer_on => true}), + Self = self(), + + % Send 1 message. + {State1, _} = enq(Config, 1, 1, first, State0), + + % Add 2 consumers. 
+ Ctag1 = <<"ctag1">>, + Ctag2 = <<"ctag2">>, + C1 = {Ctag1, Self}, + C2 = {Ctag2, Self}, + CK1 = ?LINE, + CK2 = ?LINE, + Entries = [ + {CK1, make_checkout(C1, {auto, {credited, 16#ff_ff_ff_ff}}, #{})}, + {CK2, make_checkout(C2, {auto, {credited, 16#ff_ff_ff_ff}}, #{})} + ], + {State2, _} = run_log(Config, State1, Entries), + + % The 1st registered consumer is the active one, the 2nd consumer is waiting. + ?assertMatch(#{single_active_consumer_id := C1, + single_active_num_waiting_consumers := 1}, + rabbit_fifo:overview(State2)), + + % Drain the inactive consumer. + {State3, Effects0} = credit(Config, CK2, ?LINE, 5000, 16#ff_ff_ff_ff, true, State2), + % The inactive consumer should not receive any message. + % Hence, no log effect should be returned. + % Since we sent drain=true, we expect the sending queue to consume all link credit + % advancing the delivery-count. + ?assertEqual({send_msg, Self, + {credit_reply, Ctag2, _DeliveryCount = 4999, _Credit = 0, + _Available = 0, _Drain = true}, + ?DELIVERY_SEND_MSG_OPTS}, + Effects0), + + % Drain the active consumer. + {_State4, Effects1} = credit(Config, CK1, ?LINE, 1000, 16#ff_ff_ff_ff, true, State3), + ?assertMatch([ + {log, [1], _Fun, _Local}, + {send_msg, Self, + {credit_reply, Ctag1, _DeliveryCount = 999, _Credit = 0, + _Available = 0, _Drain = true}, + ?DELIVERY_SEND_MSG_OPTS} + ], + Effects1). enq_enq_deq_test(C) -> - Cid = {?FUNCTION_NAME, self()}, + Cid = {?FUNCTION_NAME_B, self()}, {State1, _} = enq(C, 1, 1, first, test_init(test)), {State2, _} = enq(C, 2, 2, second, State1), % get returns a reply value @@ -190,52 +348,57 @@ enq_enq_deq_test(C) -> {_State3, _, [{log, [1], Fun}, {monitor, _, _}]} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + apply(meta(C, 3), make_checkout(Cid, {dequeue, unsettled}, #{}), State2), ct:pal("Out ~tp", [Fun([Msg1])]), ok. -enq_enq_deq_deq_settle_test(C) -> - Cid = {?FUNCTION_NAME, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), +enq_enq_deq_deq_settle_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), % get returns a reply value {State3, '$ra_no_reply', [{log, [1], _}, {monitor, _, _}]} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + apply(meta(Config, 3), make_checkout(Cid, {dequeue, unsettled}, #{}), State2), - {_State4, {dequeue, empty}} = - apply(meta(C, 4), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + {State4, {dequeue, empty}} = + apply(meta(Config, 4), make_checkout(Cid, {dequeue, unsettled}, #{}), State3), + + {State, _} = settle(Config, Cid, ?LINE, 0, State4), + + ?assertMatch(#{num_consumers := 0}, rabbit_fifo:overview(State)), ok. -enq_enq_checkout_get_settled_test(C) -> +enq_enq_checkout_get_settled_test(Config) -> Cid = {?FUNCTION_NAME, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), + {State1, _} = enq(Config, 1, 1, first, test_init(test)), % get returns a reply value {State2, _, Effs} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), + apply(meta(Config, 3), make_checkout(Cid, {dequeue, settled}, #{}), State1), ?ASSERT_EFF({log, [1], _}, Effs), ?assertEqual(0, rabbit_fifo:query_messages_total(State2)), ok. 
-checkout_get_empty_test(C) -> +checkout_get_empty_test(Config) -> Cid = {?FUNCTION_NAME, self()}, - State = test_init(test), - {_State2, {dequeue, empty}, _} = - apply(meta(C, 1), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State), + State0 = test_init(test), + {State, {dequeue, empty}, _} = checkout(Config, ?LINE, Cid, + {dequeue, unsettled}, State0), + ?assertMatch(#{num_consumers := 0}, rabbit_fifo:overview(State)), ok. -untracked_enq_deq_test(C) -> +untracked_enq_deq_test(Config) -> Cid = {?FUNCTION_NAME, self()}, State0 = test_init(test), - {State1, _, _} = apply(meta(C, 1), + {State1, _, _} = apply(meta(Config, 1), rabbit_fifo:make_enqueue(undefined, undefined, first), State0), {_State2, _, Effs} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1), + apply(meta(Config, 3), make_checkout(Cid, {dequeue, settled}, #{}), State1), ?ASSERT_EFF({log, [1], _}, Effs), ok. @@ -244,104 +407,125 @@ enq_expire_deq_test(C) -> queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), msg_ttl => 0}, S0 = rabbit_fifo:init(Conf), - Msg = #basic_message{content = #content{properties = none, + Msg = #basic_message{content = #content{properties = #'P_basic'{}, payload_fragments_rev = []}}, - {S1, ok, _} = apply(meta(C, 1, 100), rabbit_fifo:make_enqueue(self(), 1, Msg), S0), + {S1, ok, _} = apply(meta(C, 1, 100, {notify, 1, self()}), + rabbit_fifo:make_enqueue(self(), 1, Msg), S0), Cid = {?FUNCTION_NAME, self()}, {_S2, {dequeue, empty}, Effs} = - apply(meta(C, 2, 101), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), S1), + apply(meta(C, 2, 101), make_checkout(Cid, {dequeue, unsettled}, #{}), S1), ?ASSERT_EFF({mod_call, rabbit_global_counters, messages_dead_lettered, [expired, rabbit_quorum_queue, disabled, 1]}, Effs), ok. -enq_expire_enq_deq_test(C) -> +enq_expire_enq_deq_test(Config) -> S0 = test_init(test), %% Msg1 and Msg2 get enqueued in the same millisecond, %% but only Msg1 expires immediately. - Msg1 = #basic_message{content = #content{properties = #'P_basic'{expiration = <<"0">>}, - payload_fragments_rev = [<<"msg1">>]}}, + Msg1 = mc_amqpl:from_basic_message( + #basic_message{routing_keys = [<<"">>], + exchange_name = #resource{name = <<"x">>, + kind = exchange, + virtual_host = <<"v">>}, + content = #content{properties = #'P_basic'{ + expiration = <<"0">>}, + payload_fragments_rev = [<<"msg1">>]}}), Enq1 = rabbit_fifo:make_enqueue(self(), 1, Msg1), - {S1, ok, _} = apply(meta(C, 1, 100), Enq1, S0), - Msg2 = #basic_message{content = #content{properties = none, + Idx1 = ?LINE, + {S1, ok, _} = apply(meta(Config, Idx1, 100, {notify, 1, self()}), Enq1, S0), + Msg2 = #basic_message{content = #content{properties = #'P_basic'{}, + % class_id = 60, + % protocol = ?PROTOMOD, payload_fragments_rev = [<<"msg2">>]}}, Enq2 = rabbit_fifo:make_enqueue(self(), 2, Msg2), - {S2, ok, _} = apply(meta(C, 2, 100), Enq2, S1), + Idx2 = ?LINE, + {S2, ok, _} = apply(meta(Config, Idx2, 100, {notify, 2, self()}), Enq2, S1), Cid = {?FUNCTION_NAME, self()}, {_S3, _, Effs} = - apply(meta(C, 3, 101), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), S2), - {log, [2], Fun} = get_log_eff(Effs), + apply(meta(Config, ?LINE, 101), make_checkout(Cid, {dequeue, unsettled}, #{}), S2), + {log, [Idx2], Fun} = get_log_eff(Effs), [{reply, _From, {wrap_reply, {dequeue, {_MsgId, _HeaderMsg}, ReadyMsgCount}}}] = Fun([Enq2]), ?assertEqual(0, ReadyMsgCount). 
-enq_expire_deq_enq_enq_deq_deq_test(C) -> +enq_expire_deq_enq_enq_deq_deq_test(Config) -> S0 = test_init(test), - Msg1 = #basic_message{content = #content{properties = #'P_basic'{expiration = <<"0">>}, - payload_fragments_rev = [<<"msg1">>]}}, - {S1, ok, _} = apply(meta(C, 1, 100), rabbit_fifo:make_enqueue(self(), 1, Msg1), S0), - {S2, {dequeue, empty}, _} = apply(meta(C, 2, 101), - rabbit_fifo:make_checkout({c1, self()}, {dequeue, unsettled}, #{}), S1), - {S3, _} = enq(C, 3, 2, msg2, S2), - {S4, _} = enq(C, 4, 3, msg3, S3), + Msg1 = #basic_message{content = + #content{properties = #'P_basic'{expiration = <<"0">>}, + payload_fragments_rev = [<<"msg1">>]}}, + {S1, ok, _} = apply(meta(Config, 1, 100, {notify, 1, self()}), + rabbit_fifo:make_enqueue(self(), 1, Msg1), S0), + {S2, {dequeue, empty}, _} = apply(meta(Config, 2, 101), + make_checkout({c1, self()}, + {dequeue, unsettled}, #{}), S1), + {S3, _} = enq(Config, 3, 2, msg2, S2), + {S4, _} = enq(Config, 4, 3, msg3, S3), {S5, '$ra_no_reply', [{log, [3], _}, {monitor, _, _}]} = - apply(meta(C, 5), rabbit_fifo:make_checkout({c2, self()}, {dequeue, unsettled}, #{}), S4), + apply(meta(Config, 5), make_checkout({c2, self()}, {dequeue, unsettled}, #{}), S4), {_S6, '$ra_no_reply', [{log, [4], _}, {monitor, _, _}]} = - apply(meta(C, 6), rabbit_fifo:make_checkout({c3, self()}, {dequeue, unsettled}, #{}), S5). + apply(meta(Config, 6), make_checkout({c3, self()}, {dequeue, unsettled}, #{}), S5), + ok. -release_cursor_test(C) -> - Cid = {?FUNCTION_NAME, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), - {State3, _} = check(C, Cid, 3, 10, State2), - % no release cursor effect at this point - {State4, _} = settle(C, Cid, 4, 1, State3), - {_Final, Effects1} = settle(C, Cid, 5, 0, State4), - % empty queue forwards release cursor all the way - ?ASSERT_EFF({release_cursor, 5, _}, Effects1), +checkout_enq_settle_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State1, #{key := CKey, + next_msg_id := NextMsgId}, + [{monitor, _, _} | _]} = checkout(Config, ?LINE, Cid, 1, test_init(test)), + {State2, Effects0} = enq(Config, 2, 1, first, State1), + ?ASSERT_EFF({send_msg, _, {delivery, _, [{0, {_, first}}]}, _}, Effects0), + {State3, _} = enq(Config, 3, 2, second, State2), + {_, _Effects} = settle(Config, CKey, 4, NextMsgId, State3), ok. -checkout_enq_settle_test(C) -> - Cid = {?FUNCTION_NAME, self()}, - {State1, [{monitor, _, _} | _]} = check(C, Cid, 1, test_init(test)), - {State2, Effects0} = enq(C, 2, 1, first, State1), - %% TODO: this should go back to a send_msg effect after optimisation - % ?ASSERT_EFF({log, [2], _, _}, Effects0), - ?ASSERT_EFF({send_msg, _, - {delivery, ?FUNCTION_NAME, - [{0, {_, first}}]}, _}, - Effects0), - {State3, _} = enq(C, 3, 2, second, State2), - {_, _Effects} = settle(C, Cid, 4, 0, State3), - % the release cursor is the smallest raft index that does not - % contribute to the state of the application - % ?ASSERT_EFF({release_cursor, 2, _}, Effects), - ok. 
- -duplicate_enqueue_test(C) -> - Cid = {<<"duplicate_enqueue_test">>, self()}, - {State1, [ {monitor, _, _} | _]} = check_n(C, Cid, 5, 5, test_init(test)), - {State2, Effects2} = enq(C, 2, 1, first, State1), - % ?ASSERT_EFF({log, [2], _, _}, Effects2), +duplicate_enqueue_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + MsgSeq = 1, + {State1, [ {monitor, _, _} | _]} = check_n(Config, Cid, 5, 5, test_init(test)), + {State2, Effects2} = enq(Config, 2, MsgSeq, first, State1), ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), - {_State3, Effects3} = enq(C, 3, 1, first, State2), + {_State3, Effects3} = enq(Config, 3, MsgSeq, first, State2), ?ASSERT_NO_EFF({log, [_], _, _}, Effects3), ok. -return_test(C) -> +return_test(Config) -> Cid = {<<"cid">>, self()}, Cid2 = {<<"cid2">>, self()}, - {State0, _} = enq(C, 1, 1, msg, test_init(test)), - {State1, _} = check_auto(C, Cid, 2, State0), - {State2, _} = check_auto(C, Cid2, 3, State1), - {State3, _, _} = apply(meta(C, 4), rabbit_fifo:make_return(Cid, [0]), State2), - ?assertMatch(#{Cid := #consumer{checked_out = C1}} when map_size(C1) == 0, - State3#rabbit_fifo.consumers), - ?assertMatch(#{Cid2 := #consumer{checked_out = C2}} when map_size(C2) == 1, - State3#rabbit_fifo.consumers), + {State0, _} = enq(Config, 1, 1, msg, test_init(test)), + {State1, #{key := C1Key, + next_msg_id := MsgId}, _} = checkout(Config, ?LINE, Cid, 1, State0), + {State2, #{key := C2Key}, _} = checkout(Config, ?LINE, Cid2, 1, State1), + {State3, _, _} = apply(meta(Config, 4), + rabbit_fifo:make_return(C1Key, [MsgId]), State2), + ?assertMatch(#{C1Key := #consumer{checked_out = C1}} + when map_size(C1) == 0, State3#rabbit_fifo.consumers), + ?assertMatch(#{C2Key := #consumer{checked_out = C2}} + when map_size(C2) == 1, State3#rabbit_fifo.consumers), + ok. + +return_multiple_test(Config) -> + Cid = {<<"cid">>, self()}, + {State0, _} = enq(Config, 1, 1, first, test_init(?FUNCTION_NAME)), + {State1, _} = enq(Config, 2, 2, second, State0), + {State2, _} = enq(Config, 3, 3, third, State1), + + {State3, + #{key := CKey, + next_msg_id := NextMsgId}, + Effects0} = checkout(Config, ?LINE, Cid, 3, State2), + ?ASSERT_EFF({log, [1, 2, 3], _Fun, _Local}, Effects0), + + {_, _, Effects1} = apply(meta(Config, ?LINE), + rabbit_fifo:make_return( + CKey, + %% Return messages in following order: 3, 1, 2 + [NextMsgId + 2, NextMsgId, NextMsgId + 1]), + State3), + %% We expect messages to be re-delivered in the same order in which we previously returned. + ?ASSERT_EFF({log, [3, 1, 2], _Fun, _Local}, Effects1), ok. return_dequeue_delivery_limit_test(C) -> @@ -367,33 +551,27 @@ return_dequeue_delivery_limit_test(C) -> ?assertMatch(#{num_messages := 0}, rabbit_fifo:overview(State4)), ok. -return_non_existent_test(C) -> +return_non_existent_test(Config) -> Cid = {<<"cid">>, self()}, - {State0, _} = enq(C, 1, 1, second, test_init(test)), - % return non-existent - {_State2, _} = apply(meta(C, 3), rabbit_fifo:make_return(Cid, [99]), State0), + {State0, _} = enq(Config, 1, 1, second, test_init(test)), + % return non-existent, check it doesn't crash + {_State2, _} = apply(meta(Config, 3), rabbit_fifo:make_return(Cid, [99]), State0), ok. 
-return_checked_out_test(C) -> +return_checked_out_test(Config) -> Cid = {<<"cid">>, self()}, - {State0, _} = enq(C, 1, 1, first, test_init(test)), - {State1, [_Monitor, - {log, [1], Fun, _} - | _ ] - } = check_auto(C, Cid, 2, State0), - - Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - - [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun([Msg1]), + {State0, _} = enq(Config, 1, 1, first, test_init(test)), + {State1, #{key := CKey, + next_msg_id := MsgId}, Effects1} = + checkout(Config, ?LINE, Cid, 1, State0), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects1), % returning immediately checks out the same message again - {_, ok, [ - {log, [1], _, _} - % {send_msg, _, {delivery, _, [{_, _}]}, _}, - ]} = - apply(meta(C, 3), rabbit_fifo:make_return(Cid, [MsgId]), State1), + {_State, ok, Effects2} = + apply(meta(Config, 3), rabbit_fifo:make_return(CKey, [MsgId]), State1), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects2), ok. -return_checked_out_limit_test(C) -> +return_checked_out_limit_test(Config) -> Cid = {<<"cid">>, self()}, Init = init(#{name => test, queue_resource => rabbit_misc:r("/", queue, @@ -402,124 +580,173 @@ return_checked_out_limit_test(C) -> max_in_memory_length => 0, delivery_limit => 1}), Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State0, _} = enq(C, 1, 1, first, Init), - {State1, [_Monitor, - {log, [1], Fun1, _} - | _ ]} = check_auto(C, Cid, 2, State0), - [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), + {State0, _} = enq(Config, 1, 1, Msg1, Init), + {State1, #{key := CKey, + next_msg_id := MsgId}, Effects1} = + checkout(Config, ?LINE, Cid, 1, State0), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects1), % returning immediately checks out the same message again - {State2, ok, [ - {log, [1], Fun2, _} - ]} = - apply(meta(C, 3), rabbit_fifo:make_return(Cid, [MsgId]), State1), - [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _}] = Fun2([Msg1]), + {State2, ok, Effects2} = + apply(meta(Config, 3), rabbit_fifo:make_return(CKey, [MsgId]), State1), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects2), + {#rabbit_fifo{} = State, ok, _} = - apply(meta(C, 4), rabbit_fifo:make_return(Cid, [MsgId2]), State2), + apply(meta(Config, 4), rabbit_fifo:make_return(Cid, [MsgId + 1]), State2), ?assertEqual(0, rabbit_fifo:query_messages_total(State)), ok. 
-return_auto_checked_out_test(C) -> +return_auto_checked_out_test(Config) -> Cid = {<<"cid">>, self()}, Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State00, _} = enq(C, 1, 1, first, test_init(test)), - {State0, _} = enq(C, 2, 2, second, State00), + {State00, _} = enq(Config, 1, 1, first, test_init(test)), + {State0, _} = enq(Config, 2, 2, second, State00), % it first active then inactive as the consumer took on but cannot take % any more - {State1, [_Monitor, - {log, [1], Fun1, _} - ]} = check_auto(C, Cid, 2, State0), + {State1, #{key := CKey, + next_msg_id := MsgId}, + [_Monitor, {log, [1], Fun1, _} ]} = checkout(Config, ?LINE, Cid, 1, State0), [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), % return should include another delivery - {_State2, _, Effects} = apply(meta(C, 3), rabbit_fifo:make_return(Cid, [MsgId]), State1), + {State2, _, Effects} = apply(meta(Config, 3), + rabbit_fifo:make_return(CKey, [MsgId]), State1), [{log, [1], Fun2, _} | _] = Effects, - - [{send_msg, _, {delivery, _, [{_MsgId2, {#{delivery_count := 1}, first}}]}, _}] + [{send_msg, _, {delivery, _, [{_MsgId2, {#{acquired_count := 1}, first}}]}, _}] = Fun2([Msg1]), + + %% a down does not increment the return_count + {State3, _, _} = apply(meta(Config, ?LINE), {down, self(), noproc}, State2), + + {_State4, #{key := _CKey2, + next_msg_id := _}, + [_, {log, [1], Fun3, _} ]} = checkout(Config, ?LINE, Cid, 1, State3), + + [{send_msg, _, {delivery, _, [{_, {#{delivery_count := 1, + acquired_count := 2}, first}}]}, _}] + = Fun3([Msg1]), ok. -cancelled_checkout_empty_queue_test(C) -> +requeue_test(Config) -> Cid = {<<"cid">>, self()}, - {State1, _} = check_auto(C, Cid, 2, test_init(test)), + Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), + {State0, _} = enq(Config, 1, 1, first, test_init(test)), + % it first active then inactive as the consumer took on but cannot take + % any more + {State1, #{key := CKey, + next_msg_id := MsgId}, + [_Monitor, {log, [1], Fun1, _} ]} = checkout(Config, ?LINE, Cid, 1, State0), + [{send_msg, _, {delivery, _, [{MsgId, {H1, _}}]}, _}] = Fun1([Msg1]), + % return should include another delivery + [{append, Requeue, _}] = rabbit_fifo:make_requeue(CKey, {notify, 1, self()}, + [{MsgId, 1, H1, Msg1}], []), + {_State2, _, Effects} = apply(meta(Config, 3), Requeue, State1), + [{log, [_], Fun2, _} | _] = Effects, + [{send_msg, _, + {delivery, _, [{_MsgId2, {#{acquired_count := 1}, first}}]}, _}] + = Fun2([Msg1]), + ok. + +cancelled_checkout_empty_queue_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State1, #{key := _CKey, + next_msg_id := _NextMsgId}, _} = + checkout(Config, ?LINE, Cid, 1, test_init(test)),%% prefetch of 1 % cancelled checkout should clear out service_queue also, else we'd get a % build up of these - {State2, _, Effects} = apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), + {State2, _, _Effects} = apply(meta(Config, 3), + make_checkout(Cid, cancel, #{}), State1), ?assertEqual(0, map_size(State2#rabbit_fifo.consumers)), ?assertEqual(0, priority_queue:len(State2#rabbit_fifo.service_queue)), - ?ASSERT_EFF({release_cursor, _, _}, Effects), ok. 
-cancelled_checkout_out_test(C) -> +cancelled_checkout_out_test(Config) -> Cid = {<<"cid">>, self()}, - {State00, _} = enq(C, 1, 1, first, test_init(test)), - {State0, _} = enq(C, 2, 2, second, State00), - {State1, _} = check_auto(C, Cid, 3, State0),%% prefetch of 1 + {State00, _} = enq(Config, 1, 1, first, test_init(test)), + {State0, _} = enq(Config, 2, 2, second, State00), + {State1, #{key := CKey, + next_msg_id := NextMsgId}, _} = + checkout(Config, ?LINE, Cid, 1, State0),%% prefetch of 1 % cancelled checkout should not return pending messages to queue - {State2, _, _} = apply(meta(C, 4), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), - ?assertEqual(1, lqueue:len(State2#rabbit_fifo.messages)), + {State2, _, _} = apply(meta(Config, 4), + rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), + ?assertEqual(1, rabbit_fifo_q:len(State2#rabbit_fifo.messages)), ?assertEqual(0, lqueue:len(State2#rabbit_fifo.returns)), ?assertEqual(0, priority_queue:len(State2#rabbit_fifo.service_queue)), {State3, {dequeue, empty}} = - apply(meta(C, 5), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State2), + apply(meta(Config, 5), make_checkout(Cid, {dequeue, settled}, #{}), State2), %% settle {State4, ok, _} = - apply(meta(C, 6), rabbit_fifo:make_settle(Cid, [0]), State3), + apply(meta(Config, 6), rabbit_fifo:make_settle(CKey, [NextMsgId]), State3), {_State, _, [{log, [2], _Fun} | _]} = - apply(meta(C, 7), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State4), + apply(meta(Config, 7), make_checkout(Cid, {dequeue, settled}, #{}), State4), ok. -down_with_noproc_consumer_returns_unsettled_test(C) -> - Cid = {<<"down_consumer_returns_unsettled_test">>, self()}, - {State0, _} = enq(C, 1, 1, second, test_init(test)), - {State1, [{monitor, process, Pid} | _]} = check(C, Cid, 2, State0), - {State2, _, _} = apply(meta(C, 3), {down, Pid, noproc}, State1), - {_State, Effects} = check(C, Cid, 4, State2), +down_with_noproc_consumer_returns_unsettled_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State0, _} = enq(Config, 1, 1, second, test_init(test)), + {State1, #{key := CKey}, + [{monitor, process, Pid} | _]} = checkout(Config, ?LINE, Cid, 1, State0), + {State2, _, _} = apply(meta(Config, 3), {down, Pid, noproc}, State1), + {_State, #{key := CKey2}, Effects} = checkout(Config, ?LINE, Cid, 1, State2), + ?assertNotEqual(CKey, CKey2), ?ASSERT_EFF({monitor, process, _}, Effects), ok. -down_with_noconnection_marks_suspect_and_node_is_monitored_test(C) -> +removed_consumer_returns_unsettled_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State0, _} = enq(Config, 1, 1, second, test_init(test)), + {State1, #{key := CKey}, + [{monitor, process, _Pid} | _]} = checkout(Config, ?LINE, Cid, 1, State0), + Remove = rabbit_fifo:make_checkout(Cid, remove, #{}), + {State2, _, _} = apply(meta(Config, 3), Remove, State1), + {_State, #{key := CKey2}, Effects} = checkout(Config, ?LINE, Cid, 1, State2), + ?assertNotEqual(CKey, CKey2), + ?ASSERT_EFF({monitor, process, _}, Effects), + ok. 
+ +down_with_noconnection_marks_suspect_and_node_is_monitored_test(Config) -> Pid = spawn(fun() -> ok end), - Cid = {<<"down_with_noconnect">>, Pid}, + Cid = {?FUNCTION_NAME_B, Pid}, Self = self(), Node = node(Pid), - {State0, Effects0} = enq(C, 1, 1, second, test_init(test)), + {State0, Effects0} = enq(Config, 1, 1, second, test_init(test)), ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects0), - {State1, Effects1} = check_auto(C, Cid, 2, State0), - #consumer{credit = 0} = maps:get(Cid, State1#rabbit_fifo.consumers), + {State1, #{key := CKey}, Effects1} = checkout(Config, ?LINE, Cid, 1, State0), + #consumer{credit = 0} = maps:get(CKey, State1#rabbit_fifo.consumers), ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects1), % monitor both enqueuer and consumer % because we received a noconnection we now need to monitor the node - {State2a, _, _} = apply(meta(C, 3), {down, Pid, noconnection}, State1), + {State2a, _, _} = apply(meta(Config, 3), {down, Pid, noconnection}, State1), #consumer{credit = 1, checked_out = Ch, - status = suspected_down} = maps:get(Cid, State2a#rabbit_fifo.consumers), + status = suspected_down} = maps:get(CKey, State2a#rabbit_fifo.consumers), ?assertEqual(#{}, Ch), %% validate consumer has credit - {State2, _, Effects2} = apply(meta(C, 3), {down, Self, noconnection}, State2a), + {State2, _, Effects2} = apply(meta(Config, 3), {down, Self, noconnection}, State2a), ?ASSERT_EFF({monitor, node, _}, Effects2), ?assertNoEffect({demonitor, process, _}, Effects2), % when the node comes up we need to retry the process monitors for the % disconnected processes - {State3, _, Effects3} = apply(meta(C, 3), {nodeup, Node}, State2), - #consumer{status = up} = maps:get(Cid, State3#rabbit_fifo.consumers), + {State3, _, Effects3} = apply(meta(Config, 3), {nodeup, Node}, State2), + #consumer{status = up} = maps:get(CKey, State3#rabbit_fifo.consumers), % try to re-monitor the suspect processes ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects3), ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects3), ok. -down_with_noconnection_returns_unack_test(C) -> +down_with_noconnection_returns_unack_test(Config) -> Pid = spawn(fun() -> ok end), - Cid = {<<"down_with_noconnect">>, Pid}, + Cid = {?FUNCTION_NAME_B, Pid}, Msg = rabbit_fifo:make_enqueue(self(), 1, second), - {State0, _} = enq(C, 1, 1, second, test_init(test)), - ?assertEqual(1, lqueue:len(State0#rabbit_fifo.messages)), + {State0, _} = enq(Config, 1, 1, second, test_init(test)), + ?assertEqual(1, rabbit_fifo_q:len(State0#rabbit_fifo.messages)), ?assertEqual(0, lqueue:len(State0#rabbit_fifo.returns)), - {State1, {_, _}} = deq(C, 2, Cid, unsettled, Msg, State0), - ?assertEqual(0, lqueue:len(State1#rabbit_fifo.messages)), + {State1, {_, _}} = deq(Config, 2, Cid, unsettled, Msg, State0), + ?assertEqual(0, rabbit_fifo_q:len(State1#rabbit_fifo.messages)), ?assertEqual(0, lqueue:len(State1#rabbit_fifo.returns)), - {State2a, _, _} = apply(meta(C, 3), {down, Pid, noconnection}, State1), - ?assertEqual(0, lqueue:len(State2a#rabbit_fifo.messages)), + {State2a, _, _} = apply(meta(Config, 3), {down, Pid, noconnection}, State1), + ?assertEqual(0, rabbit_fifo_q:len(State2a#rabbit_fifo.messages)), ?assertEqual(1, lqueue:len(State2a#rabbit_fifo.returns)), ?assertMatch(#consumer{checked_out = Ch, status = suspected_down} @@ -527,49 +754,72 @@ down_with_noconnection_returns_unack_test(C) -> maps:get(Cid, State2a#rabbit_fifo.consumers)), ok. 
-down_with_noproc_enqueuer_is_cleaned_up_test(C) -> +down_with_noproc_enqueuer_is_cleaned_up_test(Config) -> State00 = test_init(test), Pid = spawn(fun() -> ok end), - {State0, _, Effects0} = apply(meta(C, 1), rabbit_fifo:make_enqueue(Pid, 1, first), State00), + {State0, _, Effects0} = apply(meta(Config, 1, ?LINE, {notify, 1, Pid}), + rabbit_fifo:make_enqueue(Pid, 1, first), State00), ?ASSERT_EFF({monitor, process, _}, Effects0), - {State1, _, _} = apply(meta(C, 3), {down, Pid, noproc}, State0), + {State1, _, _} = apply(meta(Config, 3), {down, Pid, noproc}, State0), % ensure there are no enqueuers ?assert(0 =:= maps:size(State1#rabbit_fifo.enqueuers)), ok. -discarded_message_without_dead_letter_handler_is_removed_test(C) -> - Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()}, - {State0, _} = enq(C, 1, 1, first, test_init(test)), - {State1, Effects1} = check_n(C, Cid, 2, 10, State0), +discarded_message_without_dead_letter_handler_is_removed_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State0, _} = enq(Config, 1, 1, first, test_init(test)), + {State1, #{key := CKey, + next_msg_id := MsgId}, Effects1} = + checkout(Config, ?LINE, Cid, 10, State0), ?ASSERT_EFF({log, [1], _Fun, _}, Effects1), - {_State2, _, Effects2} = apply(meta(C, 1), - rabbit_fifo:make_discard(Cid, [0]), State1), + {_State2, _, Effects2} = apply(meta(Config, 1), + rabbit_fifo:make_discard(CKey, [MsgId]), State1), ?ASSERT_NO_EFF({log, [1], _Fun, _}, Effects2), ok. -discarded_message_with_dead_letter_handler_emits_log_effect_test(C) -> - Cid = {<<"cid1">>, self()}, +discarded_message_with_dead_letter_handler_emits_log_effect_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, State00 = init(#{name => test, queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), max_in_memory_length => 0, dead_letter_handler => {at_most_once, {somemod, somefun, [somearg]}}}), - Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State0, _} = enq(C, 1, 1, first, State00), - {State1, Effects1} = check_n(C, Cid, 2, 10, State0), + + Mc = mk_mc(<<"first">>), + Msg1 = rabbit_fifo:make_enqueue(self(), 1, Mc), + {State0, _} = enq(Config, 1, 1, Mc, State00), + {State1, #{key := CKey, + next_msg_id := MsgId}, Effects1} = + checkout(Config, ?LINE, Cid, 10, State0), ?ASSERT_EFF({log, [1], _, _}, Effects1), - {_State2, _, Effects2} = apply(meta(C, 1), rabbit_fifo:make_discard(Cid, [0]), State1), + {_State2, _, Effects2} = apply(meta(Config, 1), + rabbit_fifo:make_discard(CKey, [MsgId]), State1), % assert mod call effect with appended reason and message {value, {log, [1], Fun}} = lists:search(fun (E) -> element(1, E) == log end, Effects2), - ?assertMatch([{mod_call,somemod,somefun,[somearg,rejected,[first]]}], Fun([Msg1])), + [{mod_call, somemod, somefun, [somearg, rejected, [McOut]]}] = Fun([Msg1]), + + ?assertEqual(undefined, mc:get_annotation(acquired_count, McOut)), + ?assertEqual(1, mc:get_annotation(delivery_count, McOut)), + + ok. + +enqueued_msg_with_delivery_count_test(Config) -> + State00 = init(#{name => test, + queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), + max_in_memory_length => 0, + dead_letter_handler => + {at_most_once, {somemod, somefun, [somearg]}}}), + Mc = mc:set_annotation(delivery_count, 2, mk_mc(<<"first">>)), + {#rabbit_fifo{messages = Msgs}, _} = enq(Config, 1, 1, Mc, State00), + ?assertMatch(?MSG(_, #{delivery_count := 2}), rabbit_fifo_q:get(Msgs)), ok. get_log_eff(Effs) -> {value, Log} = lists:search(fun (E) -> element(1, E) == log end, Effs), Log. 
-mixed_send_msg_and_log_effects_are_correctly_ordered_test(C) -> +mixed_send_msg_and_log_effects_are_correctly_ordered_test(Config) -> Cid = {cid(?FUNCTION_NAME), self()}, State00 = init(#{name => test, queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), @@ -579,12 +829,11 @@ mixed_send_msg_and_log_effects_are_correctly_ordered_test(C) -> {somemod, somefun, [somearg]}}}), %% enqueue two messages Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State0, _} = enq(C, 1, 1, first, State00), + {State0, _} = enq(Config, 1, 1, first, State00), Msg2 = rabbit_fifo:make_enqueue(self(), 2, snd), - {State1, _} = enq(C, 2, 2, snd, State0), + {State1, _} = enq(Config, 2, 2, snd, State0), - {_State2, Effects1} = check_n(C, Cid, 3, 10, State1), - ct:pal("Effects ~w", [Effects1]), + {_State2, _, Effects1} = checkout(Config, ?LINE, Cid, 10, State1), {log, [1, 2], Fun, _} = get_log_eff(Effects1), [{send_msg, _, {delivery, _Cid, [{0,{0,first}},{1,{0,snd}}]}, [local,ra_event]}] = Fun([Msg1, Msg2]), @@ -596,17 +845,17 @@ mixed_send_msg_and_log_effects_are_correctly_ordered_test(C) -> ?ASSERT_NO_EFF({send_msg, _, _, _}, Effects1), ok. -tick_test(C) -> +tick_test(Config) -> Cid = {<<"c">>, self()}, Cid2 = {<<"c2">>, self()}, Msg1 = rabbit_fifo:make_enqueue(self(), 1, <<"fst">>), Msg2 = rabbit_fifo:make_enqueue(self(), 2, <<"snd">>), - {S0, _} = enq(C, 1, 1, <<"fst">>, test_init(?FUNCTION_NAME)), - {S1, _} = enq(C, 2, 2, <<"snd">>, S0), - {S2, {MsgId, _}} = deq(C, 3, Cid, unsettled, Msg1, S1), - {S3, {_, _}} = deq(C, 4, Cid2, unsettled, Msg2, S2), - {S4, _, _} = apply(meta(C, 5), rabbit_fifo:make_return(Cid, [MsgId]), S3), + {S0, _} = enq(Config, 1, 1, <<"fst">>, test_init(?FUNCTION_NAME)), + {S1, _} = enq(Config, 2, 2, <<"snd">>, S0), + {S2, {MsgId, _}} = deq(Config, 3, Cid, unsettled, Msg1, S1), + {S3, {_, _}} = deq(Config, 4, Cid2, unsettled, Msg2, S2), + {S4, _, _} = apply(meta(Config, 5), rabbit_fifo:make_return(Cid, [MsgId]), S3), [{aux, {handle_tick, [#resource{}, @@ -623,61 +872,38 @@ tick_test(C) -> ok. -delivery_query_returns_deliveries_test(C) -> +delivery_query_returns_deliveries_test(Config) -> Tag = atom_to_binary(?FUNCTION_NAME, utf8), Cid = {Tag, self()}, - Commands = [ - rabbit_fifo:make_checkout(Cid, {auto, 5, simple_prefetch}, #{}), - rabbit_fifo:make_enqueue(self(), 1, one), - rabbit_fifo:make_enqueue(self(), 2, two), - rabbit_fifo:make_enqueue(self(), 3, tre), - rabbit_fifo:make_enqueue(self(), 4, for) + CKey = ?LINE, + Entries = [ + {CKey, make_checkout(Cid, {auto, {simple_prefetch, 5}}, #{})}, + {?LINE, rabbit_fifo:make_enqueue(self(), 1, one)}, + {?LINE, rabbit_fifo:make_enqueue(self(), 2, two)}, + {?LINE, rabbit_fifo:make_enqueue(self(), 3, tre)}, + {?LINE, rabbit_fifo:make_enqueue(self(), 4, for)} ], - Indexes = lists:seq(1, length(Commands)), - Entries = lists:zip(Indexes, Commands), - {State, _Effects} = run_log(C, test_init(help), Entries), + {State, _Effects} = run_log(Config, test_init(help), Entries), % 3 deliveries are returned - [{0, {_, _}}] = rabbit_fifo:get_checked_out(Cid, 0, 0, State), + [{0, {_, _}}] = rabbit_fifo:get_checked_out(CKey, 0, 0, State), [_, _, _] = rabbit_fifo:get_checked_out(Cid, 1, 3, State), ok. 
-duplicate_delivery_test(C) -> - {State0, _} = enq(C, 1, 1, first, test_init(test)), - {#rabbit_fifo{messages = Messages} = State, _} = enq(C, 2, 1, first, State0), +duplicate_delivery_test(Config) -> + {State0, _} = enq(Config, 1, 1, first, test_init(test)), + {#rabbit_fifo{messages = Messages} = State, _} = + enq(Config, 2, 1, first, State0), ?assertEqual(1, rabbit_fifo:query_messages_total(State)), - ?assertEqual(1, lqueue:len(Messages)), - ok. - -state_enter_file_handle_leader_reservation_test(_) -> - S0 = init(#{name => the_name, - queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), - become_leader_handler => {m, f, [a]}}), - - Resource = {resource, <<"/">>, queue, <<"test">>}, - Effects = rabbit_fifo:state_enter(leader, S0), - ?assertMatch([{mod_call, m, f, [a, the_name]}, - _Timer, - {mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]} - | _], Effects), - ok. - -state_enter_file_handle_other_reservation_test(_) -> - S0 = init(#{name => the_name, - queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}), - Effects = rabbit_fifo:state_enter(other, S0), - ?assertEqual([ - {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []} - ], - Effects), + ?assertEqual(1, rabbit_fifo_q:len(Messages)), ok. -state_enter_monitors_and_notifications_test(C) -> +state_enter_monitors_and_notifications_test(Config) -> Oth = spawn(fun () -> ok end), - {State0, _} = enq(C, 1, 1, first, test_init(test)), + {State0, _} = enq(Config, 1, 1, first, test_init(test)), Cid = {<<"adf">>, self()}, OthCid = {<<"oth">>, Oth}, - {State1, _} = check(C, Cid, 2, State0), - {State, _} = check(C, OthCid, 3, State1), + {State1, _, _} = checkout(Config, ?LINE, Cid, 1, State0), + {State, _, _} = checkout(Config, ?LINE, OthCid, 1, State1), Self = self(), Effects = rabbit_fifo:state_enter(leader, State), @@ -695,47 +921,48 @@ state_enter_monitors_and_notifications_test(C) -> ?ASSERT_EFF({monitor, process, _}, Effects), ok. -purge_test(C) -> +purge_test(Config) -> Cid = {<<"purge_test">>, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, {purge, 1}, _} = apply(meta(C, 2), rabbit_fifo:make_purge(), State1), - {State3, _} = enq(C, 3, 2, second, State2), + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, {purge, 1}, _} = apply(meta(Config, 2), rabbit_fifo:make_purge(), State1), + {State3, _} = enq(Config, 3, 2, second, State2), % get returns a reply value {_State4, _, Effs} = - apply(meta(C, 4), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State3), + apply(meta(Config, 4), make_checkout(Cid, {dequeue, unsettled}, #{}), State3), ?ASSERT_EFF({log, [3], _}, Effs), ok. 
-purge_with_checkout_test(C) -> +purge_with_checkout_test(Config) -> Cid = {<<"purge_test">>, self()}, - {State0, _} = check_auto(C, Cid, 1, test_init(?FUNCTION_NAME)), - {State1, _} = enq(C, 2, 1, <<"first">>, State0), - {State2, _} = enq(C, 3, 2, <<"second">>, State1), + {State0, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, 1, + test_init(?FUNCTION_NAME)), + {State1, _} = enq(Config, 2, 1, <<"first">>, State0), + {State2, _} = enq(Config, 3, 2, <<"second">>, State1), %% assert message bytes are non zero ?assert(State2#rabbit_fifo.msg_bytes_checkout > 0), ?assert(State2#rabbit_fifo.msg_bytes_enqueue > 0), - {State3, {purge, 1}, _} = apply(meta(C, 2), rabbit_fifo:make_purge(), State2), + {State3, {purge, 1}, _} = apply(meta(Config, 2), rabbit_fifo:make_purge(), State2), ?assert(State2#rabbit_fifo.msg_bytes_checkout > 0), ?assertEqual(0, State3#rabbit_fifo.msg_bytes_enqueue), ?assertEqual(1, rabbit_fifo:query_messages_total(State3)), - #consumer{checked_out = Checked} = maps:get(Cid, State3#rabbit_fifo.consumers), + #consumer{checked_out = Checked} = maps:get(CKey, State3#rabbit_fifo.consumers), ?assertEqual(1, maps:size(Checked)), ok. -down_noproc_returns_checked_out_in_order_test(C) -> +down_noproc_returns_checked_out_in_order_test(Config) -> S0 = test_init(?FUNCTION_NAME), %% enqueue 100 S1 = lists:foldl(fun (Num, FS0) -> - {FS, _} = enq(C, Num, Num, Num, FS0), + {FS, _} = enq(Config, Num, Num, Num, FS0), FS end, S0, lists:seq(1, 100)), - ?assertEqual(100, lqueue:len(S1#rabbit_fifo.messages)), + ?assertEqual(100, rabbit_fifo_q:len(S1#rabbit_fifo.messages)), Cid = {<<"cid">>, self()}, - {S2, _} = check(C, Cid, 101, 1000, S1), - #consumer{checked_out = Checked} = maps:get(Cid, S2#rabbit_fifo.consumers), + {S2, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, 1000, S1), + #consumer{checked_out = Checked} = maps:get(CKey, S2#rabbit_fifo.consumers), ?assertEqual(100, maps:size(Checked)), %% simulate down - {S, _, _} = apply(meta(C, 102), {down, self(), noproc}, S2), + {S, _, _} = apply(meta(Config, 102), {down, self(), noproc}, S2), Returns = lqueue:to_list(S#rabbit_fifo.returns), ?assertEqual(100, length(Returns)), ?assertEqual(0, maps:size(S#rabbit_fifo.consumers)), @@ -743,30 +970,30 @@ down_noproc_returns_checked_out_in_order_test(C) -> ?assertEqual(lists:sort(Returns), Returns), ok. 
-down_noconnection_returns_checked_out_test(C) -> +down_noconnection_returns_checked_out_test(Config) -> S0 = test_init(?FUNCTION_NAME), NumMsgs = 20, S1 = lists:foldl(fun (Num, FS0) -> - {FS, _} = enq(C, Num, Num, Num, FS0), + {FS, _} = enq(Config, Num, Num, Num, FS0), FS end, S0, lists:seq(1, NumMsgs)), - ?assertEqual(NumMsgs, lqueue:len(S1#rabbit_fifo.messages)), + ?assertEqual(NumMsgs, rabbit_fifo_q:len(S1#rabbit_fifo.messages)), Cid = {<<"cid">>, self()}, - {S2, _} = check(C, Cid, 101, 1000, S1), - #consumer{checked_out = Checked} = maps:get(Cid, S2#rabbit_fifo.consumers), + {S2, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, 1000, S1), + #consumer{checked_out = Checked} = maps:get(CKey, S2#rabbit_fifo.consumers), ?assertEqual(NumMsgs, maps:size(Checked)), %% simulate down - {S, _, _} = apply(meta(C, 102), {down, self(), noconnection}, S2), + {S, _, _} = apply(meta(Config, 102), {down, self(), noconnection}, S2), Returns = lqueue:to_list(S#rabbit_fifo.returns), ?assertEqual(NumMsgs, length(Returns)), ?assertMatch(#consumer{checked_out = Ch} when map_size(Ch) == 0, - maps:get(Cid, S#rabbit_fifo.consumers)), + maps:get(CKey, S#rabbit_fifo.consumers)), %% validate returns are in order ?assertEqual(lists:sort(Returns), Returns), ok. -single_active_consumer_basic_get_test(C) -> +single_active_consumer_basic_get_test(Config) -> Cid = {?FUNCTION_NAME, self()}, State0 = init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r("/", queue, @@ -775,27 +1002,28 @@ single_active_consumer_basic_get_test(C) -> single_active_consumer_on => true}), ?assertEqual(single_active, State0#rabbit_fifo.cfg#cfg.consumer_strategy), ?assertEqual(0, map_size(State0#rabbit_fifo.consumers)), - {State1, _} = enq(C, 1, 1, first, State0), + {State1, _} = enq(Config, 1, 1, first, State0), {_State, {error, {unsupported, single_active_consumer}}} = - apply(meta(C, 2), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + apply(meta(Config, 2), make_checkout(Cid, {dequeue, unsettled}, #{}), State1), ok. 
-single_active_consumer_revive_test(C) -> +single_active_consumer_revive_test(Config) -> S0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), single_active_consumer_on => true}), Cid1 = {<<"one">>, self()}, Cid2 = {<<"two">>, self()}, - {S1, _} = check_auto(C, Cid1, 1, S0), - {S2, _} = check_auto(C, Cid2, 2, S1), - {S3, _} = enq(C, 3, 1, first, S2), + {S1, #{key := CKey1}, _} = checkout(Config, ?LINE, Cid1, 1, S0), + {S2, #{key := _CKey2}, _} = checkout(Config, ?LINE, Cid2, 1, S1), + {S3, _} = enq(Config, 3, 1, first, S2), %% cancel the active consumer whilst it has a message pending - {S4, _, _} = rabbit_fifo:apply(meta(C, 4), make_checkout(Cid1, cancel, #{}), S3), - {S5, _} = check_auto(C, Cid1, 5, S4), + {S4, _, _} = rabbit_fifo:apply(meta(Config, ?LINE), + make_checkout(Cid1, cancel, #{}), S3), + %% the revived consumer should have the original key + {S5, #{key := CKey1}, _} = checkout(Config, ?LINE, Cid1, 1, S4), - ct:pal("S5 ~tp", [S5]), ?assertEqual(1, rabbit_fifo:query_messages_checked_out(S5)), ?assertEqual(1, rabbit_fifo:query_messages_total(S5)), Consumers = S5#rabbit_fifo.consumers, @@ -806,12 +1034,12 @@ single_active_consumer_revive_test(C) -> ?assertEqual(1, map_size(Up)), %% settle message and ensure it is handled correctly - {S6, _} = settle(C, Cid1, 6, 0, S5), + {S6, _} = settle(Config, CKey1, 6, 0, S5), ?assertEqual(0, rabbit_fifo:query_messages_checked_out(S6)), ?assertEqual(0, rabbit_fifo:query_messages_total(S6)), %% requeue message and check that is handled - {S6b, _} = return(C, Cid1, 6, 0, S5), + {S6b, _} = return(Config, CKey1, 6, 0, S5), ?assertEqual(1, rabbit_fifo:query_messages_checked_out(S6b)), ?assertEqual(1, rabbit_fifo:query_messages_total(S6b)), %% @@ -824,22 +1052,21 @@ single_active_consumer_revive_test(C) -> single_active_consumer_revive_2_test(C) -> S0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), single_active_consumer_on => true}), Cid1 = {<<"one">>, self()}, - {S1, _} = check_auto(C, Cid1, 1, S0), + {S1, #{key := CKey}, _} = checkout(C, ?LINE, Cid1, 1, S0), {S2, _} = enq(C, 3, 1, first, S1), %% cancel the active consumer whilst it has a message pending {S3, _, _} = rabbit_fifo:apply(meta(C, 4), make_checkout(Cid1, cancel, #{}), S2), - {S4, _} = check_auto(C, Cid1, 5, S3), + {S4, #{key := CKey}, _} = checkout(C, ?LINE, Cid1, 5, S3), ?assertEqual(1, rabbit_fifo:query_consumer_count(S4)), ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(S4))), ?assertEqual(1, rabbit_fifo:query_messages_total(S4)), ?assertEqual(1, rabbit_fifo:query_messages_checked_out(S4)), - ok. 
-single_active_consumer_test(C) -> +single_active_consumer_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), @@ -849,62 +1076,62 @@ single_active_consumer_test(C) -> ?assertEqual(0, map_size(State0#rabbit_fifo.consumers)), % adding some consumers - AddConsumer = fun(CTag, State) -> - {NewState, _, _} = apply( - meta(C, 1), - make_checkout({CTag, self()}, - {once, 1, simple_prefetch}, - #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, - [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]), C1 = {<<"ctag1">>, self()}, C2 = {<<"ctag2">>, self()}, C3 = {<<"ctag3">>, self()}, C4 = {<<"ctag4">>, self()}, + CK1 = ?LINE, + CK2 = ?LINE, + CK3 = ?LINE, + CK4 = ?LINE, + Entries = [ + {CK1, make_checkout(C1, {once, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {once, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {once, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {once, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), % the first registered consumer is the active one, the others are waiting ?assertEqual(1, map_size(State1#rabbit_fifo.consumers)), - ?assertMatch(#{C1 := _}, State1#rabbit_fifo.consumers), + ?assertMatch(#{CK1 := _}, State1#rabbit_fifo.consumers), ?assertMatch(#{single_active_consumer_id := C1, single_active_num_waiting_consumers := 3}, rabbit_fifo:overview(State1)), ?assertEqual(3, length(rabbit_fifo:query_waiting_consumers(State1))), - ?assertNotEqual(false, lists:keyfind(C2, 1, rabbit_fifo:query_waiting_consumers(State1))), - ?assertNotEqual(false, lists:keyfind(C3, 1, rabbit_fifo:query_waiting_consumers(State1))), - ?assertNotEqual(false, lists:keyfind(C4, 1, rabbit_fifo:query_waiting_consumers(State1))), + ?assertNotEqual(false, lists:keyfind(CK2, 1, rabbit_fifo:query_waiting_consumers(State1))), + ?assertNotEqual(false, lists:keyfind(CK3, 1, rabbit_fifo:query_waiting_consumers(State1))), + ?assertNotEqual(false, lists:keyfind(CK4, 1, rabbit_fifo:query_waiting_consumers(State1))), % cancelling a waiting consumer - {State2, _, Effects1} = apply(meta(C, 2), + {State2, _, Effects1} = apply(meta(Config, ?LINE), make_checkout(C3, cancel, #{}), State1), % the active consumer should still be in place ?assertEqual(1, map_size(State2#rabbit_fifo.consumers)), - ?assertMatch(#{C1 := _}, State2#rabbit_fifo.consumers), + ?assertMatch(#{CK1 := _}, State2#rabbit_fifo.consumers), % the cancelled consumer has been removed from waiting consumers ?assertMatch(#{single_active_consumer_id := C1, single_active_num_waiting_consumers := 2}, rabbit_fifo:overview(State2)), ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State2))), - ?assertNotEqual(false, lists:keyfind(C2, 1, rabbit_fifo:query_waiting_consumers(State2))), - ?assertNotEqual(false, lists:keyfind(C4, 1, rabbit_fifo:query_waiting_consumers(State2))), + ?assertNotEqual(false, lists:keyfind(CK2, 1, rabbit_fifo:query_waiting_consumers(State2))), + ?assertNotEqual(false, lists:keyfind(CK4, 1, rabbit_fifo:query_waiting_consumers(State2))), % there are some effects to unregister the consumer ?ASSERT_EFF({mod_call, rabbit_quorum_queue, cancel_consumer_handler, [_, Con]}, Con == C3, Effects1), % cancelling the active consumer - {State3, _, Effects2} = apply(meta(C, 3), + {State3, _, Effects2} = apply(meta(Config, ?LINE), make_checkout(C1, cancel, #{}), State2), % the second registered consumer is now the active one ?assertEqual(1, 
map_size(State3#rabbit_fifo.consumers)), - ?assertMatch(#{C2 := _}, State3#rabbit_fifo.consumers), + ?assertMatch(#{CK2 := _}, State3#rabbit_fifo.consumers), % the new active consumer is no longer in the waiting list ?assertEqual(1, length(rabbit_fifo:query_waiting_consumers(State3))), - ?assertNotEqual(false, lists:keyfind(C4, 1, + ?assertNotEqual(false, lists:keyfind(CK4, 1, rabbit_fifo:query_waiting_consumers(State3))), %% should have a cancel consumer handler mod_call effect and %% an active new consumer effect @@ -914,12 +1141,12 @@ single_active_consumer_test(C) -> update_consumer_handler, _}, Effects2), % cancelling the active consumer - {State4, _, Effects3} = apply(meta(C, 4), + {State4, _, Effects3} = apply(meta(Config, ?LINE), make_checkout(C2, cancel, #{}), State3), % the last waiting consumer became the active one ?assertEqual(1, map_size(State4#rabbit_fifo.consumers)), - ?assertMatch(#{C4 := _}, State4#rabbit_fifo.consumers), + ?assertMatch(#{CK4 := _}, State4#rabbit_fifo.consumers), % the waiting consumer list is now empty ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(State4))), % there are some effects to unregister the consumer and @@ -930,7 +1157,7 @@ single_active_consumer_test(C) -> update_consumer_handler, _}, Effects3), % cancelling the last consumer - {State5, _, Effects4} = apply(meta(C, 5), + {State5, _, Effects4} = apply(meta(Config, ?LINE), make_checkout(C4, cancel, #{}), State4), % no active consumer anymore @@ -943,33 +1170,34 @@ single_active_consumer_test(C) -> ok. -single_active_consumer_cancel_consumer_when_channel_is_down_test(C) -> +single_active_consumer_cancel_consumer_when_channel_is_down_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), - release_cursor_interval => 0, - single_active_consumer_on => true}), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), - DummyFunction = fun() -> ok end, - Pid1 = spawn(DummyFunction), - Pid2 = spawn(DummyFunction), - Pid3 = spawn(DummyFunction), + Pid1 = spawn(fun() -> ok end), + Pid2 = spawn(fun() -> ok end), + Pid3 = spawn(fun() -> ok end), + C1 = {<<"ctag1">>, Pid1}, + C2 = {<<"ctag2">>, Pid2}, + C3 = {<<"ctag3">>, Pid2}, + C4 = {<<"ctag4">>, Pid3}, + CK1 = ?LINE, + CK2 = ?LINE, + CK3 = ?LINE, + CK4 = ?LINE, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}}), + % the channel of the active consumer goes down + {?LINE, {down, Pid1, noproc}} + ], + {State2, Effects} = run_log(Config, State0, Entries), - [C1, C2, C3, C4] = Consumers = - [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, - {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}], - % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = apply( - meta(C, 1), - make_checkout({CTag, ChannelId}, {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, Consumers), - - % the channel of the active consumer goes down - {State2, _, Effects} = apply(meta(C, 2), {down, Pid1, noproc}, State1), + % {State2, _, Effects} = apply(meta(Config, 2), {down, Pid1, noproc}, State1), % fell back to another consumer ?assertEqual(1, map_size(State2#rabbit_fifo.consumers)), % 
there are still waiting consumers @@ -981,8 +1209,11 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(C) -> ?ASSERT_EFF({mod_call, rabbit_quorum_queue, update_consumer_handler, _}, Effects), + ct:pal("STate2 ~p", [State2]), % the channel of the active consumer and a waiting consumer goes down - {State3, _, Effects2} = apply(meta(C, 3), {down, Pid2, noproc}, State2), + {State3, _, Effects2} = apply(meta(Config, ?LINE), {down, Pid2, noproc}, State2), + ct:pal("STate3 ~p", [State3]), + ct:pal("Effects2 ~p", [Effects2]), % fell back to another consumer ?assertEqual(1, map_size(State3#rabbit_fifo.consumers)), % no more waiting consumer @@ -996,7 +1227,8 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(C) -> update_consumer_handler, _}, Effects2), % the last channel goes down - {State4, _, Effects3} = apply(meta(C, 4), {down, Pid3, doesnotmatter}, State3), + {State4, _, Effects3} = apply(meta(Config, ?LINE), + {down, Pid3, doesnotmatter}, State3), % no more consumers ?assertEqual(0, map_size(State4#rabbit_fifo.consumers)), ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(State4))), @@ -1006,33 +1238,22 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(C) -> ok. -single_active_returns_messages_on_noconnection_test(C) -> - R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), +single_active_returns_messages_on_noconnection_test(Config) -> + R = rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), State0 = init(#{name => ?FUNCTION_NAME, queue_resource => R, release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = meta(C, 1), - Nodes = [n1], - ConsumerIds = [{_, DownPid}] = - [begin - B = atom_to_binary(N, utf8), - {<<"ctag_", B/binary>>, - test_util:fake_pid(N)} - end || N <- Nodes], % adding some consumers - State1 = lists:foldl( - fun(CId, Acc0) -> - {Acc, _, _} = - apply(Meta, - make_checkout(CId, - {auto, 1, simple_prefetch}, #{}), - Acc0), - Acc - end, State0, ConsumerIds), - {State2, _} = enq(C, 4, 1, msg1, State1), + {CK1, {_, DownPid} = C1} = {?LINE, {?LINE_B, test_util:fake_pid(n1)}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), + {State2, _} = enq(Config, 4, 1, msg1, State1), % simulate node goes down - {State3, _, _} = apply(meta(C, 5), {down, DownPid, noconnection}, State2), + {State3, _, _} = apply(meta(Config, ?LINE), {down, DownPid, noconnection}, State2), + ct:pal("state3 ~p", [State3]), %% assert the consumer is up ?assertMatch([_], lqueue:to_list(State3#rabbit_fifo.returns)), ?assertMatch([{_, #consumer{checked_out = Checked, @@ -1042,56 +1263,47 @@ single_active_returns_messages_on_noconnection_test(C) -> ok. 
-single_active_consumer_replaces_consumer_when_down_noconnection_test(C) -> +single_active_consumer_replaces_consumer_when_down_noconnection_test(Config) -> R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), State0 = init(#{name => ?FUNCTION_NAME, queue_resource => R, release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = meta(C, 1), - Nodes = [n1, n2, node()], - ConsumerIds = [C1 = {_, DownPid}, C2, _C3] = - [begin - B = atom_to_binary(N, utf8), - {<<"ctag_", B/binary>>, - test_util:fake_pid(N)} - end || N <- Nodes], - % adding some consumers - State1a = lists:foldl( - fun(CId, Acc0) -> - {Acc, _, _} = - apply(Meta, - make_checkout(CId, - {once, 1, simple_prefetch}, #{}), - Acc0), - Acc - end, State0, ConsumerIds), + {CK1, {_, DownPid} = C1} = {?LINE, {?LINE_B, test_util:fake_pid(n1)}}, + {CK2, C2} = {?LINE, {?LINE_B, test_util:fake_pid(n2)}}, + {CK3, C3} = {?LINE, {?LINE_B, test_util:fake_pid(n3)}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {?LINE, rabbit_fifo:make_enqueue(self(), 1, msg)} + ], + {State1, _} = run_log(Config, State0, Entries), %% assert the consumer is up - ?assertMatch(#{C1 := #consumer{status = up}}, - State1a#rabbit_fifo.consumers), - - {State1, _} = enq(C, 10, 1, msg, State1a), + ?assertMatch(#{CK1 := #consumer{status = up}}, + State1#rabbit_fifo.consumers), % simulate node goes down - {State2, _, _} = apply(meta(C, 5), {down, DownPid, noconnection}, State1), + {State2, _, _} = apply(meta(Config, ?LINE), + {down, DownPid, noconnection}, State1), %% assert a new consumer is in place and it is up - ?assertMatch([{C2, #consumer{status = up, - checked_out = Ch}}] + ?assertMatch([{CK2, #consumer{status = up, + checked_out = Ch}}] when map_size(Ch) == 1, maps:to_list(State2#rabbit_fifo.consumers)), %% the disconnected consumer has been returned to waiting - ?assert(lists:any(fun ({Con,_}) -> Con =:= C1 end, + ?assert(lists:any(fun ({Con, _}) -> Con =:= CK1 end, rabbit_fifo:query_waiting_consumers(State2))), ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State2))), % simulate node comes back up - {State3, _, _} = apply(meta(C, 2), {nodeup, node(DownPid)}, State2), + {State3, _, _} = apply(meta(Config, 2), {nodeup, node(DownPid)}, State2), %% the consumer is still active and the same as before - ?assertMatch([{C2, #consumer{status = up}}], + ?assertMatch([{CK2, #consumer{status = up}}], maps:to_list(State3#rabbit_fifo.consumers)), % the waiting consumers should be un-suspected ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State3))), @@ -1100,192 +1312,167 @@ single_active_consumer_replaces_consumer_when_down_noconnection_test(C) -> end, rabbit_fifo:query_waiting_consumers(State3)), ok. 
-single_active_consumer_all_disconnected_test(C) -> +single_active_consumer_all_disconnected_test(Config) -> R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), State0 = init(#{name => ?FUNCTION_NAME, queue_resource => R, release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = meta(C, 1), - Nodes = [n1, n2], - ConsumerIds = [C1 = {_, C1Pid}, C2 = {_, C2Pid}] = - [begin - B = atom_to_binary(N, utf8), - {<<"ctag_", B/binary>>, - test_util:fake_pid(N)} - end || N <- Nodes], - % adding some consumers - State1 = lists:foldl( - fun(CId, Acc0) -> - {Acc, _, _} = - apply(Meta, - make_checkout(CId, - {once, 1, simple_prefetch}, #{}), - Acc0), - Acc - end, State0, ConsumerIds), - %% assert the consumer is up - ?assertMatch(#{C1 := #consumer{status = up}}, State1#rabbit_fifo.consumers), - - % simulate node goes down - {State2, _, _} = apply(meta(C, 5), {down, C1Pid, noconnection}, State1), - %% assert the consumer fails over to the consumer on n2 - ?assertMatch(#{C2 := #consumer{status = up}}, State2#rabbit_fifo.consumers), - {State3, _, _} = apply(meta(C, 6), {down, C2Pid, noconnection}, State2), - %% assert these no active consumer after both nodes are maked as down - ?assertMatch([], maps:to_list(State3#rabbit_fifo.consumers)), - %% n2 comes back - {State4, _, _} = apply(meta(C, 7), {nodeup, node(C2Pid)}, State3), - %% ensure n2 is the active consumer as this node as been registered - %% as up again - ?assertMatch([{{<<"ctag_n2">>, _}, #consumer{status = up, - credit = 1}}], - maps:to_list(State4#rabbit_fifo.consumers)), - ok. - -single_active_consumer_state_enter_leader_include_waiting_consumers_test(C) -> + {CK1, {_, C1Pid} = C1} = {?LINE, {?LINE_B, test_util:fake_pid(n1)}}, + {CK2, {_, C2Pid} = C2} = {?LINE, {?LINE_B, test_util:fake_pid(n2)}}, + Entries = + [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}}), + {?LINE, {down, C1Pid, noconnection}}, + ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}}}), + {?LINE, {down, C2Pid, noconnection}}, + ?ASSERT(#rabbit_fifo{consumers = C} when map_size(C) == 0), + {?LINE, {nodeup, node(C2Pid)}}, + ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up, + credit = 1}}}) + ], + {_State1, _} = run_log(Config, State0, Entries), + ok. 
+ +single_active_consumer_state_enter_leader_include_waiting_consumers_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => - rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), - DummyFunction = fun() -> ok end, - Pid1 = spawn(DummyFunction), - Pid2 = spawn(DummyFunction), - Pid3 = spawn(DummyFunction), - - Meta = meta(C, 1), - % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = apply( - Meta, - make_checkout({CTag, ChannelId}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, [{<<"ctag1">>, Pid1}, - {<<"ctag2">>, Pid2}, - {<<"ctag3">>, Pid2}, - {<<"ctag4">>, Pid3}]), - + Pid1 = spawn(fun() -> ok end), + Pid2 = spawn(fun() -> ok end), + Pid3 = spawn(fun() -> ok end), + C1 = {<<"ctag1">>, Pid1}, + C2 = {<<"ctag2">>, Pid2}, + C3 = {<<"ctag3">>, Pid2}, + C4 = {<<"ctag4">>, Pid3}, + CK1 = ?LINE, + CK2 = ?LINE, + CK3 = ?LINE, + CK4 = ?LINE, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), Effects = rabbit_fifo:state_enter(leader, State1), %% 2 effects for each consumer process (channel process), 1 effect for the node, - %% 1 effect for file handle reservation - ?assertEqual(2 * 3 + 1 + 1 + 1 + 1, length(Effects)). + ?assertEqual(2 * 3 + 1 + 1 + 1, length(Effects)). -single_active_consumer_state_enter_eol_include_waiting_consumers_test(C) -> - Resource = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), +single_active_consumer_state_enter_eol_include_waiting_consumers_test(Config) -> + Resource = rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), State0 = init(#{name => ?FUNCTION_NAME, queue_resource => Resource, release_cursor_interval => 0, single_active_consumer_on => true}), - DummyFunction = fun() -> ok end, - Pid1 = spawn(DummyFunction), - Pid2 = spawn(DummyFunction), - Pid3 = spawn(DummyFunction), - - Meta = meta(C, 1), - % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = apply( - Meta, - make_checkout({CTag, ChannelId}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, - [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, - {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]), - + Pid1 = spawn(fun() -> ok end), + Pid2 = spawn(fun() -> ok end), + Pid3 = spawn(fun() -> ok end), + {CK1, C1} = {?LINE, {?LINE_B, Pid1}}, + {CK2, C2} = {?LINE, {?LINE_B, Pid2}}, + {CK3, C3} = {?LINE, {?LINE_B, Pid2}}, + {CK4, C4} = {?LINE, {?LINE_B, Pid3}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), Effects = rabbit_fifo:state_enter(eol, State1), %% 1 effect for each consumer process (channel process), - %% 1 effect for file handle reservation %% 1 effect for eol to handle rabbit_fifo_usage entries - ?assertEqual(5, length(Effects)). + ?assertEqual(4, length(Effects)), + ok. 
-query_consumers_test(C) -> +query_consumers_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), release_cursor_interval => 0, single_active_consumer_on => false}), - % adding some consumers - AddConsumer = fun(CTag, State) -> - {NewState, _, _} = apply( - meta(C, 1), - make_checkout({CTag, self()}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + {CK3, C3} = {?LINE, {?LINE_B, self()}}, + {CK4, C4} = {?LINE, {?LINE_B, self()}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), Consumers0 = State1#rabbit_fifo.consumers, - Consumer = maps:get({<<"ctag2">>, self()}, Consumers0), - Consumers1 = maps:put({<<"ctag2">>, self()}, - Consumer#consumer{status = suspected_down}, Consumers0), + Consumer = maps:get(CK2, Consumers0), + Consumers1 = maps:put(CK2, Consumer#consumer{status = suspected_down}, + Consumers0), State2 = State1#rabbit_fifo{consumers = Consumers1}, ?assertEqual(3, rabbit_fifo:query_consumer_count(State2)), Consumers2 = rabbit_fifo:query_consumers(State2), ?assertEqual(4, maps:size(Consumers2)), - maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) -> - ?assertEqual(self(), Pid), - case Tag of - <<"ctag2">> -> - ?assertNot(Active), - ?assertEqual(suspected_down, ActivityStatus); - _ -> - ?assert(Active), - ?assertEqual(up, ActivityStatus) - end - end, [], Consumers2). - -query_consumers_when_single_active_consumer_is_on_test(C) -> + maps:fold(fun(Key, {Pid, _Tag, _, _, Active, ActivityStatus, _, _}, _Acc) -> + ?assertEqual(self(), Pid), + case Key of + CK2 -> + ?assertNot(Active), + ?assertEqual(suspected_down, ActivityStatus); + _ -> + ?assert(Active), + ?assertEqual(up, ActivityStatus) + end + end, [], Consumers2), + ok. 
+ +query_consumers_when_single_active_consumer_is_on_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = meta(C, 1), - % adding some consumers - AddConsumer = fun(CTag, State) -> - {NewState, _, _} = apply( - Meta, - make_checkout({CTag, self()}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + {CK3, C3} = {?LINE, {?LINE_B, self()}}, + {CK4, C4} = {?LINE, {?LINE_B, self()}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), ?assertEqual(4, rabbit_fifo:query_consumer_count(State1)), Consumers = rabbit_fifo:query_consumers(State1), ?assertEqual(4, maps:size(Consumers)), - maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) -> + maps:fold(fun(Key, {Pid, _Tag, _, _, Active, ActivityStatus, _, _}, _Acc) -> ?assertEqual(self(), Pid), - case Tag of - <<"ctag1">> -> + case Key of + CK1 -> ?assert(Active), ?assertEqual(single_active, ActivityStatus); _ -> ?assertNot(Active), ?assertEqual(waiting, ActivityStatus) end - end, [], Consumers). + end, [], Consumers), + ok. -active_flag_updated_when_consumer_suspected_unsuspected_test(C) -> +active_flag_updated_when_consumer_suspected_unsuspected_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), - release_cursor_interval => 0, - single_active_consumer_on => false}), + queue_resource => rabbit_misc:r("/", queue, + ?FUNCTION_NAME_B), + release_cursor_interval => 0, + single_active_consumer_on => false}), DummyFunction = fun() -> ok end, Pid1 = spawn(DummyFunction), @@ -1293,32 +1480,34 @@ active_flag_updated_when_consumer_suspected_unsuspected_test(C) -> Pid3 = spawn(DummyFunction), % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = - apply( - meta(C, 1), - rabbit_fifo:make_checkout({CTag, ChannelId}, - {once, 1, simple_prefetch}, - #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, - [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]), - - {State2, _, Effects2} = apply(meta(C, 3), + {CK1, C1} = {?LINE, {?LINE_B, Pid1}}, + {CK2, C2} = {?LINE, {?LINE_B, Pid2}}, + {CK3, C3} = {?LINE, {?LINE_B, Pid2}}, + {CK4, C4} = {?LINE, {?LINE_B, Pid3}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), + {State2, _, Effects2} = apply(meta(Config, 3), {down, Pid1, noconnection}, State1), - % 1 effect to update the metrics of each consumer (they belong to the same node), 1 more effect to monitor the node, 1 more decorators effect + % 1 effect to update the metrics of each 
consumer + % (they belong to the same node), + % 1 more effect to monitor the node, + % 1 more decorators effect ?assertEqual(4 + 1, length(Effects2)), - {_, _, Effects3} = apply(meta(C, 4), {nodeup, node(self())}, State2), - % for each consumer: 1 effect to update the metrics, 1 effect to monitor the consumer PID, 1 more decorators effect - ?assertEqual(4 + 4, length(Effects3)). + {_, _, Effects3} = apply(meta(Config, 4), {nodeup, node(self())}, State2), + % for each consumer: 1 effect to update the metrics, + % 1 effect to monitor the consumer PID, 1 more decorators effect + ?assertEqual(4 + 4, length(Effects3)), + ok. -active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_consumer_is_on_test(C) -> +active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_consumer_is_on_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), @@ -1328,162 +1517,574 @@ active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_co Pid3 = spawn(DummyFunction), % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = apply( - meta(C, 1), - make_checkout({CTag, ChannelId}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, - [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, - {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]), - - {State2, _, Effects2} = apply(meta(C, 2), {down, Pid1, noconnection}, State1), + {CK1, C1} = {?LINE, {?LINE_B, Pid1}}, + {CK2, C2} = {?LINE, {?LINE_B, Pid2}}, + {CK3, C3} = {?LINE, {?LINE_B, Pid2}}, + {CK4, C4} = {?LINE, {?LINE_B, Pid3}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), + {State2, _, Effects2} = apply(meta(Config, 2), {down, Pid1, noconnection}, State1), % one monitor and one consumer status update (deactivated) ?assertEqual(2, length(Effects2)), - {_, _, Effects3} = apply(meta(C, 3), {nodeup, node(self())}, State2), + {_, _, Effects3} = apply(meta(Config, 3), {nodeup, node(self())}, State2), % for each consumer: 1 effect to monitor the consumer PID - ?assertEqual(5, length(Effects3)). + ?assertEqual(5, length(Effects3)), + ok. 
-single_active_cancelled_with_unacked_test(C) -> +single_active_cancelled_with_unacked_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), - C1 = {<<"ctag1">>, self()}, - C2 = {<<"ctag2">>, self()}, - % adding some consumers - AddConsumer = fun(Con, S0) -> - {S, _, _} = apply( - meta(C, 1), - make_checkout(Con, - {auto, 1, simple_prefetch}, - #{}), - S0), - S - end, - State1 = lists:foldl(AddConsumer, State0, [C1, C2]), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), %% enqueue 2 messages - {State2, _Effects2} = enq(C, 3, 1, msg1, State1), - {State3, _Effects3} = enq(C, 4, 2, msg2, State2), + {State2, _Effects2} = enq(Config, 3, 1, msg1, State1), + {State3, _Effects3} = enq(Config, 4, 2, msg2, State2), %% one should be checked ou to C1 %% cancel C1 - {State4, _, _} = apply(meta(C, 5), + {State4, _, _} = apply(meta(Config, ?LINE), make_checkout(C1, cancel, #{}), State3), %% C2 should be the active consumer - ?assertMatch(#{C2 := #consumer{status = up, - checked_out = #{0 := _}}}, + ?assertMatch(#{CK2 := #consumer{status = up, + checked_out = #{0 := _}}}, State4#rabbit_fifo.consumers), %% C1 should be a cancelled consumer - ?assertMatch(#{C1 := #consumer{status = cancelled, - cfg = #consumer_cfg{lifetime = once}, - checked_out = #{0 := _}}}, + ?assertMatch(#{CK1 := #consumer{status = cancelled, + cfg = #consumer_cfg{lifetime = once}, + checked_out = #{0 := _}}}, State4#rabbit_fifo.consumers), ?assertMatch([], rabbit_fifo:query_waiting_consumers(State4)), %% Ack both messages - {State5, _Effects5} = settle(C, C1, 1, 0, State4), + {State5, _Effects5} = settle(Config, CK1, ?LINE, 0, State4), %% C1 should now be cancelled - {State6, _Effects6} = settle(C, C2, 2, 0, State5), + {State6, _Effects6} = settle(Config, CK2, ?LINE, 0, State5), %% C2 should remain - ?assertMatch(#{C2 := #consumer{status = up}}, + ?assertMatch(#{CK2 := #consumer{status = up}}, State6#rabbit_fifo.consumers), %% C1 should be gone - ?assertNotMatch(#{C1 := _}, + ?assertNotMatch(#{CK1 := _}, State6#rabbit_fifo.consumers), ?assertMatch([], rabbit_fifo:query_waiting_consumers(State6)), ok. 
-single_active_with_credited_test(C) -> +single_active_with_credited_v1_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), - C1 = {<<"ctag1">>, self()}, - C2 = {<<"ctag2">>, self()}, - % adding some consumers - AddConsumer = fun(Con, S0) -> - {S, _, _} = apply( - meta(C, 1), - make_checkout(Con, - {auto, 0, credited}, - #{}), - S0), - S - end, - State1 = lists:foldl(AddConsumer, State0, [C1, C2]), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), %% add some credit - C1Cred = rabbit_fifo:make_credit(C1, 5, 0, false), - {State2, _, _Effects2} = apply(meta(C, 3), C1Cred, State1), - C2Cred = rabbit_fifo:make_credit(C2, 4, 0, false), - {State3, _} = apply(meta(C, 4), C2Cred, State2), + C1Cred = rabbit_fifo:make_credit(CK1, 5, 0, false), + {State2, _, _Effects2} = apply(meta(Config, ?LINE), C1Cred, State1), + C2Cred = rabbit_fifo:make_credit(CK2, 4, 0, false), + {State3, _} = apply(meta(Config, ?LINE), C2Cred, State2), %% both consumers should have credit - ?assertMatch(#{C1 := #consumer{credit = 5}}, + ?assertMatch(#{CK1 := #consumer{credit = 5}}, State3#rabbit_fifo.consumers), - ?assertMatch([{C2, #consumer{credit = 4}}], + ?assertMatch([{CK2, #consumer{credit = 4}}], rabbit_fifo:query_waiting_consumers(State3)), ok. - -register_enqueuer_test(C) -> +single_active_with_credited_v2_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + ?FUNCTION_NAME_B), + release_cursor_interval => 0, + single_active_consumer_on => true}), + C1 = {<<"ctag1">>, self()}, + {State1, {ok, #{key := CKey1}}, _} = + apply(meta(Config, 1), + make_checkout(C1, {auto, {credited, 0}}, #{}), State0), + C2 = {<<"ctag2">>, self()}, + {State2, {ok, #{key := CKey2}}, _} = + apply(meta(Config, 2), + make_checkout(C2, {auto, {credited, 0}}, #{}), State1), + %% add some credit + C1Cred = rabbit_fifo:make_credit(CKey1, 5, 0, false), + {State3, ok, Effects1} = apply(meta(Config, 3), C1Cred, State2), + ?assertEqual([{send_msg, self(), + {credit_reply, <<"ctag1">>, _DeliveryCount = 0, _Credit = 5, + _Available = 0, _Drain = false}, + ?DELIVERY_SEND_MSG_OPTS}], + Effects1), + + C2Cred = rabbit_fifo:make_credit(CKey2, 4, 0, false), + {State, ok, Effects2} = apply(meta(Config, 4), C2Cred, State3), + ?assertEqual({send_msg, self(), + {credit_reply, <<"ctag2">>, _DeliveryCount = 0, _Credit = 4, + _Available = 0, _Drain = false}, + ?DELIVERY_SEND_MSG_OPTS}, + Effects2), + + %% both consumers should have credit + ?assertMatch(#{CKey1 := #consumer{credit = 5}}, + State#rabbit_fifo.consumers), + ?assertMatch([{CKey2, #consumer{credit = 4}}], + rabbit_fifo:query_waiting_consumers(State)), + ok. 
+
+single_active_settle_after_cancel_test(Config) ->
+    S0 = init(#{name => ?FUNCTION_NAME,
+                queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B),
+                single_active_consumer_on => true}),
+
+    Pid1 = test_util:fake_pid(node()),
+    % % adding some consumers
+    E1Idx = ?LINE,
+    {CK1, C1} = {?LINE, {?LINE_B, self()}},
+    {CK2, C2} = {?LINE, {?LINE_B, self()}},
+    Entries =
+        [
+         {E1Idx , rabbit_fifo:make_enqueue(Pid1, 1, msg1)},
+         %% add a consumer
+         {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 1,
+                                                             status = up,
+                                                             checked_out = Ch}}}
+                   when map_size(Ch) == 1),
+         %% add another consumer
+         {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = [{CK2, _}]}),
+
+         %% cancel C1
+         {?LINE, make_checkout(C1, cancel, #{})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = cancelled},
+                                            CK2 := #consumer{status = up}},
+                              waiting_consumers = []}),
+         %% settle the message, C1 should be completely removed
+         {?LINE, rabbit_fifo:make_settle(CK1, [0])},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}} = C,
+                              waiting_consumers = []}
+                   when map_size(C) == 1)
+
+        ],
+    {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1),
+
+    ok.
+
+single_active_consumer_priority_test(Config) ->
+    S0 = init(#{name => ?FUNCTION_NAME,
+                queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B),
+                single_active_consumer_on => true}),
+
+    Pid1 = test_util:fake_pid(node()),
+    % % adding some consumers
+    {CK1, C1} = {?LINE, {?LINE_B, self()}},
+    {CK2, C2} = {?LINE, {?LINE_B, self()}},
+    E1Idx = ?LINE,
+    {CK3, C3} = {?LINE, {?LINE_B, self()}},
+    Entries =
+        [
+         %% add a consumer
+         {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 1})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = []}),
+
+         %% add a consumer with a higher priority, assert it becomes active
+         {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}},
+                              waiting_consumers = [_]}),
+
+         %% enqueue a message
+         {E1Idx , rabbit_fifo:make_enqueue(Pid1, 1, msg1)},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{next_msg_id = 1,
+                                                             status = up,
+                                                             checked_out = Ch}}}
+                   when map_size(Ch) == 1),
+
+         %% add an even higher priority consumer, but the current active has a message pending
+         %% so can't be immediately replaced
+         {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{priority => 3})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = quiescing}},
+                              waiting_consumers = [_, _]}),
+         %% settle the message, the higher priority should become the active,
+         %% completing the replacement
+         {?LINE, rabbit_fifo:make_settle(CK2, [0])},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK3 := #consumer{status = up,
+                                                             checked_out = Ch}},
+                              waiting_consumers = [_, _]}
+                   when map_size(Ch) == 0)
+
+        ],
+    {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1),
+
+    ok.
+
+
+single_active_consumer_priority_cancel_active_test(Config) ->
+    S0 = init(#{name => ?FUNCTION_NAME,
+                queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B),
+                single_active_consumer_on => true}),
+
+    % % adding some consumers
+    {CK1, C1} = {?LINE, {?LINE_B, self()}},
+    {CK2, C2} = {?LINE, {?LINE_B, self()}},
+    {CK3, C3} = {?LINE, {?LINE_B, self()}},
+    Entries =
+        [
+         %% add a consumer
+         {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = []}),
+
+         %% add two consumers each with a lower priority
+         {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 1})},
+         {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{priority => 0})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = [_, _]}),
+
+         {?LINE, make_checkout(C1, cancel, #{})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}},
+                              waiting_consumers = [{CK3, _}]})
+        ],
+    {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1),
+
+    ok.
+
+single_active_consumer_update_priority_test(Config) ->
+    S0 = init(#{name => ?FUNCTION_NAME,
+                queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B),
+                single_active_consumer_on => true}),
+
+    Pid1 = test_util:fake_pid(node()),
+    % % adding some consumers
+    {CK1, C1} = {?LINE, {?LINE_B, self()}},
+    {CK2, C2} = {?LINE, {?LINE_B, self()}},
+    Entries =
+        [
+         %% add a consumer
+         {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = []}),
+         %% add another consumer with lower priority
+         {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 1})},
+         %% update the current active consumer to lower priority
+         {?LINE, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 0})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}},
+                              waiting_consumers = [_]}),
+         %% back to original priority
+         {?LINE, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = [_]}),
+         {?LINE , rabbit_fifo:make_enqueue(Pid1, 1, msg1)},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{checked_out = Ch}},
+                              waiting_consumers = [{CK2, _}]}
+                   when map_size(Ch) == 1),
+         %% update priority for C2
+         {?LINE, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 3})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{checked_out = Ch}},
+                              waiting_consumers = [{CK2, _}]}
+                   when map_size(Ch) == 1),
+         %% settle should cause the existing active to be replaced
+         {?LINE, rabbit_fifo:make_settle(CK1, [0])},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}},
+                              waiting_consumers = [{CK1, _}]})
+        ],
+    {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1),
+    ok.
+ +single_active_consumer_quiescing_resumes_after_cancel_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), + + Pid1 = test_util:fake_pid(node()), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + Entries = + [ + %% add a consumer + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 1})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = []}), + + %% enqueue a message + {?LINE , rabbit_fifo:make_enqueue(Pid1, 1, msg1)}, + + %% add a consumer with a higher priority, current is quiescing + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 2})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing}}, + waiting_consumers = [{CK2, _}]}), + + %% C2 cancels + {?LINE, make_checkout(C2, cancel, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing, + checked_out = Ch}}, + waiting_consumers = []} + when map_size(Ch) == 1), + + %% settle + {?LINE, rabbit_fifo:make_settle(CK1, [0])}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up, + credit = 1}}, + waiting_consumers = []}) + ], + + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + + ok. + +single_active_consumer_higher_waiting_disconnected_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), + + Pid1 = test_util:fake_pid(node()), + C1Pid = test_util:fake_pid(n1@banana), + C2Pid = test_util:fake_pid(n2@banana), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, C1Pid}}, + {CK2, C2} = {?LINE, {?LINE_B, C2Pid}}, + Entries = + [ + %% add a consumer + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 1})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = []}), + + %% enqueue a message + {?LINE , rabbit_fifo:make_enqueue(Pid1, 1, msg1)}, + + %% add a consumer with a higher priority, current is quiescing + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 2})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing}}, + waiting_consumers = [{CK2, _}]}), + %% C2 is disconnected, + {?LINE, {down, C2Pid, noconnection}}, + ?ASSERT( + #rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing}}, + waiting_consumers = [{CK2, #consumer{status = suspected_down}}]}), + %% settle + {?LINE, rabbit_fifo:make_settle(CK1, [0])}, + %% C1 should be reactivated + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up, + credit = 1}}, + waiting_consumers = [_]}), + %% C2 comes back up and takes over + {?LINE, {nodeup, n2@banana}}, + ?ASSERT( + #rabbit_fifo{consumers = #{CK2 := #consumer{status = up}}, + waiting_consumers = [{CK1, #consumer{status = up}}]}) + ], + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + + ok. 
+
+single_active_consumer_quiescing_disconnected_test(Config) ->
+    S0 = init(#{name => ?FUNCTION_NAME,
+                queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B),
+                single_active_consumer_on => true}),
+
+    Pid1 = test_util:fake_pid(node()),
+    C1Pid = test_util:fake_pid(n1@banana),
+    C2Pid = test_util:fake_pid(n2@banana),
+    % % adding some consumers
+    {CK1, C1} = {?LINE, {?LINE_B, C1Pid}},
+    {CK2, C2} = {?LINE, {?LINE_B, C2Pid}},
+    Entries =
+        [
+         %% add a consumer
+         {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 1})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = []}),
+
+         %% enqueue a message
+         {?LINE , rabbit_fifo:make_enqueue(Pid1, 1, msg1)},
+
+         %% add a consumer with a higher priority, current is quiescing
+         {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing}},
+                              waiting_consumers = [{CK2, _}]}),
+         %% C1 is disconnected,
+         {?LINE, {down, C1Pid, noconnection}},
+         ?ASSERT(
+           #rabbit_fifo{consumers = #{CK2 := #consumer{status = up,
+                                                       checked_out = Ch2}},
+                        waiting_consumers =
+                            [{CK1, #consumer{status = suspected_down,
+                                             checked_out = Ch1}}]}
+             when map_size(Ch2) == 1 andalso
+                  map_size(Ch1) == 0),
+         %% C1 settles which will be ignored
+         {?LINE, rabbit_fifo:make_settle(CK1, [0])},
+         ?ASSERT(
+           #rabbit_fifo{consumers = #{CK2 := #consumer{status = up,
+                                                       checked_out = Ch2}},
+                        waiting_consumers =
+                            [{CK1, #consumer{status = suspected_down,
+                                             checked_out = Ch1}}]}
+             when map_size(Ch2) == 1 andalso
+                  map_size(Ch1) == 0),
+         % %% C1 comes back up
+         {?LINE, {nodeup, n1@banana}},
+         ?ASSERT(
+           #rabbit_fifo{consumers = #{CK2 := #consumer{status = up}},
+                        waiting_consumers = [{CK1, #consumer{status = up}}]})
+        ],
+    {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1),
+
+    ok.
+
+single_active_consumer_quiescing_receives_no_further_messages_test(Config) ->
+    S0 = init(#{name => ?FUNCTION_NAME,
+                queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B),
+                single_active_consumer_on => true}),
+
+    Pid1 = test_util:fake_pid(node()),
+    C1Pid = test_util:fake_pid(n1@banana),
+    C2Pid = test_util:fake_pid(n2@banana),
+    % % adding some consumers
+    {CK1, C1} = {?LINE, {?LINE_B, C1Pid}},
+    {CK2, C2} = {?LINE, {?LINE_B, C2Pid}},
+    Entries =
+        [
+         %% add a consumer, with plenty of prefetch
+         {CK1, make_checkout(C1, {auto, {simple_prefetch, 10}}, #{priority => 1})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = []}),
+
+         %% enqueue a message
+         {?LINE, rabbit_fifo:make_enqueue(Pid1, 1, msg1)},
+
+         %% add a consumer with a higher priority, current is quiescing
+         {CK2, make_checkout(C2, {auto, {simple_prefetch, 10}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing,
+                                                             checked_out = Ch}},
+                              waiting_consumers = [{CK2, _}]}
+                   when map_size(Ch) == 1),
+
+         %% enqueue another message
+         {?LINE, rabbit_fifo:make_enqueue(Pid1, 2, msg2)},
+         %% message should not be assigned to the quiescing consumer
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing,
+                                                             checked_out = Ch}},
+                              waiting_consumers = [{CK2, _}]}
+                   when map_size(Ch) == 1)
+
+        ],
+
+    {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1),
+    ok.
+ +single_active_consumer_credited_favour_with_credit_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), + + C1Pid = test_util:fake_pid(n1@banana), + C2Pid = test_util:fake_pid(n2@banana), + C3Pid = test_util:fake_pid(n3@banana), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, C1Pid}}, + {CK2, C2} = {?LINE, {?LINE_B, C2Pid}}, + {CK3, C3} = {?LINE, {?LINE_B, C3Pid}}, + Entries = + [ + %% add a consumer + {CK1, make_checkout(C1, {auto, {credited, 0}}, #{priority => 3})}, + {CK2, make_checkout(C2, {auto, {credited, 0}}, #{priority => 1})}, + {CK3, make_checkout(C3, {auto, {credited, 0}}, #{priority => 1})}, + %% waiting are sorted by arrival order + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = [{CK2, _}, {CK3, _}]}), + + %% give credit to C3 + {?LINE , rabbit_fifo:make_credit(CK3, 1, 0, false)}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = [{CK3, _}, {CK2, _}]}), + %% cancel the current active consumer + {CK1, make_checkout(C1, cancel, #{})}, + %% C3 should become active due having credits + ?ASSERT(#rabbit_fifo{consumers = #{CK3 := #consumer{status = up, + credit = 1}}, + waiting_consumers = [{CK2, _}]}) + ], + + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + ok. + + + +register_enqueuer_test(Config) -> + State0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), max_length => 2, max_in_memory_length => 0, overflow_strategy => reject_publish}), %% simply registering should be ok when we're below limit Pid1 = test_util:fake_pid(node()), - {State1, ok, [_]} = apply(meta(C, 1), make_register_enqueuer(Pid1), State0), + {State1, ok, [_]} = apply(meta(Config, 1, ?LINE, {notify, 1, Pid1}), + make_register_enqueuer(Pid1), State0), - {State2, ok, _} = apply(meta(C, 2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1), + {State2, ok, _} = apply(meta(Config, 2, ?LINE, {notify, 2, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 1, one), State1), %% register another enqueuer shoudl be ok Pid2 = test_util:fake_pid(node()), - {State3, ok, [_]} = apply(meta(C, 3), make_register_enqueuer(Pid2), State2), + {State3, ok, [_]} = apply(meta(Config, 3, ?LINE, {notify, 3, Pid2}), + make_register_enqueuer(Pid2), State2), - {State4, ok, _} = apply(meta(C, 4), rabbit_fifo:make_enqueue(Pid1, 2, two), State3), - {State5, ok, Efx} = apply(meta(C, 5), rabbit_fifo:make_enqueue(Pid1, 3, three), State4), - % ct:pal("Efx ~tp", [Efx]), + {State4, ok, _} = apply(meta(Config, 4, ?LINE, {notify, 4, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 2, two), State3), + {State5, ok, Efx} = apply(meta(Config, 5, ?LINE, {notify, 4, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 3, three), State4), %% validate all registered enqueuers are notified of overflow state - ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx), - ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid2, Efx), + ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, + P == Pid1, Efx), + ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, + P == Pid2, Efx), %% this time, registry should return reject_publish - {State6, reject_publish, [_]} = apply(meta(C, 6), make_register_enqueuer( - test_util:fake_pid(node())), State5), + {State6, reject_publish, [_]} = + apply(meta(Config, 6), make_register_enqueuer( + 
test_util:fake_pid(node())), State5), ?assertMatch(#{num_enqueuers := 3}, rabbit_fifo:overview(State6)), - Pid3 = test_util:fake_pid(node()), %% remove two messages this should make the queue fall below the 0.8 limit {State7, _, Efx7} = - apply(meta(C, 7), + apply(meta(Config, 7), rabbit_fifo:make_checkout({<<"a">>, Pid3}, {dequeue, settled}, #{}), State6), ?ASSERT_EFF({log, [_], _}, Efx7), - % ct:pal("Efx7 ~tp", [_Efx7]), {State8, _, Efx8} = - apply(meta(C, 8), + apply(meta(Config, 8), rabbit_fifo:make_checkout({<<"a">>, Pid3}, {dequeue, settled}, #{}), State7), ?ASSERT_EFF({log, [_], _}, Efx8), - % ct:pal("Efx8 ~tp", [Efx8]), %% validate all registered enqueuers are notified of overflow state ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid1, Efx8), ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid2, Efx8), {_State9, _, Efx9} = - apply(meta(C, 9), + apply(meta(Config, 9), rabbit_fifo:make_checkout({<<"a">>, Pid3}, {dequeue, settled}, #{}), State8), ?ASSERT_EFF({log, [_], _}, Efx9), @@ -1491,27 +2092,29 @@ register_enqueuer_test(C) -> ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid2, Efx9), ok. -reject_publish_purge_test(C) -> +reject_publish_purge_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), max_length => 2, max_in_memory_length => 0, overflow_strategy => reject_publish}), %% simply registering should be ok when we're below limit Pid1 = test_util:fake_pid(node()), - {State1, ok, [_]} = apply(meta(C, 1), make_register_enqueuer(Pid1), State0), - {State2, ok, _} = apply(meta(C, 2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1), - {State3, ok, _} = apply(meta(C, 3), rabbit_fifo:make_enqueue(Pid1, 2, two), State2), - {State4, ok, Efx} = apply(meta(C, 4), rabbit_fifo:make_enqueue(Pid1, 3, three), State3), + {State1, ok, [_]} = apply(meta(Config, 1), make_register_enqueuer(Pid1), State0), + {State2, ok, _} = apply(meta(Config, 2, ?LINE, {notify, 2, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 1, one), State1), + {State3, ok, _} = apply(meta(Config, 3, ?LINE, {notify, 2, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 2, two), State2), + {State4, ok, Efx} = apply(meta(Config, 4, ?LINE, {notify, 2, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 3, three), State3), % ct:pal("Efx ~tp", [Efx]), ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx), - {_State5, {purge, 3}, Efx1} = apply(meta(C, 5), rabbit_fifo:make_purge(), State4), + {_State5, {purge, 3}, Efx1} = apply(meta(Config, 5), rabbit_fifo:make_purge(), State4), ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid1, Efx1), ok. 
-reject_publish_applied_after_limit_test(C) -> - QName = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), +reject_publish_applied_after_limit_test(Config) -> + QName = rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), InitConf = #{name => ?FUNCTION_NAME, max_in_memory_length => 0, queue_resource => QName @@ -1519,12 +2122,16 @@ reject_publish_applied_after_limit_test(C) -> State0 = init(InitConf), %% simply registering should be ok when we're below limit Pid1 = test_util:fake_pid(node()), - {State1, ok, [_]} = apply(meta(C, 1), make_register_enqueuer(Pid1), State0), - {State2, ok, _} = apply(meta(C, 2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1), - {State3, ok, _} = apply(meta(C, 3), rabbit_fifo:make_enqueue(Pid1, 2, two), State2), - {State4, ok, Efx} = apply(meta(C, 4), rabbit_fifo:make_enqueue(Pid1, 3, three), State3), - % ct:pal("Efx ~tp", [Efx]), - ?ASSERT_NO_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx), + {State1, ok, [_]} = apply(meta(Config, 1, ?LINE, {notify, 1, Pid1}), + make_register_enqueuer(Pid1), State0), + {State2, ok, _} = apply(meta(Config, 2, ?LINE, {notify, 1, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 1, one), State1), + {State3, ok, _} = apply(meta(Config, 3, ?LINE, {notify, 1, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 2, two), State2), + {State4, ok, Efx} = apply(meta(Config, 4, ?LINE, {notify, 1, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 3, three), State3), + ?ASSERT_NO_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, + P == Pid1, Efx), %% apply new config Conf = #{name => ?FUNCTION_NAME, queue_resource => QName, @@ -1533,78 +2140,81 @@ reject_publish_applied_after_limit_test(C) -> max_in_memory_length => 0, dead_letter_handler => undefined }, - {State5, ok, Efx1} = apply(meta(C, 5), rabbit_fifo:make_update_config(Conf), State4), - ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx1), + {State5, ok, Efx1} = apply(meta(Config, 5), rabbit_fifo:make_update_config(Conf), State4), + ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, + P == Pid1, Efx1), Pid2 = test_util:fake_pid(node()), - {_State6, reject_publish, _} = apply(meta(C, 1), make_register_enqueuer(Pid2), State5), + {_State6, reject_publish, _} = + apply(meta(Config, 1), make_register_enqueuer(Pid2), State5), ok. 
-purge_nodes_test(C) -> +purge_nodes_test(Config) -> Node = purged@node, ThisNode = node(), EnqPid = test_util:fake_pid(Node), EnqPid2 = test_util:fake_pid(node()), ConPid = test_util:fake_pid(Node), Cid = {<<"tag">>, ConPid}, - % WaitingPid = test_util:fake_pid(Node), State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), single_active_consumer_on => false}), - {State1, _, _} = apply(meta(C, 1), + {State1, _, _} = apply(meta(Config, 1, ?LINE, {notify, 1, EnqPid}), rabbit_fifo:make_enqueue(EnqPid, 1, msg1), State0), - {State2, _, _} = apply(meta(C, 2), + {State2, _, _} = apply(meta(Config, 2, ?LINE, {notify, 2, EnqPid2}), rabbit_fifo:make_enqueue(EnqPid2, 1, msg2), State1), - {State3, _} = check(C, Cid, 3, 1000, State2), - {State4, _, _} = apply(meta(C, 4), + {State3, _} = check(Config, Cid, 3, 1000, State2), + {State4, _, _} = apply(meta(Config, ?LINE), {down, EnqPid, noconnection}, State3), - ?assertMatch( - [{aux, {handle_tick, - [#resource{}, _Metrics, - [ThisNode, Node] - ]}}] , rabbit_fifo:tick(1, State4)), + ?assertMatch([{aux, {handle_tick, + [#resource{}, _Metrics, + [ThisNode, Node]]}}], + rabbit_fifo:tick(1, State4)), %% assert there are both enqueuers and consumers - {State, _, _} = apply(meta(C, 5), + {State, _, _} = apply(meta(Config, ?LINE), rabbit_fifo:make_purge_nodes([Node]), State4), %% assert there are no enqueuers nor consumers - ?assertMatch(#rabbit_fifo{enqueuers = Enqs} when map_size(Enqs) == 1, - State), - - ?assertMatch(#rabbit_fifo{consumers = Cons} when map_size(Cons) == 0, - State), - ?assertMatch( - [{aux, {handle_tick, - [#resource{}, _Metrics, - [ThisNode] - ]}}] , rabbit_fifo:tick(1, State)), + ?assertMatch(#rabbit_fifo{enqueuers = Enqs} + when map_size(Enqs) == 1, State), + ?assertMatch(#rabbit_fifo{consumers = Cons} + when map_size(Cons) == 0, State), + ?assertMatch([{aux, {handle_tick, [#resource{}, _Metrics, [ThisNode]]}}], + rabbit_fifo:tick(1, State)), ok. meta(Config, Idx) -> meta(Config, Idx, 0). meta(Config, Idx, Timestamp) -> + meta(Config, Idx, Timestamp, no_reply). + +meta(Config, Idx, Timestamp, ReplyMode) -> #{machine_version => ?config(machine_version, Config), index => Idx, term => 1, system_time => Timestamp, + reply_mode => ReplyMode, from => {make_ref(), self()}}. enq(Config, Idx, MsgSeq, Msg, State) -> strip_reply( - rabbit_fifo:apply(meta(Config, Idx), rabbit_fifo:make_enqueue(self(), MsgSeq, Msg), State)). + apply(meta(Config, Idx, 0, {notify, MsgSeq, self()}), + rabbit_fifo:make_enqueue(self(), MsgSeq, Msg), + State)). deq(Config, Idx, Cid, Settlement, Msg, State0) -> {State, _, Effs} = apply(meta(Config, Idx), rabbit_fifo:make_checkout(Cid, {dequeue, Settlement}, #{}), State0), - {value, {log, [_Idx], Fun}} = lists:search(fun(E) -> element(1, E) == log end, Effs), + {value, {log, [_Idx], Fun}} = lists:search(fun(E) -> + element(1, E) == log + end, Effs), [{reply, _From, {wrap_reply, {dequeue, {MsgId, _}, _}}}] = Fun([Msg]), @@ -1634,8 +2244,20 @@ check(Config, Cid, Idx, Num, State) -> rabbit_fifo:make_checkout(Cid, {auto, Num, simple_prefetch}, #{}), State)). -settle(Config, Cid, Idx, MsgId, State) -> - strip_reply(apply(meta(Config, Idx), rabbit_fifo:make_settle(Cid, [MsgId]), State)). 
+checkout(Config, Idx, Cid, Credit, State) + when is_integer(Credit) -> + checkout(Config, Idx, Cid, {auto, {simple_prefetch, Credit}}, State); +checkout(Config, Idx, Cid, Spec, State) -> + checkout_reply( + apply(meta(Config, Idx), + rabbit_fifo:make_checkout(Cid, Spec, #{}), + State)). + +settle(Config, Cid, Idx, MsgId, State) when is_integer(MsgId) -> + settle(Config, Cid, Idx, [MsgId], State); +settle(Config, Cid, Idx, MsgIds, State) when is_list(MsgIds) -> + strip_reply(apply(meta(Config, Idx), + rabbit_fifo:make_settle(Cid, MsgIds), State)). return(Config, Cid, Idx, MsgId, State) -> strip_reply(apply(meta(Config, Idx), rabbit_fifo:make_return(Cid, [MsgId]), State)). @@ -1647,17 +2269,36 @@ credit(Config, Cid, Idx, Credit, DelCnt, Drain, State) -> strip_reply({State, _, Effects}) -> {State, Effects}. +checkout_reply({State, {ok, CInfo}, Effects}) when is_map(CInfo) -> + {State, CInfo, Effects}; +checkout_reply(Oth) -> + Oth. + run_log(Config, InitState, Entries) -> - lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) -> - case apply(meta(Config, Idx), E, Acc0) of - {Acc, _, Efx} when is_list(Efx) -> - {Acc, Efx0 ++ Efx}; - {Acc, _, Efx} -> - {Acc, Efx0 ++ [Efx]}; - {Acc, _} -> - {Acc, Efx0} - end - end, {InitState, []}, Entries). + run_log(rabbit_fifo, Config, InitState, Entries, fun (_) -> true end). + +run_log(Config, InitState, Entries, Invariant) -> + run_log(rabbit_fifo, Config, InitState, Entries, Invariant). + +run_log(Module, Config, InitState, Entries, Invariant) -> + lists:foldl( + fun ({assert, Fun}, {Acc0, Efx0}) -> + _ = Fun(Acc0), + {Acc0, Efx0}; + ({Idx, E}, {Acc0, Efx0}) -> + case Module:apply(meta(Config, Idx, Idx, {notify, Idx, self()}), + E, Acc0) of + {Acc, _, Efx} when is_list(Efx) -> + ?assert(Invariant(Acc)), + {Acc, Efx0 ++ Efx}; + {Acc, _, Efx} -> + ?assert(Invariant(Acc)), + {Acc, Efx0 ++ [Efx]}; + {Acc, _} -> + ?assert(Invariant(Acc)), + {Acc, Efx0} + end + end, {InitState, []}, Entries). 
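%% A minimal, illustrative use of run_log/4 with an {assert, Fun} entry and a
%% per-command invariant (this sketch assumes the test_init/1 and
%% single_active_invariant/1 helpers defined elsewhere in this suite):
%%
%%   Entries = [{1, rabbit_fifo:make_enqueue(self(), 1, m1)},
%%              {assert, fun (S) -> ?assertMatch(#rabbit_fifo{}, S) end}],
%%   {_State, _Effects} = run_log(Config, test_init(example), Entries,
%%                                fun single_active_invariant/1).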
%% AUX Tests @@ -1665,16 +2306,18 @@ run_log(Config, InitState, Entries) -> aux_test(_) -> _ = ra_machine_ets:start_link(), Aux0 = init_aux(aux_test), - MacState = init(#{name => aux_test, - queue_resource => - rabbit_misc:r(<<"/">>, queue, <<"test">>)}), + LastApplied = 0, + State0 = #{machine_state => + init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => false}), + log => mock_log, + last_applied => LastApplied}, ok = meck:new(ra_log, []), - Log = mock_log, meck:expect(ra_log, last_index_term, fun (_) -> {0, 0} end), - {no_reply, Aux, mock_log} = handle_aux(leader, cast, active, Aux0, - Log, MacState), - {no_reply, _Aux, mock_log} = handle_aux(leader, cast, tick, Aux, - Log, MacState), + {no_reply, Aux, State} = handle_aux(leader, cast, active, Aux0, State0), + {no_reply, _Aux, _, + [{release_cursor, LastApplied}]} = handle_aux(leader, cast, tick, Aux, State), [X] = ets:lookup(rabbit_fifo_usage, aux_test), meck:unload(), ?assert(X > 0.0), @@ -1742,9 +2385,9 @@ convert_v2_to_v3(Config) -> Cid1 = {ctag1, self()}, Cid2 = {ctag2, self()}, MaxCredits = 20, - Entries = [{1, rabbit_fifo:make_checkout(Cid1, {auto, 10, credited}, #{})}, - {2, rabbit_fifo:make_checkout(Cid2, {auto, MaxCredits, simple_prefetch}, - #{prefetch => MaxCredits})}], + Entries = [{1, make_checkout(Cid1, {auto, 10, credited}, #{})}, + {2, make_checkout(Cid2, {auto, MaxCredits, simple_prefetch}, + #{prefetch => MaxCredits})}], %% run log in v2 {State, _} = run_log(ConfigV2, test_init(?FUNCTION_NAME), Entries), @@ -1758,6 +2401,55 @@ convert_v2_to_v3(Config) -> maps:get(Cid2, Consumers)), ok. +convert_v3_to_v4(Config) -> + ConfigV3 = [{machine_version, 3} | Config], + ConfigV4 = [{machine_version, 4} | Config], + + EPid = test_util:fake_pid(node()), + Pid1 = test_util:fake_pid(node()), + Cid1 = {ctag1, Pid1}, + Cid2 = {ctag2, self()}, + MaxCredits = 2, + Entries = [ + {1, rabbit_fifo_v3:make_enqueue(EPid, 1, banana)}, + {2, rabbit_fifo_v3:make_enqueue(EPid, 2, apple)}, + {3, rabbit_fifo_v3:make_enqueue(EPid, 3, orange)}, + {4, make_checkout(Cid1, {auto, 10, credited}, #{})}, + {5, make_checkout(Cid2, {auto, MaxCredits, simple_prefetch}, + #{prefetch => MaxCredits})}, + {6, {down, Pid1, error}}], + + %% run log in v3 + Name = ?FUNCTION_NAME, + Init = rabbit_fifo_v3:init( + #{name => Name, + queue_resource => rabbit_misc:r("/", queue, atom_to_binary(Name)), + release_cursor_interval => 0}), + {State, _} = run_log(rabbit_fifo_v3, ConfigV3, Init, Entries, + fun (_) -> true end), + + %% convert from v3 to v4 + {#rabbit_fifo{consumers = Consumers, + returns = Returns}, ok, _} = + apply(meta(ConfigV4, ?LINE), {machine_version, 3, 4}, State), + + ?assertEqual(1, maps:size(Consumers)), + ?assertMatch(#consumer{cfg = #consumer_cfg{credit_mode = + {simple_prefetch, MaxCredits}}}, + maps:get(Cid2, Consumers)), + ?assertNot(is_map_key(Cid1, Consumers)), + %% assert delivery_count is copied to acquired_count + #consumer{checked_out = Ch2} = maps:get(Cid2, Consumers), + ?assertMatch(#{0 := ?MSG(_, #{delivery_count := 1, + acquired_count := 1}), + 1 := ?MSG(_, #{delivery_count := 1, + acquired_count := 1})}, Ch2), + + ?assertMatch(?MSG(_, #{delivery_count := 1, + acquired_count := 1}), lqueue:get(Returns)), + + ok. 
+ queue_ttl_test(C) -> QName = rabbit_misc:r(<<"/">>, queue, <<"test">>), Conf = #{name => ?FUNCTION_NAME, @@ -1777,7 +2469,7 @@ queue_ttl_test(C) -> [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S1), %% cancelling the consumer should then {S2, _, _} = apply(meta(C, 2, Now), - rabbit_fifo:make_checkout(Cid, cancel, #{}), S1), + make_checkout(Cid, cancel, #{}), S1), %% last_active should have been reset when consumer was cancelled %% last_active = 2500 [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S2), @@ -1798,7 +2490,7 @@ queue_ttl_test(C) -> %% dequeue should set last applied {S1Deq, {dequeue, empty}, _} = apply(meta(C, 2, Now), - rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + make_checkout(Cid, {dequeue, unsettled}, #{}), S0), [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S1Deq), @@ -1807,11 +2499,11 @@ queue_ttl_test(C) -> = rabbit_fifo:tick(Now + 2500, S1Deq), %% Enqueue message, Msg = rabbit_fifo:make_enqueue(self(), 1, msg1), - {E1, _, _} = apply(meta(C, 2, Now), Msg, S0), + {E1, _, _} = apply(meta(C, 2, Now, {notify, 2, self()}), Msg, S0), Deq = {<<"deq1">>, self()}, {E2, _, Effs2} = apply(meta(C, 3, Now), - rabbit_fifo:make_checkout(Deq, {dequeue, unsettled}, #{}), + make_checkout(Deq, {dequeue, unsettled}, #{}), E1), {log, [2], Fun2} = get_log_eff(Effs2), @@ -1825,7 +2517,7 @@ queue_ttl_test(C) -> = rabbit_fifo:tick(Now + 3000, E3), ok. -queue_ttl_with_single_active_consumer_test(C) -> +queue_ttl_with_single_active_consumer_test(Config) -> QName = rabbit_misc:r(<<"/">>, queue, <<"test">>), Conf = #{name => ?FUNCTION_NAME, queue_resource => QName, @@ -1840,12 +2532,12 @@ queue_ttl_with_single_active_consumer_test(C) -> = rabbit_fifo:tick(Now + 1000, S0), %% adding a consumer should not ever trigger deletion Cid = {<<"cid1">>, self()}, - {S1, _} = check_auto(C, Cid, 1, S0), + {S1, _, _} = checkout(Config, ?LINE, Cid, 1, S0), [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now, S1), [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S1), %% cancelling the consumer should then - {S2, _, _} = apply(meta(C, 2, Now), - rabbit_fifo:make_checkout(Cid, cancel, #{}), S1), + {S2, _, _} = apply(meta(Config, ?LINE, Now), + make_checkout(Cid, cancel, #{}), S1), %% last_active should have been reset when consumer was cancelled %% last_active = 2500 [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S2), @@ -1853,7 +2545,7 @@ queue_ttl_with_single_active_consumer_test(C) -> [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}] = rabbit_fifo:tick(Now + 2500, S2), %% Same for downs - {S2D, _, _} = apply(meta(C, 2, Now), + {S2D, _, _} = apply(meta(Config, ?LINE, Now), {down, self(), noconnection}, S1), %% last_active should have been reset when consumer was cancelled %% last_active = 2500 @@ -1863,11 +2555,11 @@ queue_ttl_with_single_active_consumer_test(C) -> = rabbit_fifo:tick(Now + 2500, S2D), ok. 
-query_peek_test(C) -> +query_peek_test(Config) -> State0 = test_init(test), ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(1, State0)), - {State1, _} = enq(C, 1, 1, first, State0), - {State2, _} = enq(C, 2, 2, second, State1), + {State1, _} = enq(Config, 1, 1, first, State0), + {State2, _} = enq(Config, 2, 2, second, State1), ?assertMatch({ok, [1 | _]}, rabbit_fifo:query_peek(1, State1)), ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(2, State1)), ?assertMatch({ok, [1 | _]}, rabbit_fifo:query_peek(1, State2)), @@ -1875,56 +2567,29 @@ query_peek_test(C) -> ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(3, State2)), ok. -checkout_priority_test(C) -> +checkout_priority_test(Config) -> Cid = {<<"checkout_priority_test">>, self()}, Pid = spawn(fun () -> ok end), Cid2 = {<<"checkout_priority_test2">>, Pid}, Args = [{<<"x-priority">>, long, 1}], {S1, _, _} = - apply(meta(C, 3), - rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch}, - #{args => Args}), + apply(meta(Config, ?LINE), + make_checkout(Cid, {auto, {simple_prefetch, 2}}, + #{args => Args}), test_init(test)), {S2, _, _} = - apply(meta(C, 3), - rabbit_fifo:make_checkout(Cid2, {once, 2, simple_prefetch}, - #{args => []}), + apply(meta(Config, ?LINE), + make_checkout(Cid2, {auto, {simple_prefetch, 2}}, + #{args => []}), S1), - {S3, E3} = enq(C, 1, 1, first, S2), - ct:pal("E3 ~tp ~tp", [E3, self()]), + {S3, E3} = enq(Config, ?LINE, 1, first, S2), ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E3), - {S4, E4} = enq(C, 2, 2, second, S3), + {S4, E4} = enq(Config, ?LINE, 2, second, S3), ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E4), - {_S5, E5} = enq(C, 3, 3, third, S4), + {_S5, E5} = enq(Config, ?LINE, 3, third, S4), ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == Pid, E5), ok. -empty_dequeue_should_emit_release_cursor_test(C) -> - State0 = test_init(?FUNCTION_NAME), - Cid = {<<"basic.get1">>, self()}, - {_State, {dequeue, empty}, Effects} = - apply(meta(C, 2, 1234), - rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), - State0), - - ?ASSERT_EFF({release_cursor, _, _}, Effects), - ok. - -expire_message_should_emit_release_cursor_test(C) -> - Conf = #{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), - release_cursor_interval => 0, - msg_ttl => 1}, - S0 = rabbit_fifo:init(Conf), - Msg = #basic_message{content = #content{properties = none, - payload_fragments_rev = []}}, - {S1, ok, _} = apply(meta(C, 1, 100), rabbit_fifo:make_enqueue(self(), 1, Msg), S0), - {_S, ok, Effs} = apply(meta(C, 2, 101), - rabbit_fifo:make_enqueue(self(), 2, Msg), - S1), - ?ASSERT_EFF({release_cursor, 1, _}, Effs), - ok. 
- header_test(_) -> H0 = Size = 5, ?assertEqual(Size, rabbit_fifo:get_header(size, H0)), @@ -1996,28 +2661,141 @@ checkout_metadata_test(Config) -> {State0, _} = enq(Config, 2, 2, second, State00), %% NB: the consumer meta data is taken _before_ it runs a checkout %% so in this case num_checked_out will be 0 - {State1, {ok, #{next_msg_id := 0, - num_checked_out := 0}}, _} = - apply(meta(Config, ?LINE), - rabbit_fifo:make_checkout(Cid, {auto, 1, simple_prefetch}, #{}), - State0), + {State1, #{next_msg_id := 0, + num_checked_out := 0}, _} = + checkout(Config, ?LINE, Cid, 1, State0), {State2, _, _} = apply(meta(Config, ?LINE), - rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), - {_State3, {ok, #{next_msg_id := 1, - num_checked_out := 1}}, _} = - apply(meta(Config, ?LINE), - rabbit_fifo:make_checkout(Cid, {auto, 1, simple_prefetch}, #{}), - State2), + make_checkout(Cid, cancel, #{}), State1), + {_State3, #{next_msg_id := 1, + num_checked_out := 1}, _} = + checkout(Config, ?LINE, Cid, 1, State2), + ok. + +modify_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + dead_letter_handler => at_least_once, + queue_resource => + rabbit_misc:r("/", queue, ?FUNCTION_NAME_B)}), + + Pid1 = test_util:fake_pid(node()), + % % adding some consumers + E1Idx = ?LINE, + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + Entries = + [ + {E1Idx , rabbit_fifo:make_enqueue(Pid1, 1, msg1)}, + %% add a consumer + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 1, + checked_out = Ch}}} + when map_size(Ch) == 1), + %% delivery_failed = false, undeliverable_here = false|true + %% this is the same as a requeue, + %% this should not increment the delivery count + {?LINE, rabbit_fifo:make_modify(CK1, [0], false, false, + #{<<"x-opt-blah">> => <<"blah1">>})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 2, + checked_out = Ch}}} + when map_size(Ch) == 1, + fun (#rabbit_fifo{consumers = + #{CK1 := #consumer{checked_out = Ch}}}) -> + ?assertMatch( + ?MSG(_, #{acquired_count := 1, + anns := #{<<"x-opt-blah">> := <<"blah1">>}} = H) + when not is_map_key(delivery_count, H), + maps:get(1, Ch)) + end), + %% delivery_failed = true does increment delivery_count + {?LINE, rabbit_fifo:make_modify(CK1, [1], true, false, + #{<<"x-opt-blah">> => <<"blah2">>})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 3, + checked_out = Ch}}} + when map_size(Ch) == 1, + fun (#rabbit_fifo{consumers = + #{CK1 := #consumer{checked_out = Ch}}}) -> + ?assertMatch( + ?MSG(_, #{delivery_count := 1, + acquired_count := 2, + anns := #{<<"x-opt-blah">> := <<"blah2">>}}), + maps:get(2, Ch)) + end), + %% delivery_failed = true and undeliverable_here = true is the same as discard + {?LINE, rabbit_fifo:make_modify(CK1, [2], true, true, + #{<<"x-opt-blah">> => <<"blah3">>})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 3, + checked_out = Ch}}} + when map_size(Ch) == 0, + fun (#rabbit_fifo{dlx = #rabbit_fifo_dlx{discards = Discards}}) -> + ?assertMatch([[_| + ?MSG(_, #{delivery_count := 2, + acquired_count := 3, + anns := #{<<"x-opt-blah">> := <<"blah3">>}})]], + lqueue:to_list(Discards)) + end) + ], + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + + ok. 
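%% To summarise the modify outcomes exercised above, keyed on
%% {delivery_failed, undeliverable_here}:
%%   {false, false} -> requeue; acquired_count is incremented but the
%%                     delivery count is not
%%   {true,  false} -> requeue; both delivery_count and acquired_count
%%                     are incremented
%%   {true,  true}  -> same as discard; the message is moved to the dlx
%%                     discards queue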
+ +ttb_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => + rabbit_misc:r("/", queue, ?FUNCTION_NAME_B)}), + + + S1 = do_n(5_000_000, + fun (N, Acc) -> + I = (5_000_000 - N), + element(1, enq(Config, I, I, ?FUNCTION_NAME_B, Acc)) + end, S0), + + + + {T1, _Res} = timer:tc(fun () -> + do_n(100, fun (_, S) -> + term_to_binary(S), + S1 end, S1) + end), + ct:pal("T1 took ~bus", [T1]), + + + {T2, _} = timer:tc(fun () -> + do_n(100, fun (_, S) -> term_to_iovec(S), S1 end, S1) + end), + ct:pal("T2 took ~bus", [T2]), + ok. %% Utility +%% + +do_n(0, _, A) -> + A; +do_n(N, Fun, A0) -> + A = Fun(N, A0), + do_n(N-1, Fun, A). + init(Conf) -> rabbit_fifo:init(Conf). make_register_enqueuer(Pid) -> rabbit_fifo:make_register_enqueuer(Pid). apply(Meta, Entry, State) -> rabbit_fifo:apply(Meta, Entry, State). init_aux(Conf) -> rabbit_fifo:init_aux(Conf). -handle_aux(S, T, C, A, L, M) -> rabbit_fifo:handle_aux(S, T, C, A, L, M). +handle_aux(S, T, C, A, A2) -> rabbit_fifo:handle_aux(S, T, C, A, A2). make_checkout(C, S, M) -> rabbit_fifo:make_checkout(C, S, M). cid(A) when is_atom(A) -> atom_to_binary(A, utf8). + +single_active_invariant( #rabbit_fifo{consumers = Cons}) -> + 1 >= map_size(maps:filter(fun (_, #consumer{status = S}) -> + S == up + end, Cons)). + +mk_mc(Body) -> + mc_amqpl:from_basic_message( + #basic_message{routing_keys = [<<"">>], + exchange_name = #resource{name = <<"x">>, + kind = exchange, + virtual_host = <<"v">>}, + content = #content{properties = #'P_basic'{}, + payload_fragments_rev = [Body]}}). diff --git a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl index 47c84bb45dfb..6800451dcc04 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_fifo_dlx_SUITE). @@ -11,7 +11,6 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). --include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). %%%=================================================================== diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 0b7c4f0fdbcb..619fb4e06bdb 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_fifo_dlx_integration_SUITE). @@ -18,17 +18,18 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). --import(quorum_queue_utils, [wait_for_messages_ready/3, - wait_for_min_messages/3, - dirty_query/3, - ra_name/1]). 
+-import(queue_utils, [wait_for_messages_ready/3, + wait_for_min_messages/3, + wait_for_messages/2, + dirty_query/3, + ra_name/1]). -import(rabbit_ct_helpers, [eventually/1, eventually/3, consistently/1]). -import(rabbit_ct_broker_helpers, [rpc/5, rpc/6]). -import(quorum_queue_SUITE, [publish/2, - consume/3]). + basic_get_tag/3]). -define(DEFAULT_WAIT, 1000). -define(DEFAULT_INTERVAL, 200). @@ -69,14 +70,17 @@ groups() -> ]. init_per_suite(Config0) -> + Tick = 256, rabbit_ct_helpers:log_environment(), Config1 = rabbit_ct_helpers:merge_app_env( - Config0, {rabbit, [{quorum_tick_interval, 1000}, + Config0, {rabbit, [{quorum_tick_interval, 256}, + {collect_statistics_interval, Tick}, + {channel_tick_interval, Tick}, {dead_letter_worker_consumer_prefetch, 2}, {dead_letter_worker_publisher_confirm_timeout, 1000} ]}), Config2 = rabbit_ct_helpers:merge_app_env( - Config1, {aten, [{poll_interval, 1000}]}), + Config1, {aten, [{poll_interval, 256}]}), rabbit_ct_helpers:run_setup_steps(Config2). end_per_suite(Config) -> @@ -96,10 +100,14 @@ init_per_group(Group, Config, NodesCount) -> Config2 = rabbit_ct_helpers:run_steps(Config1, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()), - _ = rabbit_ct_broker_helpers:enable_feature_flag(Config2, message_containers), - ok = rpc(Config2, 0, application, set_env, - [rabbit, channel_tick_interval, 100]), - Config2. + case Config2 of + {skip, _Reason} = Skip -> + Skip; + _ -> + ok = rpc(Config2, 0, application, set_env, + [rabbit, channel_tick_interval, 100]), + Config2 + end. end_per_group(_, Config) -> rabbit_ct_helpers:run_steps(Config, @@ -112,8 +120,12 @@ merge_app_env(Config) -> {ra, [{min_wal_roll_over_interval, 30000}]}). init_per_testcase(Testcase, Config) -> - case {Testcase, rabbit_ct_helpers:is_mixed_versions()} of - {single_dlx_worker, true} -> + IsKhepriEnabled = lists:any(fun(B) -> B end, + rabbit_ct_broker_helpers:rpc_all( + Config, rabbit_feature_flags, is_enabled, + [khepri_db])), + case {Testcase, rabbit_ct_helpers:is_mixed_versions(), IsKhepriEnabled} of + {single_dlx_worker, true, _} -> {skip, "single_dlx_worker is not mixed version compatible because process " "rabbit_fifo_dlx_sup does not exist in 3.9"}; _ -> @@ -195,7 +207,7 @@ rejected(Config) -> {Server, Ch, SourceQ, TargetQ} = declare_topology(Config, []), publish(Ch, SourceQ), wait_for_messages_ready([Server], ra_name(SourceQ), 1), - DelTag = consume(Ch, SourceQ, false), + DelTag = basic_get_tag(Ch, SourceQ, false), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DelTag, multiple = false, requeue = false}), @@ -212,7 +224,7 @@ delivery_limit(Config) -> {Server, Ch, SourceQ, TargetQ} = declare_topology(Config, [{<<"x-delivery-limit">>, long, 0}]), publish(Ch, SourceQ), wait_for_messages_ready([Server], ra_name(SourceQ), 1), - DelTag = consume(Ch, SourceQ, false), + DelTag = basic_get_tag(Ch, SourceQ, false), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DelTag, multiple = false, requeue = true}), @@ -639,9 +651,10 @@ reject_publish_max_length_target_quorum_queue(Config) -> %% Make space in target queue by consuming messages one by one %% allowing for more dead-lettered messages to reach the target queue. 
[begin - timer:sleep(2000), Msg = integer_to_binary(N), - {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}) + ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg}}, + amqp_channel:call(Ch, #'basic.get'{queue = TargetQ}), + 30000) end || N <- lists:seq(1,4)], eventually(?_assertEqual([{0, 0}], dirty_query([Server], RaName, fun rabbit_fifo:query_stat_dlx/1)), 500, 10), @@ -686,7 +699,7 @@ reject_publish_down_target_quorum_queue(Config) -> end || N <- lists:seq(21, 50)], %% The target queue should have all 50 messages. - timer:sleep(2000), + wait_for_messages(Config, [[TargetQ, <<"50">>, <<"50">>, <<"0">>]]), Received = lists:foldl( fun(_N, S) -> {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} = @@ -799,32 +812,25 @@ target_quorum_queue_delete_create(Config) -> %% 2. Target queue can be classic queue, quorum queue, or stream queue. %% %% Lesson learnt by writing this test: -%% If there are multiple target queues, messages will not be sent / routed to target non-mirrored durable classic queues +%% If there are multiple target queues, messages will not be sent / routed to target durable classic queues %% when their host node is temporarily down because these queues get temporarily deleted from the rabbit_queue RAM table %% (but will still be present in the rabbit_durable_queue DISC table). See: %% https://github.com/rabbitmq/rabbitmq-server/blob/cf76b479300b767b8ea450293d096cbf729ed734/deps/rabbit/src/rabbit_amqqueue.erl#L1955-L1964 many_target_queues(Config) -> [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), - Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server2), SourceQ = ?config(source_queue, Config), RaName = ra_name(SourceQ), TargetQ1 = ?config(target_queue_1, Config), TargetQ2 = ?config(target_queue_2, Config), TargetQ3 = ?config(target_queue_3, Config), - TargetQ4 = ?config(target_queue_4, Config), - TargetQ5 = ?config(target_queue_5, Config), - TargetQ6 = ?config(target_queue_6, Config), DLX = ?config(dead_letter_exchange, Config), DLRKey = <<"k1">>, %% Create topology: %% * source quorum queue with 1 replica on node 1 - %% * target non-mirrored classic queue on node 1 + %% * target classic queue on node 1 %% * target quorum queue with 3 replicas %% * target stream queue with 3 replicas - %% * target mirrored classic queue with 3 replicas (leader on node 1) - %% * target mirrored classic queue with 1 replica (leader on node 2) - %% * target mirrored classic queue with 3 replica (leader on node 2) declare_queue(Ch, SourceQ, [{<<"x-dead-letter-exchange">>, longstr, DLX}, {<<"x-dead-letter-routing-key">>, longstr, DLRKey}, {<<"x-dead-letter-strategy">>, longstr, <<"at-least-once">>}, @@ -843,22 +849,6 @@ many_target_queues(Config) -> {<<"x-initial-cluster-size">>, long, 3} ]), bind_queue(Ch, TargetQ3, DLX, DLRKey), - ok = rabbit_ct_broker_helpers:set_policy(Config, Server1, <<"mirror-q4">>, TargetQ4, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}, - {<<"queue-master-locator">>, <<"client-local">>}]), - declare_queue(Ch, TargetQ4, []), - bind_queue(Ch, TargetQ4, DLX, DLRKey), - ok = rabbit_ct_broker_helpers:set_policy(Config, Server1, <<"mirror-q5">>, TargetQ5, <<"queues">>, - [{<<"ha-mode">>, <<"exactly">>}, - {<<"ha-params">>, 1}, - {<<"queue-master-locator">>, <<"client-local">>}]), - declare_queue(Ch2, TargetQ5, []), - bind_queue(Ch2, TargetQ5, DLX, DLRKey), - ok = rabbit_ct_broker_helpers:set_policy(Config, Server1, 
<<"mirror-q6">>, TargetQ6, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}, - {<<"queue-master-locator">>, <<"client-local">>}]), - declare_queue(Ch2, TargetQ6, []), - bind_queue(Ch2, TargetQ6, DLX, DLRKey), Msg1 = <<"m1">>, ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = SourceQ}, @@ -892,15 +882,6 @@ many_target_queues(Config) -> after 2000 -> exit(deliver_timeout) end, - ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ4}), - ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), - ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, - amqp_channel:call(Ch2, #'basic.get'{queue = TargetQ5}), - ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), - ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}}, - amqp_channel:call(Ch2, #'basic.get'{queue = TargetQ6}), - ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), ?awaitMatch([{0, 0}], dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1), ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), @@ -917,9 +898,6 @@ many_target_queues(Config) -> ?awaitMatch([{1, 2}], dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1), ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), - timer:sleep(1000), - ?assertEqual([{1, 2}], - dirty_query([Server1], RaName, fun rabbit_fifo:query_stat_dlx/1)), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, amqp_channel:call(Ch, #'basic.get'{queue = TargetQ1})), ok = rabbit_ct_broker_helpers:start_node(Config, Server2), @@ -937,16 +915,6 @@ many_target_queues(Config) -> after 0 -> exit(deliver_timeout) end, - ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ4}), - ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), - ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ5}), - ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), - %%TODO why is the 1st message (m1) a duplicate? - ?awaitMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg2}}, - amqp_channel:call(Ch, #'basic.get'{queue = TargetQ6}), - ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), ?assertEqual(2, counted(messages_dead_lettered_expired_total, Config)), ?assertEqual(2, counted(messages_dead_lettered_confirmed_total, Config)). @@ -985,7 +953,7 @@ single_dlx_worker(Config) -> true = rpc(Config, Leader0, erlang, exit, [Pid, kill]), {ok, _, {_, Leader1}} = ?awaitMatch({ok, _, _}, ra:members({RaName, Follower0}), - 1000), + 30000), ?assertNotEqual(Leader0, Leader1), [Follower1, Follower2] = Servers -- [Leader1], assert_active_dlx_workers(0, Config, Follower1), diff --git a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl index 39f4d52b01c0..fae1251d4738 100644 --- a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl @@ -8,6 +8,7 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("rabbit_common/include/rabbit_framing.hrl"). -define(RA_EVENT_TIMEOUT, 5000). -define(RA_SYSTEM, quorum_queues). 
@@ -23,6 +24,7 @@ all_tests() -> return, rabbit_fifo_returns_correlation, resends_lost_command, + returns, returns_after_down, resends_after_lost_applied, handles_reject_notification, @@ -31,8 +33,12 @@ all_tests() -> dequeue, discard, cancel_checkout, + cancel_checkout_with_remove, + cancel_checkout_with_pending_using_cancel_reason, + cancel_checkout_with_pending_using_remove_reason, lost_delivery, - credit, + credit_api_v1, + credit_api_v2, untracked_enqueue, flow, test_queries, @@ -42,7 +48,7 @@ all_tests() -> groups() -> [ - {tests, [], all_tests()} + {tests, [shuffle], all_tests()} ]. init_per_group(_, Config) -> @@ -62,9 +68,9 @@ end_per_group(_, Config) -> init_per_testcase(TestCase, Config) -> meck:new(rabbit_quorum_queue, [passthrough]), meck:expect(rabbit_quorum_queue, handle_tick, fun (_, _, _) -> ok end), - meck:expect(rabbit_quorum_queue, file_handle_leader_reservation, fun (_) -> ok end), - meck:expect(rabbit_quorum_queue, file_handle_other_reservation, fun () -> ok end), meck:expect(rabbit_quorum_queue, cancel_consumer_handler, fun (_, _) -> ok end), + meck:new(rabbit_feature_flags, []), + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), ra_server_sup_sup:remove_all(?RA_SYSTEM), ServerName2 = list_to_atom(atom_to_list(TestCase) ++ "2"), ServerName3 = list_to_atom(atom_to_list(TestCase) ++ "3"), @@ -90,19 +96,18 @@ basics(Config) -> ConsumerTag = UId, ok = start_cluster(ClusterName, [ServerId]), FState0 = rabbit_fifo_client:init([ServerId]), - {ok, FState1} = rabbit_fifo_client:checkout(ConsumerTag, 1, simple_prefetch, - #{}, FState0), + {ok, _, FState1} = rabbit_fifo_client:checkout(ConsumerTag, {simple_prefetch, 1}, + #{}, FState0), rabbit_quorum_queue:wal_force_roll_over(node()), % create segment the segment will trigger a snapshot - timer:sleep(1000), + ra_log_segment_writer:await(ra_log_segment_writer), {ok, FState2, []} = rabbit_fifo_client:enqueue(ClusterName, one, FState1), DeliverFun = fun DeliverFun(S0, F) -> receive {ra_event, From, Evt} -> - ct:pal("ra_event ~p", [Evt]), case rabbit_fifo_client:handle_ra_event(ClusterName, From, Evt, S0) of {ok, S1, [{deliver, C, true, @@ -181,7 +186,7 @@ duplicate_delivery(Config) -> ServerId = ?config(node_id, Config), ok = start_cluster(ClusterName, [ServerId]), F0 = rabbit_fifo_client:init([ServerId]), - {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0), + {ok, _, F1} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, #{}, F0), {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, corr1, msg1, F1), Fun = fun Loop(S0) -> receive @@ -216,7 +221,7 @@ usage(Config) -> ServerId = ?config(node_id, Config), ok = start_cluster(ClusterName, [ServerId]), F0 = rabbit_fifo_client:init([ServerId]), - {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0), + {ok, _, F1} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, #{}, F0), {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, corr1, msg1, F1), {ok, F3, []} = rabbit_fifo_client:enqueue(ClusterName, corr2, msg2, F2), {_, _, _} = process_ra_events(receive_ra_events(2, 2), ClusterName, F3), @@ -269,7 +274,7 @@ detects_lost_delivery(Config) -> F000 = rabbit_fifo_client:init([ServerId]), {ok, F00, []} = rabbit_fifo_client:enqueue(ClusterName, msg1, F000), {_, _, F0} = process_ra_events(receive_ra_events(1, 0), ClusterName, F00), - {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0), + {ok, _, F1} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, 
#{}, F0), {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, msg2, F1), {ok, F3, []} = rabbit_fifo_client:enqueue(ClusterName, msg3, F2), % lose first delivery @@ -285,28 +290,111 @@ detects_lost_delivery(Config) -> rabbit_quorum_queue:stop_server(ServerId), ok. +returns(Config) -> + ClusterName = ?config(cluster_name, Config), + ServerId = ?config(node_id, Config), + ok = start_cluster(ClusterName, [ServerId]), + + F0 = rabbit_fifo_client:init([ServerId]), + Msg1 = mk_msg(<<"msg1">>), + {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, Msg1, F0), + {_, _, _F2} = process_ra_events(receive_ra_events(1, 0), ClusterName, F1), + + FC = rabbit_fifo_client:init([ServerId]), + {ok, _, FC1} = rabbit_fifo_client:checkout(<<"tag">>, + {simple_prefetch, 10}, + #{}, FC), + + {FC3, _} = + receive + {ra_event, Qname, {machine, {delivery, _, [{MsgId, {_, _}}]}} = Evt1} -> + {ok, FC2, Actions1} = + rabbit_fifo_client:handle_ra_event(Qname, Qname, Evt1, FC1), + [{deliver, _, true, + [{_, _, _, _, Msg1Out0}]}] = Actions1, + ?assert(mc:is(Msg1Out0)), + ?assertEqual(undefined, mc:get_annotation(<<"x-delivery-count">>, Msg1Out0)), + ?assertEqual(undefined, mc:get_annotation(delivery_count, Msg1Out0)), + rabbit_fifo_client:return(<<"tag">>, [MsgId], FC2) + after 5000 -> + flush(), + exit(await_delivery_timeout) + end, + {FC5, _} = + receive + {ra_event, Qname2, + {machine, {delivery, _, [{MsgId1, {_, _Msg1Out}}]}} = Evt2} -> + {ok, FC4, Actions2} = + rabbit_fifo_client:handle_ra_event(Qname2, Qname2, Evt2, FC3), + [{deliver, _tag, true, + [{_, _, _, _, Msg1Out}]}] = Actions2, + ?assert(mc:is(Msg1Out)), + ?assertEqual(1, mc:get_annotation(<<"x-delivery-count">>, Msg1Out)), + %% delivery_count should _not_ be incremented for a return + ?assertEqual(undefined, mc:get_annotation(delivery_count, Msg1Out)), + rabbit_fifo_client:modify(<<"tag">>, [MsgId1], true, false, #{}, FC4) + after 5000 -> + flush(), + exit(await_delivery_timeout_2) + end, + receive + {ra_event, Qname3, + {machine, {delivery, _, [{MsgId2, {_, _Msg2Out}}]}} = Evt3} -> + {ok, FC6, Actions3} = + rabbit_fifo_client:handle_ra_event(Qname3, Qname3, Evt3, FC5), + [{deliver, _, true, + [{_, _, _, _, Msg2Out}]}] = Actions3, + ?assert(mc:is(Msg2Out)), + ?assertEqual(2, mc:get_annotation(<<"x-delivery-count">>, Msg2Out)), + %% delivery_count should be incremented for a modify with delivery_failed = true + ?assertEqual(1, mc:get_annotation(delivery_count, Msg2Out)), + rabbit_fifo_client:settle(<<"tag">>, [MsgId2], FC6) + after 5000 -> + flush(), + exit(await_delivery_timeout_3) + end, + rabbit_quorum_queue:stop_server(ServerId), + ok. + returns_after_down(Config) -> ClusterName = ?config(cluster_name, Config), ServerId = ?config(node_id, Config), ok = start_cluster(ClusterName, [ServerId]), F0 = rabbit_fifo_client:init([ServerId]), - {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, msg1, F0), + Msg1 = mk_msg(<<"msg1">>), + {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, Msg1, F0), {_, _, F2} = process_ra_events(receive_ra_events(1, 0), ClusterName, F1), % start a consumer in a separate processes % that exits after checkout - Self = self(), - _Pid = spawn(fun () -> - F = rabbit_fifo_client:init([ServerId]), - {ok, _} = rabbit_fifo_client:checkout(<<"tag">>, 10, - simple_prefetch, - #{}, F), - Self ! 
checkout_done - end), - receive checkout_done -> ok after 1000 -> exit(checkout_done_timeout) end, - timer:sleep(1000), + {_, MonRef} = spawn_monitor( + fun () -> + F = rabbit_fifo_client:init([ServerId]), + {ok, _, _} = rabbit_fifo_client:checkout(<<"tag">>, + {simple_prefetch, 10}, + #{}, F) + end), + receive + {'DOWN', MonRef, _, _, _} -> + ok + after 5000 -> + ct:fail("waiting for process exit timed out") + end, + rabbit_ct_helpers:await_condition( + fun () -> + case ra:member_overview(ServerId) of + {ok, #{machine := #{num_consumers := 0}}, _} -> + true; + X -> + ct:pal("X ~p", [X]), + false + end + end), % message should be available for dequeue - {ok, _, {_, _, _, _, msg1}, _} = rabbit_fifo_client:dequeue(ClusterName, <<"tag">>, settled, F2), + {ok, _, {_, _, _, _, Msg1Out}, _} = + rabbit_fifo_client:dequeue(ClusterName, <<"tag">>, settled, F2), + ?assertEqual(1, mc:get_annotation(<<"x-delivery-count">>, Msg1Out)), + ?assertEqual(1, mc:get_annotation(delivery_count, Msg1Out)), rabbit_quorum_queue:stop_server(ServerId), ok. @@ -379,8 +467,8 @@ discard(Config) -> _ = ra:members(ServerId), F0 = rabbit_fifo_client:init([ServerId]), - {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, - simple_prefetch, #{}, F0), + {ok, _, F1} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, + #{}, F0), {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, msg1, F1), F3 = discard_next_delivery(ClusterName, F2, 5000), {empty, _F4} = rabbit_fifo_client:dequeue(ClusterName, <<"tag1">>, settled, F3), @@ -402,11 +490,70 @@ cancel_checkout(Config) -> ok = start_cluster(ClusterName, [ServerId]), F0 = rabbit_fifo_client:init([ServerId], 4), {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, m1, F0), - {ok, F2} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F1), - {_, _, F3} = process_ra_events(receive_ra_events(1, 1), ClusterName, F2, [], [], fun (_, S) -> S end), - {ok, F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, F3), + {ok, _, F2} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, + #{}, F1), + {_, _, F3} = process_ra_events(receive_ra_events(1, 1), ClusterName, F2, + [], [], fun (_, S) -> S end), + {ok, F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, cancel, F3), {F5, _} = rabbit_fifo_client:return(<<"tag">>, [0], F4), - {ok, _, {_, _, _, _, m1}, F5} = rabbit_fifo_client:dequeue(ClusterName, <<"d1">>, settled, F5), + {ok, _, {_, _, _, _, m1}, F5} = + rabbit_fifo_client:dequeue(ClusterName, <<"d1">>, settled, F5), + ok. + +cancel_checkout_with_remove(Config) -> + ClusterName = ?config(cluster_name, Config), + ServerId = ?config(node_id, Config), + ok = start_cluster(ClusterName, [ServerId]), + F0 = rabbit_fifo_client:init([ServerId], 4), + {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, m1, F0), + {ok, _, F2} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, + #{}, F1), + {_, _, F3} = process_ra_events(receive_ra_events(1, 1), ClusterName, F2, + [], [], fun (_, S) -> S end), + {ok, F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, remove, F3), + %% settle here to prove that message is returned by "remove" cancellation + %% and not settled by late settlement + {F5, _} = rabbit_fifo_client:settle(<<"tag">>, [0], F4), + {ok, _, {_, _, _, _, m1}, F5} = + rabbit_fifo_client:dequeue(ClusterName, <<"d1">>, settled, F5), + ok. + +cancel_checkout_with_pending_using_cancel_reason(Config) -> + cancel_checkout_with_pending(Config, cancel). 
+ +cancel_checkout_with_pending_using_remove_reason(Config) -> + cancel_checkout_with_pending(Config, remove). + +cancel_checkout_with_pending(Config, Reason) -> + ClusterName = ?config(cluster_name, Config), + ServerId = ?config(node_id, Config), + ok = start_cluster(ClusterName, [ServerId]), + F0 = rabbit_fifo_client:init([ServerId], 4), + F1 = lists:foldl( + fun (Num, Acc0) -> + {ok, Acc, _} = rabbit_fifo_client:enqueue(ClusterName, Num, Acc0), + Acc + end, F0, lists:seq(1, 10)), + receive_ra_events(10, 0), + {ok, _, F2} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, + #{}, F1), + {Msgs, _, F3} = process_ra_events(receive_ra_events(0, 1), ClusterName, F2, + [], [], fun (_, S) -> S end), + %% settling each individually should cause the client to enter the "slow" + %% state where settled msg ids are buffered internally waiting for + %% applied events + F4 = lists:foldl( + fun({_Q, _, MsgId, _, _}, Acc0) -> + {Acc, _} = rabbit_fifo_client:settle(<<"tag">>, [MsgId], Acc0), + Acc + end, F3, Msgs), + + {ok, _F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, Reason, F4), + timer:sleep(100), + {ok, Overview, _} = ra:member_overview(ServerId), + ?assertMatch(#{machine := #{num_messages := 0, + num_consumers := 0}}, Overview), + flush(), ok. lost_delivery(Config) -> @@ -416,8 +563,9 @@ lost_delivery(Config) -> F0 = rabbit_fifo_client:init([ServerId], 4), {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, m1, F0), {_, _, F2} = process_ra_events( - receive_ra_events(1, 0), ClusterName, F1, [], [], fun (_, S) -> S end), - {ok, F3} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F2), + receive_ra_events(1, 0), ClusterName, F1, [], [], + fun (_, S) -> S end), + {ok, _, F3} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, #{}, F2), %% drop a delivery, simulating e.g. a full distribution buffer receive {ra_event, _, Evt} -> @@ -441,7 +589,8 @@ lost_delivery(Config) -> end), ok. 
-credit(Config) -> +credit_api_v1(Config) -> + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> false end), ClusterName = ?config(cluster_name, Config), ServerId = ?config(node_id, Config), ok = start_cluster(ClusterName, [ServerId]), @@ -450,21 +599,27 @@ credit(Config) -> {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, m2, F1), {_, _, F3} = process_ra_events(receive_ra_events(2, 0), ClusterName, F2), %% checkout with 0 prefetch - {ok, F4} = rabbit_fifo_client:checkout(<<"tag">>, 0, credited, #{}, F3), + CTag = <<"my-tag">>, + {ok, _, F4} = rabbit_fifo_client:checkout(CTag, {credited, 0}, #{}, F3), %% assert no deliveries {_, _, F5} = process_ra_events(receive_ra_events(), ClusterName, F4, [], [], fun (D, _) -> error({unexpected_delivery, D}) end), %% provide some credit - {F6, []} = rabbit_fifo_client:credit(<<"tag">>, 1, false, F5), - {[{_, _, _, _, m1}], [{send_credit_reply, _}], F7} = - process_ra_events(receive_ra_events(1, 1), ClusterName, F6), + {F6, []} = rabbit_fifo_client:credit_v1(CTag, 1, false, F5), + {[{_, _, _, _, m1}], [{send_credit_reply, 1}], F7} = + process_ra_events(receive_ra_events(1, 1), ClusterName, F6), %% credit and drain - {F8, []} = rabbit_fifo_client:credit(<<"tag">>, 4, true, F7), - {[{_, _, _, _, m2}], [{send_credit_reply, _}, {send_drained, _}], F9} = - process_ra_events(receive_ra_events(2, 1), ClusterName, F8), + Drain = true, + {F8, []} = rabbit_fifo_client:credit_v1(CTag, 4, Drain, F7), + AvailableAfterCheckout = 0, + {[{_, _, _, _, m2}], + [{send_credit_reply, AvailableAfterCheckout}, + {credit_reply_v1, CTag, _CreditAfterCheckout = 3, + AvailableAfterCheckout, Drain}], + F9} = process_ra_events(receive_ra_events(2, 1), ClusterName, F8), flush(), %% enqueue another message - at this point the consumer credit should be @@ -476,10 +631,75 @@ credit(Config) -> (D, _) -> error({unexpected_delivery, D}) end), %% credit again and receive the last message - {F12, []} = rabbit_fifo_client:credit(<<"tag">>, 10, false, F11), + {F12, []} = rabbit_fifo_client:credit_v1(CTag, 10, false, F11), {[{_, _, _, _, m3}], _, _} = process_ra_events(receive_ra_events(1, 1), ClusterName, F12), ok. +credit_api_v2(Config) -> + ClusterName = ?config(cluster_name, Config), + ServerId = ?config(node_id, Config), + ok = start_cluster(ClusterName, [ServerId]), + F0 = rabbit_fifo_client:init([ServerId], 4), + %% Enqueue 2 messages. + {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, m1, F0), + {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, m2, F1), + {_, _, F3} = process_ra_events(receive_ra_events(2, 0), ClusterName, F2), + CTag = <<"my-tag">>, + DC0 = 16#ff_ff_ff_ff, + DC1 = 0, %% = DC0 + 1 using 32 bit serial number arithmetic + {ok, _, F4} = rabbit_fifo_client:checkout( + %% initial_delivery_count in consumer meta means credit API v2. + CTag, {credited, DC0}, #{}, F3), + %% assert no deliveries + {_, _, F5} = process_ra_events(receive_ra_events(), ClusterName, F4, [], [], + fun + (D, _) -> error({unexpected_delivery, D}) + end), + %% Grant 1 credit. + {F6, []} = rabbit_fifo_client:credit(CTag, DC0, 1, false, F5), + %% We expect exactly 1 message due to 1 credit being granted. + {[{_, _, _, _, m1}], + %% We always expect a credit_reply action. + [{credit_reply, CTag, DC1, _Credit0 = 0, _Available0 = 1, _Drain0 = false}], + F7} = process_ra_events(receive_ra_events(), ClusterName, F6), + + %% Again, grant 1 credit. 
+ %% However, because we still use the initial delivery count DC0, rabbit_fifo + %% won't send us a new message since it already sent us m1 for that old delivery-count. + %% In other words, this credit top up simulates in-flight deliveries. + {F8, []} = rabbit_fifo_client:credit(CTag, DC0, 1, false, F7), + {_NoMessages = [], + [{credit_reply, CTag, DC1, _Credit1 = 0, _Available1 = 1, _Drain1 = false}], + F9} = process_ra_events(receive_ra_events(), ClusterName, F8), + + %% Grant 4 credits and drain. + {F10, []} = rabbit_fifo_client:credit(CTag, DC1, 4, true, F9), + %% rabbit_fifo should advance the delivery-count as much as possible + %% consuming all credits due to drain=true and insufficient messages in the queue. + DC2 = DC1 + 4, + %% We expect to receive m2 which is the only message in the queue. + {[{_, _, _, _, m2}], + [{credit_reply, CTag, DC2, _Credit2 = 0, _Available2 = 0, _Drain2 = true}], + F11} = process_ra_events(receive_ra_events(), ClusterName, F10), + flush(), + + %% Enqueue another message. + %% At this point the consumer credit should be all used up due to the drain. + {ok, F12, []} = rabbit_fifo_client:enqueue(ClusterName, m3, F11), + %% assert no deliveries + {_, _, F13} = process_ra_events(receive_ra_events(), ClusterName, F12, [], [], + fun + (D, _) -> error({unexpected_delivery, D}) + end), + + %% Grant 10 credits and receive the last message. + {F14, []} = rabbit_fifo_client:credit(CTag, DC2, 10, false, F13), + DC3 = DC2 + 1, + ?assertMatch( + {[{_, _, _, _, m3}], + [{credit_reply, CTag, DC3, _Credit3 = 9, _Available3 = 0, _Drain3 = false}], + _F15}, process_ra_events(receive_ra_events(), ClusterName, F14)). + untracked_enqueue(Config) -> ClusterName = ?config(cluster_name, Config), ServerId = ?config(node_id, Config), @@ -528,7 +748,7 @@ test_queries(Config) -> exit(ready_timeout) end, F0 = rabbit_fifo_client:init([ServerId], 4), - {ok, _} = rabbit_fifo_client:checkout(<<"tag">>, 1, simple_prefetch, #{}, F0), + {ok, _, _} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 1}, #{}, F0), {ok, {_, Ready}, _} = ra:local_query(ServerId, fun rabbit_fifo:query_messages_ready/1), ?assertEqual(1, Ready), @@ -556,8 +776,8 @@ dequeue(Config) -> {ok, F2_, []} = rabbit_fifo_client:enqueue(ClusterName, msg1, F1b), {_, _, F2} = process_ra_events(receive_ra_events(1, 0), ClusterName, F2_), - % {ok, {{0, {_, msg1}}, _}, F3} = rabbit_fifo_client:dequeue(ClusterName, Tag, settled, F2), - {ok, _, {_, _, 0, _, msg1}, F3} = rabbit_fifo_client:dequeue(ClusterName, Tag, settled, F2), + {ok, _, {_, _, 0, _, msg1}, F3} = + rabbit_fifo_client:dequeue(ClusterName, Tag, settled, F2), {ok, F4_, []} = rabbit_fifo_client:enqueue(ClusterName, msg2, F3), {_, _, F4} = process_ra_events(receive_ra_events(1, 0), ClusterName, F4_), {ok, _, {_, _, MsgId, _, msg2}, F5} = rabbit_fifo_client:dequeue(ClusterName, Tag, unsettled, F4), @@ -617,7 +837,7 @@ receive_ra_events(Acc) -> end. process_ra_events(Events, ClusterName, State) -> - DeliveryFun = fun ({deliver, _, Tag, Msgs}, S) -> + DeliveryFun = fun ({deliver, Tag, _, Msgs}, S) -> MsgIds = [element(1, M) || M <- Msgs], {S0, _} = rabbit_fifo_client:settle(Tag, MsgIds, S), S0 @@ -675,3 +895,12 @@ flush() -> after 10 -> ok end. + +mk_msg(Body) when is_binary(Body) -> + mc_amqpl:from_basic_message( + #basic_message{routing_keys = [<<"">>], + exchange_name = #resource{name = <<"x">>, + kind = exchange, + virtual_host = <<"v">>}, + content = #content{properties = #'P_basic'{}, + payload_fragments_rev = [Body]}}). 
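%% The credit_api_v2 test above treats the delivery-count as a 32-bit serial
%% number, so it wraps around: 16#ff_ff_ff_ff + 1 becomes 0. A small,
%% hypothetical helper (not part of this suite) showing that arithmetic:
%%
%%   serial_add(Count, N) when is_integer(Count), is_integer(N) ->
%%       (Count + N) band 16#ff_ff_ff_ff.
%%
%%   %% serial_add(16#ff_ff_ff_ff, 1) =:= 0,
%%   %% serial_add(0, 4) =:= 4.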
diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index 858ca426fbca..273597982f31 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -11,9 +11,10 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). -include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). +-include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). --define(record_info(T,R),lists:zip(record_info(fields,T),tl(tuple_to_list(R)))). +-define(MACHINE_VERSION, 4). %%%=================================================================== %%% Common Test callbacks @@ -62,10 +63,6 @@ all_tests() -> scenario31, scenario32, upgrade, - upgrade_snapshots, - upgrade_snapshots_scenario1, - upgrade_snapshots_scenario2, - upgrade_snapshots_v2_to_v3, messages_total, simple_prefetch, simple_prefetch_without_checkout_cancel, @@ -78,7 +75,6 @@ all_tests() -> single_active_ordering, single_active_ordering_01, single_active_ordering_03, - in_memory_limit, max_length, snapshots_dlx, dlx_01, @@ -89,8 +85,8 @@ all_tests() -> dlx_06, dlx_07, dlx_08, - dlx_09 - % single_active_ordering_02 + dlx_09, + single_active_ordering_02 ]. groups() -> @@ -111,18 +107,18 @@ end_per_group(_Group, _Config) -> ok. init_per_testcase(_TestCase, Config) -> + ok = meck:new(rabbit_feature_flags, [passthrough]), + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> false end), Config. end_per_testcase(_TestCase, _Config) -> + meck:unload(), ok. %%%=================================================================== %%% Test cases %%%=================================================================== -% -type log_op() :: -% {enqueue, pid(), maybe(msg_seqno()), Msg :: raw_msg()}. - scenario2(_Config) -> C1 = {<<>>, c:pid(0,346,1)}, C2 = {<<>>,c:pid(0,379,1)}, @@ -694,45 +690,6 @@ scenario23(_Config) -> Commands), ok. -upgrade_snapshots_scenario1(_Config) -> - E = c:pid(0,327,1), - Commands = [make_enqueue(E,1,msg(<<"msg1">>)), - make_enqueue(E,2,msg(<<"msg2">>)), - make_enqueue(E,3,msg(<<"msg3">>))], - run_upgrade_snapshot_test(#{name => ?FUNCTION_NAME, - delivery_limit => 100, - max_length => 1, - max_bytes => 100, - max_in_memory_length => undefined, - max_in_memory_bytes => undefined, - overflow_strategy => drop_head, - single_active_consumer_on => false, - dead_letter_handler => {?MODULE, banana, []} - }, - Commands), - ok. - -upgrade_snapshots_scenario2(_Config) -> - E = c:pid(0,240,0), - CPid = c:pid(0,242,0), - C = {<<>>, CPid}, - Commands = [make_checkout(C, {auto,1,simple_prefetch}), - make_enqueue(E,1,msg(<<"msg1">>)), - make_enqueue(E,2,msg(<<"msg2">>)), - rabbit_fifo:make_settle(C, [0])], - run_upgrade_snapshot_test(#{name => ?FUNCTION_NAME, - delivery_limit => undefined, - max_length => undefined, - max_bytes => undefined, - max_in_memory_length => undefined, - max_in_memory_bytes => undefined, - overflow_strategy => drop_head, - single_active_consumer_on => false, - dead_letter_handler => {?MODULE, banana, []} - }, - Commands), - ok. - single_active_01(_Config) -> C1Pid = test_util:fake_pid(rabbit@fake_node1), C1 = {<<0>>, C1Pid}, @@ -766,15 +723,14 @@ single_active_02(_Config) -> make_checkout(C2, cancel), {down,E,noconnection} ], - Conf = config(?FUNCTION_NAME, undefined, undefined, true, 1, undefined, undefined), + Conf = config(?FUNCTION_NAME, undefined, undefined, true, 1, + undefined, undefined), ?assert(single_active_prop(Conf, Commands, false)), ok. 
single_active_03(_Config) -> C1Pid = test_util:fake_pid(node()), C1 = {<<0>>, C1Pid}, - % C2Pid = test_util:fake_pid(rabbit@fake_node2), - % C2 = {<<>>, C2Pid}, Pid = test_util:fake_pid(node()), E = test_util:fake_pid(rabbit@fake_node2), Commands = [ @@ -789,67 +745,53 @@ single_active_03(_Config) -> ok. single_active_04(_Config) -> - % C1Pid = test_util:fake_pid(node()), - % C1 = {<<0>>, C1Pid}, - % C2Pid = test_util:fake_pid(rabbit@fake_node2), - % C2 = {<<>>, C2Pid}, - % Pid = test_util:fake_pid(node()), E = test_util:fake_pid(rabbit@fake_node2), Commands = [ - - % make_checkout(C1, {auto,2,simple_prefetch}), make_enqueue(E, 1, msg(<<>>)), make_enqueue(E, 2, msg(<<>>)), make_enqueue(E, 3, msg(<<>>)), make_enqueue(E, 4, msg(<<>>)) - % {down, Pid, noconnection}, - % {nodeup, node()} ], - Conf = config(?FUNCTION_NAME, 3, 587, true, 3, 7, undefined), + Conf = config(?FUNCTION_NAME, 3, 587, true, 3), ?assert(single_active_prop(Conf, Commands, true)), ok. test_run_log(_Config) -> - Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end}, + meck:expect(rabbit_feature_flags, is_enabled, + fun (_) -> true end), run_proper( fun () -> - ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit, InMemoryLength, - InMemoryBytes}, - frequency([{10, {0, 0, false, 0, 0, 0}}, + ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit}, + frequency([{10, {0, 0, false, 0}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), boolean(), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]) + oneof([range(1, 3), undefined]) }}]), - ?FORALL(O, ?LET(Ops, log_gen(100), expand(Ops, Fun)), + ?FORALL(O, ?LET(Ops, log_gen(100), expand(Ops, #{})), collect({log_size, length(O)}, dump_generated( config(?FUNCTION_NAME, Length, Bytes, SingleActiveConsumer, - DeliveryLimit, - InMemoryLength, - InMemoryBytes), O)))) + DeliveryLimit), O)))) end, [], 10). snapshots(_Config) -> + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), run_proper( fun () -> ?FORALL({Length, Bytes, SingleActiveConsumer, - DeliveryLimit, InMemoryLength, InMemoryBytes, - Overflow, DeadLetterHandler}, - frequency([{10, {0, 0, false, 0, 0, 0, drop_head, undefined}}, + DeliveryLimit, Overflow, DeadLetterHandler}, + frequency([{10, {0, 0, false, 0, drop_head, undefined}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), boolean(), oneof([range(1, 3), undefined]), - oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]), oneof([drop_head, reject_publish]), - oneof([undefined, {at_most_once, {?MODULE, banana, []}}]) + oneof([undefined, + {at_most_once, {?MODULE, banana, []}}]) }}]), begin Config = config(?FUNCTION_NAME, @@ -857,28 +799,24 @@ snapshots(_Config) -> Bytes, SingleActiveConsumer, DeliveryLimit, - InMemoryLength, - InMemoryBytes, Overflow, DeadLetterHandler), ?FORALL(O, ?LET(Ops, log_gen(256), expand(Ops, Config)), collect({log_size, length(O)}, snapshots_prop(Config, O))) end) - end, [], 1000). + end, [], 256). 
snapshots_dlx(_Config) -> + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), Size = 256, run_proper( fun () -> - ?FORALL({Length, Bytes, SingleActiveConsumer, - DeliveryLimit, InMemoryLength, InMemoryBytes}, - frequency([{10, {0, 0, false, 0, 0, 0}}, + ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit}, + frequency([{10, {0, 0, false, 0}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), boolean(), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]) }}]), begin @@ -887,8 +825,6 @@ snapshots_dlx(_Config) -> Bytes, SingleActiveConsumer, DeliveryLimit, - InMemoryLength, - InMemoryBytes, reject_publish, at_least_once), ?FORALL(O, ?LET(Ops, log_gen_dlx(Size), expand(Ops, Config)), @@ -898,25 +834,24 @@ snapshots_dlx(_Config) -> end, [], Size). single_active(_Config) -> - Size = 300, + %% validates that there can only ever be a single active consumer at a time + %% as well as that message deliveries are done in order + Size = 1000, + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), run_proper( fun () -> - ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, InMemoryBytes}, - frequency([{10, {0, 0, 0, 0, 0}}, + ?FORALL({Length, Bytes, DeliveryLimit}, + frequency([{10, {undefined, undefined, undefined}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]) + oneof([range(1, 3), undefined]) }}]), begin Config = config(?FUNCTION_NAME, Length, Bytes, true, - DeliveryLimit, - InMemoryLength, - InMemoryBytes), + DeliveryLimit), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, single_active_prop(Config, O, false))) @@ -924,15 +859,16 @@ single_active(_Config) -> end, [], Size). upgrade(_Config) -> - Size = 500, + Size = 256, + %% upgrade is always done using _old_ command formats + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> false end), run_proper( fun () -> - ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, SingleActive}, - frequency([{5, {undefined, undefined, undefined, undefined, false}}, + ?FORALL({Length, Bytes, DeliveryLimit, SingleActive}, + frequency([{5, {undefined, undefined, undefined, false}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), oneof([range(1, 3), undefined]), - oneof([range(1, 10), 0, undefined]), oneof([true, false]) }}]), begin @@ -941,10 +877,8 @@ upgrade(_Config) -> Bytes, SingleActive, DeliveryLimit, - InMemoryLength, - undefined, drop_head, - {?MODULE, banana, []} + undefined ), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, @@ -952,37 +886,9 @@ upgrade(_Config) -> end) end, [], Size). 
-upgrade_snapshots(_Config) -> - Size = 500, - run_proper( - fun () -> - ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, SingleActive}, - frequency([{5, {undefined, undefined, undefined, undefined, false}}, - {5, {oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), 0, undefined]), - oneof([true, false]) - }}]), - begin - Config = config(?FUNCTION_NAME, - Length, - Bytes, - SingleActive, - DeliveryLimit, - InMemoryLength, - undefined, - drop_head, - {?MODULE, banana, []} - ), - ?FORALL(O, ?LET(Ops, log_gen_upgrade_snapshots(Size), expand(Ops, Config)), - collect({log_size, length(O)}, - upgrade_snapshots_prop(Config, O))) - end) - end, [], Size). - -upgrade_snapshots_v2_to_v3(_Config) -> - Size = 500, +messages_total(_Config) -> + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> false end), + Size = 256, run_proper( fun () -> ?FORALL({Length, Bytes, DeliveryLimit, SingleActive}, @@ -997,36 +903,7 @@ upgrade_snapshots_v2_to_v3(_Config) -> Length, Bytes, SingleActive, - DeliveryLimit, - undefined, - undefined - ), - ?FORALL(O, ?LET(Ops, log_gen_upgrade_snapshots_v2_to_v3(Size), expand(Ops, Config)), - collect({log_size, length(O)}, - upgrade_snapshots_prop_v2_to_v3(Config, O))) - end) - end, [], Size). - -messages_total(_Config) -> - Size = 1000, - run_proper( - fun () -> - ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, SingleActive}, - frequency([{5, {undefined, undefined, undefined, undefined, false}}, - {5, {oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), 0, undefined]), - oneof([true, false]) - }}]), - begin - Config = config(?FUNCTION_NAME, - Length, - Bytes, - SingleActive, - DeliveryLimit, - InMemoryLength, - undefined), + DeliveryLimit), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, messages_total_prop(Config, O))) @@ -1034,7 +911,8 @@ messages_total(_Config) -> end, [], Size). simple_prefetch(_Config) -> - Size = 2000, + Size = 500, + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), run_proper( fun () -> ?FORALL({Length, Bytes, DeliveryLimit, SingleActive}, @@ -1049,9 +927,7 @@ simple_prefetch(_Config) -> Length, Bytes, SingleActive, - DeliveryLimit, - undefined, - undefined), + DeliveryLimit), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, simple_prefetch_prop(Config, O, true))) @@ -1059,7 +935,8 @@ simple_prefetch(_Config) -> end, [], Size). simple_prefetch_without_checkout_cancel(_Config) -> - Size = 2000, + Size = 256, + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), run_proper( fun () -> ?FORALL({Length, Bytes, DeliveryLimit, SingleActive}, @@ -1074,10 +951,9 @@ simple_prefetch_without_checkout_cancel(_Config) -> Length, Bytes, SingleActive, - DeliveryLimit, - undefined, - undefined), - ?FORALL(O, ?LET(Ops, log_gen_without_checkout_cancel(Size), expand(Ops, Config)), + DeliveryLimit), + ?FORALL(O, ?LET(Ops, log_gen_without_checkout_cancel(Size), + expand(Ops, Config)), collect({log_size, length(O)}, simple_prefetch_prop(Config, O, false))) end) @@ -1105,20 +981,20 @@ simple_prefetch_01(_Config) -> ok. 
single_active_ordering(_Config) -> - Size = 2000, + Size = 500, + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end}, run_proper( fun () -> ?FORALL(O, ?LET(Ops, log_gen_ordered(Size), expand(Ops, Fun)), collect({log_size, length(O)}, - single_active_prop(config(?FUNCTION_NAME, - undefined, - undefined, - true, - undefined, - undefined, - undefined), O, - true))) + single_active_prop( + config(?FUNCTION_NAME, + undefined, + undefined, + true, + undefined), O, + true))) end, [], Size). single_active_ordering_01(_Config) -> @@ -1133,7 +1009,7 @@ single_active_ordering_01(_Config) -> make_enqueue(E2, 1, msg(<<"2">>)), make_settle(C1, [0]) ], - Conf = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0), + Conf = config(?FUNCTION_NAME, 0, 0, true, 0), ?assert(single_active_prop(Conf, Commands, true)), ok. @@ -1154,7 +1030,7 @@ single_active_ordering_02(_Config) -> {down,E,noproc}, make_settle(C1, [0]) ], - Conf = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0), + Conf = config(?FUNCTION_NAME, 0, 0, true, 0), ?assert(single_active_prop(Conf, Commands, true)), ok. @@ -1174,7 +1050,7 @@ single_active_ordering_03(_Config) -> make_checkout(C1, cancel), {down, C1Pid, noconnection} ], - Conf0 = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0), + Conf0 = config(?FUNCTION_NAME, 0, 0, true, 0), Conf = Conf0#{release_cursor_interval => 100}, Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), @@ -1194,54 +1070,22 @@ single_active_ordering_03(_Config) -> false end. -in_memory_limit(_Config) -> - Size = 2000, - run_proper( - fun () -> - ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit, - InMemoryLength, InMemoryBytes}, - frequency([{10, {0, 0, false, 0, 0, 0}}, - {5, {oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]), - boolean(), - oneof([range(1, 3), undefined]), - range(1, 10), - range(1, 1000) - }}]), - begin - Config = config(?FUNCTION_NAME, - Length, - Bytes, - SingleActiveConsumer, - DeliveryLimit, - InMemoryLength, - InMemoryBytes), - ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), - collect({log_size, length(O)}, - in_memory_limit_prop(Config, O))) - end) - end, [], Size). - max_length(_Config) -> %% tests that max length is never transgressed Size = 1000, run_proper( fun () -> - ?FORALL({Length, SingleActiveConsumer, DeliveryLimit, - InMemoryLength}, + ?FORALL({Length, SingleActiveConsumer, DeliveryLimit}, {oneof([range(1, 100), undefined]), boolean(), - range(1, 3), - range(1, 10) + range(1, 3) }, begin Config = config(?FUNCTION_NAME, Length, undefined, SingleActiveConsumer, - DeliveryLimit, - InMemoryLength, - undefined), + DeliveryLimit), ?FORALL(O, ?LET(Ops, log_gen_config(Size), expand(Ops, Config)), collect({log_size, length(O)}, @@ -1264,7 +1108,8 @@ dlx_01(_Config) -> rabbit_fifo:make_discard(C1, [1]), rabbit_fifo_dlx:make_settle([1]) ], - Config = config(?FUNCTION_NAME, 8, undefined, false, 2, 5, 100, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, 8, undefined, false, 2, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1286,7 +1131,8 @@ dlx_02(_Config) -> rabbit_fifo_dlx:make_settle([0]) %% Release cursor A got emitted. ], - Config = config(?FUNCTION_NAME, 10, undefined, false, 5, 5, 100, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, 10, undefined, false, 5, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. 
@@ -1307,7 +1153,8 @@ dlx_03(_Config) -> rabbit_fifo_dlx:make_settle([0]) %% Release cursor A got emitted. ], - Config = config(?FUNCTION_NAME, 10, undefined, false, 5, 5, 100, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, 10, undefined, false, 5, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1327,7 +1174,8 @@ dlx_04(_Config) -> rabbit_fifo:make_discard(C1, [0,1,2,3,4,5]), rabbit_fifo_dlx:make_settle([0,1,2]) ], - Config = config(?FUNCTION_NAME, undefined, undefined, true, 1, 5, 136, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, undefined, undefined, true, 1, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1353,7 +1201,8 @@ dlx_05(_Config) -> rabbit_fifo_dlx:make_settle([0]) %% 2 in checkout ], - Config = config(?FUNCTION_NAME, 0, 0, false, 0, 0, 0, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, 0, 0, false, 0, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1381,7 +1230,8 @@ dlx_06(_Config) -> rabbit_fifo_dlx:make_settle([0,1]) %% 3 in dlx_checkout ], - Config = config(?FUNCTION_NAME, undefined, 749, false, 1, 1, 131, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, undefined, 749, false, 1, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1414,7 +1264,7 @@ dlx_07(_Config) -> rabbit_fifo_dlx:make_settle([0,1]) %% 3 in checkout ], - Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, undefined, undefined, + Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1460,7 +1310,7 @@ dlx_08(_Config) -> rabbit_fifo_dlx:make_settle([1]), rabbit_fifo_dlx:make_settle([2]) ], - Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, undefined, undefined, + Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1482,55 +1332,30 @@ dlx_09(_Config) -> rabbit_fifo:make_discard(C1, [2]) %% 1,2 in discards ], - Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, undefined, undefined, + Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. -config(Name, Length, Bytes, SingleActive, DeliveryLimit, InMemoryLength, InMemoryBytes) -> -config(Name, Length, Bytes, SingleActive, DeliveryLimit, InMemoryLength, InMemoryBytes, +config(Name, Length, MaxBytes, SingleActive, DeliveryLimit) -> + config(Name, Length, MaxBytes, SingleActive, DeliveryLimit, drop_head, {at_most_once, {?MODULE, banana, []}}). -config(Name, Length, Bytes, SingleActive, DeliveryLimit, - InMemoryLength, InMemoryBytes, Overflow, DeadLetterHandler) -> +config(Name, Length, MaxBytes, SingleActive, DeliveryLimit, + Overflow, DeadLetterHandler) -> #{name => Name, max_length => map_max(Length), - max_bytes => map_max(Bytes), + max_bytes => map_max(MaxBytes), dead_letter_handler => DeadLetterHandler, single_active_consumer_on => SingleActive, delivery_limit => map_max(DeliveryLimit), - max_in_memory_length => map_max(InMemoryLength), - max_in_memory_bytes => map_max(InMemoryBytes), + % max_in_memory_length => map_max(InMemoryLength), + % max_in_memory_bytes => map_max(InMemoryBytes), overflow_strategy => Overflow}. map_max(0) -> undefined; map_max(N) -> N. 
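As a quick reference for the reduced-arity helper above, here is a minimal sketch of the map that config/5 would build for a hypothetical call config(sample_queue, 10, 1000, false, 3); the call, the atom sample_queue and the example_config/0 name are illustrative only and are not part of the patch:

%% illustrative only, assuming the hypothetical call
%% config(sample_queue, 10, 1000, false, 3); mirrors config/5 -> config/7 above
example_config() ->
    #{name => sample_queue,
      max_length => 10,                  % map_max(10)
      max_bytes => 1000,                 % map_max(1000)
      dead_letter_handler => {at_most_once, {?MODULE, banana, []}},
      single_active_consumer_on => false,
      delivery_limit => 3,               % map_max(3)
      %% max_in_memory_length / max_in_memory_bytes are intentionally no longer set
      overflow_strategy => drop_head}.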
-in_memory_limit_prop(Conf0, Commands) -> - Conf = Conf0#{release_cursor_interval => 100}, - Indexes = lists:seq(1, length(Commands)), - Entries = lists:zip(Indexes, Commands), - try run_log(test_init(Conf), Entries) of - {_State, Effects} -> - %% validate message ordering - lists:foldl(fun ({log, Idxs, _}, ReleaseCursorIdx) -> - validate_idx_order(Idxs, ReleaseCursorIdx), - ReleaseCursorIdx; - ({release_cursor, Idx, _}, _) -> - Idx; - (_, Acc) -> - Acc - end, 0, Effects), - true; - _ -> - true - catch - Err -> - ct:pal("Commands: ~tp~nConf~tp~n", [Commands, Conf]), - ct:pal("Err: ~tp~n", [Err]), - false - end. - max_length_prop(Conf0, Commands) -> Conf = Conf0#{release_cursor_interval => 100}, Indexes = lists:seq(1, length(Commands)), @@ -1539,7 +1364,7 @@ max_length_prop(Conf0, Commands) -> #{num_ready_messages := MsgReady} = rabbit_fifo:overview(S), MsgReady =< MaxLen end, - try run_log(test_init(Conf), Entries, Invariant, rabbit_fifo) of + try run_log(test_init(Conf), Entries, Invariant) of {_State, _Effects} -> true; _ -> @@ -1585,7 +1410,7 @@ single_active_prop(Conf0, Commands, ValidateOrder) -> map_size(Up) =< 1 end, - try run_log(test_init(Conf), Entries, Invariant, rabbit_fifo) of + try run_log(test_init(Conf), Entries, Invariant) of {_State, Effects} when ValidateOrder -> %% validate message ordering lists:foldl(fun ({send_msg, Pid, {delivery, Tag, Msgs}, ra_event}, @@ -1609,7 +1434,7 @@ messages_total_prop(Conf0, Commands) -> Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), InitState = test_init(Conf), - run_log(InitState, Entries, messages_total_invariant(), rabbit_fifo), + run_log(InitState, Entries, messages_total_invariant()), true. messages_total_invariant() -> @@ -1618,7 +1443,7 @@ messages_total_invariant() -> returns = R, dlx = #rabbit_fifo_dlx{discards = D, consumer = DlxCon}} = S) -> - Base = lqueue:len(M) + lqueue:len(R), + Base = rabbit_fifo_q:len(M) + lqueue:len(R), Tot0 = maps:fold(fun (_, #consumer{checked_out = Ch}, Acc) -> Acc + map_size(Ch) end, Base, C), @@ -1644,7 +1469,8 @@ simple_prefetch_prop(Conf0, Commands, WithCheckoutCancel) -> Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), InitState = test_init(Conf), - run_log(InitState, Entries, simple_prefetch_invariant(WithCheckoutCancel), rabbit_fifo), + run_log(InitState, Entries, + simple_prefetch_invariant(WithCheckoutCancel)), true. simple_prefetch_invariant(WithCheckoutCancel) -> @@ -1652,10 +1478,13 @@ simple_prefetch_invariant(WithCheckoutCancel) -> maps:fold( fun(_, _, false) -> false; - (Id, #consumer{cfg = #consumer_cfg{credit_mode = {simple_prefetch, MaxCredit}}, + (Id, #consumer{cfg = #consumer_cfg{credit_mode = + {simple_prefetch, MaxCredit}}, checked_out = CheckedOut, credit = Credit}, true) -> - valid_simple_prefetch(MaxCredit, Credit, maps:size(CheckedOut), WithCheckoutCancel, Id) + valid_simple_prefetch(MaxCredit, Credit, + maps:size(CheckedOut), + WithCheckoutCancel, Id) end, true, Consumers) end. @@ -1682,24 +1511,26 @@ valid_simple_prefetch(_, _, _, _, _) -> true. 
upgrade_prop(Conf0, Commands) -> + FromVersion = 3, + ToVersion = 4, + FromMod = rabbit_fifo:which_module(FromVersion), + ToMod = rabbit_fifo:which_module(ToVersion), Conf = Conf0#{release_cursor_interval => 0}, Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), - InitState = test_init_v1(Conf), + InitState = test_init_v(Conf, FromVersion), [begin {PreEntries, PostEntries} = lists:split(SplitPos, Entries), %% run log v1 - {V1, _V1Effs} = run_log(InitState, PreEntries, fun (_) -> true end, - rabbit_fifo_v1), + {V3, _V1Effs} = run_log(InitState, PreEntries, + fun (_) -> true end, FromVersion), %% perform conversion - #rabbit_fifo{} = V2 = element(1, rabbit_fifo:apply(meta(length(PreEntries) + 1), - {machine_version, 1, 2}, V1)), + #rabbit_fifo{} = V4 = element(1, rabbit_fifo:apply( + meta(length(PreEntries) + 1), + {machine_version, FromVersion, ToVersion}, + V3)), %% assert invariants - %% - %% Note that we cannot test for num_messages because rabbit_fifo_v1:messages_total/1 - %% relies on ra_indexes not to be empty. However ra_indexes are empty in snapshots - %% in which case the number of messages checked out to consumers will not be included. Fields = [num_ready_messages, smallest_raft_index, num_enqueuers, @@ -1707,42 +1538,18 @@ upgrade_prop(Conf0, Commands) -> enqueue_message_bytes, checkout_message_bytes ], - V1Overview = maps:with(Fields, rabbit_fifo_v1:overview(V1)), - V2Overview = maps:with(Fields, rabbit_fifo:overview(V2)), - case V1Overview == V2Overview of + V3Overview = maps:with(Fields, FromMod:overview(V3)), + V4Overview = maps:with(Fields, ToMod:overview(V4)), + case V3Overview == V4Overview of true -> ok; false -> ct:pal("upgrade_prop failed expected~n~tp~nGot:~n~tp", - [V1Overview, V2Overview]), - ?assertEqual(V1Overview, V2Overview) + [V3Overview, V4Overview]), + ?assertEqual(V3Overview, V4Overview) end, %% check we can run the post entries from the converted state - run_log(V2, PostEntries) + run_log(V4, PostEntries, fun (_) -> true end, ToVersion) end || SplitPos <- lists:seq(1, length(Entries))], - - {_, V1Effs} = run_log(InitState, Entries, fun (_) -> true end, - rabbit_fifo_v1), - [begin - Res = rabbit_fifo:apply(meta(Idx + 1), {machine_version, 1, 2}, RCS) , - #rabbit_fifo{} = V2 = element(1, Res), - %% assert invariants - Fields = [num_ready_messages, - smallest_raft_index, - num_enqueuers, - num_consumers, - enqueue_message_bytes, - checkout_message_bytes - ], - V1Overview = maps:with(Fields, rabbit_fifo_v1:overview(RCS)), - V2Overview = maps:with(Fields, rabbit_fifo:overview(V2)), - case V1Overview == V2Overview of - true -> ok; - false -> - ct:pal("upgrade_prop failed expected~n~tp~nGot:~n~tp", - [V1Overview, V2Overview]), - ?assertEqual(V1Overview, V2Overview) - end - end || {release_cursor, Idx, RCS} <- V1Effs], true. %% single active consumer ordering invariant: @@ -1774,27 +1581,7 @@ dump_generated(Conf, Commands) -> true. snapshots_prop(Conf, Commands) -> - try run_snapshot_test(Conf, Commands, messages_total_invariant()) of - _ -> true - catch - Err -> - ct:pal("Commands: ~tp~nConf~tp~n", [Commands, Conf]), - ct:pal("Err: ~tp~n", [Err]), - false - end. - -upgrade_snapshots_prop(Conf, Commands) -> - try run_upgrade_snapshot_test(Conf, Commands) of - _ -> true - catch - Err -> - ct:pal("Commands: ~tp~nConf~tp~n", [Commands, Conf]), - ct:pal("Err: ~tp~n", [Err]), - false - end. 
- -upgrade_snapshots_prop_v2_to_v3(Conf, Commands) -> - try run_upgrade_snapshot_test_v2_to_v3(Conf, Commands) of + try run_snapshot_test(Conf, Commands) of _ -> true catch Err -> @@ -1826,28 +1613,6 @@ log_gen(Size) -> {1, purge} ]))))). -%% Does not use "return", "down", or "checkout cancel" Ra commands -%% since these 3 commands change behaviour across v2 and v3 fixing -%% a bug where to many credits are granted to the consumer. -log_gen_upgrade_snapshots_v2_to_v3(Size) -> - Nodes = [node(), - fakenode@fake, - fakenode@fake2 - ], - ?LET(EPids, vector(2, pid_gen(Nodes)), - ?LET(CPids, vector(2, pid_gen(Nodes)), - resize(Size, - list( - frequency( - [{20, enqueue_gen(oneof(EPids))}, - {40, {input_event, - frequency([{10, settle}, - {2, discard}, - {2, requeue}])}}, - {1, checkout_gen(oneof(CPids))}, - {1, purge} - ]))))). - log_gen_upgrade_snapshots(Size) -> Nodes = [node(), fakenode@fake, @@ -1866,14 +1631,8 @@ log_gen_upgrade_snapshots(Size) -> {2, requeue} ])}}, {2, checkout_gen(oneof(CPids))}, - %% v2 fixes a bug that exists in v1 where a cancelled consumer is revived. - %% Therefore, there is an expected behavioural difference between v1 and v2 - %% and below line must be commented out. - % {1, checkout_cancel_gen(oneof(CPids))}, - %% Likewise there is a behavioural difference between v1 and v2 - %% when 'up' is followed by 'down' where v2 behaves correctly. - %% Therefore, below line must be commented out. - % {1, down_gen(oneof(EPids ++ CPids))}, + {1, checkout_cancel_gen(oneof(CPids))}, + {1, down_gen(oneof(EPids ++ CPids))}, {1, nodeup_gen(Nodes)}, {1, purge} ]))))). @@ -2000,16 +1759,21 @@ enqueue_gen(Pid) -> enqueue_gen(Pid, _Enq, _Del) -> ?LET(E, {enqueue, Pid, enqueue, msg_gen()}, E). -%% It's fair to assume that every message enqueued is a #basic_message. -%% That's what the channel expects and what rabbit_quorum_queue invokes rabbit_fifo_client with. msg_gen() -> ?LET(Bin, binary(), - #basic_message{content = #content{payload_fragments_rev = [Bin], - properties = none}}). + mc:prepare( + store, mc_amqpl:from_basic_message( + #basic_message{exchange_name = #resource{name = <<"e">>, + kind = exchange, + virtual_host = <<"/">>}, + routing_keys = [<<>>], + content = + #content{payload_fragments_rev = [Bin], + properties = #'P_basic'{}}}))). msg(Bin) when is_binary(Bin) -> #basic_message{content = #content{payload_fragments_rev = [Bin], - properties = none}}. + properties = #'P_basic'{}}}. checkout_cancel_gen(Pid) -> {checkout, Pid, cancel}. @@ -2028,7 +1792,8 @@ checkout_gen(Pid) -> config :: map(), log = [] :: list(), down = #{} :: #{pid() => noproc | noconnection}, - enq_cmds = #{} :: #{ra:index() => rabbit_fifo:enqueue()} + enq_cmds = #{} :: #{ra:index() => rabbit_fifo:enqueue()}, + is_v4 = false :: boolean() }). 
expand(Ops, Config) -> @@ -2054,9 +1819,11 @@ expand(Ops, Config, EnqFun) -> _ -> InitConfig0 end, + IsV4 = rabbit_feature_flags:is_enabled('rabbitmq_4.0.0'), T = #t{state = rabbit_fifo:init(InitConfig), enq_body_fun = EnqFun, - config = Config}, + config = Config, + is_v4 = IsV4}, #t{effects = Effs} = T1 = lists:foldl(fun handle_op/2, T, Ops), %% process the remaining effect #t{log = Log} = lists:foldl(fun do_apply/2, @@ -2078,7 +1845,7 @@ handle_op({enqueue, Pid, When, Data}, Enqs = maps:update_with(Pid, fun (Seq) -> Seq + 1 end, 1, Enqs0), MsgSeq = maps:get(Pid, Enqs), {EnqSt, Msg} = Fun({EnqSt0, Data}), - Cmd = rabbit_fifo:make_enqueue(Pid, MsgSeq, Msg), + Cmd = make_enqueue(Pid, MsgSeq, Msg), case When of enqueue -> do_apply(Cmd, T#t{enqueuers = Enqs, @@ -2108,9 +1875,15 @@ handle_op({checkout, CId, Prefetch}, #t{consumers = Cons0} = T) -> %% ignore if it already exists T; _ -> - Cons = maps:put(CId, ok, Cons0), - Cmd = rabbit_fifo:make_checkout(CId, - {auto, Prefetch, simple_prefetch}, + Spec = case T#t.is_v4 of + true -> + {auto, {simple_prefetch, Prefetch}}; + false -> + {auto, Prefetch, simple_prefetch} + end, + + Cons = maps:put(CId, T#t.index, Cons0), + Cmd = rabbit_fifo:make_checkout(CId, Spec, #{ack => true, prefetch => Prefetch, username => <<"user">>, @@ -2138,13 +1911,24 @@ handle_op({input_event, requeue}, #t{effects = Effs} = T) -> T end; handle_op({input_event, Settlement}, #t{effects = Effs, - down = Down} = T) -> + consumers = Cons, + down = Down, + is_v4 = IsV4} = T) -> case queue:out(Effs) of {{value, {settle, CId, MsgIds}}, Q} -> + CKey = case maps:get(CId, Cons, undefined) of + K when is_integer(K) andalso IsV4 -> + K; + _ -> + CId + end, Cmd = case Settlement of - settle -> rabbit_fifo:make_settle(CId, MsgIds); - return -> rabbit_fifo:make_return(CId, MsgIds); - discard -> rabbit_fifo:make_discard(CId, MsgIds) + settle -> + rabbit_fifo:make_settle(CKey, MsgIds); + return -> + rabbit_fifo:make_return(CKey, MsgIds); + discard -> + rabbit_fifo:make_discard(CKey, MsgIds) end, do_apply(Cmd, T#t{effects = Q}); {{value, {enqueue, Pid, _, _} = Cmd}, Q} -> @@ -2167,7 +1951,8 @@ handle_op(purge, T) -> handle_op({update_config, Changes}, #t{config = Conf} = T) -> Config = maps:merge(Conf, Changes), do_apply(rabbit_fifo:make_update_config(Config), T); -handle_op({checkout_dlx, Prefetch}, #t{config = #{dead_letter_handler := at_least_once}} = T) -> +handle_op({checkout_dlx, Prefetch}, + #t{config = #{dead_letter_handler := at_least_once}} = T) -> Cmd = rabbit_fifo_dlx:make_checkout(ignore_pid, Prefetch), do_apply(Cmd, T). @@ -2235,145 +2020,17 @@ run_proper(Fun, Args, NumTests) -> end}])). run_snapshot_test(Conf, Commands) -> - run_snapshot_test(Conf, Commands, fun (_) -> true end). - -run_snapshot_test(Conf, Commands, Invariant) -> - %% create every incremental permutation of the commands lists - %% and run the snapshot tests against that - ct:pal("running snapshot test with ~b commands using config ~tp", - [length(Commands), Conf]), - [begin - % ct:pal("~w running commands to ~w~n", [?FUNCTION_NAME, lists:last(C)]), - run_snapshot_test0(Conf, C, Invariant) - end || C <- prefixes(Commands, 1, [])]. - -run_snapshot_test0(Conf, Commands) -> - run_snapshot_test0(Conf, Commands, fun (_) -> true end). 
- -run_snapshot_test0(Conf0, Commands, Invariant) -> - Conf = Conf0#{max_in_memory_length => 0}, Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), - {State0, Effects} = run_log(test_init(Conf), Entries, Invariant, rabbit_fifo), - State = rabbit_fifo:normalize(State0), - Cursors = [ C || {release_cursor, _, _} = C <- Effects], - - [begin - %% drop all entries below and including the snapshot - Filtered = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true; - (_) -> false - end, Entries), - % ct:pal("release_cursor: ~b from ~w~n", [SnapIdx, element(1, hd_or(Filtered))]), - {S0, _} = run_log(SnapState, Filtered, Invariant, rabbit_fifo), - S = rabbit_fifo:normalize(S0), - % assert log can be restored from any release cursor index - case S of - State -> ok; - _ -> - ct:pal("Snapshot tests failed run log:~n" - "~tp~n from snapshot index ~b " - "with snapshot state~n~tp~n Entries~n~tp~n" - "Config: ~tp~n", - [Filtered, SnapIdx, SnapState, Entries, Conf]), - ct:pal("Expected~n~tp~nGot:~n~tp~n", [?record_info(rabbit_fifo, State), - ?record_info(rabbit_fifo, S)]), - ?assertEqual(State, S) - end - end || {release_cursor, SnapIdx, SnapState} <- Cursors], - ok. - -run_upgrade_snapshot_test(Conf, Commands) -> - ct:pal("running test with ~b commands using config ~tp", + ct:pal("running snapshot test 2 with ~b commands using config ~tp", [length(Commands), Conf]), - Indexes = lists:seq(1, length(Commands)), - Entries = lists:zip(Indexes, Commands), - Invariant = fun(_) -> true end, - %% Run the whole command log in v1 to emit release cursors. - {_, Effects} = run_log(test_init_v1(Conf), Entries, Invariant, rabbit_fifo_v1), - Cursors = [ C || {release_cursor, _, _} = C <- Effects], - [begin - %% Drop all entries below and including the snapshot. - FilteredV1 = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true; - (_) -> false - end, Entries), - %% For V2 we will apply the same commands to the snapshot state as for V1. - %% However, we need to increment all Raft indexes by 1 because V2 - %% requires one additional Raft index for the conversion command from V1 to V2. - FilteredV2 = lists:keymap(fun(Idx) -> Idx + 1 end, 1, FilteredV1), - %% Recover in V1. - {StateV1, _} = run_log(SnapState, FilteredV1, Invariant, rabbit_fifo_v1), - %% Perform conversion and recover in V2. - Res = rabbit_fifo:apply(meta(SnapIdx + 1), {machine_version, 1, 2}, SnapState), - #rabbit_fifo{} = V2 = element(1, Res), - {StateV2, _} = run_log(V2, FilteredV2, Invariant, rabbit_fifo, 2), - %% Invariant: Recovering a V1 snapshot in V1 or V2 should end up in the same - %% number of messages. - Fields = [num_messages, - num_ready_messages, - num_enqueuers, - num_consumers, - enqueue_message_bytes, - checkout_message_bytes - ], - V1Overview = maps:with(Fields, rabbit_fifo_v1:overview(StateV1)), - V2Overview = maps:with(Fields, rabbit_fifo:overview(StateV2)), - case V1Overview == V2Overview of - true -> ok; - false -> - ct:pal("property failed, expected:~n~tp~ngot:~n~tp~nstate v1:~n~tp~nstate v2:~n~tp~n" - "snapshot index: ~tp", - [V1Overview, V2Overview, StateV1, ?record_info(rabbit_fifo, StateV2), SnapIdx]), - ?assertEqual(V1Overview, V2Overview) - end - end || {release_cursor, SnapIdx, SnapState} <- Cursors], - ok. + Fun = fun (_E, S, _Effs) -> + MsgTotFun = messages_total_invariant(), + MsgTotFun(S) + end, + _ = run_log(test_init(Conf), Entries, Fun), + true. 
-run_upgrade_snapshot_test_v2_to_v3(Conf, Commands) -> - ct:pal("running test with ~b commands using config ~tp", - [length(Commands), Conf]), - Indexes = lists:seq(1, length(Commands)), - Entries = lists:zip(Indexes, Commands), - Invariant = fun(_) -> true end, - %% Run the whole command log in v2 to emit release cursors. - {_, Effects} = run_log(test_init(Conf), Entries, Invariant, rabbit_fifo, 2), - Cursors = [ C || {release_cursor, _, _} = C <- Effects], - [begin - %% Drop all entries below and including the snapshot. - FilteredV2 = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true; - (_) -> false - end, Entries), - %% For V3 we will apply the same commands to the snapshot state as for V2. - %% However, we need to increment all Raft indexes by 1 because V3 - %% requires one additional Raft index for the conversion command from V2 to V3. - FilteredV3 = lists:keymap(fun(Idx) -> Idx + 1 end, 1, FilteredV2), - %% Recover in V2. - {StateV2, _} = run_log(SnapState, FilteredV2, Invariant, rabbit_fifo, 2), - %% Perform conversion and recover in V3. - Res = rabbit_fifo:apply(meta(SnapIdx + 1), {machine_version, 2, 3}, SnapState), - #rabbit_fifo{} = V3 = element(1, Res), - {StateV3, _} = run_log(V3, FilteredV3, Invariant, rabbit_fifo, 3), - %% Invariant: Recovering a V2 snapshot in V2 or V3 should end up in the same - %% number of messages given that no "return", "down", or "cancel consumer" - %% Ra commands are used. - Fields = [num_messages, - num_ready_messages, - num_enqueuers, - num_consumers, - enqueue_message_bytes, - checkout_message_bytes - ], - V2Overview = maps:with(Fields, rabbit_fifo:overview(StateV2)), - V3Overview = maps:with(Fields, rabbit_fifo:overview(StateV3)), - case V2Overview == V3Overview of - true -> ok; - false -> - ct:pal("property failed, expected:~n~tp~ngot:~n~tp~nstate v2:~n~tp~nstate v3:~n~tp~n" - "snapshot index: ~tp", - [V2Overview, V3Overview, StateV2, ?record_info(rabbit_fifo, StateV3), SnapIdx]), - ?assertEqual(V2Overview, V3Overview) - end - end || {release_cursor, SnapIdx, SnapState} <- Cursors], - ok. hd_or([H | _]) -> H; hd_or(_) -> {undefined}. @@ -2386,45 +2043,64 @@ prefixes(Source, N, Acc) -> prefixes(Source, N+1, [X | Acc]). run_log(InitState, Entries) -> - run_log(InitState, Entries, fun(_) -> true end, rabbit_fifo). - -run_log(InitState, Entries, InvariantFun, FifoMod) -> - run_log(InitState, Entries, InvariantFun, FifoMod, 3). - -run_log(InitState, Entries, InvariantFun, FifoMod, MachineVersion) -> - Invariant = fun(E, S) -> - case InvariantFun(S) of + run_log(InitState, Entries, fun(_) -> true end). + +run_log(InitState, Entries, InvariantFun) -> + run_log(InitState, Entries, InvariantFun, ?MACHINE_VERSION). 
+ +run_log(InitState, Entries, InvariantFun0, MachineVersion) + when is_function(InvariantFun0, 1) -> + InvariantFun = fun (_E, S, _Effs) -> + InvariantFun0(S) + end, + run_log(InitState, Entries, InvariantFun, MachineVersion); +run_log(InitState, Entries, InvariantFun, MachineVersion) + when is_integer(MachineVersion) -> + Invariant = fun(E, S, Effs) -> + case InvariantFun(E, S, Effs) of true -> ok; false -> throw({invariant, E, S}) end end, - - lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) -> - case FifoMod:apply(meta(Idx, MachineVersion), E, Acc0) of + FifoMod = rabbit_fifo:which_module(MachineVersion), + + lists:foldl(fun ({Idx, E0}, {Acc0, Efx0}) -> + {Meta, E} = case E0 of + {M1, E1} when is_map(M1) -> + M0 = meta(Idx, MachineVersion), + {maps:merge(M0, M1), E1}; + _ -> + {meta(Idx, MachineVersion), E0} + end, + + case FifoMod:apply(Meta, E, Acc0) of {Acc, _, Efx} when is_list(Efx) -> - Invariant(E, Acc), + Invariant(E, Acc, lists:flatten(Efx)), {Acc, Efx0 ++ Efx}; {Acc, _, Efx} -> - Invariant(E, Acc), + Invariant(E, Acc, lists:flatten(Efx)), {Acc, Efx0 ++ [Efx]}; {Acc, _} -> - Invariant(E, Acc), + Invariant(E, Acc, []), {Acc, Efx0} end end, {InitState, []}, Entries). test_init(Conf) -> + test_init(rabbit_fifo, Conf). + +test_init(Mod, Conf) -> Default = #{queue_resource => blah, release_cursor_interval => 0, metrics_handler => {?MODULE, metrics_handler, []}}, - rabbit_fifo:init(maps:merge(Default, Conf)). + Mod:init(maps:merge(Default, Conf)). test_init_v1(Conf) -> - Default = #{queue_resource => blah, - release_cursor_interval => 0, - metrics_handler => {?MODULE, metrics_handler, []}}, - rabbit_fifo_v1:init(maps:merge(Default, Conf)). + test_init(rabbit_fifo_v1, Conf). + +test_init_v(Conf, Version) -> + test_init(rabbit_fifo:which_module(Version), Conf). meta(Idx) -> meta(Idx, 3). diff --git a/deps/rabbit/test/rabbit_fifo_q_SUITE.erl b/deps/rabbit/test/rabbit_fifo_q_SUITE.erl new file mode 100644 index 000000000000..919aa40f0e44 --- /dev/null +++ b/deps/rabbit/test/rabbit_fifo_q_SUITE.erl @@ -0,0 +1,208 @@ +-module(rabbit_fifo_q_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("proper/include/proper.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("rabbit/src/rabbit_fifo.hrl"). + +all() -> + [ + {group, tests} + ]. + + +all_tests() -> + [ + hi, + basics, + hi_is_prioritised, + get_lowest_index, + single_priority_behaves_like_queue + ]. + + +groups() -> + [ + {tests, [parallel], all_tests()} + ]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(_TestCase, _Config) -> + ok. + +%%%=================================================================== +%%% Test cases +%%%=================================================================== + +-define(MSG(L), ?MSG(L, L)). + +hi(_Config) -> + Q0 = rabbit_fifo_q:new(), + Q1 = lists:foldl( + fun ({P, I}, Q) -> + rabbit_fifo_q:in(P, I, Q) + end, Q0, [ + {hi, ?MSG(1)} + ]), + {?MSG(1), Q2} = rabbit_fifo_q:out(Q1), + empty = rabbit_fifo_q:out(Q2), + ok. 
+
+basics(_Config) ->
+    Q0 = rabbit_fifo_q:new(),
+    Q1 = lists:foldl(
+           fun ({P, I}, Q) ->
+                   rabbit_fifo_q:in(P, I, Q)
+           end, Q0, [
+                     {hi, ?MSG(1)},
+                     {no, ?MSG(2)},
+                     {hi, ?MSG(3)},
+                     {no, ?MSG(4)},
+                     {hi, ?MSG(5)}
+                    ]),
+    {?MSG(1), Q2} = rabbit_fifo_q:out(Q1),
+    {?MSG(3), Q3} = rabbit_fifo_q:out(Q2),
+    {?MSG(2), Q4} = rabbit_fifo_q:out(Q3),
+    {?MSG(5), Q5} = rabbit_fifo_q:out(Q4),
+    {?MSG(4), Q6} = rabbit_fifo_q:out(Q5),
+    empty = rabbit_fifo_q:out(Q6),
+    ok.
+
+hi_is_prioritised(_Config) ->
+    Q0 = rabbit_fifo_q:new(),
+    %% when `hi' has a lower index than the next 'no' then it is still
+    %% prioritised (as this is safe to do).
+    Q1 = lists:foldl(
+           fun ({P, I}, Q) ->
+                   rabbit_fifo_q:in(P, I, Q)
+           end, Q0, [
+                     {hi, ?MSG(1)},
+                     {hi, ?MSG(2)},
+                     {hi, ?MSG(3)},
+                     {hi, ?MSG(4)},
+                     {no, ?MSG(5)}
+                    ]),
+    {?MSG(1), Q2} = rabbit_fifo_q:out(Q1),
+    {?MSG(2), Q3} = rabbit_fifo_q:out(Q2),
+    {?MSG(3), Q4} = rabbit_fifo_q:out(Q3),
+    {?MSG(4), Q5} = rabbit_fifo_q:out(Q4),
+    {?MSG(5), Q6} = rabbit_fifo_q:out(Q5),
+    empty = rabbit_fifo_q:out(Q6),
+    ok.
+
+get_lowest_index(_Config) ->
+    Q0 = rabbit_fifo_q:new(),
+    Q1 = rabbit_fifo_q:in(hi, ?MSG(1, ?LINE), Q0),
+    Q2 = rabbit_fifo_q:in(no, ?MSG(2, ?LINE), Q1),
+    Q3 = rabbit_fifo_q:in(no, ?MSG(3, ?LINE), Q2),
+    {_, Q4} = rabbit_fifo_q:out(Q3),
+    {_, Q5} = rabbit_fifo_q:out(Q4),
+    {_, Q6} = rabbit_fifo_q:out(Q5),
+
+    ?assertEqual(undefined, rabbit_fifo_q:get_lowest_index(Q0)),
+    ?assertEqual(1, rabbit_fifo_q:get_lowest_index(Q1)),
+    ?assertEqual(1, rabbit_fifo_q:get_lowest_index(Q2)),
+    ?assertEqual(1, rabbit_fifo_q:get_lowest_index(Q3)),
+    ?assertEqual(2, rabbit_fifo_q:get_lowest_index(Q4)),
+    ?assertEqual(3, rabbit_fifo_q:get_lowest_index(Q5)),
+    ?assertEqual(undefined, rabbit_fifo_q:get_lowest_index(Q6)).
+
+-type op() :: {in, integer()} | out.
+
+single_priority_behaves_like_queue(_Config) ->
+    run_proper(
+      fun () ->
+              ?FORALL({P, Ops}, {oneof([hi, no]), op_gen(256)},
+                      queue_prop(P, Ops))
+      end, [], 25),
+    ok.
+
+queue_prop(P, Ops) ->
+    % ct:pal("Running queue_prop for ~s", [Ops]),
+    Que = queue:new(),
+    Sut = rabbit_fifo_q:new(),
+    {Queue, FifoQ} = lists:foldl(
+                       fun ({in, V}, {Q0, S0}) ->
+                               Q = queue:in(V, Q0),
+                               S = rabbit_fifo_q:in(P, V, S0),
+                               case queue:len(Q) == rabbit_fifo_q:len(S) of
+                                   true ->
+                                       {Q, S};
+                                   false ->
+                                       throw(false)
+                               end;
+                           (out, {Q0, S0}) ->
+                               {V1, Q} = case queue:out(Q0) of
+                                             {{value, V0}, Q1} ->
+                                                 {V0, Q1};
+                                             Res0 ->
+                                                 Res0
+                                         end,
+                               {V2, S} = case rabbit_fifo_q:out(S0) of
+                                             empty ->
+                                                 {empty, S0};
+                                             Res ->
+                                                 Res
+                                         end,
+                               case V1 == V2 of
+                                   true ->
+                                       {Q, S};
+                                   false ->
+                                       ct:pal("V1 ~p, V2 ~p", [V1, V2]),
+                                       throw(false)
+                               end
+                       end, {Que, Sut}, Ops),
+
+    queue:len(Queue) == rabbit_fifo_q:len(FifoQ).
+
+
+
+
+%%% helpers
+
+op_gen(Size) ->
+    ?LET(Ops,
+         resize(Size,
+                list(
+                  frequency(
+                    [
+                     {20, {in, non_neg_integer()}},
+                     {20, out}
+                    ]
+                   ))),
+         begin
+             {_, Ops1} = lists:foldl(
+                           fun ({in, I}, {Idx, Os}) ->
+                                   {Idx + 1, [{in, ?MSG(Idx, I)} | Os]};
+                               (out, {Idx, Os}) ->
+                                   {Idx + 1, [out | Os] }
+                           end, {1, []}, Ops),
+             lists:reverse(Ops1)
+         end
+        ).
+
+run_proper(Fun, Args, NumTests) ->
+    ?assert(
+       proper:counterexample(
+         erlang:apply(Fun, Args),
+         [{numtests, NumTests},
+          {on_output, fun(".", _) -> ok; % don't print the '.'s on new lines
+                         (F, A) -> ct:pal(?LOW_IMPORTANCE, F, A)
+                      end}])).
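To make the interleaving that basics/1 and hi_is_prioritised/1 assert easier to see at a glance, here is a throwaway sketch that spools a rabbit_fifo_q into a list; drain/1 is hypothetical and not part of the suite:

%% hypothetical helper, not part of the suite: drains a rabbit_fifo_q to a list
drain(Q) ->
    case rabbit_fifo_q:out(Q) of
        empty -> [];
        {Msg, Q1} -> [Msg | drain(Q1)]
    end.

For the queue built in basics/1 (enqueued as hi 1, no 2, hi 3, no 4, hi 5), draining would yield the messages in the order 1, 3, 2, 5, 4, which is exactly the sequence the pattern matches in that test assert.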
diff --git a/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl b/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl index 291971ba1ab4..a35100f4e53b 100644 --- a/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl @@ -8,7 +8,6 @@ -export([ ]). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include("src/rabbit_fifo_v0.hrl"). @@ -542,29 +541,6 @@ duplicate_delivery_test(_) -> ?assertEqual(1, maps:size(Messages)), ok. -state_enter_file_handle_leader_reservation_test(_) -> - S0 = init(#{name => the_name, - queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), - become_leader_handler => {m, f, [a]}}), - - Resource = {resource, <<"/">>, queue, <<"test">>}, - Effects = rabbit_fifo_v0:state_enter(leader, S0), - ?assertEqual([ - {mod_call, m, f, [a, the_name]}, - {mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]} - ], Effects), - ok. - -state_enter_file_handle_other_reservation_test(_) -> - S0 = init(#{name => the_name, - queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}), - Effects = rabbit_fifo_v0:state_enter(other, S0), - ?assertEqual([ - {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []} - ], - Effects), - ok. - state_enter_monitors_and_notifications_test(_) -> Oth = spawn(fun () -> ok end), {State0, _} = enq(1, 1, first, test_init(test)), @@ -999,8 +975,7 @@ single_active_consumer_state_enter_leader_include_waiting_consumers_test(_) -> Effects = rabbit_fifo_v0:state_enter(leader, State1), %% 2 effects for each consumer process (channel process), 1 effect for the node, - %% 1 effect for file handle reservation - ?assertEqual(2 * 3 + 1 + 1, length(Effects)). + ?assertEqual(2 * 3 + 1, length(Effects)). single_active_consumer_state_enter_eol_include_waiting_consumers_test(_) -> Resource = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), @@ -1030,8 +1005,7 @@ single_active_consumer_state_enter_eol_include_waiting_consumers_test(_) -> Effects = rabbit_fifo_v0:state_enter(eol, State1), %% 1 effect for each consumer process (channel process), - %% 1 effect for file handle reservation - ?assertEqual(4, length(Effects)). + ?assertEqual(3, length(Effects)). query_consumers_test(_) -> State0 = init(#{name => ?FUNCTION_NAME, diff --git a/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl b/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl index 05b80c7f0b72..61ecf5940b22 100644 --- a/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl +++ b/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_foo_protocol_connection_info). diff --git a/deps/rabbit/test/rabbit_ha_test_consumer.erl b/deps/rabbit/test/rabbit_ha_test_consumer.erl deleted file mode 100644 index 4506efe118ab..000000000000 --- a/deps/rabbit/test/rabbit_ha_test_consumer.erl +++ /dev/null @@ -1,102 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. -%% --module(rabbit_ha_test_consumer). - --include_lib("amqp_client/include/amqp_client.hrl"). - --export([await_response/1, create/5, start/6]). - -await_response(ConsumerPid) -> - case receive {ConsumerPid, Response} -> Response end of - {error, Reason} -> erlang:error(Reason); - ok -> ok - end. - -create(Channel, Queue, TestPid, CancelOnFailover, ExpectingMsgs) -> - ConsumerPid = spawn_link(?MODULE, start, - [TestPid, Channel, Queue, CancelOnFailover, - ExpectingMsgs + 1, ExpectingMsgs]), - amqp_channel:subscribe( - Channel, consume_method(Queue, CancelOnFailover), ConsumerPid), - ConsumerPid. - -start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) -> - error_logger:info_msg("consumer ~tp on ~tp awaiting ~w messages " - "(lowest seen = ~w, cancel-on-failover = ~w)~n", - [self(), Channel, MsgsToConsume, LowestSeen, - CancelOnFailover]), - run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume). - -run(TestPid, _Channel, _Queue, _CancelOnFailover, _LowestSeen, 0) -> - consumer_reply(TestPid, ok); -run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) -> - receive - #'basic.consume_ok'{} -> - run(TestPid, Channel, Queue, - CancelOnFailover, LowestSeen, MsgsToConsume); - {Delivery = #'basic.deliver'{ redelivered = Redelivered }, - #amqp_msg{payload = Payload}} -> - MsgNum = list_to_integer(binary_to_list(Payload)), - - ack(Delivery, Channel), - - %% we can receive any message we've already seen and, - %% because of the possibility of multiple requeuings, we - %% might see these messages in any order. If we are seeing - %% a message again, we don't decrement the MsgsToConsume - %% counter. - if - MsgNum + 1 == LowestSeen -> - run(TestPid, Channel, Queue, - CancelOnFailover, MsgNum, MsgsToConsume - 1); - MsgNum >= LowestSeen -> - true = Redelivered, %% ASSERTION - run(TestPid, Channel, Queue, - CancelOnFailover, LowestSeen, MsgsToConsume); - true -> - %% We received a message we haven't seen before, - %% but it is not the next message in the expected - %% sequence. - consumer_reply(TestPid, - {error, {unexpected_message, MsgNum}}) - end; - #'basic.cancel'{} when CancelOnFailover -> - error_logger:info_msg("consumer ~tp on ~tp received basic.cancel: " - "resubscribing to ~tp on ~tp~n", - [self(), Channel, Queue, Channel]), - resubscribe(TestPid, Channel, Queue, CancelOnFailover, - LowestSeen, MsgsToConsume); - #'basic.cancel'{} -> - exit(cancel_received_without_cancel_on_failover) - end. - -%% -%% Private API -%% - -resubscribe(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, - MsgsToConsume) -> - amqp_channel:subscribe( - Channel, consume_method(Queue, CancelOnFailover), self()), - ok = receive #'basic.consume_ok'{} -> ok - end, - error_logger:info_msg("re-subscripting consumer ~tp on ~tp complete " - "(received basic.consume_ok)", - [self(), Channel]), - start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume). - -consume_method(Queue, CancelOnFailover) -> - Args = [{<<"x-cancel-on-ha-failover">>, bool, CancelOnFailover}], - #'basic.consume'{queue = Queue, - arguments = Args}. - -ack(#'basic.deliver'{delivery_tag = DeliveryTag}, Channel) -> - amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}), - ok. - -consumer_reply(TestPid, Reply) -> - TestPid ! {self(), Reply}. 
diff --git a/deps/rabbit/test/rabbit_ha_test_producer.erl b/deps/rabbit/test/rabbit_ha_test_producer.erl deleted file mode 100644 index 3dd2244d284f..000000000000 --- a/deps/rabbit/test/rabbit_ha_test_producer.erl +++ /dev/null @@ -1,131 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% --module(rabbit_ha_test_producer). - --export([await_response/1, start/6, create/5, create/6]). - --include_lib("amqp_client/include/amqp_client.hrl"). - -await_response(ProducerPid) -> - error_logger:info_msg("waiting for producer pid ~tp~n", [ProducerPid]), - case receive {ProducerPid, Response} -> Response end of - ok -> ok; - {error, _} = Else -> exit(Else); - Else -> exit({weird_response, Else}) - end. - -create(Channel, Queue, TestPid, Confirm, MsgsToSend) -> - create(Channel, Queue, TestPid, Confirm, MsgsToSend, acks). - -create(Channel, Queue, TestPid, Confirm, MsgsToSend, Mode) -> - AckNackMsgs = case Mode of - acks -> {ok, {error, received_nacks}}; - nacks -> {{error, received_acks}, ok} - end, - ProducerPid = spawn_link(?MODULE, start, [Channel, Queue, TestPid, - Confirm, MsgsToSend, AckNackMsgs]), - receive - {ProducerPid, started} -> ProducerPid - end. - -start(Channel, Queue, TestPid, Confirm, MsgsToSend, AckNackMsgs) -> - ConfirmState = - case Confirm of - true -> amqp_channel:register_confirm_handler(Channel, self()), - #'confirm.select_ok'{} = - amqp_channel:call(Channel, #'confirm.select'{}), - gb_trees:empty(); - false -> none - end, - TestPid ! {self(), started}, - error_logger:info_msg("publishing ~w msgs on ~tp~n", [MsgsToSend, Channel]), - producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend, AckNackMsgs). - -%% -%% Private API -%% - -producer(_Channel, _Queue, TestPid, none, 0, _AckNackMsgs) -> - TestPid ! {self(), ok}; -producer(Channel, _Queue, TestPid, ConfirmState, 0, {AckMsg, NackMsg}) -> - error_logger:info_msg("awaiting confirms on channel ~tp~n", [Channel]), - Msg = case drain_confirms(none, ConfirmState) of - %% No acks or nacks - acks -> AckMsg; - nacks -> NackMsg; - mix -> {error, received_both_acks_and_nacks}; - {Nacks, CS} -> {error, {missing_confirms, Nacks, - lists:sort(gb_trees:keys(CS))}} - end, - TestPid ! {self(), Msg}; - -producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend, AckNackMsgs) -> - Method = #'basic.publish'{exchange = <<"">>, - routing_key = Queue, - mandatory = false, - immediate = false}, - - ConfirmState1 = maybe_record_confirm(ConfirmState, Channel, MsgsToSend), - - amqp_channel:call(Channel, Method, - #amqp_msg{props = #'P_basic'{delivery_mode = 2}, - payload = list_to_binary( - integer_to_list(MsgsToSend))}), - - producer(Channel, Queue, TestPid, ConfirmState1, MsgsToSend - 1, AckNackMsgs). - -maybe_record_confirm(none, _, _) -> - none; -maybe_record_confirm(ConfirmState, Channel, MsgsToSend) -> - SeqNo = amqp_channel:next_publish_seqno(Channel), - gb_trees:insert(SeqNo, MsgsToSend, ConfirmState). 
- -drain_confirms(Collected, ConfirmState) -> - case gb_trees:is_empty(ConfirmState) of - true -> Collected; - false -> receive - #'basic.ack'{delivery_tag = DeliveryTag, - multiple = IsMulti} -> - Collected1 = case Collected of - none -> acks; - acks -> acks; - nacks -> mix; - mix -> mix - end, - drain_confirms(Collected1, - delete_confirms(DeliveryTag, IsMulti, - ConfirmState)); - #'basic.nack'{delivery_tag = DeliveryTag, - multiple = IsMulti} -> - Collected1 = case Collected of - none -> nacks; - nacks -> nacks; - acks -> mix; - mix -> mix - end, - drain_confirms(Collected1, - delete_confirms(DeliveryTag, IsMulti, - ConfirmState)) - after - 60000 -> {Collected, ConfirmState} - end - end. - -delete_confirms(DeliveryTag, false, ConfirmState) -> - gb_trees:delete(DeliveryTag, ConfirmState); -delete_confirms(DeliveryTag, true, ConfirmState) -> - multi_confirm(DeliveryTag, ConfirmState). - -multi_confirm(DeliveryTag, ConfirmState) -> - case gb_trees:is_empty(ConfirmState) of - true -> ConfirmState; - false -> {Key, _, ConfirmState1} = gb_trees:take_smallest(ConfirmState), - case Key =< DeliveryTag of - true -> multi_confirm(DeliveryTag, ConfirmState1); - false -> ConfirmState - end - end. diff --git a/deps/rabbit/test/rabbit_local_random_exchange_SUITE.erl b/deps/rabbit/test/rabbit_local_random_exchange_SUITE.erl new file mode 100644 index 000000000000..a2e57f476171 --- /dev/null +++ b/deps/rabbit/test/rabbit_local_random_exchange_SUITE.erl @@ -0,0 +1,205 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% +-module(rabbit_local_random_exchange_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +all() -> + [ + {group, non_parallel_tests} + ]. + +groups() -> + [ + {non_parallel_tests, [], [ + routed_to_one_local_queue_test, + no_route + ]} + ]. + +%% ------------------------------------------------------------------- +%% Test suite setup/teardown +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + Config1 = rabbit_ct_helpers:set_config(Config, + [ + {rmq_nodename_suffix, ?MODULE}, + {rmq_nodes_count, 3}, + {tcp_ports_base, {skip_n_nodes, 3}} + ]), + rabbit_ct_helpers:run_setup_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + case rabbit_ct_broker_helpers:enable_feature_flag( + Config, rabbit_exchange_type_local_random) of + ok -> + TestCaseName = rabbit_ct_helpers:config_to_testcase_name(Config, Testcase), + Config1 = rabbit_ct_helpers:set_config(Config, {test_resource_name, + re:replace(TestCaseName, "/", "-", + [global, {return, list}])}), + rabbit_ct_helpers:testcase_started(Config1, Testcase); + Res -> + {skip, Res} + end. + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). 
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+routed_to_one_local_queue_test(Config) ->
+    E = make_exchange_name(Config, "0"),
+    declare_exchange(Config, E),
+    %% declare queues on the first two nodes: 0, 1
+    QueueNames = declare_and_bind_queues(Config, 2, E),
+    %% publish one message from node 0 and one from node 1
+    publish(Config, E, 0),
+    publish(Config, E, 1),
+    %% each message should be routed to the queue local to its publishing node
+    run_on_node(Config, 0,
+                fun(Chan) ->
+                        assert_queue_size(Config, Chan, 1, lists:nth(1, QueueNames))
+                end),
+    run_on_node(Config, 0,
+                fun(Chan) ->
+                        assert_queue_size(Config, Chan, 1, lists:nth(2, QueueNames))
+                end),
+    delete_exchange_and_queues(Config, E, QueueNames),
+    ok.
+
+no_route(Config) ->
+
+    E = make_exchange_name(Config, "0"),
+    declare_exchange(Config, E),
+    %% declare queues on nodes 0, 1
+    QueueNames = declare_and_bind_queues(Config, 2, E),
+    %% publish a mandatory message on node 2, which has no local queue
+    publish_expect_return(Config, E, 2),
+    %% the message cannot be routed anywhere and is returned to the publisher
+    delete_exchange_and_queues(Config, E, QueueNames),
+    ok.
+
+delete_exchange_and_queues(Config, E, QueueNames) ->
+    run_on_node(Config, 0,
+                fun(Chan) ->
+                        amqp_channel:call(Chan, #'exchange.delete'{exchange = E }),
+                        [amqp_channel:call(Chan, #'queue.delete'{queue = Q })
+                         || Q <- QueueNames]
+                end).
+publish(Config, E, Node) ->
+    run_on_node(Config, Node,
+                fun(Chan) ->
+                        amqp_channel:call(Chan, #'confirm.select'{}),
+                        amqp_channel:call(Chan,
+                                          #'basic.publish'{exchange = E, routing_key = rnd()},
+                                          #amqp_msg{props = #'P_basic'{}, payload = <<>>}),
+                        amqp_channel:wait_for_confirms_or_die(Chan)
+                end).
+
+publish_expect_return(Config, E, Node) ->
+    run_on_node(Config, Node,
+                fun(Chan) ->
+                        amqp_channel:register_return_handler(Chan, self()),
+                        amqp_channel:call(Chan,
+                                          #'basic.publish'{exchange = E,
+                                                           mandatory = true,
+                                                           routing_key = rnd()},
+                                          #amqp_msg{props = #'P_basic'{},
+                                                    payload = <<>>}),
+                        receive
+                            {#'basic.return'{}, _} ->
+                                ok
+                        after 5000 ->
+                                  flush(100),
+                                  ct:fail("no return received")
+                        end
+                end).
+
+run_on_node(Config, Node, RunMethod) ->
+    {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, Node),
+    Return = RunMethod(Chan),
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
+    Return.
+
+declare_exchange(Config, ExchangeName) ->
+    run_on_node(Config, 0,
+                fun(Chan) ->
+                        #'exchange.declare_ok'{} =
+                            amqp_channel:call(Chan,
+                                              #'exchange.declare'{exchange = ExchangeName,
+                                                                  type = <<"x-local-random">>,
+                                                                  auto_delete = false})
+                end).
+
+declare_and_bind_queues(Config, NodeCount, E) ->
+    QueueNames = [make_queue_name(Config, Node) || Node <- lists:seq(0, NodeCount -1)],
+    [run_on_node(Config, Node,
+                 fun(Chan) ->
+                         declare_and_bind_queue(Chan, E, make_queue_name(Config, Node))
+                 end) || Node <- lists:seq(0, NodeCount -1)],
+    QueueNames.
+
+declare_and_bind_queue(Ch, E, Q) ->
+    #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Q}),
+    #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = Q,
+                                                             exchange = E,
+                                                             routing_key = <<"">>}),
+    ok.
+
+assert_total_queue_size(_Config, Chan, ExpectedSize, ExpectedQueues) ->
+    Counts = [begin
+                  #'queue.declare_ok'{message_count = M} =
+                      amqp_channel:call(Chan, #'queue.declare'{queue = Q}),
+                  M
+              end || Q <- ExpectedQueues],
+    ?assertEqual(ExpectedSize, lists:sum(Counts)).
+ +assert_queue_size(_Config, Chan, ExpectedSize, ExpectedQueue) -> + ct:log("assert_queue_size ~p ~p", [ExpectedSize, ExpectedQueue]), + #'queue.declare_ok'{message_count = M} = + amqp_channel:call(Chan, #'queue.declare'{queue = ExpectedQueue}), + ?assertEqual(ExpectedSize, M). + +rnd() -> + list_to_binary(integer_to_list(rand:uniform(1000000))). + +make_exchange_name(Config, Suffix) -> + B = rabbit_ct_helpers:get_config(Config, test_resource_name), + erlang:list_to_binary("x-" ++ B ++ "-" ++ Suffix). +make_queue_name(Config, Node) -> + B = rabbit_ct_helpers:get_config(Config, test_resource_name), + erlang:list_to_binary("q-" ++ B ++ "-" ++ integer_to_list(Node)). + +flush(T) -> + receive X -> + ct:pal("flushed ~p", [X]), + flush(T) + after T -> + ok + end. diff --git a/deps/rabbit/test/rabbit_message_interceptor_SUITE.erl b/deps/rabbit/test/rabbit_message_interceptor_SUITE.erl index db70c8e45f29..9641501468f0 100644 --- a/deps/rabbit/test/rabbit_message_interceptor_SUITE.erl +++ b/deps/rabbit/test/rabbit_message_interceptor_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_message_interceptor_SUITE). @@ -66,10 +66,10 @@ headers_no_overwrite(Config) -> headers(Overwrite, Config) -> Server = atom_to_binary(rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)), Payload = QName = atom_to_binary(?FUNCTION_NAME), - NowSecs = os:system_time(second), - NowMs = os:system_time(millisecond), Ch = rabbit_ct_client_helpers:open_channel(Config), #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName}), + NowSecs = os:system_time(second), + NowMs = os:system_time(millisecond), amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload}), AssertHeaders = diff --git a/deps/rabbit/test/rabbit_msg_record_SUITE.erl b/deps/rabbit/test/rabbit_msg_record_SUITE.erl deleted file mode 100644 index 940d311244b3..000000000000 --- a/deps/rabbit/test/rabbit_msg_record_SUITE.erl +++ /dev/null @@ -1,298 +0,0 @@ --module(rabbit_msg_record_SUITE). - --compile(nowarn_export_all). --compile(export_all). - --export([ - ]). - --include_lib("rabbit_common/include/rabbit_framing.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("amqp10_common/include/amqp10_framing.hrl"). - -%%%=================================================================== -%%% Common Test callbacks -%%%=================================================================== - -all() -> - [ - {group, tests} - ]. - - -all_tests() -> - [ - ampq091_roundtrip, - amqp10_non_single_data_bodies, - unsupported_091_header_is_dropped, - message_id_ulong, - message_id_uuid, - message_id_binary, - message_id_large_binary, - message_id_large_string, - reuse_amqp10_binary_chunks - ]. - -groups() -> - [ - {tests, [], all_tests()} - ]. - -init_per_suite(Config) -> - Config. - -end_per_suite(_Config) -> - ok. - -init_per_group(_Group, Config) -> - Config. - -end_per_group(_Group, _Config) -> - ok. - -init_per_testcase(_TestCase, Config) -> - Config. - -end_per_testcase(_TestCase, _Config) -> - ok. 
- -%%%=================================================================== -%%% Test cases -%%%=================================================================== - -ampq091_roundtrip(_Config) -> - Props = #'P_basic'{content_type = <<"text/plain">>, - content_encoding = <<"gzip">>, - headers = [{<<"x-stream-offset">>, long, 99}, - {<<"x-string">>, longstr, <<"a string">>}, - {<<"x-bool">>, bool, false}, - {<<"x-unsignedbyte">>, unsignedbyte, 1}, - {<<"x-unsignedshort">>, unsignedshort, 1}, - {<<"x-unsignedint">>, unsignedint, 1}, - {<<"x-signedint">>, signedint, 1}, - {<<"x-timestamp">>, timestamp, 1}, - {<<"x-double">>, double, 1.0}, - {<<"x-float">>, float, 1.0}, - {<<"x-void">>, void, undefined}, - {<<"x-binary">>, binary, <<"data">>} - ], - delivery_mode = 2, - priority = 99, - correlation_id = <<"corr">> , - reply_to = <<"reply-to">>, - expiration = <<"1">>, - message_id = <<"msg-id">>, - timestamp = 99, - type = <<"45">>, - user_id = <<"banana">>, - app_id = <<"rmq">> - % cluster_id = <<"adf">> - }, - Payload = [<<"data">>], - test_amqp091_roundtrip(Props, Payload), - test_amqp091_roundtrip(#'P_basic'{}, Payload), - ok. - -amqp10_non_single_data_bodies(_Config) -> - Props = #'P_basic'{type = <<"amqp-1.0">>}, - Payloads = [ - [#'v1_0.data'{content = <<"hello">>}, - #'v1_0.data'{content = <<"brave">>}, - #'v1_0.data'{content = <<"new">>}, - #'v1_0.data'{content = <<"world">>} - ], - #'v1_0.amqp_value'{content = {utf8, <<"hello world">>}}, - [#'v1_0.amqp_sequence'{content = [{utf8, <<"one">>}, - {utf8, <<"blah">>}]}, - #'v1_0.amqp_sequence'{content = [{utf8, <<"two">>}]} - ] - ], - - [begin - EncodedPayload = amqp10_encode_bin(Payload), - - MsgRecord0 = rabbit_msg_record:from_amqp091(Props, EncodedPayload), - MsgRecord = rabbit_msg_record:init( - iolist_to_binary(rabbit_msg_record:to_iodata(MsgRecord0))), - {PropsOut, PayloadEncodedOut} = rabbit_msg_record:to_amqp091(MsgRecord), - PayloadOut = case amqp10_framing:decode_bin(iolist_to_binary(PayloadEncodedOut)) of - L when length(L) =:= 1 -> - lists:nth(1, L); - L -> - L - end, - - ?assertEqual(Props, PropsOut), - ?assertEqual(iolist_to_binary(EncodedPayload), - iolist_to_binary(PayloadEncodedOut)), - ?assertEqual(Payload, PayloadOut) - - end || Payload <- Payloads], - ok. - -unsupported_091_header_is_dropped(_Config) -> - Props = #'P_basic'{ - headers = [ - {<<"x-received-from">>, array, []} - ] - }, - MsgRecord0 = rabbit_msg_record:from_amqp091(Props, <<"payload">>), - MsgRecord = rabbit_msg_record:init( - iolist_to_binary(rabbit_msg_record:to_iodata(MsgRecord0))), - % meck:unload(), - {PropsOut, <<"payload">>} = rabbit_msg_record:to_amqp091(MsgRecord), - - ?assertMatch(#'P_basic'{headers = undefined}, PropsOut), - - ok. - -message_id_ulong(_Config) -> - Num = 9876789, - ULong = erlang:integer_to_binary(Num), - P = #'v1_0.properties'{message_id = {ulong, Num}, - correlation_id = {ulong, Num}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = ULong, - correlation_id = ULong, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id-type">>, longstr, <<"ulong">>}, - {<<"x-message-id-type">>, longstr, <<"ulong">>} - ]}, - Props), - ok. 
- -message_id_uuid(_Config) -> - %% fake a uuid - UUId = erlang:md5(term_to_binary(make_ref())), - TextUUId = rabbit_data_coercion:to_binary(rabbit_guid:to_string(UUId)), - P = #'v1_0.properties'{message_id = {uuid, UUId}, - correlation_id = {uuid, UUId}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = TextUUId, - correlation_id = TextUUId, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id-type">>, longstr, <<"uuid">>}, - {<<"x-message-id-type">>, longstr, <<"uuid">>} - ]}, - Props), - ok. - -message_id_binary(_Config) -> - %% fake a uuid - Orig = <<"asdfasdf">>, - Text = base64:encode(Orig), - P = #'v1_0.properties'{message_id = {binary, Orig}, - correlation_id = {binary, Orig}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = Text, - correlation_id = Text, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id-type">>, longstr, <<"binary">>}, - {<<"x-message-id-type">>, longstr, <<"binary">>} - ]}, - Props), - ok. - -message_id_large_binary(_Config) -> - %% cannot fit in a shortstr - Orig = crypto:strong_rand_bytes(500), - P = #'v1_0.properties'{message_id = {binary, Orig}, - correlation_id = {binary, Orig}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = undefined, - correlation_id = undefined, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id">>, longstr, Orig}, - {<<"x-message-id">>, longstr, Orig} - ]}, - Props), - ok. - -message_id_large_string(_Config) -> - %% cannot fit in a shortstr - Orig = base64:encode(crypto:strong_rand_bytes(500)), - P = #'v1_0.properties'{message_id = {utf8, Orig}, - correlation_id = {utf8, Orig}}, - D = #'v1_0.data'{content = <<"data">>}, - Bin = [amqp10_framing:encode_bin(P), - amqp10_framing:encode_bin(D)], - R = rabbit_msg_record:init(iolist_to_binary(Bin)), - {Props, _} = rabbit_msg_record:to_amqp091(R), - ?assertMatch(#'P_basic'{message_id = undefined, - correlation_id = undefined, - headers = - [ - %% ordering shouldn't matter - {<<"x-correlation-id">>, longstr, Orig}, - {<<"x-message-id">>, longstr, Orig} - ]}, - Props), - ok. 
- -reuse_amqp10_binary_chunks(_Config) -> - Amqp10MsgAnnotations = #'v1_0.message_annotations'{content = - [{{symbol, <<"x-route">>}, {utf8, <<"dummy">>}}]}, - Amqp10MsgAnnotationsBin = amqp10_encode_bin(Amqp10MsgAnnotations), - Amqp10Props = #'v1_0.properties'{group_id = {utf8, <<"my-group">>}, - group_sequence = {uint, 42}}, - Amqp10PropsBin = amqp10_encode_bin(Amqp10Props), - Amqp10AppProps = #'v1_0.application_properties'{content = [{{utf8, <<"foo">>}, {utf8, <<"bar">>}}]}, - Amqp10AppPropsBin = amqp10_encode_bin(Amqp10AppProps), - Amqp091Headers = [{<<"x-amqp-1.0-message-annotations">>, longstr, Amqp10MsgAnnotationsBin}, - {<<"x-amqp-1.0-properties">>, longstr, Amqp10PropsBin}, - {<<"x-amqp-1.0-app-properties">>, longstr, Amqp10AppPropsBin}], - Amqp091Props = #'P_basic'{type= <<"amqp-1.0">>, headers = Amqp091Headers}, - Body = #'v1_0.amqp_value'{content = {utf8, <<"hello world">>}}, - EncodedBody = amqp10_encode_bin(Body), - R = rabbit_msg_record:from_amqp091(Amqp091Props, EncodedBody), - RBin = rabbit_msg_record:to_iodata(R), - Amqp10DecodedMsg = amqp10_framing:decode_bin(iolist_to_binary(RBin)), - [Amqp10DecodedMsgAnnotations, Amqp10DecodedProps, - Amqp10DecodedAppProps, DecodedBody] = Amqp10DecodedMsg, - ?assertEqual(Amqp10MsgAnnotations, Amqp10DecodedMsgAnnotations), - ?assertEqual(Amqp10Props, Amqp10DecodedProps), - ?assertEqual(Amqp10AppProps, Amqp10DecodedAppProps), - ?assertEqual(Body, DecodedBody), - ok. - -amqp10_encode_bin(L) when is_list(L) -> - iolist_to_binary([amqp10_encode_bin(X) || X <- L]); -amqp10_encode_bin(X) -> - iolist_to_binary(amqp10_framing:encode_bin(X)). - -%% Utility - -test_amqp091_roundtrip(Props, Payload) -> - MsgRecord0 = rabbit_msg_record:from_amqp091(Props, Payload), - MsgRecord = rabbit_msg_record:init( - iolist_to_binary(rabbit_msg_record:to_iodata(MsgRecord0))), - % meck:unload(), - {PropsOut, PayloadOut} = rabbit_msg_record:to_amqp091(MsgRecord), - ?assertEqual(Props, PropsOut), - ?assertEqual(iolist_to_binary(Payload), - iolist_to_binary(PayloadOut)), - ok. diff --git a/deps/rabbit/test/rabbit_stream_queue_SUITE.erl b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl index e31444bb1ace..3d09d901caf9 100644 --- a/deps/rabbit/test/rabbit_stream_queue_SUITE.erl +++ b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_queue_SUITE). @@ -16,8 +16,11 @@ -compile(nowarn_export_all). -compile(export_all). +-import(rabbit_ct_helpers, [await_condition/2]). +-define(WAIT, 5000). + suite() -> - [{timetrap, 15 * 60000}]. + [{timetrap, 15 * 60_000}]. all() -> [ @@ -28,9 +31,6 @@ all() -> {group, single_node_parallel_4}, {group, cluster_size_2}, {group, cluster_size_2_parallel_1}, - {group, cluster_size_2_parallel_2}, - {group, cluster_size_2_parallel_3}, - {group, cluster_size_2_parallel_4}, {group, cluster_size_3}, {group, cluster_size_3_1}, {group, cluster_size_3_2}, @@ -41,24 +41,24 @@ all() -> {group, cluster_size_3_parallel_5}, {group, unclustered_size_3_1}, {group, unclustered_size_3_2}, - {group, unclustered_size_3_3} + {group, unclustered_size_3_3}, + {group, unclustered_size_3_4} ]. 
groups() -> [ - {single_node, [], [restart_single_node, recover]}, + {single_node, [], + [restart_single_node, + recover, + format]}, {single_node_parallel_1, [parallel], all_tests_1()}, {single_node_parallel_2, [parallel], all_tests_2()}, {single_node_parallel_3, [parallel], all_tests_3()}, {single_node_parallel_4, [parallel], all_tests_4()}, {cluster_size_2, [], [recover]}, {cluster_size_2_parallel_1, [parallel], all_tests_1()}, - {cluster_size_2_parallel_2, [parallel], all_tests_2()}, - {cluster_size_2_parallel_3, [parallel], all_tests_3()}, - {cluster_size_2_parallel_4, [parallel], all_tests_4()}, {cluster_size_3, [], [ - restart_coordinator_without_queues, delete_down_replica, replica_recovery, leader_failover, @@ -72,6 +72,7 @@ groups() -> select_nodes_with_least_replicas, recover_after_leader_and_coordinator_kill, restart_stream, + format, rebalance ]}, {cluster_size_3_1, [], [shrink_coordinator_cluster]}, @@ -96,7 +97,8 @@ groups() -> {cluster_size_3_parallel_5, [parallel], all_tests_4()}, {unclustered_size_3_1, [], [add_replica]}, {unclustered_size_3_2, [], [consume_without_local_replica]}, - {unclustered_size_3_3, [], [grow_coordinator_cluster]} + {unclustered_size_3_3, [], [grow_coordinator_cluster]}, + {unclustered_size_3_4, [], [grow_then_shrink_coordinator_cluster]} ]. all_tests_1() -> @@ -168,7 +170,7 @@ all_tests_4() -> init_per_suite(Config0) -> rabbit_ct_helpers:log_environment(), Config = rabbit_ct_helpers:merge_app_env( - Config0, {rabbit, [{stream_tick_interval, 1000}, + Config0, {rabbit, [{stream_tick_interval, 256}, {log, [{file, [{level, debug}]}]}]}), rabbit_ct_helpers:run_setup_steps(Config). @@ -207,12 +209,14 @@ init_per_group1(Group, Config) -> cluster_size_3_2 -> 3; unclustered_size_3_1 -> 3; unclustered_size_3_2 -> 3; - unclustered_size_3_3 -> 3 + unclustered_size_3_3 -> 3; + unclustered_size_3_4 -> 3 end, Clustered = case Group of unclustered_size_3_1 -> false; unclustered_size_3_2 -> false; unclustered_size_3_3 -> false; + unclustered_size_3_4 -> false; _ -> true end, Config1 = rabbit_ct_helpers:set_config(Config, @@ -221,28 +225,24 @@ init_per_group1(Group, Config) -> {tcp_ports_base}, {rmq_nodes_clustered, Clustered}]), Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]), - Ret = rabbit_ct_helpers:run_steps(Config1b, + Config1c = case Group of + unclustered_size_3_4 -> + rabbit_ct_helpers:merge_app_env( + Config1b, {rabbit, [{stream_tick_interval, 5000}]}); + _ -> + Config1b + end, + Ret = rabbit_ct_helpers:run_steps(Config1c, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()), case Ret of {skip, _} -> Ret; Config2 -> - EnableFF = rabbit_ct_broker_helpers:enable_feature_flag( - Config2, stream_queue), - case EnableFF of - ok -> - ok = rabbit_ct_broker_helpers:rpc( - Config2, 0, application, set_env, - [rabbit, channel_tick_interval, 100]), - Config2; - {skip, _} = Skip -> - end_per_group(Group, Config2), - Skip; - Other -> - end_per_group(Group, Config2), - {skip, Other} - end + ok = rabbit_ct_broker_helpers:rpc( + Config2, 0, application, set_env, + [rabbit, channel_tick_interval, 100]), + Config2 end. end_per_group(_, Config) -> @@ -281,7 +281,6 @@ init_per_testcase(TestCase, Config) _ -> init_test_case(TestCase, Config) end; - init_per_testcase(TestCase, Config) -> init_test_case(TestCase, Config). 
@@ -420,7 +419,11 @@ declare_queue(Config) -> declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), %% Test declare an existing queue - ?assertEqual({'queue.declare_ok', Q, 0, 0}, + %% there is a very brief race condition in the osiris counter updates that could + %% cause the message count to be reported as 1 temporarily after a new stream + %% creation. Hence to avoid flaking we don't match on the messages counter + %% here + ?assertMatch({'queue.declare_ok', Q, _, 0}, declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), ?assertMatch([_], find_queue_info(Config, [])), @@ -433,22 +436,28 @@ find_queue_info(Config, Keys) -> find_queue_info(Config, 0, Keys). find_queue_info(Config, Node, Keys) -> - Name = ?config(queue_name, Config), + find_queue_info(?config(queue_name, Config), Config, Node, Keys). + +find_queue_info(Name, Config, Node, Keys) -> QName = rabbit_misc:r(<<"/">>, queue, Name), - Infos = rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_amqqueue, info_all, - [<<"/">>, [name] ++ Keys]), - [Info] = [Props || Props <- Infos, lists:member({name, QName}, Props)], - Info. + rabbit_ct_broker_helpers:rpc(Config, Node, ?MODULE, find_queue_info_rpc, + [QName, [name | Keys]]). + +find_queue_info_rpc(QName, Infos) -> + case rabbit_amqqueue:lookup(QName) of + {ok, Q} -> + rabbit_amqqueue:info(Q, Infos); + _ -> + [] + end. delete_queue(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), - ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch, #'queue.delete'{queue = Q})). + ?assertMatch(#'queue.delete_ok'{}, delete(Config, Server, Q)). add_replicas(Config) -> [Server0, Server1, Server2] = @@ -466,17 +475,15 @@ add_replicas(Config) -> #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), amqp_channel:register_confirm_handler(Ch, self()), [publish(Ch, Q, Data) || _ <- lists:seq(1, NumMsgs)], - %% should be sufficient for the next message to fall in the next - %% chunk - timer:sleep(100), + %% wait for confirms here to ensure the next message ends up in a chunk + %% of it's own + amqp_channel:wait_for_confirms(Ch, 30), publish(Ch, Q, <<"last">>), amqp_channel:wait_for_confirms(Ch, 30), - timer:sleep(1000), ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, add_replica, [<<"/">>, Q, Server1])), - timer:sleep(1000), check_leader_and_replicas(Config, [Server0, Server1]), %% it is almost impossible to reliably catch this situation. @@ -485,7 +492,6 @@ add_replicas(Config) -> ?assertMatch(ok , rpc:call(Server0, rabbit_stream_queue, add_replica, [<<"/">>, Q, Server2])), - timer:sleep(1000), check_leader_and_replicas(Config, [Server0, Server1, Server2]), %% validate we can read the last entry @@ -508,14 +514,11 @@ add_replicas(Config) -> after 60000 -> flush(), ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch, #'queue.delete'{queue = Q})), + delete(Config, Server0, Q)), exit(deliver_timeout) end, - % ?assertMatch({error, {disallowed, out_of_sync_replica}} , - % rpc:call(Server0, rabbit_stream_queue, add_replica, - % [<<"/">>, Q, Server2])), ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch, #'queue.delete'{queue = Q})), + delete(Config, Server0, Q)), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). 
add_replica(Config) -> @@ -583,7 +586,7 @@ delete_replica(Config) -> declare(Config, Server0, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), check_leader_and_replicas(Config, [Server0, Server1, Server2]), %% Not a member of the cluster, what would happen? - ?assertEqual({error, node_not_running}, + ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, delete_replica, [<<"/">>, Q, 'zen@rabbit'])), ?assertEqual(ok, @@ -625,6 +628,50 @@ delete_last_replica(Config) -> check_leader_and_replicas(Config, [Server0]), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). +grow_then_shrink_coordinator_cluster(Config) -> + [Server0, Server1, Server2] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Q = ?config(queue_name, Config), + + ?assertEqual({'queue.declare_ok', Q, 0, 0}, + declare(Config, Server0, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), + + ok = rabbit_control_helper:command(stop_app, Server1), + ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []), + ok = rabbit_control_helper:command(start_app, Server1), + ok = rabbit_control_helper:command(stop_app, Server2), + ok = rabbit_control_helper:command(join_cluster, Server2, [atom_to_list(Server0)], []), + ok = rabbit_control_helper:command(start_app, Server2), + + rabbit_ct_helpers:await_condition( + fun() -> + case rpc:call(Server0, ra, members, + [{rabbit_stream_coordinator, Server0}]) of + {_, Members, _} -> + Nodes = lists:sort([N || {_, N} <- Members]), + lists:sort([Server0, Server1, Server2]) == Nodes; + _ -> + false + end + end, 60000), + + ok = rabbit_control_helper:command(stop_app, Server1), + ok = rabbit_control_helper:command(forget_cluster_node, Server0, [atom_to_list(Server1)], []), + ok = rabbit_control_helper:command(stop_app, Server2), + ok = rabbit_control_helper:command(forget_cluster_node, Server0, [atom_to_list(Server2)], []), + rabbit_ct_helpers:await_condition( + fun() -> + case rpc:call(Server0, ra, members, + [{rabbit_stream_coordinator, Server0}]) of + {_, Members, _} -> + Nodes = lists:sort([N || {_, N} <- Members]), + lists:sort([Server0]) == Nodes; + _ -> + false + end + end, 60000), + ok. 
+ grow_coordinator_cluster(Config) -> [Server0, Server1, _Server2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -725,22 +772,31 @@ delete_down_replica(Config) -> declare(Config, Server0, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), check_leader_and_replicas(Config, [Server0, Server1, Server2]), ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), - ?assertEqual({error, node_not_running}, + ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, delete_replica, [<<"/">>, Q, Server1])), - %% check it isn't gone - check_leader_and_replicas(Config, [Server0, Server1, Server2], members), + %% check it's gone + check_leader_and_replicas(Config, [Server0, Server2], members), ok = rabbit_ct_broker_helpers:start_node(Config, Server1), - rabbit_ct_helpers:await_condition( - fun() -> - ok == rpc:call(Server0, rabbit_stream_queue, delete_replica, - [<<"/">>, Q, Server1]) - end), + check_leader_and_replicas(Config, [Server0, Server2], members), + %% check the folder was deleted + QName = rabbit_misc:r(<<"/">>, queue, Q), + StreamId = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, get_stream_id, [QName]), + Server1DataDir = rabbit_ct_broker_helpers:get_node_config(Config, 1, data_dir), + DeletedReplicaDir = filename:join([Server1DataDir, "stream", StreamId]), + + ?awaitMatch(false, filelib:is_dir(DeletedReplicaDir), ?WAIT), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). publish_coordinator_unavailable(Config) -> - [Server0, Server1, Server2] = - rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + %% This testcase leaves Khepri on minority when it is enabled. + %% If the Khepri leader is in one of the nodes being stopped, the + %% remaining node won't be able to reply to any channel query. 
+ %% Let's remove it from the Khepri test suite, as it does not make + %% much sense to test something that will randomly work + %% depending on where the leader is placed - even though we could + %% always select as running node the Khepri leader + [Server0, Server1, Server2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, @@ -750,21 +806,24 @@ publish_coordinator_unavailable(Config) -> ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), rabbit_ct_helpers:await_condition( fun () -> - N = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_nodes, list_running, []), + N = rabbit_ct_broker_helpers:rpc(Config, Server0, rabbit_nodes, list_running, []), length(N) == 1 end), #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), amqp_channel:register_confirm_handler(Ch, self()), publish(Ch, Q), - ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 506, _}}}, _}, - amqp_channel:wait_for_confirms(Ch, 60)), - ok = rabbit_ct_broker_helpers:start_node(Config, Server1), - ok = rabbit_ct_broker_helpers:start_node(Config, Server2), + ok = rabbit_ct_broker_helpers:async_start_node(Config, Server1), + ok = rabbit_ct_broker_helpers:async_start_node(Config, Server2), + ok = rabbit_ct_broker_helpers:wait_for_async_start_node(Server1), + ok = rabbit_ct_broker_helpers:wait_for_async_start_node(Server2), rabbit_ct_helpers:await_condition( fun () -> - Info = find_queue_info(Config, 0, [online]), + Info = find_queue_info(Config, Server0, [online]), length(proplists:get_value(online, Info)) == 3 end), + % ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 506, _}}}, _}, + %%% confirms should be issued when available + amqp_channel:wait_for_confirms(Ch, 60), Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server0), publish(Ch1, Q), @@ -783,7 +842,7 @@ publish(Config) -> declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), publish(Ch, Q), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). publish_confirm(Config) -> @@ -798,7 +857,7 @@ publish_confirm(Config) -> amqp_channel:register_confirm_handler(Ch, self()), publish(Ch, Q), amqp_channel:wait_for_confirms(Ch, 5), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). 
restart_single_node(Config) -> @@ -809,15 +868,15 @@ restart_single_node(Config) -> ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), publish(Ch, Q), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), rabbit_control_helper:command(stop_app, Server), rabbit_control_helper:command(start_app, Server), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server), publish(Ch1, Q), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>]]), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). %% the failing case for this test relies on a particular random condition @@ -861,19 +920,22 @@ recover(Config) -> ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), publish(Ch, Q), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]), Perm0 = permute(Servers0), Servers = lists:nth(rand:uniform(length(Perm0)), Perm0), %% Such a slow test, let's select a single random permutation and trust that over enough %% ci rounds any failure will eventually show up + flush(), ct:pal("recover: running stop start for permutation ~w", [Servers]), [rabbit_ct_broker_helpers:stop_node(Config, S) || S <- Servers], - [rabbit_ct_broker_helpers:start_node(Config, S) || S <- lists:reverse(Servers)], - ct:pal("recover: running stop waiting for messages ~w", [Servers]), + [rabbit_ct_broker_helpers:async_start_node(Config, S) || S <- lists:reverse(Servers)], + [ok = rabbit_ct_broker_helpers:wait_for_async_start_node(S) || S <- lists:reverse(Servers)], + + ct:pal("recover: post stop / start, waiting for messages ~w", [Servers]), check_leader_and_replicas(Config, Servers0), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]], 60), + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]], 60), %% Another single random permutation Perm1 = permute(Servers0), @@ -881,34 +943,17 @@ recover(Config) -> ct:pal("recover: running app stop start for permuation ~w", [Servers1]), [rabbit_control_helper:command(stop_app, S) || S <- Servers1], - [rabbit_control_helper:command(start_app, S) || S <- lists:reverse(Servers1)], + [rabbit_control_helper:async_command(start_app, S, [], []) + || S <- lists:reverse(Servers1)], + [rabbit_control_helper:wait_for_async_command(S) || S <- lists:reverse(Servers1)], + ct:pal("recover: running app stop waiting for messages ~w", [Servers1]), check_leader_and_replicas(Config, Servers0), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]], 60), + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]], 60), Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server), publish(Ch1, Q), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>]]), - rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). 
- -restart_coordinator_without_queues(Config) -> - %% The coordinator failed to restart if stream queues were not present anymore, as - %% they wouldn't call recover in all nodes - only the local one was restarted so - %% the election wouldn't succeed. Fixed now, but this test checks for that failure - [Server | _] = Servers0 = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - Q = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', Q, 0, 0}, - declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), - publish_confirm(Ch, Q, [<<"msg">>]), - ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch, #'queue.delete'{queue = Q})), - - [rabbit_ct_broker_helpers:stop_node(Config, S) || S <- Servers0], - [rabbit_ct_broker_helpers:start_node(Config, S) || S <- lists:reverse(Servers0)], - - ?assertEqual({'queue.declare_ok', Q, 0, 0}, - declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), + queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>]]), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). consume_without_qos(Config) -> @@ -1109,37 +1154,6 @@ consume_with_autoack(Config) -> subscribe(Ch1, Q, true, 0)), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). -consume_and_nack(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - Q = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', Q, 0, 0}, - declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), - - publish_confirm(Ch, Q, [<<"msg">>]), - - Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server), - qos(Ch1, 10, false), - subscribe(Ch1, Q, false, 0), - receive - {#'basic.deliver'{delivery_tag = DeliveryTag}, _} -> - ok = amqp_channel:cast(Ch1, #'basic.nack'{delivery_tag = DeliveryTag, - multiple = false, - requeue = true}), - %% Nack will throw a not implemented exception. As it is a cast operation, - %% we'll detect the conneciton/channel closure on the next call. - %% Let's try to redeclare and see what happens - ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _}, - amqp_channel:call(Ch1, #'queue.declare'{queue = Q, - durable = true, - auto_delete = false, - arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]})) - after 10000 -> - exit(timeout) - end, - rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). - basic_cancel(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -1225,7 +1239,7 @@ recover_after_leader_and_coordinator_kill(Config) -> ct:pal("sys state ~p", [CState]), - + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]), ok. keep_consuming_on_leader_restart(Config) -> @@ -1294,6 +1308,12 @@ get_leader_info(QName) -> {error, not_found} end. +get_stream_id(QName) -> + {ok, Q} = rabbit_amqqueue:lookup(QName), + QState = amqqueue:get_type_state(Q), + #{name := StreamId} = QState, + StreamId. + kill_process(Config, Node, Pid) -> rabbit_ct_broker_helpers:rpc(Config, Node, ?MODULE, do_kill_process, [Pid]). 
@@ -1312,6 +1332,13 @@ recover_coordinator(Config, Node) -> get_stream_coordinator_leader(Config) -> Node = hd(rabbit_ct_broker_helpers:get_node_configs(Config, nodename)), + rabbit_ct_helpers:await_condition( + fun() -> + Ret = rabbit_ct_broker_helpers:rpc( + Config, Node, ra_leaderboard, lookup_leader, + [rabbit_stream_coordinator]), + is_tuple(Ret) + end), rabbit_ct_broker_helpers:rpc(Config, Node, ra_leaderboard, lookup_leader, [rabbit_stream_coordinator]). @@ -1338,42 +1365,13 @@ filter_consumers(Config, Server, CTag) -> end, [], CInfo). consume_and_reject(Config) -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - Q = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', Q, 0, 0}, - declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), - - publish_confirm(Ch, Q, [<<"msg">>]), - - Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server), - qos(Ch1, 10, false), - subscribe(Ch1, Q, false, 0), - receive - {#'basic.deliver'{delivery_tag = DeliveryTag}, _} -> - MRef = erlang:monitor(process, Ch1), - ok = amqp_channel:cast(Ch1, #'basic.reject'{delivery_tag = DeliveryTag, - requeue = true}), - %% Reject will throw a not implemented exception. As it is a cast - %% operation, we detect the connection error from the channel - %% process exit reason. - receive - {'DOWN', MRef, _, _, Reason} -> - ?assertMatch( - {shutdown, - {connection_closing, - {server_initiated_close, 540, _}}}, - Reason) - after 10000 -> - exit(timeout) - end - after 10000 -> - exit(timeout) - end, - rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). - + consume_and_(Config, fun (DT) -> #'basic.reject'{delivery_tag = DT} end). +consume_and_nack(Config) -> + consume_and_(Config, fun (DT) -> #'basic.nack'{delivery_tag = DT} end). consume_and_ack(Config) -> + consume_and_(Config, fun (DT) -> #'basic.ack'{delivery_tag = DT} end). + +consume_and_(Config, AckFun) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), @@ -1388,15 +1386,14 @@ consume_and_ack(Config) -> subscribe(Ch1, Q, false, 0), receive {#'basic.deliver'{delivery_tag = DeliveryTag}, _} -> - ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag, - multiple = false}), + ok = amqp_channel:cast(Ch1, AckFun(DeliveryTag)), %% It will succeed as ack is now a credit operation. We should be %% able to redeclare a queue (gen_server call op) as the channel %% should still be open and declare is an idempotent operation %% ?assertMatch({'queue.declare_ok', Q, _MsgCount, 0}, declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]) + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]) after 5000 -> exit(timeout) end, @@ -1423,34 +1420,81 @@ tracking_status(Config) -> rabbit_ct_broker_helpers:rpc(Config, Server, ?MODULE, delete_testcase_queue, [Q]). 
restart_stream(Config) -> - case rabbit_ct_broker_helpers:enable_feature_flag(Config, restart_stream) of - ok -> - [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - Q = ?config(queue_name, Config), - ?assertEqual({'queue.declare_ok', Q, 0, 0}, - declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + Q = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Q, 0, 0}, + declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), + + publish_confirm(Ch, Q, [<<"msg">>]), + Vhost = ?config(rmq_vhost, Config), + QName = #resource{virtual_host = Vhost, + kind = queue, + name = Q}, + %% restart the stream + ?assertMatch({ok, _}, + rabbit_ct_broker_helpers:rpc(Config, Server, + rabbit_stream_coordinator, + ?FUNCTION_NAME, [QName])), + + publish_confirm(Ch, Q, [<<"msg2">>]), + rabbit_ct_broker_helpers:rpc(Config, Server, ?MODULE, delete_testcase_queue, [Q]), + ok. + +format(Config) -> + %% tests rabbit_stream_queue:format/2 + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Server = hd(Nodes), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + Q = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Q, 0, 0}, + declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), - publish_confirm(Ch, Q, [<<"msg">>]), - Vhost = ?config(rmq_vhost, Config), - QName = #resource{virtual_host = Vhost, - kind = queue, - name = Q}, - %% restart the stream - ?assertMatch({ok, _}, - rabbit_ct_broker_helpers:rpc(Config, Server, - rabbit_stream_coordinator, - ?FUNCTION_NAME, [QName])), - - publish_confirm(Ch, Q, [<<"msg2">>]), - rabbit_ct_broker_helpers:rpc(Config, Server, ?MODULE, delete_testcase_queue, [Q]), + publish_confirm(Ch, Q, [<<"msg">>]), + Vhost = ?config(rmq_vhost, Config), + QName = #resource{virtual_host = Vhost, + kind = queue, + name = Q}, + {ok, QRecord} = rabbit_ct_broker_helpers:rpc(Config, Server, + rabbit_amqqueue, + lookup, [QName]), + %% restart the stream + Fmt = rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_stream_queue, + ?FUNCTION_NAME, [QRecord, #{}]), + + %% test all up case + ?assertEqual(stream, proplists:get_value(type, Fmt)), + ?assertEqual(running, proplists:get_value(state, Fmt)), + ?assertEqual(Server, proplists:get_value(leader, Fmt)), + ?assertEqual(Server, proplists:get_value(node, Fmt)), + ?assertEqual(Nodes, proplists:get_value(online, Fmt)), + ?assertEqual(Nodes, proplists:get_value(members, Fmt)), + + case length(Nodes) of + 3 -> + [_, Server2, Server3] = Nodes, + ok = rabbit_control_helper:command(stop_app, Server2), + ok = rabbit_control_helper:command(stop_app, Server3), + + Fmt2 = rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_stream_queue, + ?FUNCTION_NAME, [QRecord, #{}]), + ok = rabbit_control_helper:command(start_app, Server2), + ok = rabbit_control_helper:command(start_app, Server3), + ?assertEqual(stream, proplists:get_value(type, Fmt2)), + ?assertEqual(minority, proplists:get_value(state, Fmt2)), + ?assertEqual(Server, proplists:get_value(leader, Fmt2)), + ?assertEqual(Server, proplists:get_value(node, Fmt2)), + ?assertEqual([Server], proplists:get_value(online, Fmt2)), + ?assertEqual(Nodes, proplists:get_value(members, Fmt2)), ok; - _ -> - ct:pal("skipping test ~s as feature flag `restart_stream` not supported", - 
[?FUNCTION_NAME]), + 1 -> ok - end. + end, + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]), + ok. consume_from_last(Config) -> @@ -1606,11 +1650,6 @@ consume_from_replica(Config) -> rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). consume_credit(Config) -> - %% Because osiris provides one chunk on every read and we don't want to buffer - %% messages in the broker to avoid memory penalties, the credit value won't - %% be strict - we allow it into the negative values. - %% We can test that after receiving a chunk, no more messages are delivered until - %% the credit goes back to a positive value. [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server), @@ -1630,39 +1669,54 @@ consume_credit(Config) -> qos(Ch1, Credit, false), subscribe(Ch1, Q, false, 0), - %% Receive everything - DeliveryTags = receive_batch(), + %% We expect to receive exactly 2 messages. + DTag1 = receive {#'basic.deliver'{delivery_tag = Tag1}, _} -> Tag1 + after 5000 -> ct:fail({missing_delivery, ?LINE}) + end, + _DTag2 = receive {#'basic.deliver'{delivery_tag = Tag2}, _} -> Tag2 + after 5000 -> ct:fail({missing_delivery, ?LINE}) + end, + receive {#'basic.deliver'{}, _} -> ct:fail({unexpected_delivery, ?LINE}) + after 100 -> ok + end, - %% We receive at least the given credit as we know there are 100 messages in the queue - ?assert(length(DeliveryTags) >= Credit), + %% When we ack the 1st message, we should receive exactly 1 more message + ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DTag1, + multiple = false}), + DTag3 = receive {#'basic.deliver'{delivery_tag = Tag3}, _} -> Tag3 + after 5000 -> ct:fail({missing_delivery, ?LINE}) + end, + receive {#'basic.deliver'{}, _} -> + ct:fail({unexpected_delivery, ?LINE}) + after 100 -> ok + end, - %% Let's ack as many messages as we can while avoiding a positive credit for new deliveries - {ToAck, Pending} = lists:split(length(DeliveryTags) - Credit, DeliveryTags), + %% Whenever we ack 2 messages, we should receive exactly 2 more messages. + ok = consume_credit0(Ch1, DTag3), - [ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag, - multiple = false}) - || DeliveryTag <- ToAck], + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). - %% Nothing here, this is good - receive - {#'basic.deliver'{}, _} -> - exit(unexpected_delivery) - after 1000 -> - ok +consume_credit0(_Ch, DTag) + when DTag > 50 -> + %% sufficiently tested + ok; +consume_credit0(Ch, DTagPrev) -> + %% Ack 2 messages. + ok = amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTagPrev, + multiple = true}), + %% Receive 1st message. + receive {#'basic.deliver'{}, _} -> ok + after 5000 -> ct:fail({missing_delivery, ?LINE}) end, - - %% Let's ack one more, we should receive a new chunk - ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = hd(Pending), - multiple = false}), - - %% Yeah, here is the new chunk! - receive - {#'basic.deliver'{}, _} -> - ok - after 5000 -> - exit(timeout) + %% Receive 2nd message. + DTag = receive {#'basic.deliver'{delivery_tag = T}, _} -> T + after 5000 -> ct:fail({missing_delivery, ?LINE}) + end, + %% We shouldn't receive more messages given that AMQP 0.9.1 prefetch count is 2. + receive {#'basic.deliver'{}, _} -> ct:fail({unexpected_delivery, ?LINE}) + after 10 -> ok end, - rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). + consume_credit0(Ch, DTag). 
consume_credit_out_of_order_ack(Config) -> %% Like consume_credit but acknowledging the messages out of order. @@ -1820,22 +1874,24 @@ max_age(Config) -> Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}, - {<<"x-max-age">>, longstr, <<"10s">>}, + {<<"x-max-age">>, longstr, <<"5s">>}, {<<"x-stream-max-segment-size-bytes">>, long, 250}])), Payload = << <<"1">> || _ <- lists:seq(1, 500) >>, publish_confirm(Ch, Q, [Payload || _ <- lists:seq(1, 100)]), - timer:sleep(10000), + %% there is no way around this sleep, we need to wait for retention period + %% to pass + timer:sleep(5000), %% Let's publish again so the new segments will trigger the retention policy [publish(Ch, Q, Payload) || _ <- lists:seq(1, 100)], amqp_channel:wait_for_confirms(Ch, 5), %% Let's give it some margin if some messages fall between segments - quorum_queue_utils:wait_for_min_messages(Config, Q, 100), - quorum_queue_utils:wait_for_max_messages(Config, Q, 150), + queue_utils:wait_for_min_messages(Config, Q, 100), + queue_utils:wait_for_max_messages(Config, Q, 150), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). @@ -1854,7 +1910,6 @@ replica_recovery(Config) -> fun(DownNode) -> rabbit_ct_helpers:await_condition( fun () -> - timer:sleep(1000), ct:pal("Wait for replica to recover..."), try {Conn, Ch2} = rabbit_ct_client_helpers:open_connection_and_channel(Config, DownNode), @@ -1866,20 +1921,23 @@ replica_recovery(Config) -> catch _:_ -> false end - end, 30000) + end, 120_000) end, + Perms = permute(Nodes), + AppPerm = lists:nth(rand:uniform(length(Perms)), Perms), [begin rabbit_control_helper:command(stop_app, DownNode), rabbit_control_helper:command(start_app, DownNode), CheckReplicaRecovered(DownNode) - end || [DownNode | _] <- permute(Nodes)], + end || [DownNode | _] <- AppPerm], + NodePerm = lists:nth(rand:uniform(length(Perms)), Perms), [begin ok = rabbit_ct_broker_helpers:stop_node(Config, DownNode), ok = rabbit_ct_broker_helpers:start_node(Config, DownNode), CheckReplicaRecovered(DownNode) - end || [DownNode | _] <- permute(Nodes)], + end || DownNode <- NodePerm], rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). leader_failover(Config) -> @@ -1953,11 +2011,9 @@ leader_failover_dedupe(Config) -> ok = rabbit_ct_broker_helpers:stop_node(Config, DownNode), %% this should cause a new leader to be elected and the channel on node 2 %% to have to resend any pending messages to ensure none is lost - ct:pal("preinfo", []), rabbit_ct_helpers:await_condition( fun() -> Info = find_queue_info(Config, PubNode, [leader, members]), - ct:pal("info ~tp", [Info]), NewLeader = proplists:get_value(leader, Info), NewLeader =/= DownNode end), @@ -1982,7 +2038,6 @@ leader_failover_dedupe(Config) -> initial_cluster_size_one(Config) -> [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, @@ -1991,13 +2046,12 @@ initial_cluster_size_one(Config) -> check_leader_and_replicas(Config, [Server1]), ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch, #'queue.delete'{queue = Q})), + delete(Config, Server1, Q)), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). 
initial_cluster_size_two(Config) -> [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, @@ -2010,7 +2064,7 @@ initial_cluster_size_two(Config) -> ?assertEqual(2, length(proplists:get_value(members, Info))), ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch, #'queue.delete'{queue = Q})), + delete(Config, Server1, Q)), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). initial_cluster_size_one_policy(Config) -> @@ -2022,7 +2076,6 @@ initial_cluster_size_one_policy(Config) -> <<"queues">>, [{<<"initial-cluster-size">>, 1}]), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, @@ -2031,7 +2084,7 @@ initial_cluster_size_one_policy(Config) -> check_leader_and_replicas(Config, [Server1]), ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch, #'queue.delete'{queue = Q})), + delete(Config, Server1, Q)), ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, PolicyName), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). @@ -2039,14 +2092,11 @@ initial_cluster_size_one_policy(Config) -> declare_delete_same_stream(Config) -> Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Q = ?config(queue_name, Config), - [begin - Ch = rabbit_ct_client_helpers:open_channel(Config, S), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Config, S, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch, #'queue.delete'{queue = Q})), - rabbit_ct_client_helpers:close_channel(Ch) + delete(Config, S, Q)) end || _ <- lists:seq(1, 20), S <- Servers], ok. 
@@ -2054,45 +2104,51 @@ declare_delete_same_stream(Config) -> leader_locator_client_local(Config) -> [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Config, Server1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}, {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])), - Info = find_queue_info(Config, [leader]), - - ?assertEqual(Server1, proplists:get_value(leader, Info)), + await_condition( + fun () -> + Server1 == proplists:get_value(leader, find_queue_info(Config, [leader])) + end, 60), ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch, #'queue.delete'{queue = Q})), + delete(Config, Server1, Q)), + Q2 = <>, %% Try second node - Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server2), - ?assertEqual({'queue.declare_ok', Q, 0, 0}, - declare(Config, Server2, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}, + ?assertEqual({'queue.declare_ok', Q2, 0, 0}, + declare(Config, Server2, Q2, [{<<"x-queue-type">>, longstr, <<"stream">>}, {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])), - Info2 = find_queue_info(Config, [leader]), - ?assertEqual(Server2, proplists:get_value(leader, Info2)), + %% the amqqueue:pid field is updated async for khepri + %% so we need to await the condition here + await_condition( + fun () -> + Server2 == proplists:get_value(leader, + find_queue_info(Q2, Config, 0, [leader])) + end, 60), + + ?assertMatch(#'queue.delete_ok'{}, delete(Config, Server2, Q2)), - ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch2, #'queue.delete'{queue = Q})), + Q3 = <>, %% Try third node - Ch3 = rabbit_ct_client_helpers:open_channel(Config, Server3), - ?assertEqual({'queue.declare_ok', Q, 0, 0}, - declare(Config, Server3, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}, + ?assertEqual({'queue.declare_ok', Q3, 0, 0}, + declare(Config, Server3, Q3, [{<<"x-queue-type">>, longstr, <<"stream">>}, {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])), + await_condition( + fun () -> + Server3 == proplists:get_value(leader, + find_queue_info(Q3, Config, 0, [leader])) + end, 60), - Info3 = find_queue_info(Config, [leader]), - ?assertEqual(Server3, proplists:get_value(leader, Info3)), - - ?assertMatch(#'queue.delete_ok'{}, - amqp_channel:call(Ch3, #'queue.delete'{queue = Q})), - rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). + ?assertMatch(#'queue.delete_ok'{}, delete(Config, Server3, Q3)), + ok. leader_locator_balanced(Config) -> [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -2130,11 +2186,15 @@ leader_locator_balanced_maintenance(Config) -> declare(Config, Server1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}, {<<"x-queue-leader-locator">>, longstr, <<"balanced">>}])), - Info = find_queue_info(Config, [leader]), - Leader = proplists:get_value(leader, Info), - ?assert(lists:member(Leader, [Server1, Server2])), + await_condition( + fun() -> + Info = find_queue_info(Config, [leader]), + Leader = proplists:get_value(leader, Info), + lists:member(Leader, [Server1, Server2]) + end, 60000), true = rabbit_ct_broker_helpers:unmark_as_being_drained(Config, Server3), + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, [[Q1, Q]]), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). 
select_nodes_with_least_replicas(Config) -> @@ -2235,9 +2295,6 @@ invalid_policy(Config) -> Q = ?config(queue_name, Config), ?assertEqual({'queue.declare_ok', Q, 0, 0}, declare(Config, Server, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), - ok = rabbit_ct_broker_helpers:set_policy( - Config, 0, <<"ha">>, <<"invalid_policy.*">>, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}]), ok = rabbit_ct_broker_helpers:set_policy( Config, 0, <<"ttl">>, <<"invalid_policy.*">>, <<"queues">>, [{<<"message-ttl">>, 5}]), @@ -2247,7 +2304,6 @@ invalid_policy(Config) -> ?assertEqual('', proplists:get_value(policy, Info)), ?assertEqual('', proplists:get_value(operator_policy, Info)), ?assertEqual([], proplists:get_value(effective_policy_definition, Info)), - ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>), ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl">>), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). @@ -2301,7 +2357,7 @@ update_retention_policy(Config) -> %% Retention policy should clear approximately 2/3 of the messages, but just to be safe %% let's simply check that it removed half of them - quorum_queue_utils:wait_for_max_messages(Config, Q, 5000), + queue_utils:wait_for_max_messages(Config, Q, 5000), {ok, Q1} = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, lookup, [rabbit_misc:r(<<"/">>, queue, Q)]), @@ -2419,7 +2475,7 @@ dead_letter_target(Config) -> ok = amqp_channel:cast(Ch1, #'basic.nack'{delivery_tag = DeliveryTag, requeue =false, multiple = false}), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]) + queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]) after 5000 -> exit(timeout) end, @@ -2531,6 +2587,9 @@ declare(Config, Server, Q, Args) -> auto_delete = false, arguments = Args}). +delete(Config, Server, Q) -> + retry_if_coordinator_unavailable(Config, Server, #'queue.delete'{queue = Q}). + retry_if_coordinator_unavailable(Config, Server, Cmd) -> Props = ?config(tc_group_properties, Config), %% Running parallel tests the coordinator could be busy answering other @@ -2555,7 +2614,7 @@ retry_if_coordinator_unavailable(Config, Server, Cmd, Retry) -> case re:run(Msg, ".*coordinator_unavailable.*", [{capture, none}]) of match -> ct:pal("Attempt to execute command ~p failed, coordinator unavailable", [Cmd]), - retry_if_coordinator_unavailable(Ch, Cmd, Retry - 1); + retry_if_coordinator_unavailable(Config, Ch, Cmd, Retry - 1); _ -> exit(Error) end @@ -2576,10 +2635,15 @@ check_leader_and_replicas(Config, Members) -> check_leader_and_replicas(Config, Members, Tag) -> rabbit_ct_helpers:await_condition( fun() -> - Info = find_queue_info(Config, [leader, Tag]), - ct:pal("~ts members ~w ~tp", [?FUNCTION_NAME, Members, Info]), - lists:member(proplists:get_value(leader, Info), Members) - andalso (lists:sort(Members) == lists:sort(proplists:get_value(Tag, Info))) + case find_queue_info(Config, [leader, Tag]) of + [] -> + false; + Info -> + ct:pal("~ts members ~w ~tp", [?FUNCTION_NAME, Members, Info]), + lists:member(proplists:get_value(leader, Info), Members) + andalso (lists:sort(Members) == + lists:sort(proplists:get_value(Tag, Info))) + end end, 60_000). check_members(Config, ExpectedMembers) -> @@ -2730,18 +2794,7 @@ ensure_retention_applied(Config, Server) -> rabbit_ct_broker_helpers:rpc(Config, Server, gen_server, call, [osiris_retention, test]). 
rebalance(Config) -> - case rabbit_ct_broker_helpers:enable_feature_flag(Config, restart_stream) of - ok -> - rebalance0(Config); - _ -> - ct:pal("skipping test ~s as feature flag `restart_stream` not supported", - [?FUNCTION_NAME]), - ok - end. - -rebalance0(Config) -> - [Server0 | _] = - rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [Server0 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), Q1 = <<"st1">>, @@ -2751,20 +2804,20 @@ rebalance0(Config) -> Q5 = <<"st5">>, ?assertEqual({'queue.declare_ok', Q1, 0, 0}, - declare(Ch, Q1, [{<<"x-queue-type">>, longstr, <<"stream">>}, - {<<"x-initial-cluster-size">>, long, 3}])), + declare(Config, Server0, Q1, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 3}])), ?assertEqual({'queue.declare_ok', Q2, 0, 0}, - declare(Ch, Q2, [{<<"x-queue-type">>, longstr, <<"stream">>}, - {<<"x-initial-cluster-size">>, long, 3}])), + declare(Config, Server0, Q2, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 3}])), ?assertEqual({'queue.declare_ok', Q3, 0, 0}, - declare(Ch, Q3, [{<<"x-queue-type">>, longstr, <<"stream">>}, - {<<"x-initial-cluster-size">>, long, 3}])), + declare(Config, Server0, Q3, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 3}])), ?assertEqual({'queue.declare_ok', Q4, 0, 0}, - declare(Ch, Q4, [{<<"x-queue-type">>, longstr, <<"stream">>}, - {<<"x-initial-cluster-size">>, long, 3}])), + declare(Config, Server0, Q4, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 3}])), ?assertEqual({'queue.declare_ok', Q5, 0, 0}, - declare(Ch, Q5, [{<<"x-queue-type">>, longstr, <<"stream">>}, - {<<"x-initial-cluster-size">>, long, 3}])), + declare(Config, Server0, Q5, [{<<"x-queue-type">>, longstr, <<"stream">>}, + {<<"x-initial-cluster-size">>, long, 3}])), NumMsgs = 100, Data = crypto:strong_rand_bytes(100), diff --git a/deps/rabbit/test/rabbit_stream_sac_coordinator_SUITE.erl b/deps/rabbit/test/rabbit_stream_sac_coordinator_SUITE.erl index 5dd546b2deb9..597bd618228e 100644 --- a/deps/rabbit/test/rabbit_stream_sac_coordinator_SUITE.erl +++ b/deps/rabbit/test/rabbit_stream_sac_coordinator_SUITE.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2021-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_sac_coordinator_SUITE). diff --git a/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl b/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl index 96d1e2cc7286..f0e05e580e0d 100644 --- a/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl +++ b/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_4_0_deprecations_SUITE). 
@@ -32,7 +32,10 @@ set_policy_when_cmq_is_not_permitted_from_conf/1, when_transient_nonexcl_is_permitted_by_default/1, - when_transient_nonexcl_is_not_permitted_from_conf/1 + when_transient_nonexcl_is_not_permitted_from_conf/1, + + when_queue_master_locator_is_permitted_by_default/1, + when_queue_master_locator_is_not_permitted_from_conf/1 ]). suite() -> @@ -40,27 +43,30 @@ suite() -> all() -> [ - {group, global_qos}, - {group, ram_node_type}, - {group, classic_queue_mirroring}, - {group, transient_nonexcl_queues} + {group, mnesia_store}, + {group, khepri_store} ]. groups() -> - [ - {global_qos, [], - [when_global_qos_is_permitted_by_default, - when_global_qos_is_not_permitted_from_conf]}, - {ram_node_type, [], - [join_when_ram_node_type_is_permitted_by_default, - join_when_ram_node_type_is_not_permitted_from_conf]}, - {classic_queue_mirroring, [], - [set_policy_when_cmq_is_permitted_by_default, - set_policy_when_cmq_is_not_permitted_from_conf]}, - {transient_nonexcl_queues, [], - [when_transient_nonexcl_is_permitted_by_default, - when_transient_nonexcl_is_not_permitted_from_conf]} - ]. + Groups = [ + {global_qos, [], + [when_global_qos_is_permitted_by_default, + when_global_qos_is_not_permitted_from_conf]}, + {ram_node_type, [], + [join_when_ram_node_type_is_permitted_by_default, + join_when_ram_node_type_is_not_permitted_from_conf]}, + {classic_queue_mirroring, [], + [set_policy_when_cmq_is_permitted_by_default, + set_policy_when_cmq_is_not_permitted_from_conf]}, + {transient_nonexcl_queues, [], + [when_transient_nonexcl_is_permitted_by_default, + when_transient_nonexcl_is_not_permitted_from_conf]}, + {queue_master_locator, [], + [when_queue_master_locator_is_permitted_by_default, + when_queue_master_locator_is_not_permitted_from_conf]} + ], + [{mnesia_store, [], Groups}, + {khepri_store, [], Groups}]. %% ------------------------------------------------------------------- %% Testsuite setup/teardown. @@ -76,6 +82,10 @@ init_per_suite(Config) -> end_per_suite(Config) -> Config. +init_per_group(mnesia_store, Config) -> + rabbit_ct_helpers:set_config(Config, [{metadata_store, mnesia}]); +init_per_group(khepri_store, Config) -> + rabbit_ct_helpers:set_config(Config, [{metadata_store, khepri}]); init_per_group(global_qos, Config) -> rabbit_ct_helpers:set_config(Config, {rmq_nodes_count, 1}); init_per_group(ram_node_type, Config) -> @@ -85,6 +95,8 @@ init_per_group(classic_queue_mirroring, Config) -> rabbit_ct_helpers:set_config(Config, {rmq_nodes_count, 1}); init_per_group(transient_nonexcl_queues, Config) -> rabbit_ct_helpers:set_config(Config, {rmq_nodes_count, 1}); +init_per_group(queue_master_locator, Config) -> + rabbit_ct_helpers:set_config(Config, {rmq_nodes_count, 1}); init_per_group(_Group, Config) -> Config. @@ -121,6 +133,14 @@ init_per_testcase( [{permit_deprecated_features, #{transient_nonexcl_queues => false}}]}), init_per_testcase1(Testcase, Config1); +init_per_testcase( + when_queue_master_locator_is_not_permitted_from_conf = Testcase, Config) -> + Config1 = rabbit_ct_helpers:merge_app_env( + Config, + {rabbit, + [{permit_deprecated_features, + #{queue_master_locator => false}}]}), + init_per_testcase1(Testcase, Config1); init_per_testcase(Testcase, Config) -> init_per_testcase1(Testcase, Config). 
@@ -208,14 +228,23 @@ is_prefetch_limited(ServerCh) -> %% ------------------------------------------------------------------- join_when_ram_node_type_is_permitted_by_default(Config) -> + case ?config(metadata_store, Config) of + mnesia -> + join_when_ram_node_type_is_permitted_by_default_mnesia(Config); + khepri -> + join_when_ram_node_type_is_permitted_by_default_khepri(Config) + end. + +join_when_ram_node_type_is_permitted_by_default_mnesia(Config) -> [NodeA, NodeB] = rabbit_ct_broker_helpers:get_node_configs( Config, nodename), - ok = rabbit_control_helper:command(stop_app, NodeA), - ok = rabbit_control_helper:command_with_output( - join_cluster, NodeA, - [atom_to_list(NodeB)], [{"--ram", true}]), - ok = rabbit_control_helper:command(start_app, NodeA), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl( + Config, NodeA, ["stop_app"]), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl( + Config, NodeA, ["join_cluster", "--ram", atom_to_list(NodeB)]), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl( + Config, NodeA, ["start_app"]), ?assertEqual([NodeA, NodeB], get_all_nodes(Config, NodeA)), ?assertEqual([NodeA, NodeB], get_all_nodes(Config, NodeB)), @@ -225,7 +254,8 @@ join_when_ram_node_type_is_permitted_by_default(Config) -> ?assert( log_file_contains_message( Config, NodeA, - ["Deprecated features: `ram_node_type`: Feature `ram_node_type` is deprecated", + ["Deprecated features: `ram_node_type`: Feature `ram_node_type` is " + "deprecated", "By default, this feature can still be used for now."])), %% Change the advanced configuration file to turn off RAM node type. @@ -242,31 +272,66 @@ join_when_ram_node_type_is_permitted_by_default(Config) -> ?assertEqual({ok, [ConfigContent1]}, file:consult(ConfigFilename)), %% Restart the node and see if it was correctly converted to a disc node. - ok = rabbit_control_helper:command(stop_app, NodeA), - Ret = rabbit_control_helper:command(start_app, NodeA), + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl( + Config, NodeA, ["stop_app"]), + Ret = rabbit_ct_broker_helpers:rabbitmqctl(Config, NodeA, ["start_app"]), case Ret of - ok -> + {ok, _} -> ?assertEqual([NodeA, NodeB], get_all_nodes(Config, NodeA)), ?assertEqual([NodeA, NodeB], get_all_nodes(Config, NodeB)), ?assertEqual([NodeA, NodeB], get_disc_nodes(Config, NodeA)), ?assertEqual([NodeA, NodeB], get_disc_nodes(Config, NodeB)); - {error, 69, - <<"Error:\n{:rabbit, {:incompatible_feature_flags, ", _/binary>>} -> - {skip, "Incompatible feature flags between nodes A and B"} + {error, 69, Message} -> + Ret1 = re:run( + Message, "incompatible_feature_flags", + [{capture, none}]), + case Ret1 of + match -> + {skip, "Incompatible feature flags between nodes A and B"}; + _ -> + throw(Ret) + end end. 
-join_when_ram_node_type_is_not_permitted_from_conf(Config) -> +join_when_ram_node_type_is_permitted_by_default_khepri(Config) -> [NodeA, NodeB] = rabbit_ct_broker_helpers:get_node_configs( Config, nodename), ok = rabbit_control_helper:command(stop_app, NodeA), - Ret = rabbit_control_helper:command_with_output( - join_cluster, NodeA, - [atom_to_list(NodeB)], [{"--ram", true}]), + ?assertMatch( + {error, 70, + <<"Error:\nError: `ram` node type is unsupported", _/binary>>}, + rabbit_control_helper:command_with_output( + join_cluster, NodeA, + [atom_to_list(NodeB)], [{"--ram", true}])), + ok = rabbit_control_helper:command(start_app, NodeA), + + ?assertEqual([NodeA], get_all_nodes(Config, NodeA)), + ?assertEqual([NodeB], get_all_nodes(Config, NodeB)), + ?assertEqual([NodeA], get_disc_nodes(Config, NodeA)), + ?assertEqual([NodeB], get_disc_nodes(Config, NodeB)). + +join_when_ram_node_type_is_not_permitted_from_conf(Config) -> + case ?config(metadata_store, Config) of + mnesia -> + join_when_ram_node_type_is_not_permitted_from_conf_mnesia(Config); + khepri -> + join_when_ram_node_type_is_not_permitted_from_conf_khepri(Config) + end. + +join_when_ram_node_type_is_not_permitted_from_conf_mnesia(Config) -> + [NodeA, NodeB] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl( + Config, NodeA, ["stop_app"]), + Ret = rabbit_ct_broker_helpers:rabbitmqctl( + Config, NodeA, ["join_cluster", "--ram", atom_to_list(NodeB)]), case Ret of - ok -> - ok = rabbit_control_helper:command(start_app, NodeA), + {ok, _} -> + {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl( + Config, NodeA, ["start_app"]), ?assertEqual([NodeA, NodeB], get_all_nodes(Config, NodeA)), ?assertEqual([NodeA, NodeB], get_all_nodes(Config, NodeB)), @@ -278,75 +343,84 @@ join_when_ram_node_type_is_not_permitted_from_conf(Config) -> Config, NodeA, ["Deprecated features: `ram_node_type`: Feature `ram_node_type` is deprecated", "Its use is not permitted per the configuration"])); - {error, 69, <<"Error:\nincompatible_feature_flags">>} -> - {skip, "Incompatible feature flags between nodes A and B"} + {error, 69, Message} -> + Ret1 = re:run( + Message, "incompatible_feature_flags", + [{capture, none}]), + case Ret1 of + match -> + {skip, "Incompatible feature flags between nodes A and B"}; + _ -> + throw(Ret) + end end. +join_when_ram_node_type_is_not_permitted_from_conf_khepri(Config) -> + [NodeA, NodeB] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + + ok = rabbit_control_helper:command(stop_app, NodeA), + ?assertMatch( + {error, 70, + <<"Error:\nError: `ram` node type is unsupported", _/binary>>}, + rabbit_control_helper:command_with_output( + join_cluster, NodeA, + [atom_to_list(NodeB)], [{"--ram", true}])), + ok = rabbit_control_helper:command(start_app, NodeA), + + ?assertEqual([NodeA], get_all_nodes(Config, NodeA)), + ?assertEqual([NodeB], get_all_nodes(Config, NodeB)), + ?assertEqual([NodeA], get_disc_nodes(Config, NodeA)), + ?assertEqual([NodeB], get_disc_nodes(Config, NodeB)). + get_all_nodes(Config, Node) -> - lists:sort( - rabbit_ct_broker_helpers:rpc( - Config, Node, rabbit_mnesia, cluster_nodes, [all])). + Nodes = case rabbit_khepri:is_enabled(Node) of + true -> + rabbit_ct_broker_helpers:rpc( + Config, Node, rabbit_khepri, locally_known_nodes, []); + false -> + rabbit_ct_broker_helpers:rpc( + Config, Node, rabbit_mnesia, cluster_nodes, [all]) + end, + lists:sort(Nodes). 
get_disc_nodes(Config, Node) -> - lists:sort( - rabbit_ct_broker_helpers:rpc( - Config, Node, rabbit_mnesia, cluster_nodes, [disc])). + Nodes = case rabbit_khepri:is_enabled(Node) of + true -> + rabbit_ct_broker_helpers:rpc( + Config, Node, rabbit_khepri, locally_known_nodes, []); + false -> + rabbit_ct_broker_helpers:rpc( + Config, Node, rabbit_mnesia, cluster_nodes, [disc]) + end, + lists:sort(Nodes). %% ------------------------------------------------------------------- %% Classic queue mirroring. %% ------------------------------------------------------------------- set_policy_when_cmq_is_permitted_by_default(Config) -> - ?assertEqual( - ok, - rabbit_ct_broker_helpers:set_ha_policy( - Config, 0, <<".*">>, <<"all">>)), - - [NodeA] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - - ?assert( - log_file_contains_message( - Config, NodeA, - ["Deprecated features: `classic_queue_mirroring`: Classic mirrored queues are deprecated.", - "By default, they can still be used for now."])), - - %% Change the advanced configuration file to turn off classic queue - %% mirroring. - ConfigFilename0 = rabbit_ct_broker_helpers:get_node_config( - Config, NodeA, erlang_node_config_filename), - ConfigFilename = ConfigFilename0 ++ ".config", - {ok, [ConfigContent0]} = file:consult(ConfigFilename), - ConfigContent1 = rabbit_ct_helpers:merge_app_env_in_erlconf( - ConfigContent0, - {rabbit, [{permit_deprecated_features, - #{classic_queue_mirroring => false}}]}), - ConfigContent2 = lists:flatten(io_lib:format("~p.~n", [ConfigContent1])), - ok = file:write_file(ConfigFilename, ConfigContent2), - ?assertEqual({ok, [ConfigContent1]}, file:consult(ConfigFilename)), - - %% Restart the node and see if it was correctly converted to a disc node. - ok = rabbit_control_helper:command(stop_app, NodeA), - ?assertMatch( - {error, 69, - <<"Error:\n{:rabbit, {{:failed_to_deny_deprecated_features, " - "[:classic_queue_mirroring]}", _/binary>>}, - rabbit_control_helper:command(start_app, NodeA)). + set_cmq_policy(Config). set_policy_when_cmq_is_not_permitted_from_conf(Config) -> + set_cmq_policy(Config). + +set_cmq_policy(Config) -> + %% CMQ have been removed, any attempt to set a policy + %% should fail as any other unknown policy. ?assertError( {badmatch, {error_string, - "Validation failed\n\nClassic mirrored queues are deprecated." ++ _}}, - rabbit_ct_broker_helpers:set_ha_policy( - Config, 0, <<".*">>, <<"all">>)), + "Validation failed\n\n[{<<\"ha-mode\">>,<<\"all\">>}] are not recognised policy settings" ++ _}}, + rabbit_ct_broker_helpers:set_policy( + Config, 0, <<"ha">>, <<".*">>, <<"queues">>, [{<<"ha-mode">>, <<"all">>}])), [NodeA] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ?assert( + ?assertNot( log_file_contains_message( Config, NodeA, - ["Deprecated features: `classic_queue_mirroring`: Classic mirrored queues are deprecated.", - "Their use is not permitted per the configuration"])). + ["Deprecated features: `classic_queue_mirroring`: Classic mirrored queues have been removed."])). %% ------------------------------------------------------------------- %% Transient non-exclusive queues. @@ -396,6 +470,63 @@ when_transient_nonexcl_is_not_permitted_from_conf(Config) -> ["Deprecated features: `transient_nonexcl_queues`: Feature `transient_nonexcl_queues` is deprecated", "Its use is not permitted per the configuration"])). 
+%% ------------------------------------------------------------------- +%% (x-)queue-master-locator +%% ------------------------------------------------------------------- + +when_queue_master_locator_is_permitted_by_default(Config) -> + [NodeA] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, NodeA), + + QName = list_to_binary(atom_to_list(?FUNCTION_NAME)), + ?assertEqual( + {'queue.declare_ok', QName, 0, 0}, + amqp_channel:call( + Ch, + #'queue.declare'{queue = QName, + arguments = [{<<"x-queue-master-locator">>, longstr, <<"client-local">>}]})), + + ?assertEqual( + ok, + rabbit_ct_broker_helpers:set_policy( + Config, 0, <<"client-local">>, <<".*">>, <<"queues">>, [{<<"queue-master-locator">>, <<"client-local">>}])), + + ?assert( + log_file_contains_message( + Config, NodeA, + ["Deprecated features: `queue_master_locator`: queue-master-locator is deprecated"])). + +when_queue_master_locator_is_not_permitted_from_conf(Config) -> + [NodeA] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, NodeA), + + QName = list_to_binary(atom_to_list(?FUNCTION_NAME)), + ?assertExit( + {{shutdown, + {connection_closing, + {server_initiated_close, 541, + <<"INTERNAL_ERROR - Feature `queue_master_locator` is " + "deprecated.", _/binary>>}}}, _}, + amqp_channel:call( + Ch, + #'queue.declare'{queue = QName, + arguments = [{<<"x-queue-master-locator">>, longstr, <<"client-local">>}]})), + + ?assertError( + {badmatch, + {error_string, + "Validation failed\n\nuse of deprecated queue-master-locator argument is not permitted\n"}}, + rabbit_ct_broker_helpers:set_policy( + Config, 0, <<"client-local">>, <<".*">>, <<"queues">>, [{<<"queue-master-locator">>, <<"client-local">>}])), + + ?assert( + log_file_contains_message( + Config, NodeA, + ["Deprecated features: `queue_master_locator`: Feature `queue_master_locator` is deprecated", + "Its use is not permitted per the configuration"])). + %% ------------------------------------------------------------------- %% Helpers. %% ------------------------------------------------------------------- diff --git a/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl b/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl index 77e4d15ca2af..fc56dfbed936 100644 --- a/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl +++ b/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_queues_cli_integration_SUITE). diff --git a/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl b/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl index 9dbeef1415c6..0d9ef37530f6 100644 --- a/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl +++ b/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl @@ -2,13 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. %% -module(rabbitmqctl_integration_SUITE). -include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). -export([all/0 ,groups/0 @@ -51,9 +52,14 @@ end_per_suite(Config) -> init_per_group(list_queues, Config0) -> NumNodes = 3, Config = create_n_node_cluster(Config0, NumNodes), - Config1 = declare_some_queues(Config), - rabbit_ct_broker_helpers:stop_node(Config1, NumNodes - 1), - Config1; + case Config of + {skip, _Reason} = Skip -> + Skip; + _ -> + Config1 = declare_some_queues(Config), + rabbit_ct_broker_helpers:stop_node(Config1, NumNodes - 1), + Config1 + end; init_per_group(_, Config) -> Config. @@ -61,7 +67,11 @@ create_n_node_cluster(Config0, NumNodes) -> Config1 = rabbit_ct_helpers:set_config( Config0, [{rmq_nodes_count, NumNodes}, {rmq_nodes_clustered, true}]), - rabbit_ct_helpers:run_steps(Config1, + Config2 = rabbit_ct_helpers:merge_app_env( + Config1, {rabbit, [ + {vhost_process_reconciliation_enabled, false} + ]}), + rabbit_ct_helpers:run_steps(Config2, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). @@ -94,11 +104,14 @@ end_per_group(_, Config) -> Config. init_per_testcase(list_queues_stopped, Config0) -> - %% Start node 3 to crash it's queues + %% Start node 3 to kill a few virtual hosts on it rabbit_ct_broker_helpers:start_node(Config0, 2), - %% Make vhost "down" on nodes 2 and 3 - rabbit_ct_broker_helpers:force_vhost_failure(Config0, 1, <<"/">>), - rabbit_ct_broker_helpers:force_vhost_failure(Config0, 2, <<"/">>), + %% Disable virtual host reconciliation + rabbit_ct_broker_helpers:rpc(Config0, 1, rabbit_vhosts, disable_reconciliation, []), + rabbit_ct_broker_helpers:rpc(Config0, 2, rabbit_vhosts, disable_reconciliation, []), + %% Terminate default virtual host's processes on nodes 2 and 3 + ok = rabbit_ct_broker_helpers:force_vhost_failure(Config0, 1, <<"/">>), + ok = rabbit_ct_broker_helpers:force_vhost_failure(Config0, 2, <<"/">>), rabbit_ct_broker_helpers:stop_node(Config0, 2), rabbit_ct_helpers:testcase_started(Config0, list_queues_stopped); @@ -112,53 +125,50 @@ end_per_testcase(Testcase, Config0) -> %%---------------------------------------------------------------------------- %% Test cases %%---------------------------------------------------------------------------- + list_queues_local(Config) -> - Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))), - Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))), + Node1Queues = lists:nth(1, ?config(per_node_queues, Config)), + Node2Queues = lists:nth(2, ?config(per_node_queues, Config)), assert_ctl_queues(Config, 0, ["--local"], Node1Queues), assert_ctl_queues(Config, 1, ["--local"], Node2Queues), ok. list_queues_online(Config) -> - Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))), - Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))), + Node1Queues = lists:nth(1, ?config(per_node_queues, Config)), + Node2Queues = lists:nth(2, ?config(per_node_queues, Config)), OnlineQueues = Node1Queues ++ Node2Queues, assert_ctl_queues(Config, 0, ["--online"], OnlineQueues), assert_ctl_queues(Config, 1, ["--online"], OnlineQueues), ok. 
list_queues_offline(Config) -> - Node3Queues = lists:sort(lists:nth(3, ?config(per_node_queues, Config))), + Node3Queues = lists:nth(3, ?config(per_node_queues, Config)), OfflineQueues = Node3Queues, assert_ctl_queues(Config, 0, ["--offline"], OfflineQueues), assert_ctl_queues(Config, 1, ["--offline"], OfflineQueues), ok. list_queues_stopped(Config) -> - Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))), - Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))), - Node3Queues = lists:sort(lists:nth(3, ?config(per_node_queues, Config))), - - %% All queues are listed - ListedQueues = - [ {Name, State} - || [Name, State] <- rabbit_ct_broker_helpers:rabbitmqctl_list( - Config, 0, ["list_queues", "name", "state", "--no-table-headers"]) ], - - [ <<"running">> = proplists:get_value(Q, ListedQueues) || Q <- Node1Queues ], - %% Node is running. Vhost is down - [ <<"stopped">> = proplists:get_value(Q, ListedQueues) || Q <- Node2Queues ], - %% Node is not running. Vhost is down - [ <<"down">> = proplists:get_value(Q, ListedQueues) || Q <- Node3Queues ]. + rabbit_ct_helpers:await_condition(fun() -> + Listed = rabbit_ct_broker_helpers:rabbitmqctl_list(Config, 0, ["list_queues", "name", "state", "--no-table-headers"]), + %% We expect some queue replicas to be reported as running, some as down and some as stopped, + %% and that CLI tools are capable of handling and formatting such rows. MK. + ReplicaStates = lists:usort([State|| [_Name, State] <- Listed]), + ReplicaStates =:= [<<"down">>, <<"running">>, <<"stopped">>] + end, 30_000). %%---------------------------------------------------------------------------- %% Helpers %%---------------------------------------------------------------------------- assert_ctl_queues(Config, Node, Args, Expected0) -> Expected = lists:sort(Expected0), - Got0 = run_list_queues(Config, Node, Args), - Got = lists:sort(lists:map(fun hd/1, Got0)), - ?assertMatch(Expected, Got). + ?awaitMatch( + Expected, + begin + Got0 = run_list_queues(Config, Node, Args), + lists:sort(lists:map(fun hd/1, Got0)) + end, + 30_000). run_list_queues(Config, Node, Args) -> rabbit_ct_broker_helpers:rabbitmqctl_list(Config, Node, ["list_queues"] ++ Args ++ ["name", "--no-table-headers"]). diff --git a/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl b/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl index 041352b17342..dc54354a7ccf 100644 --- a/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl +++ b/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl @@ -2,14 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmqctl_shutdown_SUITE). -include_lib("common_test/include/ct.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - -compile(export_all). all() -> diff --git a/deps/rabbit/test/routing_SUITE.erl b/deps/rabbit/test/routing_SUITE.erl index 51461a714767..1bbd453ef22b 100644 --- a/deps/rabbit/test/routing_SUITE.erl +++ b/deps/rabbit/test/routing_SUITE.erl @@ -2,15 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. 
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(routing_SUITE). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). --include_lib("common_test/include/ct.hrl"). - -compile([nowarn_export_all, export_all]). -compile(export_all). @@ -19,7 +17,8 @@ all() -> [ - {group, mnesia_store} + {group, mnesia_store}, + {group, khepri_store} ]. suite() -> @@ -27,7 +26,8 @@ suite() -> groups() -> [ - {mnesia_store, [], all_tests()} + {mnesia_store, [], all_tests()}, + {khepri_store, [], all_tests()} ]. all_tests() -> @@ -46,10 +46,17 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(mnesia_store = Group, Config) -> +init_per_group(mnesia_store = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), + init_per_group_common(Group, Config, 1); +init_per_group(khepri_store = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, khepri}]), + init_per_group_common(Group, Config, 1). + +init_per_group_common(Group, Config, Size) -> Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, Group}, - {rmq_nodes_count, 1} + {rmq_nodes_count, Size} ]), rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ @@ -77,9 +84,9 @@ topic(Config) -> topic1(_Config) -> XName = rabbit_misc:r(?VHOST, exchange, <<"topic_matching-exchange">>), - X = rabbit_exchange:declare( - XName, topic, _Durable = true, _AutoDelete = false, - _Internal = false, _Args = [], ?USER), + {ok, X} = rabbit_exchange:declare( + XName, topic, _Durable = true, _AutoDelete = false, + _Internal = false, _Args = [], ?USER), %% add some bindings Bindings = [#binding{source = XName, @@ -196,9 +203,9 @@ test_topic_expect_match(X, List) -> BinKey = list_to_binary(Key), Message = rabbit_basic:message(X#exchange.name, BinKey, #'P_basic'{}, <<>>), - Msg = mc_amqpl:message(X#exchange.name, - BinKey, - Message#basic_message.content), + {ok, Msg} = mc_amqpl:message(X#exchange.name, + BinKey, + Message#basic_message.content), Res = rabbit_exchange_type_topic:route(X, Msg), ExpectedRes = [rabbit_misc:r(?VHOST, queue, list_to_binary(Q)) || Q <- Expected], diff --git a/deps/rabbit/test/runtime_parameters_SUITE.erl b/deps/rabbit/test/runtime_parameters_SUITE.erl index c8e4420f963a..1db08c464931 100644 --- a/deps/rabbit/test/runtime_parameters_SUITE.erl +++ b/deps/rabbit/test/runtime_parameters_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(runtime_parameters_SUITE). diff --git a/deps/rabbit/test/signal_handling_SUITE.erl b/deps/rabbit/test/signal_handling_SUITE.erl index d491278bf985..cfa403790547 100644 --- a/deps/rabbit/test/signal_handling_SUITE.erl +++ b/deps/rabbit/test/signal_handling_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -module(signal_handling_SUITE). @@ -36,8 +36,8 @@ all() -> groups() -> Signals = [sighup, - sigterm, - sigtstp], + sigtstp, + sigterm], Tests = [list_to_existing_atom(rabbit_misc:format("send_~ts", [Signal])) || Signal <- Signals], [ @@ -45,6 +45,8 @@ groups() -> {signal_sent_to_pid_from_os_getpid, [], Tests} ]. +-define(SLEEP, 5000). + %% ------------------------------------------------------------------- %% Testsuite setup/teardown. %% ------------------------------------------------------------------- @@ -61,31 +63,32 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(_, Config) -> - Config. - -end_per_group(_, Config) -> - Config. - -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), +init_per_group(Group, Config) -> ClusterSize = 1, - TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + % TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), Config1 = rabbit_ct_helpers:set_config( Config, [ - {rmq_nodename_suffix, Testcase}, - {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} + {rmq_nodename_suffix, Group}, + {tcp_ports_base, {skip_n_nodes, ClusterSize}} ]), rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). -end_per_testcase(Testcase, Config) -> +end_per_group(_, Config) -> Config1 = rabbit_ct_helpers:run_steps(Config, rabbit_ct_client_helpers:teardown_steps() ++ rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). + Config1. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + Config. + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase), + Config. %% ------------------------------------------------------------------- %% Testcases. @@ -97,7 +100,7 @@ send_sighup(Config) -> %% A SIGHUP signal should be ignored and the node should still be %% running. send_signal(Pid, "HUP"), - timer:sleep(10000), + timer:sleep(?SLEEP), A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), ?assert(rabbit_ct_broker_helpers:rpc(Config, A, rabbit, is_running, [])), ?assert(filelib:is_regular(PidFile)). @@ -108,10 +111,18 @@ send_sigterm(Config) -> %% After sending a SIGTERM to the process, we expect the node to %% exit. send_signal(Pid, "TERM"), - wait_for_node_exit(Pid), + rabbit_ct_helpers:await_condition( + fun () -> + rabbit_misc:is_os_process_alive(Pid) == false + end), %% After a clean exit, the PID file should be removed. - ?assertNot(filelib:is_regular(PidFile)). + ?assertNot(filelib:is_regular(PidFile)), + %% restart node + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + ok = rabbit_ct_broker_helpers:start_node(Config, Server), + ok. + send_sigtstp(Config) -> {PidFile, Pid} = get_pidfile_and_pid(Config), @@ -119,7 +130,7 @@ send_sigtstp(Config) -> %% A SIGHUP signal should be ignored and the node should still be %% running. send_signal(Pid, "TSTP"), - timer:sleep(10000), + timer:sleep(?SLEEP), A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), ?assert(rabbit_ct_broker_helpers:rpc(Config, A, rabbit, is_running, [])), ?assert(filelib:is_regular(PidFile)). @@ -149,12 +160,3 @@ send_signal(Pid, Signal) -> "-" ++ Signal, Pid], ?assertMatch({ok, _}, rabbit_ct_helpers:exec(Cmd)). 
- -wait_for_node_exit(Pid) -> - case rabbit_misc:is_os_process_alive(Pid) of - true -> - timer:sleep(1000), - wait_for_node_exit(Pid); - false -> - ok - end. diff --git a/deps/rabbit/test/simple_ha_SUITE.erl b/deps/rabbit/test/simple_ha_SUITE.erl deleted file mode 100644 index 920d67bbc1aa..000000000000 --- a/deps/rabbit/test/simple_ha_SUITE.erl +++ /dev/null @@ -1,333 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(simple_ha_SUITE). - --include_lib("common_test/include/ct.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). --include_lib("eunit/include/eunit.hrl"). - --compile(export_all). - --define(DELAY, 8000). - -all() -> - [ - {group, cluster_size_2}, - {group, cluster_size_3} - ]. - -groups() -> - RejectTests = [ - rejects_survive_stop, - rejects_survive_policy - ], - [ - {cluster_size_2, [], [ - rapid_redeclare, - declare_synchrony, - clean_up_exclusive_queues - ]}, - {cluster_size_3, [], [ - consume_survives_stop, - consume_survives_policy, - auto_resume, - auto_resume_no_ccn_client, - confirms_survive_stop, - confirms_survive_policy, - {overflow_reject_publish, [], RejectTests}, - {overflow_reject_publish_dlx, [], RejectTests} - ]} - ]. - -%% ------------------------------------------------------------------- -%% Testsuite setup/teardown. -%% ------------------------------------------------------------------- - -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -init_per_group(cluster_size_2, Config) -> - rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 2} - ]); -init_per_group(cluster_size_3, Config) -> - rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 3} - ]); -init_per_group(overflow_reject_publish, Config) -> - rabbit_ct_helpers:set_config(Config, [ - {overflow, <<"reject-publish">>} - ]); -init_per_group(overflow_reject_publish_dlx, Config) -> - rabbit_ct_helpers:set_config(Config, [ - {overflow, <<"reject-publish-dlx">>} - ]). - -end_per_group(_, Config) -> - Config. - -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), - ClusterSize = ?config(rmq_nodes_count, Config), - TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_clustered, true}, - {rmq_nodename_suffix, Testcase}, - {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} - ]), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps() ++ [ - fun rabbit_ct_broker_helpers:set_ha_policy_all/1 - ]). - -end_per_testcase(Testcase, Config) -> - Config1 = rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). - -%% ------------------------------------------------------------------- -%% Testcases. 
-%% ------------------------------------------------------------------- - -rapid_redeclare(Config) -> - A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, A), - Queue = <<"test">>, - [begin - amqp_channel:call(Ch, #'queue.declare'{queue = Queue, - durable = true}), - amqp_channel:call(Ch, #'queue.delete'{queue = Queue}) - end || _I <- lists:seq(1, 20)], - ok. - -%% Check that by the time we get a declare-ok back, the mirrors are up -%% and in Mnesia. -declare_synchrony(Config) -> - [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - RabbitCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit), - HareCh = rabbit_ct_client_helpers:open_channel(Config, Hare), - Q = <<"mirrored-queue">>, - declare(RabbitCh, Q), - amqp_channel:call(RabbitCh, #'confirm.select'{}), - amqp_channel:cast(RabbitCh, #'basic.publish'{routing_key = Q}, - #amqp_msg{props = #'P_basic'{delivery_mode = 2}}), - amqp_channel:wait_for_confirms(RabbitCh), - rabbit_ct_broker_helpers:kill_node(Config, Rabbit), - - #'queue.declare_ok'{message_count = 1} = declare(HareCh, Q), - ok. - -declare(Ch, Name) -> - amqp_channel:call(Ch, #'queue.declare'{durable = true, queue = Name}). - -%% Ensure that exclusive queues are cleaned up when part of ha cluster -%% and node is killed abruptly then restarted -clean_up_exclusive_queues(Config) -> - QName = <<"excl">>, - rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>), - [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ChA = rabbit_ct_client_helpers:open_channel(Config, A), - amqp_channel:call(ChA, #'queue.declare'{queue = QName, - exclusive = true}), - ok = rabbit_ct_broker_helpers:kill_node(Config, A), - timer:sleep(?DELAY), - [] = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_amqqueue, list, []), - ok = rabbit_ct_broker_helpers:start_node(Config, A), - timer:sleep(?DELAY), - [[],[]] = rabbit_ct_broker_helpers:rpc_all(Config, rabbit_amqqueue, list, []), - ok. - -consume_survives_stop(Cf) -> consume_survives(Cf, fun stop/2, true). -consume_survives_sigkill(Cf) -> consume_survives(Cf, fun sigkill/2, true). -consume_survives_policy(Cf) -> consume_survives(Cf, fun policy/2, true). -auto_resume(Cf) -> consume_survives(Cf, fun sigkill/2, false). -auto_resume_no_ccn_client(Cf) -> consume_survives(Cf, fun sigkill/2, false, - false). - -confirms_survive_stop(Cf) -> confirms_survive(Cf, fun stop/2). -confirms_survive_policy(Cf) -> confirms_survive(Cf, fun policy/2). - -rejects_survive_stop(Cf) -> rejects_survive(Cf, fun stop/2). -rejects_survive_policy(Cf) -> rejects_survive(Cf, fun policy/2). - -%%---------------------------------------------------------------------------- - -consume_survives(Config, DeathFun, CancelOnFailover) -> - consume_survives(Config, DeathFun, CancelOnFailover, true). 
- -consume_survives(Config, - DeathFun, CancelOnFailover, CCNSupported) -> - [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000), - Channel1 = rabbit_ct_client_helpers:open_channel(Config, A), - Channel2 = rabbit_ct_client_helpers:open_channel(Config, B), - Channel3 = rabbit_ct_client_helpers:open_channel(Config, C), - - %% declare the queue on the master, mirrored to the two mirrors - Queue = <<"test">>, - amqp_channel:call(Channel1, #'queue.declare'{queue = Queue, - auto_delete = false}), - - %% start up a consumer - ConsCh = case CCNSupported of - true -> Channel2; - false -> Port = rabbit_ct_broker_helpers:get_node_config( - Config, B, tcp_port_amqp), - open_incapable_channel(Port) - end, - ConsumerPid = rabbit_ha_test_consumer:create( - ConsCh, Queue, self(), CancelOnFailover, Msgs), - - %% send a bunch of messages from the producer - ProducerPid = rabbit_ha_test_producer:create(Channel3, Queue, - self(), false, Msgs), - DeathFun(Config, A), - %% verify that the consumer got all msgs, or die - the await_response - %% calls throw an exception if anything goes wrong.... - ct:pal("awaiting produce ~w", [ProducerPid]), - rabbit_ha_test_producer:await_response(ProducerPid), - ct:pal("awaiting consumer ~w", [ConsumerPid]), - rabbit_ha_test_consumer:await_response(ConsumerPid), - ok. - -confirms_survive(Config, DeathFun) -> - [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000), - Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A), - Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B), - - %% declare the queue on the master, mirrored to the two mirrors - Queue = <<"test">>, - amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue, - auto_delete = false, - durable = true}), - - %% send one message to ensure the channel is flowing - amqp_channel:register_confirm_handler(Node1Channel, self()), - #'confirm.select_ok'{} = amqp_channel:call(Node1Channel, #'confirm.select'{}), - - Payload = <<"initial message">>, - ok = amqp_channel:call(Node1Channel, - #'basic.publish'{routing_key = Queue}, - #amqp_msg{payload = Payload}), - - ok = receive - #'basic.ack'{multiple = false} -> ok; - #'basic.nack'{multiple = false} -> message_nacked - after - 5000 -> confirm_not_received - end, - - %% send a bunch of messages from the producer - ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue, - self(), true, Msgs), - DeathFun(Config, A), - rabbit_ha_test_producer:await_response(ProducerPid), - ok. 
- -rejects_survive(Config, DeathFun) -> - [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000), - Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A), - Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B), - - %% declare the queue on the master, mirrored to the two mirrors - XOverflow = ?config(overflow, Config), - Queue = <<"test_rejects", "_", XOverflow/binary>>, - amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue, - auto_delete = false, - durable = true, - arguments = [{<<"x-max-length">>, long, 1}, - {<<"x-overflow">>, longstr, XOverflow}]}), - - amqp_channel:register_confirm_handler(Node1Channel, self()), - #'confirm.select_ok'{} = amqp_channel:call(Node1Channel, #'confirm.select'{}), - - Payload = <<"there can be only one">>, - ok = amqp_channel:call(Node1Channel, - #'basic.publish'{routing_key = Queue}, - #amqp_msg{payload = Payload}), - - ok = receive - #'basic.ack'{multiple = false} -> ok; - #'basic.nack'{multiple = false} -> message_nacked - after - 5000 -> confirm_not_received - end, - - %% send a bunch of messages from the producer. They should all be nacked, as the queue is full. - ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue, - self(), true, Msgs, nacks), - DeathFun(Config, A), - rabbit_ha_test_producer:await_response(ProducerPid), - - {#'basic.get_ok'{}, #amqp_msg{payload = Payload}} = - amqp_channel:call(Node2Channel, #'basic.get'{queue = Queue}), - %% There is only one message. - #'basic.get_empty'{} = amqp_channel:call(Node2Channel, #'basic.get'{queue = Queue}), - ok. - - - -stop(Config, Node) -> - rabbit_ct_broker_helpers:stop_node_after(Config, Node, 50). - -sigkill(Config, Node) -> - rabbit_ct_broker_helpers:kill_node_after(Config, Node, 50). - -policy(Config, Node)-> - Nodes = [ - atom_to_binary(N) - || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - N =/= Node], - rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>, - {<<"nodes">>, Nodes}). - -open_incapable_channel(NodePort) -> - Props = [{<<"capabilities">>, table, []}], - {ok, ConsConn} = - amqp_connection:start(#amqp_params_network{port = NodePort, - client_properties = Props}), - {ok, Ch} = amqp_connection:open_channel(ConsConn), - Ch. - -declare_exclusive(Ch, QueueName, Args) -> - Declare = #'queue.declare'{queue = QueueName, - exclusive = true, - arguments = Args - }, - #'queue.declare_ok'{} = amqp_channel:call(Ch, Declare). - -subscribe(Ch, QueueName) -> - ConsumeOk = amqp_channel:call(Ch, #'basic.consume'{queue = QueueName, - no_ack = true}), - #'basic.consume_ok'{} = ConsumeOk, - receive ConsumeOk -> ok after ?DELAY -> throw(consume_ok_timeout) end. - -receive_cancels(Cancels) -> - receive - #'basic.cancel'{} = C -> - receive_cancels([C|Cancels]) - after ?DELAY -> - Cancels - end. - -receive_messages(All) -> - receive - {#'basic.deliver'{}, Msg} -> - receive_messages([Msg|All]) - after ?DELAY -> - lists:reverse(All) - end. diff --git a/deps/rabbit/test/single_active_consumer_SUITE.erl b/deps/rabbit/test/single_active_consumer_SUITE.erl index b695e2a240ed..ac682ad95712 100644 --- a/deps/rabbit/test/single_active_consumer_SUITE.erl +++ b/deps/rabbit/test/single_active_consumer_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(single_active_consumer_SUITE). @@ -11,13 +11,15 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). +-compile(nowarn_export_all). -compile(export_all). -define(TIMEOUT, 30000). all() -> [ - {group, classic_queue}, {group, quorum_queue} + {group, classic_queue}, + {group, quorum_queue} ]. groups() -> @@ -25,6 +27,7 @@ groups() -> {classic_queue, [], [ all_messages_go_to_one_consumer, fallback_to_another_consumer_when_first_one_is_cancelled, + fallback_to_another_consumer_when_first_one_is_cancelled_qos1, fallback_to_another_consumer_when_exclusive_consumer_channel_is_cancelled, fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks, amqp_exclusive_consume_fails_on_exclusive_consumer_queue @@ -32,6 +35,7 @@ groups() -> {quorum_queue, [], [ all_messages_go_to_one_consumer, fallback_to_another_consumer_when_first_one_is_cancelled, + fallback_to_another_consumer_when_first_one_is_cancelled_qos1, fallback_to_another_consumer_when_exclusive_consumer_channel_is_cancelled, fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks, basic_get_is_unsupported @@ -165,6 +169,49 @@ fallback_to_another_consumer_when_first_one_is_cancelled(Config) -> amqp_connection:close(C), ok. +fallback_to_another_consumer_when_first_one_is_cancelled_qos1(Config) -> + {C, Ch} = connection_and_channel(Config), + Q = queue_declare(Ch, Config), + ?assertEqual(#'basic.qos_ok'{}, + amqp_channel:call(Ch, #'basic.qos'{prefetch_count = 1})), + CTag1 = <<"tag1">>, + CTag2 = <<"tag2">>, + amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, + consumer_tag = CTag1}, self()), + receive #'basic.consume_ok'{consumer_tag = CTag1} -> ok + after 5000 -> ct:fail(timeout_ctag1) + end, + + amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, + consumer_tag = CTag2}, self()), + receive #'basic.consume_ok'{consumer_tag = CTag2} -> ok + after 5000 -> ct:fail(timeout_ctag2) + end, + + Publish = #'basic.publish'{exchange = <<>>, routing_key = Q}, + amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"m1">>}), + amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"m2">>}), + + DTag1 = receive {#'basic.deliver'{consumer_tag = CTag1, + delivery_tag = DTag}, + #amqp_msg{payload = <<"m1">>}} -> DTag + after 5000 -> ct:fail(timeout_m1) + end, + + #'basic.cancel_ok'{consumer_tag = CTag1} = amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag1}), + receive #'basic.cancel_ok'{consumer_tag = CTag1} -> ok + after 5000 -> ct:fail(missing_cancel) + end, + + amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag1}), + + receive {#'basic.deliver'{consumer_tag = CTag2}, + #amqp_msg{payload = <<"m2">>}} -> ok; + Unexpected -> ct:fail({unexpected, Unexpected}) + after 5000 -> ct:fail(timeout_m2) + end, + amqp_connection:close(C). + fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks(Config) -> %% Let's ensure that although the consumer is cancelled we still keep the unacked %% messages and accept acknowledgments on them. 
@@ -189,7 +236,7 @@ fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks(Config) -> {CTag, DTag1} = receive_deliver(), {_CTag, DTag2} = receive_deliver(), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]), #'basic.cancel_ok'{} = amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}), receive @@ -201,14 +248,14 @@ fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks(Config) -> Resource = proplists:get_value(queue_name, Props), Q == Resource#resource.name end, Consumers1)), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]), [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = P}) || P <- [<<"msg3">>, <<"msg4">>]], - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"4">>, <<"0">>, <<"4">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"4">>, <<"0">>, <<"4">>]]), amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag1}), amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag2}), - quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]), + queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]), amqp_connection:close(C), ok. @@ -292,7 +339,7 @@ queue_declare(Channel, Config) -> consume({Parent, State, 0}) -> Parent ! {consumer_done, State}; -consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown}) -> +consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown} = Arg) -> receive #'basic.consume_ok'{consumer_tag = CTag} -> consume({Parent, {maps:put(CTag, 0, MessagesPerConsumer), MessageCount}, CountDown}); @@ -307,9 +354,9 @@ consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown}) -> consume({Parent, NewState, CountDown - 1}); #'basic.cancel_ok'{consumer_tag = CTag} -> Parent ! {cancel_ok, CTag}, - consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown}); + consume(Arg); _ -> - consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown}) + consume(Arg) after ?TIMEOUT -> Parent ! {consumer_timeout, {MessagesPerConsumer, MessageCount}}, flush(), diff --git a/deps/rabbit/test/sync_detection_SUITE.erl b/deps/rabbit/test/sync_detection_SUITE.erl deleted file mode 100644 index 4bd14191b37b..000000000000 --- a/deps/rabbit/test/sync_detection_SUITE.erl +++ /dev/null @@ -1,243 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(sync_detection_SUITE). - --include_lib("common_test/include/ct.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - --compile(export_all). - --define(LOOP_RECURSION_DELAY, 100). - -all() -> - [ - {group, cluster_size_2}, - {group, cluster_size_3} - ]. - -groups() -> - [ - {cluster_size_2, [], [ - follower_synchronization - ]}, - {cluster_size_3, [], [ - follower_synchronization_ttl - ]} - ]. - -%% ------------------------------------------------------------------- -%% Testsuite setup/teardown. -%% ------------------------------------------------------------------- - -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). 
- -init_per_group(cluster_size_2, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); -init_per_group(cluster_size_3, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]). - -end_per_group(_, Config) -> - Config. - -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase), - ClusterSize = ?config(rmq_nodes_count, Config), - TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, ClusterSize}, - {rmq_nodes_clustered, true}, - {rmq_nodename_suffix, Testcase}, - {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}} - ]), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps() ++ [ - fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1, - fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1 - ]). - -end_per_testcase(Testcase, Config) -> - Config1 = rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), - rabbit_ct_helpers:testcase_finished(Config1, Testcase). - -%% ------------------------------------------------------------------- -%% Testcases. -%% ------------------------------------------------------------------- - -follower_synchronization(Config) -> - [Master, Slave] = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - Channel = rabbit_ct_client_helpers:open_channel(Config, Master), - Queue = <<"ha.two.test">>, - #'queue.declare_ok'{} = - amqp_channel:call(Channel, #'queue.declare'{queue = Queue, - auto_delete = false}), - - %% The comments on the right are the queue length and the pending acks on - %% the master. - rabbit_ct_broker_helpers:stop_broker(Config, Slave), - - %% We get and ack one message when the mirror is down, and check that when we - %% start the mirror it's not marked as synced until ack the message. We also - %% publish another message when the mirror is up. - send_dummy_message(Channel, Queue), % 1 - 0 - {#'basic.get_ok'{delivery_tag = Tag1}, _} = - amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1 - - rabbit_ct_broker_helpers:start_broker(Config, Slave), - - follower_unsynced(Master, Queue), - send_dummy_message(Channel, Queue), % 1 - 1 - follower_unsynced(Master, Queue), - - amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag1}), % 1 - 0 - - follower_synced(Master, Queue), - - %% We restart the mirror and we send a message, so that the mirror will only - %% have one of the messages. 
- rabbit_ct_broker_helpers:stop_broker(Config, Slave), - rabbit_ct_broker_helpers:start_broker(Config, Slave), - - send_dummy_message(Channel, Queue), % 2 - 0 - - follower_unsynced(Master, Queue), - - %% We reject the message that the mirror doesn't have, and verify that it's - %% still unsynced - {#'basic.get_ok'{delivery_tag = Tag2}, _} = - amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1 - follower_unsynced(Master, Queue), - amqp_channel:cast(Channel, #'basic.reject'{ delivery_tag = Tag2, - requeue = true }), % 2 - 0 - follower_unsynced(Master, Queue), - {#'basic.get_ok'{delivery_tag = Tag3}, _} = - amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1 - amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag3}), % 1 - 0 - follower_synced(Master, Queue), - {#'basic.get_ok'{delivery_tag = Tag4}, _} = - amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1 - amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag4}), % 0 - 0 - follower_synced(Master, Queue). - -follower_synchronization_ttl(Config) -> - [Master, Slave, DLX] = rabbit_ct_broker_helpers:get_node_configs(Config, - nodename), - Channel = rabbit_ct_client_helpers:open_channel(Config, Master), - DLXChannel = rabbit_ct_client_helpers:open_channel(Config, DLX), - - %% We declare a DLX queue to wait for messages to be TTL'ed - DLXQueue = <<"dlx-queue">>, - #'queue.declare_ok'{} = - amqp_channel:call(Channel, #'queue.declare'{queue = DLXQueue, - auto_delete = false}), - - TestMsgTTL = 5000, - Queue = <<"ha.two.test">>, - %% Sadly we need fairly high numbers for the TTL because starting/stopping - %% nodes takes a fair amount of time. - Args = [{<<"x-message-ttl">>, long, TestMsgTTL}, - {<<"x-dead-letter-exchange">>, longstr, <<>>}, - {<<"x-dead-letter-routing-key">>, longstr, DLXQueue}], - #'queue.declare_ok'{} = - amqp_channel:call(Channel, #'queue.declare'{queue = Queue, - auto_delete = false, - arguments = Args}), - - follower_synced(Master, Queue), - - %% All unknown - rabbit_ct_broker_helpers:stop_broker(Config, Slave), - send_dummy_message(Channel, Queue), - send_dummy_message(Channel, Queue), - rabbit_ct_broker_helpers:start_broker(Config, Slave), - follower_unsynced(Master, Queue), - wait_for_messages(DLXQueue, DLXChannel, 2), - follower_synced(Master, Queue), - - %% 1 unknown, 1 known - rabbit_ct_broker_helpers:stop_broker(Config, Slave), - send_dummy_message(Channel, Queue), - rabbit_ct_broker_helpers:start_broker(Config, Slave), - follower_unsynced(Master, Queue), - send_dummy_message(Channel, Queue), - follower_unsynced(Master, Queue), - wait_for_messages(DLXQueue, DLXChannel, 2), - follower_synced(Master, Queue), - - %% %% both known - send_dummy_message(Channel, Queue), - send_dummy_message(Channel, Queue), - follower_synced(Master, Queue), - wait_for_messages(DLXQueue, DLXChannel, 2), - follower_synced(Master, Queue), - - ok. - -send_dummy_message(Channel, Queue) -> - Payload = <<"foo">>, - Publish = #'basic.publish'{exchange = <<>>, routing_key = Queue}, - amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}). - -follower_pids(Node, Queue) -> - {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup, - [rabbit_misc:r(<<"/">>, queue, Queue)]), - SSP = synchronised_slave_pids, - [{SSP, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [SSP]]), - case Pids of - '' -> []; - _ -> Pids - end. - -%% The mnesia synchronization takes a while, but we don't want to wait for the -%% test to fail, since the timetrap is quite high. 
-wait_for_sync_status(Status, Node, Queue) -> - Max = 90000 / ?LOOP_RECURSION_DELAY, - wait_for_sync_status(0, Max, Status, Node, Queue). - -wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max -> - erlang:error({sync_status_max_tries_failed, - [{queue, Queue}, - {node, Node}, - {expected_status, Status}, - {max_tried, Max}]}); -wait_for_sync_status(N, Max, Status, Node, Queue) -> - Synced = length(follower_pids(Node, Queue)) =:= 1, - case Synced =:= Status of - true -> ok; - false -> timer:sleep(?LOOP_RECURSION_DELAY), - wait_for_sync_status(N + 1, Max, Status, Node, Queue) - end. - -follower_synced(Node, Queue) -> - wait_for_sync_status(true, Node, Queue). - -follower_unsynced(Node, Queue) -> - wait_for_sync_status(false, Node, Queue). - -wait_for_messages(Queue, Channel, N) -> - Sub = #'basic.consume'{queue = Queue}, - #'basic.consume_ok'{consumer_tag = CTag} = amqp_channel:call(Channel, Sub), - receive - #'basic.consume_ok'{} -> ok - end, - lists:foreach( - fun (_) -> receive - {#'basic.deliver'{delivery_tag = Tag}, _Content} -> - amqp_channel:cast(Channel, - #'basic.ack'{delivery_tag = Tag}) - end - end, lists:seq(1, N)), - amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = CTag}). diff --git a/deps/rabbit/test/temp/rabbitmqadmin.py b/deps/rabbit/test/temp/rabbitmqadmin.py index cdddd56497b3..73425a82165b 100755 --- a/deps/rabbit/test/temp/rabbitmqadmin.py +++ b/deps/rabbit/test/temp/rabbitmqadmin.py @@ -3,7 +3,7 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. -# Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved +# Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved import sys if sys.version_info[0] < 2 or sys.version_info[1] < 6: diff --git a/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl b/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl index 38b77d3cb1e7..f53000a8cd78 100644 --- a/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl +++ b/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% @@ -10,7 +10,6 @@ -compile(export_all). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("proper/include/proper.hrl"). diff --git a/deps/rabbit/test/topic_permission_SUITE.erl b/deps/rabbit/test/topic_permission_SUITE.erl index b39b3e8b77ad..0a30f7f30255 100644 --- a/deps/rabbit/test/topic_permission_SUITE.erl +++ b/deps/rabbit/test/topic_permission_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(topic_permission_SUITE). --include_lib("common_test/include/ct.hrl"). 
-include_lib("rabbit_common/include/rabbit.hrl"). -compile(export_all). @@ -58,16 +57,14 @@ topic_permission_database_access(Config) -> ?MODULE, topic_permission_database_access1, [Config]). topic_permission_database_access1(_Config) -> - 0 = length(ets:tab2list(rabbit_topic_permission)), rabbit_vhost:add(<<"/">>, <<"acting-user">>), rabbit_vhost:add(<<"other-vhost">>, <<"acting-user">>), rabbit_auth_backend_internal:add_user(<<"guest">>, <<"guest">>, <<"acting-user">>), rabbit_auth_backend_internal:add_user(<<"dummy">>, <<"dummy">>, <<"acting-user">>), - rabbit_auth_backend_internal:set_topic_permissions( - <<"guest">>, <<"/">>, <<"amq.topic">>, "^a", "^a", <<"acting-user">> - ), - 1 = length(ets:tab2list(rabbit_topic_permission)), + ok = rabbit_auth_backend_internal:set_topic_permissions( + <<"guest">>, <<"/">>, <<"amq.topic">>, "^a", "^a", <<"acting-user">> + ), 1 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)), 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)), 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)), @@ -79,7 +76,6 @@ topic_permission_database_access1(_Config) -> rabbit_auth_backend_internal:set_topic_permissions( <<"guest">>, <<"other-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">> ), - 2 = length(ets:tab2list(rabbit_topic_permission)), 2 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)), 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)), 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)), @@ -142,16 +138,15 @@ topic_permission_checks(Config) -> ?MODULE, topic_permission_checks1, [Config]). topic_permission_checks1(_Config) -> - 0 = length(ets:tab2list(rabbit_topic_permission)), - rabbit_db_vhost:create_or_get(<<"/">>, [], #{}), - rabbit_db_vhost:create_or_get(<<"other-vhost">>, [], #{}), + rabbit_vhost:add(<<"/">>, <<"">>), + rabbit_vhost:add(<<"other-vhost">>, <<"">>), + rabbit_auth_backend_internal:add_user(<<"guest">>, <<"guest">>, <<"acting-user">>), rabbit_auth_backend_internal:add_user(<<"dummy">>, <<"dummy">>, <<"acting-user">>), rabbit_auth_backend_internal:set_topic_permissions( <<"guest">>, <<"/">>, <<"amq.topic">>, "^a", "^a", <<"acting-user">> ), - 1 = length(ets:tab2list(rabbit_topic_permission)), 1 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)), 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)), 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)), @@ -160,7 +155,6 @@ topic_permission_checks1(_Config) -> rabbit_auth_backend_internal:set_topic_permissions( <<"guest">>, <<"other-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">> ), - 2 = length(ets:tab2list(rabbit_topic_permission)), 2 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)), 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)), 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)), diff --git a/deps/rabbit/test/transactions_SUITE.erl b/deps/rabbit/test/transactions_SUITE.erl new file mode 100644 index 000000000000..5dae3b8ebf4c --- /dev/null +++ b/deps/rabbit/test/transactions_SUITE.erl @@ -0,0 +1,131 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+%%
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
+%%
+-module(transactions_SUITE).
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+          published_visible_after_commit,
+          return_after_commit
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+    QName = rabbit_data_coercion:to_binary(io_lib:format("~p_~tp", [Group, Testcase])),
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    declare_queue(Ch, QName),
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch),
+    Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, QName}]),
+    rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    QName = queue_name(Config),
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    delete_queue(Ch, QName),
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch),
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+published_visible_after_commit(Config) ->
+    QName = queue_name(Config),
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    #'tx.select_ok'{} = amqp_channel:call(Ch, #'tx.select'{}),
+    publish(Ch, QName, <<"msg">>),
+    ?assertMatch(#'basic.get_empty'{}, get(Ch, QName)),
+    amqp_channel:call(Ch, #'tx.commit'{}),
+    ?assertMatch({#'basic.get_ok'{}, _}, get(Ch, QName)),
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch),
+    ok.
+
+return_after_commit(Config) ->
+    QName0 = queue_name(Config),
+    %% publish (mandatory) to a queue that does not exist so the broker returns the message
+    QName = <<QName0/binary, "_does_not_exist">>,
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    amqp_channel:register_return_handler(Ch, self()),
+    #'tx.select_ok'{} = amqp_channel:call(Ch, #'tx.select'{}),
+    publish(Ch, QName, <<"msg">>, true),
+    Result = receive
+                 {#'basic.return'{}, _} ->
+                     return_before_commit
+             after 1000 ->
+                     return_after_commit
+             end,
+    ?assertEqual(return_after_commit, Result),
+    #'tx.commit_ok'{} = amqp_channel:call(Ch, #'tx.commit'{}),
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch),
+    ok.
+
+queue_name(Config) ->
+    ?config(queue_name, Config).
+
+delete_queue(Ch, QName) ->
+    #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}).
+ +declare_queue(Ch, QName) -> + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, + durable = false, + exclusive = false, + auto_delete = false}). +publish(Ch, QName, Payload) -> + publish(Ch, QName, Payload, false). + +publish(Ch, QName, Payload, Mandatory) -> + amqp_channel:call(Ch, #'basic.publish'{routing_key = QName, mandatory = Mandatory}, + #amqp_msg{payload = Payload}). + +get(Ch, QName) -> + get(Ch, QName, 10). + +get(_, _, 0) -> + #'basic.get_empty'{}; +get(Ch, QName, Attempt) -> + case amqp_channel:call(Ch, #'basic.get'{queue = QName, no_ack = true}) of + #'basic.get_empty'{} -> + timer:sleep(100), + get(Ch, QName, Attempt - 1); + GetOk -> + GetOk + end. diff --git a/deps/rabbit/test/unicode_SUITE.erl b/deps/rabbit/test/unicode_SUITE.erl index 65088e613961..4f28d1362c24 100644 --- a/deps/rabbit/test/unicode_SUITE.erl +++ b/deps/rabbit/test/unicode_SUITE.erl @@ -17,7 +17,6 @@ all() -> groups() -> [ {queues, [], [ - classic_queue_v1, classic_queue_v2, quorum_queue, stream @@ -57,14 +56,7 @@ init_per_testcase(Testcase, Config) -> end_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_finished(Config, Testcase). -classic_queue_v1(Config) -> - ok = rabbit_ct_broker_helpers:rpc( - Config, 0, application, set_env, [rabbit, classic_queue_default_version, 1]), - ok = queue(Config, ?FUNCTION_NAME, []). - classic_queue_v2(Config) -> - ok = rabbit_ct_broker_helpers:rpc( - Config, 0, application, set_env, [rabbit, classic_queue_default_version, 2]), ok = queue(Config, ?FUNCTION_NAME, []). quorum_queue(Config) -> diff --git a/deps/rabbit/test/unit_access_control_SUITE.erl b/deps/rabbit/test/unit_access_control_SUITE.erl index 5436aa5154ea..4f8e2b44235b 100644 --- a/deps/rabbit/test/unit_access_control_SUITE.erl +++ b/deps/rabbit/test/unit_access_control_SUITE.erl @@ -2,17 +2,16 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_access_control_SUITE). -include_lib("common_test/include/ct.hrl"). --include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). --compile(export_all). +-compile([export_all, nowarn_export_all]). all() -> [ @@ -24,7 +23,7 @@ groups() -> [ {parallel_tests, [parallel], [ password_hashing, - unsupported_connection_refusal + version_negotiation ]}, {sequential_tests, [], [ login_with_credentials_but_no_password, @@ -278,20 +277,42 @@ auth_backend_internal_expand_topic_permission(_Config) -> ), ok. -unsupported_connection_refusal(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, unsupported_connection_refusal1, [Config]). - -unsupported_connection_refusal1(Config) -> - H = ?config(rmq_hostname, Config), - P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - [passed = test_unsupported_connection_refusal(H, P, V) || - V <- [<<"AMQP",9,9,9,9>>, <<"AMQP",0,1,0,0>>, <<"XXXX",0,0,9,1>>]], - passed. +%% Test AMQP 1.0 §2.2 +version_negotiation(Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, ?MODULE, version_negotiation1, [Config]). 
+ +version_negotiation1(Config) -> + Hostname = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + + [?assertEqual(<<"AMQP",3,1,0,0>>, + version_negotiation2(Hostname, Port, Vsn)) || + Vsn <- [<<"AMQP",0,1,0,0>>, + <<"AMQP",0,1,0,1>>, + <<"AMQP",0,1,1,0>>, + <<"AMQP",0,9,1,0>>, + <<"AMQP",0,0,8,0>>, + <<"AMQP",1,1,0,0>>, + <<"AMQP",2,1,0,0>>, + <<"AMQP",3,1,0,0>>, + <<"AMQP",3,1,0,1>>, + <<"AMQP",3,1,0,1>>, + <<"AMQP",4,1,0,0>>, + <<"AMQP",9,1,0,0>>, + <<"XXXX",0,1,0,0>>, + <<"XXXX",0,0,9,1>> + ]], + + [?assertEqual(<<"AMQP",0,0,9,1>>, + version_negotiation2(Hostname, Port, Vsn)) || + Vsn <- [<<"AMQP",0,0,9,2>>, + <<"AMQP",0,0,10,0>>, + <<"AMQP",0,0,10,1>>]], + ok. -test_unsupported_connection_refusal(H, P, Header) -> - {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]), +version_negotiation2(Hostname, Port, Header) -> + {ok, C} = gen_tcp:connect(Hostname, Port, [binary, {active, false}]), ok = gen_tcp:send(C, Header), - {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(C, 8, 100), + {ok, ServerVersion} = gen_tcp:recv(C, 8, 100), ok = gen_tcp:close(C), - passed. + ServerVersion. diff --git a/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl b/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl index c8dcab98f656..21dd13f1be99 100644 --- a/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl +++ b/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_access_control_authn_authz_context_propagation_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). diff --git a/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl b/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl index b0c1abbdddc8..099c9c35bc6c 100644 --- a/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl +++ b/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_access_control_credential_validation_SUITE). -compile(export_all). -include_lib("proper/include/proper.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). all() -> diff --git a/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl b/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl index 72b5be08e5a7..13092e3e28f3 100644 --- a/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl +++ b/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_amqp091_content_framing_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). diff --git a/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl b/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl index 521e8c52eb00..6b7b70352faa 100644 --- a/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl +++ b/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_amqp091_server_properties_SUITE). -include_lib("common_test/include/ct.hrl"). --include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). diff --git a/deps/rabbit/test/unit_app_management_SUITE.erl b/deps/rabbit/test/unit_app_management_SUITE.erl index 8666c568c5f3..f7491252d6b9 100644 --- a/deps/rabbit/test/unit_app_management_SUITE.erl +++ b/deps/rabbit/test/unit_app_management_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_app_management_SUITE). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). diff --git a/deps/rabbit/test/unit_classic_mirrored_queue_sync_throttling_SUITE.erl b/deps/rabbit/test/unit_classic_mirrored_queue_sync_throttling_SUITE.erl deleted file mode 100644 index ae20a7cc0251..000000000000 --- a/deps/rabbit/test/unit_classic_mirrored_queue_sync_throttling_SUITE.erl +++ /dev/null @@ -1,84 +0,0 @@ --module(unit_classic_mirrored_queue_sync_throttling_SUITE). - --include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbit_common/include/rabbit_framing.hrl"). - --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). - --compile(export_all). - -all() -> - [ - maybe_master_batch_send, - get_time_diff, - append_to_acc - ]. 
- -maybe_master_batch_send(_Config) -> - SyncBatchSize = 4096, - SyncThroughput = 2000, - QueueLen = 10000, - ?assertEqual( - true, %% Message reach the last one in the queue - rabbit_mirror_queue_sync:maybe_master_batch_send({[], 0, {0, 0, SyncThroughput}, {QueueLen, QueueLen}, 0}, SyncBatchSize)), - ?assertEqual( - true, %% # messages batched is less than batch size; and total message size has reached the batch size - rabbit_mirror_queue_sync:maybe_master_batch_send({[], 0, {0, 0, SyncThroughput}, {SyncBatchSize, QueueLen}, 0}, SyncBatchSize)), - TotalBytes0 = SyncThroughput + 1, - Curr0 = 1, - ?assertEqual( - true, %% Total batch size exceed max sync throughput - rabbit_mirror_queue_sync:maybe_master_batch_send({[], 0, {TotalBytes0, 0, SyncThroughput}, {Curr0, QueueLen}, 0}, SyncBatchSize)), - TotalBytes1 = 1, - Curr1 = 1, - ?assertEqual( - false, %% # messages batched is less than batch size; and total bytes is less than sync throughput - rabbit_mirror_queue_sync:maybe_master_batch_send({[], 0, {TotalBytes1, 0, SyncThroughput}, {Curr1, QueueLen}, 0}, SyncBatchSize)), - ok. - -get_time_diff(_Config) -> - TotalBytes0 = 100, - Interval0 = 1000, %% ms - MaxSyncThroughput0 = 100, %% bytes/s - ?assertEqual(%% Used throughput = 100 / 1000 * 1000 = 100 bytes/s; matched max throughput - 0, %% => no need to pause queue sync - rabbit_mirror_queue_sync:get_time_diff(TotalBytes0, Interval0, MaxSyncThroughput0)), - - TotalBytes1 = 100, - Interval1 = 1000, %% ms - MaxSyncThroughput1 = 200, %% bytes/s - ?assertEqual( %% Used throughput = 100 / 1000 * 1000 = 100 bytes/s; less than max throughput - 0, %% => no need to pause queue sync - rabbit_mirror_queue_sync:get_time_diff(TotalBytes1, Interval1, MaxSyncThroughput1)), - - TotalBytes2 = 100, - Interval2 = 1000, %% ms - MaxSyncThroughput2 = 50, %% bytes/s - ?assertEqual( %% Used throughput = 100 / 1000 * 1000 = 100 bytes/s; greater than max throughput - 1000, %% => pause queue sync for 1000 ms - rabbit_mirror_queue_sync:get_time_diff(TotalBytes2, Interval2, MaxSyncThroughput2)), - ok. - -append_to_acc(_Config) -> - Content = #content{properties = #'P_basic'{delivery_mode = 2, - priority = 2}, - payload_fragments_rev = [[<<"1234567890">>]] %% 10 bytes - }, - ExName = rabbit_misc:r(<<>>, exchange, <<>>), - Msg = mc_amqpl:message(ExName, <<>>, Content, #{id => 1}, true), - BQDepth = 10, - SyncThroughput_0 = 0, - FoldAcc1 = {[], 0, {0, erlang:monotonic_time(), SyncThroughput_0}, {0, BQDepth}, erlang:monotonic_time()}, - {_, _, {TotalBytes1, _, _}, _, _} = rabbit_mirror_queue_sync:append_to_acc(Msg, {}, false, FoldAcc1), - ?assertEqual(0, TotalBytes1), %% Skipping calculating TotalBytes for the pending batch as SyncThroughput is 0. - - SyncThroughput = 100, - FoldAcc2 = {[], 0, {0, erlang:monotonic_time(), SyncThroughput}, {0, BQDepth}, erlang:monotonic_time()}, - {_, _, {TotalBytes2, _, _}, _, _} = rabbit_mirror_queue_sync:append_to_acc(Msg, {}, false, FoldAcc2), - ?assertEqual(10, TotalBytes2), %% Message size is added to existing TotalBytes - - FoldAcc3 = {[], 0, {TotalBytes2, erlang:monotonic_time(), SyncThroughput}, {0, BQDepth}, erlang:monotonic_time()}, - {_, _, {TotalBytes3, _, _}, _, _} = rabbit_mirror_queue_sync:append_to_acc(Msg, {}, false, FoldAcc3), - ?assertEqual(TotalBytes2 + 10, TotalBytes3), %% Message size is added to existing TotalBytes - ok. 
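For context on the arithmetic spelled out in the removed get_time_diff cases: the pause is the time needed to ship TotalBytes at the configured maximum throughput, minus the interval already elapsed, floored at zero. A hedged one-function restatement of that relationship (not the removed implementation itself):

%% 100 bytes over 1000 ms: at 100 B/s -> 0 ms, at 200 B/s -> 0 ms, at 50 B/s -> 1000 ms.
sync_pause_ms(TotalBytes, IntervalMs, MaxBytesPerSec) ->
    NeededMs = TotalBytes * 1000 div MaxBytesPerSec,
    max(0, NeededMs - IntervalMs).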
diff --git a/deps/rabbit/test/unit_classic_mirrored_queue_throughput_SUITE.erl b/deps/rabbit/test/unit_classic_mirrored_queue_throughput_SUITE.erl deleted file mode 100644 index 7e10b5f5d9bc..000000000000 --- a/deps/rabbit/test/unit_classic_mirrored_queue_throughput_SUITE.erl +++ /dev/null @@ -1,29 +0,0 @@ --module(unit_classic_mirrored_queue_throughput_SUITE). - --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). - --compile(export_all). - -all() -> - [ - default_max_sync_throughput - ]. - -default_max_sync_throughput(_Config) -> - ?assertEqual( - 0, - rabbit_mirror_queue_misc:default_max_sync_throughput()), - application:set_env(rabbit, mirroring_sync_max_throughput, 100), - ?assertEqual( - 100, - rabbit_mirror_queue_misc:default_max_sync_throughput()), - application:set_env(rabbit, mirroring_sync_max_throughput, "100MiB"), - ?assertEqual( - 100*1024*1024, - rabbit_mirror_queue_misc:default_max_sync_throughput()), - application:set_env(rabbit, mirroring_sync_max_throughput, "100MB"), - ?assertEqual( - 100000000, - rabbit_mirror_queue_misc:default_max_sync_throughput()), - ok. diff --git a/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl b/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl index 558f75997c4a..9537c1fd1852 100644 --- a/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl +++ b/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl @@ -2,11 +2,10 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_cluster_formation_locking_mocks_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). @@ -47,25 +46,37 @@ end_per_testcase(_, _) -> init_with_lock_exits_after_errors(_Config) -> meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> {error, "test error"} end), - ?assertExit(cannot_acquire_startup_lock, rabbit_peer_discovery:maybe_create_cluster(2, 10, fun(_, _) -> ok end)), + ?assertEqual( + {error, "test error"}, + rabbit_peer_discovery:join_selected_node(rabbit_peer_discovery_classic_config, missing@localhost, disc)), ?assert(meck:validate(rabbit_peer_discovery_classic_config)), passed. +%% The `aborted_feature_flags_compat_check' error means the function called +%% `rabbit_db_cluster:join/2', so it passed the locking step. The error is +%% expected because the test runs outside of a working RabbitMQ. + init_with_lock_ignore_after_errors(_Config) -> meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> {error, "test error"} end), - ?assertEqual(ok, rabbit_peer_discovery:maybe_create_cluster(2, 10, fun(_, _) -> ok end)), + ?assertEqual( + {error, {aborted_feature_flags_compat_check, {error, feature_flags_file_not_set}}}, + rabbit_peer_discovery:join_selected_node(rabbit_peer_discovery_classic_config, missing@localhost, disc)), ?assert(meck:validate(rabbit_peer_discovery_classic_config)), passed. 
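The reworked locking tests above follow the usual meck shape: stub the backend's lock/1 callback, call rabbit_peer_discovery:join_selected_node/3, and confirm the stub was exercised. A generic sketch of that scaffolding with cleanup (the helper name and stubbed return value are placeholders; the suite's own init/end hooks handle new/unload in practice):

%% Generic meck scaffolding sketch; Fun is the assertion to run against the stub.
with_stubbed_lock(LockResult, Fun) ->
    ok = meck:new(rabbit_peer_discovery_classic_config, [passthrough]),
    try
        ok = meck:expect(rabbit_peer_discovery_classic_config, lock,
                         fun(_) -> LockResult end),
        Result = Fun(),
        true = meck:validate(rabbit_peer_discovery_classic_config),
        Result
    after
        meck:unload(rabbit_peer_discovery_classic_config)
    end.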
init_with_lock_not_supported(_Config) -> meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> not_supported end), - ?assertEqual(ok, rabbit_peer_discovery:maybe_create_cluster(2, 10, fun(_, _) -> ok end)), + ?assertEqual( + {error, {aborted_feature_flags_compat_check, {error, feature_flags_file_not_set}}}, + rabbit_peer_discovery:join_selected_node(rabbit_peer_discovery_classic_config, missing@localhost, disc)), ?assert(meck:validate(rabbit_peer_discovery_classic_config)), passed. init_with_lock_supported(_Config) -> meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> {ok, data} end), meck:expect(rabbit_peer_discovery_classic_config, unlock, fun(data) -> ok end), - ?assertEqual(ok, rabbit_peer_discovery:maybe_create_cluster(2, 10, fun(_, _) -> ok end)), + ?assertEqual( + {error, {aborted_feature_flags_compat_check, {error, feature_flags_file_not_set}}}, + rabbit_peer_discovery:join_selected_node(rabbit_peer_discovery_classic_config, missing@localhost, disc)), ?assert(meck:validate(rabbit_peer_discovery_classic_config)), passed. diff --git a/deps/rabbit/test/unit_cluster_formation_sort_nodes_SUITE.erl b/deps/rabbit/test/unit_cluster_formation_sort_nodes_SUITE.erl new file mode 100644 index 000000000000..b882e4558ab0 --- /dev/null +++ b/deps/rabbit/test/unit_cluster_formation_sort_nodes_SUITE.erl @@ -0,0 +1,213 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(unit_cluster_formation_sort_nodes_SUITE). + +-include_lib("eunit/include/eunit.hrl"). + +-export([all/0, + groups/0, + init_per_suite/1, end_per_suite/1, + init_per_group/2, end_per_group/2, + init_per_testcase/2, end_per_testcase/2, + + sort_single_node/1, + sort_by_cluster_size/1, + sort_by_start_time/1, + sort_by_node_name/1, + cluster_size_has_precedence_over_start_time/1, + start_time_has_precedence_over_node_name/1, + failed_in_ci_1/1, + failed_in_ci_2/1]). + +all() -> + [ + {group, parallel_tests} + ]. + +groups() -> + [ + {parallel_tests, [parallel], + [ + sort_single_node, + sort_by_cluster_size, + sort_by_start_time, + sort_by_node_name, + cluster_size_has_precedence_over_start_time, + start_time_has_precedence_over_node_name, + failed_in_ci_1, + failed_in_ci_2 + ]} + ]. + +init_per_suite(Config) -> + Config. + +end_per_suite(Config) -> + Config. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, Config) -> + Config. + +init_per_testcase(_Testcase, Config) -> + Config. + +end_per_testcase(_Testcase, Config) -> + Config. + +sort_single_node(_Config) -> + NodesAndProps = [{a, [a], 100, true}], + ?assertEqual( + NodesAndProps, + rabbit_peer_discovery:sort_nodes_and_props(NodesAndProps)). + +sort_by_cluster_size(_Config) -> + NodesAndProps = [{a, [a], 100, true}, + {a, [a, a], 100, true}], + ?assertEqual( + [{a, [a, a], 100, true}, + {a, [a], 100, true}], + rabbit_peer_discovery:sort_nodes_and_props(NodesAndProps)). + +sort_by_start_time(_Config) -> + NodesAndProps = [{a, [a], 20, true}, + {a, [a], 10, true}], + ?assertEqual( + [{a, [a], 10, true}, + {a, [a], 20, true}], + rabbit_peer_discovery:sort_nodes_and_props(NodesAndProps)). 
+ +sort_by_node_name(_Config) -> + NodesAndProps = [{b, [b], 100, true}, + {a, [a], 100, true}], + ?assertEqual( + [{a, [a], 100, true}, + {b, [b], 100, true}], + rabbit_peer_discovery:sort_nodes_and_props(NodesAndProps)). + +cluster_size_has_precedence_over_start_time(_Config) -> + NodesAndProps = [{a, [a], 100, true}, + {b, [b, c], 90, true}], + ?assertEqual( + [{b, [b, c], 90, true}, + {a, [a], 100, true}], + rabbit_peer_discovery:sort_nodes_and_props(NodesAndProps)). + +start_time_has_precedence_over_node_name(_Config) -> + NodesAndProps = [{a, [a], 100, true}, + {b, [b], 90, true}], + ?assertEqual( + [{b, [b], 90, true}, + {a, [a], 100, true}], + rabbit_peer_discovery:sort_nodes_and_props(NodesAndProps)). + +failed_in_ci_1(_Config) -> + NodesAndProps = [{'successful_discovery-cluster_size_7-7@localhost', + ['successful_discovery-cluster_size_7-7@localhost'], + 1699635835018, true}, + {'successful_discovery-cluster_size_7-6@localhost', + ['successful_discovery-cluster_size_7-6@localhost'], + 1699635835006, true}, + {'successful_discovery-cluster_size_7-5@localhost', + ['successful_discovery-cluster_size_7-5@localhost'], + 1699635835019, true}, + {'successful_discovery-cluster_size_7-4@localhost', + ['successful_discovery-cluster_size_7-4@localhost'], + 1699635835007, true}, + {'successful_discovery-cluster_size_7-3@localhost', + ['successful_discovery-cluster_size_7-3@localhost'], + 1699635835006, true}, + {'successful_discovery-cluster_size_7-2@localhost', + ['successful_discovery-cluster_size_7-2@localhost'], + 1699635835013, true}, + {'successful_discovery-cluster_size_7-1@localhost', + ['successful_discovery-cluster_size_7-1@localhost'], + 1699635835011, true}], + ?assertEqual( + [{'successful_discovery-cluster_size_7-3@localhost', + ['successful_discovery-cluster_size_7-3@localhost'], + 1699635835006, true}, + {'successful_discovery-cluster_size_7-6@localhost', + ['successful_discovery-cluster_size_7-6@localhost'], + 1699635835006, true}, + {'successful_discovery-cluster_size_7-4@localhost', + ['successful_discovery-cluster_size_7-4@localhost'], + 1699635835007, true}, + {'successful_discovery-cluster_size_7-1@localhost', + ['successful_discovery-cluster_size_7-1@localhost'], + 1699635835011, true}, + {'successful_discovery-cluster_size_7-2@localhost', + ['successful_discovery-cluster_size_7-2@localhost'], + 1699635835013, true}, + {'successful_discovery-cluster_size_7-7@localhost', + ['successful_discovery-cluster_size_7-7@localhost'], + 1699635835018, true}, + {'successful_discovery-cluster_size_7-5@localhost', + ['successful_discovery-cluster_size_7-5@localhost'], + 1699635835019, true}], + rabbit_peer_discovery:sort_nodes_and_props(NodesAndProps)). 
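Taken together, these cases pin down the ordering contract of sort_nodes_and_props/1: nodes that already see a larger cluster sort first, ties break on the earliest start time, and remaining ties on the node name. One way to express that rule as a sort key (a sketch under those assumptions, not the implementation under test):

%% Sort key equivalent to the ordering asserted by these cases (sketch only).
sort_nodes_and_props_sketch(NodesAndProps) ->
    lists:sort(
      fun({NameA, ClusteredA, StartA, _ReadyA},
          {NameB, ClusteredB, StartB, _ReadyB}) ->
              {-length(ClusteredA), StartA, NameA} =<
                  {-length(ClusteredB), StartB, NameB}
      end, NodesAndProps).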
+ +failed_in_ci_2(_Config) -> + NodesAndProps = [{'successful_discovery-cluster_size_7-7@localhost', + ['successful_discovery-cluster_size_7-1@localhost', + 'successful_discovery-cluster_size_7-7@localhost', + 'successful_discovery-cluster_size_7-2@localhost'], + 1699635835018, true}, + {'successful_discovery-cluster_size_7-6@localhost', + ['successful_discovery-cluster_size_7-6@localhost'], + 1699635835006, true}, + {'successful_discovery-cluster_size_7-5@localhost', + ['successful_discovery-cluster_size_7-5@localhost'], + 1699635835019, true}, + {'successful_discovery-cluster_size_7-4@localhost', + ['successful_discovery-cluster_size_7-4@localhost'], + 1699635835007, true}, + {'successful_discovery-cluster_size_7-3@localhost', + ['successful_discovery-cluster_size_7-3@localhost'], + 1699635835006, true}, + {'successful_discovery-cluster_size_7-2@localhost', + ['successful_discovery-cluster_size_7-1@localhost', + 'successful_discovery-cluster_size_7-7@localhost', + 'successful_discovery-cluster_size_7-2@localhost'], + 1699635835013, true}, + {'successful_discovery-cluster_size_7-1@localhost', + ['successful_discovery-cluster_size_7-1@localhost', + 'successful_discovery-cluster_size_7-7@localhost', + 'successful_discovery-cluster_size_7-2@localhost'], + 1699635835011, true}], + ?assertEqual( + [{'successful_discovery-cluster_size_7-1@localhost', + ['successful_discovery-cluster_size_7-1@localhost', + 'successful_discovery-cluster_size_7-7@localhost', + 'successful_discovery-cluster_size_7-2@localhost'], + 1699635835011, true}, + {'successful_discovery-cluster_size_7-2@localhost', + ['successful_discovery-cluster_size_7-1@localhost', + 'successful_discovery-cluster_size_7-7@localhost', + 'successful_discovery-cluster_size_7-2@localhost'], + 1699635835013, true}, + {'successful_discovery-cluster_size_7-7@localhost', + ['successful_discovery-cluster_size_7-1@localhost', + 'successful_discovery-cluster_size_7-7@localhost', + 'successful_discovery-cluster_size_7-2@localhost'], + 1699635835018, true}, + {'successful_discovery-cluster_size_7-3@localhost', + ['successful_discovery-cluster_size_7-3@localhost'], + 1699635835006, true}, + {'successful_discovery-cluster_size_7-6@localhost', + ['successful_discovery-cluster_size_7-6@localhost'], + 1699635835006, true}, + {'successful_discovery-cluster_size_7-4@localhost', + ['successful_discovery-cluster_size_7-4@localhost'], + 1699635835007, true}, + {'successful_discovery-cluster_size_7-5@localhost', + ['successful_discovery-cluster_size_7-5@localhost'], + 1699635835019, true}], + rabbit_peer_discovery:sort_nodes_and_props(NodesAndProps)). diff --git a/deps/rabbit/test/unit_collections_SUITE.erl b/deps/rabbit/test/unit_collections_SUITE.erl index 9c84c0e44c01..f91cb4c6aede 100644 --- a/deps/rabbit/test/unit_collections_SUITE.erl +++ b/deps/rabbit/test/unit_collections_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_collections_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). 
diff --git a/deps/rabbit/test/unit_config_value_encryption_SUITE.erl b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl index 1ba5605908b3..72968c0b37ac 100644 --- a/deps/rabbit/test/unit_config_value_encryption_SUITE.erl +++ b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_config_value_encryption_SUITE). diff --git a/deps/rabbit/test/unit_connection_tracking_SUITE.erl b/deps/rabbit/test/unit_connection_tracking_SUITE.erl index 71b0bf6aeeec..23925155bd63 100644 --- a/deps/rabbit/test/unit_connection_tracking_SUITE.erl +++ b/deps/rabbit/test/unit_connection_tracking_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_connection_tracking_SUITE). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). diff --git a/deps/rabbit/test/unit_credit_flow_SUITE.erl b/deps/rabbit/test/unit_credit_flow_SUITE.erl index a90090dd6835..4b50e6f19a0c 100644 --- a/deps/rabbit/test/unit_credit_flow_SUITE.erl +++ b/deps/rabbit/test/unit_credit_flow_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_credit_flow_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/unit_disk_monitor_SUITE.erl b/deps/rabbit/test/unit_disk_monitor_SUITE.erl index ad357d55bdee..559e9d0f0ee9 100644 --- a/deps/rabbit/test/unit_disk_monitor_SUITE.erl +++ b/deps/rabbit/test/unit_disk_monitor_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_disk_monitor_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/unit_file_handle_cache_SUITE.erl b/deps/rabbit/test/unit_file_handle_cache_SUITE.erl index 59c59737c8b0..5ba89315989b 100644 --- a/deps/rabbit/test/unit_file_handle_cache_SUITE.erl +++ b/deps/rabbit/test/unit_file_handle_cache_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_file_handle_cache_SUITE). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). @@ -26,9 +25,7 @@ groups() -> [ {non_parallel_tests, [], [ file_handle_cache, %% Change FHC limit. - file_handle_cache_reserve, file_handle_cache_reserve_release, - file_handle_cache_reserve_above_limit, file_handle_cache_reserve_monitor, file_handle_cache_reserve_open_file_above_limit ]} @@ -136,47 +133,6 @@ file_handle_cache1(_Config) -> ok = file_handle_cache:set_limit(Limit), passed. -file_handle_cache_reserve(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, file_handle_cache_reserve1, [Config]). - -file_handle_cache_reserve1(_Config) -> - Limit = file_handle_cache:get_limit(), - ok = file_handle_cache:set_limit(5), - %% Reserves are always accepted, even if above the limit - %% These are for special processes such as quorum queues - ok = file_handle_cache:set_reservation(7), - - Self = self(), - spawn(fun () -> ok = file_handle_cache:obtain(), - Self ! obtained - end), - - Props = file_handle_cache:info([files_reserved, sockets_used]), - ?assertEqual(7, proplists:get_value(files_reserved, Props)), - ?assertEqual(0, proplists:get_value(sockets_used, Props)), - - %% The obtain should still be blocked, as there are no file handles - %% available - receive - obtained -> - throw(error_file_obtained) - after 1000 -> - %% Let's release 5 file handles, that should leave - %% enough free for the `obtain` to go through - file_handle_cache:set_reservation(2), - Props0 = file_handle_cache:info([files_reserved, sockets_used]), - ?assertEqual(2, proplists:get_value(files_reserved, Props0)), - ?assertEqual(1, proplists:get_value(sockets_used, Props0)), - receive - obtained -> - ok = file_handle_cache:set_limit(Limit), - passed - after 5000 -> - throw(error_file_not_released) - end - end. - file_handle_cache_reserve_release(Config) -> passed = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, file_handle_cache_reserve_release1, [Config]). @@ -190,27 +146,6 @@ file_handle_cache_reserve_release1(_Config) -> ?assertEqual([{files_reserved, 0}], file_handle_cache:info([files_reserved])), passed. -file_handle_cache_reserve_above_limit(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, file_handle_cache_reserve_above_limit1, [Config]). - -file_handle_cache_reserve_above_limit1(_Config) -> - Limit = file_handle_cache:get_limit(), - ok = file_handle_cache:set_limit(5), - %% Reserves are always accepted, even if above the limit - %% These are for special processes such as quorum queues - ok = file_handle_cache:obtain(5), - ?assertEqual([{file_descriptor_limit, []}], rabbit_alarm:get_alarms()), - - ok = file_handle_cache:set_reservation(7), - - Props = file_handle_cache:info([files_reserved, sockets_used]), - ?assertEqual(7, proplists:get_value(files_reserved, Props)), - ?assertEqual(5, proplists:get_value(sockets_used, Props)), - - ok = file_handle_cache:set_limit(Limit), - passed. 
- file_handle_cache_reserve_open_file_above_limit(Config) -> passed = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, file_handle_cache_reserve_open_file_above_limit1, [Config]). diff --git a/deps/rabbit/test/unit_gen_server2_SUITE.erl b/deps/rabbit/test/unit_gen_server2_SUITE.erl index b12fa0ab849d..08d2ed905e23 100644 --- a/deps/rabbit/test/unit_gen_server2_SUITE.erl +++ b/deps/rabbit/test/unit_gen_server2_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_gen_server2_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). @@ -66,8 +65,8 @@ gen_server2_with_state(Config) -> ?MODULE, gen_server2_with_state1, [Config]). gen_server2_with_state1(_Config) -> - fhc_state = gen_server2:with_state(file_handle_cache, - fun (S) -> element(1, S) end), + state = gen_server2:with_state(background_gc, + fun (S) -> element(1, S) end), passed. diff --git a/deps/rabbit/test/unit_gm_SUITE.erl b/deps/rabbit/test/unit_gm_SUITE.erl deleted file mode 100644 index 75024ababd90..000000000000 --- a/deps/rabbit/test/unit_gm_SUITE.erl +++ /dev/null @@ -1,242 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(unit_gm_SUITE). - --behaviour(gm). - --include_lib("common_test/include/ct.hrl"). - --include("gm_specs.hrl"). - --compile(export_all). - --define(RECEIVE_OR_THROW(Body, Bool, Error), - receive Body -> - true = Bool, - passed - after 5000 -> - throw(Error) - end). - -all() -> - [ - join_leave, - broadcast, - confirmed_broadcast, - member_death, - receive_in_order, - unexpected_msg, - down_in_members_change - ]. - -init_per_suite(Config) -> - ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)), - ok = application:start(mnesia), - {ok, FHC} = file_handle_cache:start_link(), - unlink(FHC), - {ok, WPS} = worker_pool_sup:start_link(), - unlink(WPS), - rabbit_ct_helpers:set_config(Config, [ - {file_handle_cache_pid, FHC}, - {worker_pool_sup_pid, WPS} - ]). - -end_per_suite(Config) -> - exit(?config(worker_pool_sup_pid, Config), shutdown), - exit(?config(file_handle_cache_pid, Config), shutdown), - ok = application:stop(mnesia), - Config. - -%% --------------------------------------------------------------------------- -%% Functional tests -%% --------------------------------------------------------------------------- - -join_leave(_Config) -> - passed = with_two_members(fun (_Pid, _Pid2) -> passed end). - -broadcast(_Config) -> - passed = do_broadcast(fun gm:broadcast/2). - -confirmed_broadcast(_Config) -> - passed = do_broadcast(fun gm:confirmed_broadcast/2). 
- -member_death(_Config) -> - passed = with_two_members( - fun (Pid, Pid2) -> - {ok, Pid3} = gm:start_link( - ?MODULE, ?MODULE, self(), - fun rabbit_mnesia:execute_mnesia_transaction/1), - passed = receive_joined(Pid3, [Pid, Pid2, Pid3], - timeout_joining_gm_group_3), - passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1), - passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2), - - unlink(Pid3), - exit(Pid3, kill), - - %% Have to do some broadcasts to ensure that all members - %% find out about the death. - BFun = broadcast_fun(fun gm:confirmed_broadcast/2), - passed = BFun(Pid, Pid2), - passed = BFun(Pid, Pid2), - - passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1), - passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2), - - passed - end). - -receive_in_order(_Config) -> - passed = with_two_members( - fun (Pid, Pid2) -> - Numbers = lists:seq(1,1000), - [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end - || N <- Numbers], - passed = receive_numbers( - Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers), - passed = receive_numbers( - Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers), - passed = receive_numbers( - Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers), - passed = receive_numbers( - Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers), - passed - end). - -unexpected_msg(_Config) -> - passed = with_two_members( - fun(Pid, _) -> - Pid ! {make_ref(), old_gen_server_answer}, - true = erlang:is_process_alive(Pid), - passed - end). - -down_in_members_change(_Config) -> - %% Setup - ok = gm:create_tables(), - {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(), - fun rabbit_mnesia:execute_mnesia_transaction/1), - passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1), - {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(), - fun rabbit_mnesia:execute_mnesia_transaction/1), - passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2), - passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2), - - %% Test. Simulate that the gm group is deleted (forget_group) while - %% processing the 'DOWN' message from the neighbour - process_flag(trap_exit, true), - ok = meck:new(mnesia, [passthrough]), - ok = meck:expect(mnesia, read, fun({gm_group, ?MODULE}) -> - []; - (Key) -> - meck:passthrough([Key]) - end), - gm:leave(Pid2), - Passed = receive - {'EXIT', Pid, shutdown} -> - passed; - {'EXIT', Pid, _} -> - crashed - after 15000 -> - timeout - end, - %% Cleanup - meck:unload(mnesia), - process_flag(trap_exit, false), - passed = Passed. - - -do_broadcast(Fun) -> - with_two_members(broadcast_fun(Fun)). - -broadcast_fun(Fun) -> - fun (Pid, Pid2) -> - ok = Fun(Pid, magic_message), - passed = receive_or_throw({msg, Pid, Pid, magic_message}, - timeout_waiting_for_msg), - passed = receive_or_throw({msg, Pid2, Pid, magic_message}, - timeout_waiting_for_msg) - end. 
- -with_two_members(Fun) -> - ok = gm:create_tables(), - - {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(), - fun rabbit_mnesia:execute_mnesia_transaction/1), - passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1), - - {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(), - fun rabbit_mnesia:execute_mnesia_transaction/1), - passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2), - passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2), - - passed = Fun(Pid, Pid2), - - ok = gm:leave(Pid), - passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1), - passed = - receive_termination(Pid, normal, timeout_waiting_for_termination_1), - - ok = gm:leave(Pid2), - passed = - receive_termination(Pid2, normal, timeout_waiting_for_termination_2), - - receive X -> throw({unexpected_message, X}) - after 0 -> passed - end. - -receive_or_throw(Pattern, Error) -> - ?RECEIVE_OR_THROW(Pattern, true, Error). - -receive_birth(From, Born, Error) -> - ?RECEIVE_OR_THROW({members_changed, From, Birth, Death}, - ([Born] == Birth) andalso ([] == Death), - Error). - -receive_death(From, Died, Error) -> - ?RECEIVE_OR_THROW({members_changed, From, Birth, Death}, - ([] == Birth) andalso ([Died] == Death), - Error). - -receive_joined(From, Members, Error) -> - ?RECEIVE_OR_THROW({joined, From, Members1}, - lists:usort(Members) == lists:usort(Members1), - Error). - -receive_termination(From, Reason, Error) -> - ?RECEIVE_OR_THROW({termination, From, Reason1}, - Reason == Reason1, - Error). - -receive_numbers(_Pid, _Sender, _Error, []) -> - passed; -receive_numbers(Pid, Sender, Error, [N | Numbers]) -> - ?RECEIVE_OR_THROW({msg, Pid, Sender, M}, - M == N, - Error), - receive_numbers(Pid, Sender, Error, Numbers). - -%% ------------------------------------------------------------------- -%% gm behavior callbacks. -%% ------------------------------------------------------------------- - -joined(Pid, Members) -> - Pid ! {joined, self(), Members}, - ok. - -members_changed(Pid, Births, Deaths) -> - Pid ! {members_changed, self(), Births, Deaths}, - ok. - -handle_msg(Pid, From, Msg) -> - Pid ! {msg, self(), From, Msg}, - ok. - -handle_terminate(Pid, Reason) -> - Pid ! {termination, self(), Reason}, - ok. diff --git a/deps/rabbit/test/unit_log_management_SUITE.erl b/deps/rabbit/test/unit_log_management_SUITE.erl index 3921f8ab6f08..1addb954706e 100644 --- a/deps/rabbit/test/unit_log_management_SUITE.erl +++ b/deps/rabbit/test/unit_log_management_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_log_management_SUITE). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). diff --git a/deps/rabbit/test/unit_operator_policy_SUITE.erl b/deps/rabbit/test/unit_operator_policy_SUITE.erl index be01124a2e85..ea2b83984d69 100644 --- a/deps/rabbit/test/unit_operator_policy_SUITE.erl +++ b/deps/rabbit/test/unit_operator_policy_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_operator_policy_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). diff --git a/deps/rabbit/test/unit_pg_local_SUITE.erl b/deps/rabbit/test/unit_pg_local_SUITE.erl index 803749181317..cba1bd4a6212 100644 --- a/deps/rabbit/test/unit_pg_local_SUITE.erl +++ b/deps/rabbit/test/unit_pg_local_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_pg_local_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/unit_plugin_directories_SUITE.erl b/deps/rabbit/test/unit_plugin_directories_SUITE.erl index b9fdbeea596c..4e05905a2be7 100644 --- a/deps/rabbit/test/unit_plugin_directories_SUITE.erl +++ b/deps/rabbit/test/unit_plugin_directories_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_plugin_directories_SUITE). diff --git a/deps/rabbit/test/unit_plugin_versioning_SUITE.erl b/deps/rabbit/test/unit_plugin_versioning_SUITE.erl index ed7e1b3cbbbb..9fd1b3624c9c 100644 --- a/deps/rabbit/test/unit_plugin_versioning_SUITE.erl +++ b/deps/rabbit/test/unit_plugin_versioning_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_plugin_versioning_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/unit_policy_validators_SUITE.erl b/deps/rabbit/test/unit_policy_validators_SUITE.erl index d6b8cfe7eb31..89207caae97e 100644 --- a/deps/rabbit/test/unit_policy_validators_SUITE.erl +++ b/deps/rabbit/test/unit_policy_validators_SUITE.erl @@ -2,20 +2,18 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_policy_validators_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). 
all() -> [ - {group, core_validators}, - {group, classic_queue_mirroring_validators} + {group, core_validators} ]. groups() -> @@ -33,11 +31,6 @@ groups() -> delivery_limit, classic_queue_lazy_mode, length_limit_overflow_mode - ]}, - - {classic_queue_mirroring_validators, [parallel], [ - classic_queue_ha_mode, - classic_queue_ha_params ]} ]. @@ -52,19 +45,9 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(Group = classic_queue_mirroring_validators, Config) -> - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, Group}, - {rmq_nodes_count, 1} - ]), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps()); init_per_group(_, Config) -> Config. -end_per_group(classic_queue_mirroring_validators, Config) -> - rabbit_ct_helpers:run_steps(Config, - rabbit_ct_broker_helpers:teardown_steps()); end_per_group(_, Config) -> Config. @@ -126,60 +109,6 @@ length_limit_overflow_mode(_Config) -> %% invalid values [<<"unknown">>, <<"publish">>, <<"overflow">>, <<"mode">>]). - -%% ------------------------------------------------------------------- -%% CMQ Validators -%% ------------------------------------------------------------------- - -classic_queue_ha_mode(Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, classic_queue_ha_mode1, [Config]). - -classic_queue_ha_mode1(_Config) -> - ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([ - {<<"ha-mode">>, <<"exactly">>}, - {<<"ha-params">>, 2} - ])), - - ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([ - {<<"ha-mode">>, <<"nodes">>}, - {<<"ha-params">>, [<<"rabbit@host1">>, <<"rabbit@host2">>]} - ])), - - ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([ - {<<"ha-mode">>, <<"all">>} - ])), - - ?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([ - {<<"ha-mode">>, <<"lolwut">>}, - {<<"ha-params">>, 2} - ])). - -classic_queue_ha_params(Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, classic_queue_ha_mode1, [Config]). - -classic_queue_ha_params1(_Config) -> - ?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([ - {<<"ha-mode">>, <<"exactly">>}, - {<<"ha-params">>, <<"2">>} - ])), - - ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([ - {<<"ha-mode">>, <<"nodes">>}, - {<<"ha-params">>, <<"lolwut">>} - ])), - - ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([ - {<<"ha-mode">>, <<"all">>}, - {<<"ha-params">>, <<"lolwut">>} - ])), - - ?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([ - {<<"ha-mode">>, <<"lolwut">>}, - {<<"ha-params">>, 2} - ])). - %% %% Implementation %% diff --git a/deps/rabbit/test/unit_priority_queue_SUITE.erl b/deps/rabbit/test/unit_priority_queue_SUITE.erl index 154a56cc4cef..be8eadf2837e 100644 --- a/deps/rabbit/test/unit_priority_queue_SUITE.erl +++ b/deps/rabbit/test/unit_priority_queue_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_priority_queue_SUITE). 
diff --git a/deps/rabbit/test/unit_queue_consumers_SUITE.erl b/deps/rabbit/test/unit_queue_consumers_SUITE.erl index 4c8bc21d9173..0f0690b2b8c2 100644 --- a/deps/rabbit/test/unit_queue_consumers_SUITE.erl +++ b/deps/rabbit/test/unit_queue_consumers_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_queue_consumers_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/unit_queue_location_SUITE.erl b/deps/rabbit/test/unit_queue_location_SUITE.erl new file mode 100644 index 000000000000..61c49b334908 --- /dev/null +++ b/deps/rabbit/test/unit_queue_location_SUITE.erl @@ -0,0 +1,147 @@ +-module(unit_queue_location_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("stdlib/include/assert.hrl"). + +all() -> + [ + {group, generic}, + {group, classic} + ]. + +groups() -> + [ + {generic, [], generic_tests()}, + {classic, [], classic_tests()} + ]. + +generic_tests() -> [ + default_strategy, + policy_key_precedence, + policy_key_fallback + ]. + +classic_tests() -> [ + classic_balanced_below_threshold, + classic_balanced_above_threshold + ]. + +default_strategy(_Config) -> + ok = meck:new(rabbit_queue_type_util, [passthrough]), + ok = meck:expect(rabbit_queue_type_util, args_policy_lookup, + fun(<<"queue-leader-locator">>, _, _) -> undefined; + (<<"queue-master-locator">>, _, _) -> undefined + end), + ?assertEqual(<<"client-local">>, rabbit_queue_location:leader_locator(queue)), + ok = meck:unload(rabbit_queue_type_util). + +policy_key_precedence(_Config) -> + ok = meck:new(rabbit_queue_type_util, [passthrough]), + ok = meck:expect(rabbit_queue_type_util, args_policy_lookup, + fun(<<"queue-leader-locator">>, _, _) -> <<"balanced">>; + (<<"queue-master-locator">>, _, _) -> <<"min-masters">> + end), + ?assertEqual(<<"balanced">>, rabbit_queue_location:leader_locator(queue)), + ok = meck:unload(rabbit_queue_type_util). + +policy_key_fallback(_Config) -> + ok = meck:new(rabbit_queue_type_util, [passthrough]), + ok = meck:expect(rabbit_queue_type_util, args_policy_lookup, + fun(<<"queue-leader-locator">>, _, _) -> undefined; + (<<"queue-master-locator">>, _, _) -> <<"min-masters">> + end), + ?assertEqual(<<"balanced">>, rabbit_queue_location:leader_locator(queue)), + ok = meck:unload(rabbit_queue_type_util). 
+ +classic_balanced_below_threshold(_Config) -> + ok = meck:new(rabbit_queue_location, [passthrough]), + ok = meck:expect(rabbit_queue_location, node, fun() -> node1 end), + ok = meck:new(rabbit_maintenance, [passthrough]), + ok = meck:expect(rabbit_maintenance, filter_out_drained_nodes_local_read, fun(N) -> N end), + AllNodes = [node1, node2, node3, node4, node5], + RunningNodes = AllNodes, + QueueType = rabbit_classic_queue, + GetQueues = fun() -> unused_because_mecked end, + QueueCount = 2, + QueueCountStartRandom = 1000, + {PotentialLeaders, _} = rabbit_queue_location:select_members( + 1, + QueueType, + AllNodes, + RunningNodes, + QueueCount, + QueueCountStartRandom, + GetQueues), + %% all running nodes should be considered + ?assertEqual(RunningNodes, PotentialLeaders), + % a few different distributions of queues across nodes + % case 1 + ok = meck:expect(rabbit_queue_location, queues_per_node, fun(_, _) -> + #{node1 => 5, + node2 => 1, + node3 => 5} + end), + ?assertEqual(node2, rabbit_queue_location:leader_node(<<"balanced">>, + PotentialLeaders, + RunningNodes, + QueueCount, + QueueCountStartRandom, + GetQueues)), + % case 2 + ok = meck:expect(rabbit_queue_location, queues_per_node, fun(_, _) -> + #{node1 => 0, + node2 => 1, + node3 => 5} + end), + ?assertEqual(node1, rabbit_queue_location:leader_node(<<"balanced">>, + PotentialLeaders, + RunningNodes, + QueueCount, + QueueCountStartRandom, + GetQueues)), + % case 3 + ok = meck:expect(rabbit_queue_location, queues_per_node, fun(_, _) -> + #{node1 => 100, + node2 => 100, + node3 => 99} + end), + ?assertEqual(node3, rabbit_queue_location:leader_node(<<"balanced">>, + PotentialLeaders, + RunningNodes, + QueueCount, + QueueCountStartRandom, + GetQueues)), + + ok = meck:unload([rabbit_queue_location, rabbit_maintenance]). + +classic_balanced_above_threshold(_Config) -> + ok = meck:new(rabbit_maintenance, [passthrough]), + ok = meck:expect(rabbit_maintenance, filter_out_drained_nodes_local_read, fun(N) -> N end), + AllNodes = [node1, node2, node3], + RunningNodes = AllNodes, + QueueType = rabbit_classic_queue, + GetQueues = fun() -> [] end, %rabbit_queue_location:get_queues_for_type(QueueType), + QueueCount = 1230, + QueueCountStartRandom = 1000, + Locations = [begin + {Members, _} = rabbit_queue_location:select_members( + 1, + QueueType, + AllNodes, + RunningNodes, + QueueCount, + QueueCountStartRandom, + GetQueues), + rabbit_queue_location:leader_node(<<"balanced">>, + Members, + RunningNodes, + QueueCount, + QueueCountStartRandom, + GetQueues) + end || _ <- lists:seq(1, 30)], + %% given we selected a random location 30 times with 3 possible options, + %% we would have to be very unlucky not to see all 3 nodes in the results + ?assertEqual([node1, node2, node3], lists:sort(lists:uniq(Locations))), + ok = meck:unload([rabbit_maintenance]). diff --git a/deps/rabbit/test/unit_quorum_queue_SUITE.erl b/deps/rabbit/test/unit_quorum_queue_SUITE.erl new file mode 100644 index 000000000000..be96bd612359 --- /dev/null +++ b/deps/rabbit/test/unit_quorum_queue_SUITE.erl @@ -0,0 +1,71 @@ +-module(unit_quorum_queue_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +all() -> + [ + all_replica_states_includes_nonvoters, + filter_nonvoters, + filter_quorum_critical_accounts_nonvoters + ]. 
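The classic_balanced_* cases above capture the "balanced" locator rule for classic queues: while the total queue count stays below the randomisation threshold, the candidate node currently holding the fewest queues is chosen; once the count exceeds the threshold, a leader is picked at random from the candidates. A sketch of that decision (helper name and shape are illustrative, not the module's internals):

%% Below the threshold: fewest queues wins; above it: random candidate.
pick_balanced(QueuesPerNode, _Candidates, QueueCount, RandomThreshold)
  when QueueCount < RandomThreshold ->
    [{Node, _Fewest} | _] = lists:keysort(2, maps:to_list(QueuesPerNode)),
    Node;
pick_balanced(_QueuesPerNode, Candidates, _QueueCount, _RandomThreshold) ->
    lists:nth(rand:uniform(length(Candidates)), Candidates).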
+ +filter_quorum_critical_accounts_nonvoters(_Config) -> + Nodes = [test@leader, test@follower1, test@follower2], + Qs0 = [amqqueue:new(rabbit_misc:r(<<"/">>, queue, <<"q1">>), + {q1, test@leader}, + false, false, none, [], undefined, #{}), + amqqueue:new(rabbit_misc:r(<<"/">>, queue, <<"q2">>), + {q2, test@leader}, + false, false, none, [], undefined, #{})], + Qs = [Q1, Q2] = lists:map(fun (Q) -> + amqqueue:set_type_state(Q, #{nodes => Nodes}) + end, Qs0), + Ss = #{test@leader => #{q1 => leader, q2 => leader}, + test@follower1 => #{q1 => promotable, q2 => follower}, + test@follower2 => #{q1 => follower, q2 => promotable}}, + Qs = rabbit_quorum_queue:filter_quorum_critical(Qs, Ss, test@leader), + [Q2] = rabbit_quorum_queue:filter_quorum_critical(Qs, Ss, test@follower1), + [Q1] = rabbit_quorum_queue:filter_quorum_critical(Qs, Ss, test@follower2), + ok. + +filter_nonvoters(_Config) -> + Qs = [_, _, _, Q4] = + [amqqueue:new(rabbit_misc:r(<<"/">>, queue, <<"q1">>), + {q1, test@leader}, + false, false, none, [], undefined, #{}), + amqqueue:new(rabbit_misc:r(<<"/">>, queue, <<"q2">>), + {q2, test@leader}, + false, false, none, [], undefined, #{}), + amqqueue:new(rabbit_misc:r(<<"/">>, queue, <<"q3">>), + {q3, test@leader}, + false, false, none, [], undefined, #{}), + amqqueue:new(rabbit_misc:r(<<"/">>, queue, <<"q4">>), + {q4, test@leader}, + false, false, none, [], undefined, #{})], + Ss = #{q1 => leader, q2 => follower, q3 => non_voter, q4 => promotable}, + [Q4] = rabbit_quorum_queue:filter_promotable(Qs, Ss), + ok. + +all_replica_states_includes_nonvoters(_Config) -> + ets:new(ra_state, [named_table, public, {write_concurrency, true}]), + ets:insert(ra_state, [ + {q1, leader, voter}, + {q2, follower, voter}, + {q3, follower, promotable}, + {q4, init, unknown}, + %% pre ra-2.7.0 + {q5, leader}, + {q6, follower} + ]), + {_, #{ + q1 := leader, + q2 := follower, + q3 := promotable, + q4 := init, + q5 := leader, + q6 := follower + }} = rabbit_quorum_queue:all_replica_states(), + + true = ets:delete(ra_state), + ok. diff --git a/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl b/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl index 25f7653eae3c..f75e5a1fb2cc 100644 --- a/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl +++ b/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_stats_and_metrics_SUITE). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("kernel/include/file.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). diff --git a/deps/rabbit/test/unit_supervisor2_SUITE.erl b/deps/rabbit/test/unit_supervisor2_SUITE.erl index 104496bc6604..95f46d7a5dad 100644 --- a/deps/rabbit/test/unit_supervisor2_SUITE.erl +++ b/deps/rabbit/test/unit_supervisor2_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -module(unit_supervisor2_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl b/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl index 67be3122ad39..09e782018f53 100644 --- a/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl +++ b/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_vm_memory_monitor_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbit/test/upgrade_preparation_SUITE.erl b/deps/rabbit/test/upgrade_preparation_SUITE.erl index 1ab103a12612..29787ae8d524 100644 --- a/deps/rabbit/test/upgrade_preparation_SUITE.erl +++ b/deps/rabbit/test/upgrade_preparation_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(upgrade_preparation_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). diff --git a/deps/rabbit/test/vhost_SUITE.erl b/deps/rabbit/test/vhost_SUITE.erl index b32bb83b2142..6197e3c132bb 100644 --- a/deps/rabbit/test/vhost_SUITE.erl +++ b/deps/rabbit/test/vhost_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(vhost_SUITE). @@ -33,6 +33,8 @@ groups() -> vhost_failure_forces_connection_closure, vhost_creation_idempotency, vhost_update_idempotency, + vhost_update_default_queue_type_undefined, + vhost_deletion, parse_tags ], ClusterSize2Tests = [ @@ -40,8 +42,10 @@ groups() -> vhost_failure_forces_connection_closure, vhost_failure_forces_connection_closure_on_failure_node, node_starts_with_dead_vhosts, - node_starts_with_dead_vhosts_with_mirrors, - vhost_creation_idempotency + vhost_creation_idempotency, + vhost_update_idempotency, + vhost_update_default_queue_type_undefined, + vhost_deletion ], [ {cluster_size_1_network, [], ClusterSize1Tests}, @@ -249,71 +253,6 @@ node_starts_with_dead_vhosts(Config) -> rabbit_vhost_sup_sup, is_vhost_alive, [VHost2]), ?AWAIT_TIMEOUT). 
-node_starts_with_dead_vhosts_with_mirrors(Config) -> - VHost1 = <<"vhost1">>, - VHost2 = <<"vhost2">>, - - set_up_vhost(Config, VHost1), - set_up_vhost(Config, VHost2), - - true = rabbit_ct_broker_helpers:rpc(Config, 1, - rabbit_vhost_sup_sup, is_vhost_alive, [VHost1]), - true = rabbit_ct_broker_helpers:rpc(Config, 1, - rabbit_vhost_sup_sup, is_vhost_alive, [VHost2]), - [] = rabbit_ct_broker_helpers:rpc(Config, 1, - rabbit_vhost_sup_sup, check, []), - - Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VHost1), - {ok, Chan} = amqp_connection:open_channel(Conn), - - QName = <<"node_starts_with_dead_vhosts_with_mirrors-q-0">>, - amqp_channel:call(Chan, #'queue.declare'{queue = QName, durable = true}), - ok = rabbit_ct_broker_helpers:rpc(Config, 0, - rabbit_policy, set, - [VHost1, <<"mirror">>, <<".*">>, [{<<"ha-mode">>, <<"all">>}], - 0, <<"queues">>, <<"acting-user">>]), - - %% Wait for the queue to start a mirror - ?awaitMatch([_], - begin - {ok, Q0} = rabbit_ct_broker_helpers:rpc( - Config, 0, - rabbit_amqqueue, lookup, - [rabbit_misc:r(VHost1, queue, QName)], infinity), - amqqueue:get_sync_slave_pids(Q0) - end, - ?AWAIT_TIMEOUT), - - rabbit_ct_client_helpers:publish(Chan, QName, 10), - - {ok, Q} = rabbit_ct_broker_helpers:rpc( - Config, 0, - rabbit_amqqueue, lookup, - [rabbit_misc:r(VHost1, queue, QName)], infinity), - - Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), - - [Pid] = amqqueue:get_sync_slave_pids(Q), - - Node1 = node(Pid), - - DataStore1 = rabbit_ct_broker_helpers:rpc( - Config, 1, rabbit_vhost, msg_store_dir_path, [VHost1]), - - rabbit_ct_broker_helpers:stop_node(Config, 1), - - file:write_file(filename:join(DataStore1, "recovery.dets"), <<"garbage">>), - - %% The node should start without a vhost - ok = rabbit_ct_broker_helpers:start_node(Config, 1), - - ?awaitMatch(true, - rabbit_ct_broker_helpers:rpc(Config, 1, rabbit, is_running, []), - ?AWAIT_TIMEOUT), - - ?assertEqual(true, rabbit_ct_broker_helpers:rpc(Config, 1, - rabbit_vhost_sup_sup, is_vhost_alive, [VHost2])). - vhost_creation_idempotency(Config) -> VHost = <<"idempotency-test">>, try @@ -364,6 +303,152 @@ vhost_update_idempotency(Config) -> rabbit_ct_broker_helpers:delete_vhost(Config, VHost) end. +vhost_update_default_queue_type_undefined(Config) -> + VHost = <<"update-default_queue_type-with-undefined-test">>, + Description = <<"rmqfpas-105 test vhost">>, + Tags = [replicate, private], + DefaultQueueType = quorum, + Trace = false, + ActingUser = <<"acting-user">>, + try + ?assertMatch(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost)), + + PutVhostArgs0 = [VHost, Description, Tags, DefaultQueueType, Trace, ActingUser], + ?assertMatch(ok, + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, put_vhost, PutVhostArgs0)), + + PutVhostArgs1 = [VHost, Description, Tags, undefined, Trace, ActingUser], + ?assertMatch(ok, + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, put_vhost, PutVhostArgs1)), + + V = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, lookup, [VHost]), + ?assertMatch(#{default_queue_type := DefaultQueueType}, vhost:get_metadata(V)) + after + rabbit_ct_broker_helpers:delete_vhost(Config, VHost) + end. 
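The `vhost_update_default_queue_type_undefined` test just above calls `rabbit_vhost:put_vhost/6` a second time with `undefined` as the default queue type and asserts that the previously configured value survives. A minimal sketch of that kind of merge rule, under the assumption that vhost metadata is a plain map with a `default_queue_type` key (this is not the actual `rabbit_vhost` code):

```erlang
%% Sketch only: 'undefined' means "leave the stored default queue type alone",
%% any other atom overwrites it. The metadata-as-map layout is an assumption.
-module(vhost_metadata_merge_sketch).
-export([merge_default_queue_type/2]).

merge_default_queue_type(Metadata, undefined) when is_map(Metadata) ->
    Metadata;
merge_default_queue_type(Metadata, NewType) when is_map(Metadata), is_atom(NewType) ->
    Metadata#{default_queue_type => NewType}.
```

With this rule, `merge_default_queue_type(#{default_queue_type => quorum}, undefined)` returns the map unchanged, which is exactly what the `?assertMatch` on `vhost:get_metadata(V)` checks after the second `put_vhost` call.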
+ +vhost_deletion(Config) -> + VHost = <<"deletion-vhost">>, + ActingUser = <<"acting-user">>, + Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + + set_up_vhost(Config, VHost), + + Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VHost), + {ok, Chan} = amqp_connection:open_channel(Conn), + + %% Declare some resources under the vhost. These should be deleted when the + %% vhost is deleted. + QName = <<"vhost-deletion-queue">>, + #'queue.declare_ok'{} = amqp_channel:call( + Chan, #'queue.declare'{queue = QName, durable = true}), + XName = <<"vhost-deletion-exchange">>, + #'exchange.declare_ok'{} = amqp_channel:call( + Chan, + #'exchange.declare'{exchange = XName, + durable = true, + type = <<"direct">>}), + RoutingKey = QName, + #'queue.bind_ok'{} = amqp_channel:call( + Chan, + #'queue.bind'{exchange = XName, + queue = QName, + routing_key = RoutingKey}), + PolicyName = <<"ttl-policy">>, + rabbit_ct_broker_helpers:set_policy_in_vhost( + Config, Node, VHost, + PolicyName, <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 20}], + ActingUser), + + % Load the dummy event handler module on the node. + ok = rabbit_ct_broker_helpers:rpc(Config, Node, test_rabbit_event_handler, okay, []), + ok = rabbit_ct_broker_helpers:rpc(Config, Node, gen_event, add_handler, + [rabbit_event, test_rabbit_event_handler, []]), + try + rabbit_ct_broker_helpers:delete_vhost(Config, VHost), + + Events0 = rabbit_ct_broker_helpers:rpc(Config, Node, + gen_event, call, + [rabbit_event, test_rabbit_event_handler, events, 1000]), + ct:pal( + ?LOW_IMPORTANCE, + "Events emitted during deletion: ~p", [lists:reverse(Events0)]), + + %% Reorganize the event props into maps for easier matching. + Events = [{Type, maps:from_list(Props)} || + #event{type = Type, props = Props} <- Events0], + + ?assertMatch(#{user := <<"guest">>, vhost := VHost}, + proplists:get_value(permission_deleted, Events)), + + ?assertMatch(#{source_name := XName, + source_kind := exchange, + destination_name := QName, + destination_kind := queue, + routing_key := RoutingKey, + vhost := VHost}, + proplists:get_value(binding_deleted, Events)), + + ?assertMatch(#{name := #resource{name = QName, + kind = queue, + virtual_host = VHost}}, + proplists:get_value(queue_deleted, Events)), + + ?assertEqual( + lists:sort([<<>>, <<"amq.direct">>, <<"amq.fanout">>, <<"amq.headers">>, + <<"amq.match">>, <<"amq.rabbitmq.trace">>, <<"amq.topic">>, + <<"vhost-deletion-exchange">>]), + lists:sort(lists:filtermap( + fun ({exchange_deleted, + #{name := #resource{name = Name}}}) -> + {true, Name}; + (_Event) -> + false + end, Events))), + + ?assertMatch( + {value, {parameter_cleared, #{name := <<"limits">>, + vhost := VHost}}}, + lists:search( + fun ({parameter_cleared, #{component := <<"vhost-limits">>}}) -> + true; + (_Event) -> + false + end, Events)), + ?assertMatch(#{name := <<"limits">>, vhost := VHost}, + proplists:get_value(vhost_limits_cleared, Events)), + ?assertMatch(#{name := PolicyName, vhost := VHost}, + proplists:get_value(policy_cleared, Events)), + + ?assertMatch(#{name := VHost, + user_who_performed_action := ActingUser}, + proplists:get_value(vhost_deleted, Events)), + ?assertMatch(#{name := VHost, + node := Node, + user_who_performed_action := ?INTERNAL_USER}, + proplists:get_value(vhost_down, Events)), + + ?assert(proplists:is_defined(channel_closed, Events)), + ?assert(proplists:is_defined(connection_closed, Events)), + + %% VHost deletion is not idempotent - we return an error - but deleting + %% the same vhost 
again should not cause any more resources to be + %% deleted. So we should see no new events in the `rabbit_event' + %% handler. + ?assertEqual( + {error, {no_such_vhost, VHost}}, + rabbit_ct_broker_helpers:delete_vhost(Config, VHost)), + ?assertEqual( + Events0, + rabbit_ct_broker_helpers:rpc( + Config, Node, + gen_event, call, + [rabbit_event, test_rabbit_event_handler, events, 1000])) + after + rabbit_ct_broker_helpers:rpc(Config, Node, + gen_event, delete_handler, [rabbit_event, test_rabbit_event_handler, []]) + end. + vhost_is_created_with_default_limits(Config) -> VHost = <<"vhost1">>, Limits = [{<<"max-connections">>, 10}, {<<"max-queues">>, 1}], @@ -371,9 +456,15 @@ vhost_is_created_with_default_limits(Config) -> Env = [{vhosts, [{<<"id">>, Limits++Pattern}]}], ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbit, default_limits, Env])), - ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost)), - ?assertEqual(Limits, rabbit_ct_broker_helpers:rpc(Config, 0, - rabbit_vhost_limit, list, [VHost])). + try + ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost)), + ?assertEqual(Limits, rabbit_ct_broker_helpers:rpc(Config, 0, + rabbit_vhost_limit, list, [VHost])) + after + rabbit_ct_broker_helpers:rpc( + Config, 0, + application, unset_env, [rabbit, default_limits]) + end. vhost_is_created_with_operator_policies(Config) -> VHost = <<"vhost1">>, @@ -382,9 +473,15 @@ vhost_is_created_with_operator_policies(Config) -> Env = [{operator, [{PolicyName, Definition}]}], ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbit, default_policies, Env])), - ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost)), - ?assertNotEqual(not_found, rabbit_ct_broker_helpers:rpc(Config, 0, - rabbit_policy, lookup_op, [VHost, PolicyName])). + try + ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost)), + ?assertNotEqual(not_found, rabbit_ct_broker_helpers:rpc(Config, 0, + rabbit_policy, lookup_op, [VHost, PolicyName])) + after + rabbit_ct_broker_helpers:rpc( + Config, 0, + application, unset_env, [rabbit, default_policies]) + end. vhost_is_created_with_default_user(Config) -> VHost = <<"vhost1">>, @@ -405,17 +502,11 @@ vhost_is_created_with_default_user(Config) -> ct:pal("WANT: ~p", [WantPermissions]), ?assertEqual(WantPermissions, rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, list_user_permissions, [Username])), - HaveUser = lists:search( - fun (U) -> - case proplists:get_value(user, U) of - Username -> true; - undefined -> false - end - end, + ?assertEqual(true, lists:member( + WantUser, rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, list_users, []) - ), - ?assertEqual({value, WantUser}, HaveUser), + )), ?assertMatch({ok, _}, rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, user_login_authentication, [Username, [{password, list_to_binary(Pwd)}]])), ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, 0, diff --git a/deps/rabbit_common/.gitignore b/deps/rabbit_common/.gitignore deleted file mode 100644 index f609631433e1..000000000000 --- a/deps/rabbit_common/.gitignore +++ /dev/null @@ -1,31 +0,0 @@ -*~ -.sw? -.*.sw? -.*.plt -*.beam -*.coverdata -/.*.plt -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/git-revisions.txt -/logs/ -/plugins/ -/plugins.lock -/rebar.config -/rebar.lock -/sbin/ -/sbin.lock -/test/ct.cover.spec -/xrefr - -/rabbit_common.d - -# Generated source files. 
-/include/rabbit_framing.hrl -/src/rabbit_framing_amqp_0_8.erl -/src/rabbit_framing_amqp_0_9_1.erl diff --git a/deps/rabbit_common/BUILD.bazel b/deps/rabbit_common/BUILD.bazel index 687aedc6a81c..df5f2add5ada 100644 --- a/deps/rabbit_common/BUILD.bazel +++ b/deps/rabbit_common/BUILD.bazel @@ -1,3 +1,4 @@ +load("@aspect_bazel_lib//lib:write_source_files.bzl", "write_source_files") load("@rules_erlang//:eunit2.bzl", "eunit") load("@rules_erlang//:xref2.bzl", "xref") load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") @@ -28,38 +29,50 @@ py_binary( ) genrule( - name = "generated_headers", + name = "rabbit_framing", srcs = [ "//deps/rabbitmq_codegen:amqp-rabbitmq-0.9.1.json", "//deps/rabbitmq_codegen:credit_extension.json", "//deps/rabbitmq_codegen:amqp-rabbitmq-0.8.json", ], - outs = ["include/rabbit_framing.hrl"], + outs = ["gen/rabbit_framing.hrl"], cmd = "$(location :codegen) --ignore-conflicts header $(SRCS) $@", + tags = ["manual"], tools = [":codegen"], ) genrule( - name = "rabbit_framing_amqp_0_9_1.erl", + name = "rabbit_framing_amqp_0_9_1", srcs = [ "//deps/rabbitmq_codegen:amqp-rabbitmq-0.9.1.json", "//deps/rabbitmq_codegen:credit_extension.json", ], - outs = ["src/rabbit_framing_amqp_0_9_1.erl"], + outs = ["gen/rabbit_framing_amqp_0_9_1.erl"], cmd = "$(location :codegen) body $(SRCS) $@", + tags = ["manual"], tools = [":codegen"], ) genrule( - name = "rabbit_framing_amqp_0_8.erl", + name = "rabbit_framing_amqp_0_8", srcs = [ "//deps/rabbitmq_codegen:amqp-rabbitmq-0.8.json", ], - outs = ["src/rabbit_framing_amqp_0_8.erl"], + outs = ["gen/rabbit_framing_amqp_0_8.erl"], cmd = "$(location :codegen) body $(SRCS) $@", + tags = ["manual"], tools = [":codegen"], ) +write_source_files( + name = "write_framing_sources", + files = { + "include/rabbit_framing.hrl": ":rabbit_framing", + "src/rabbit_framing_amqp_0_8.erl": ":rabbit_framing_amqp_0_8", + "src/rabbit_framing_amqp_0_9_1.erl": ":rabbit_framing_amqp_0_9_1", + }, +) + APP_EXTRA_KEYS = """ %% Hex.pm package informations. {licenses, ["MPL-2.0"]}, diff --git a/deps/rabbit_common/Makefile b/deps/rabbit_common/Makefile index a747135a3c67..f4a56200f693 100644 --- a/deps/rabbit_common/Makefile +++ b/deps/rabbit_common/Makefile @@ -25,8 +25,8 @@ define HEX_TARBALL_EXTRA_METADATA } endef -LOCAL_DEPS = compiler crypto public_key sasl ssl syntax_tools tools xmerl -DEPS = thoas recon credentials_obfuscation +LOCAL_DEPS = compiler crypto public_key sasl ssl syntax_tools tools xmerl os_mon runtime_tools +DEPS = thoas ranch recon credentials_obfuscation # Variables and recipes in development.*.mk are meant to be used from # any Git clone. They are excluded from the files published to Hex.pm. 
@@ -56,7 +56,6 @@ HEX_TARBALL_FILES += rabbitmq-components.mk \ mk/rabbitmq-dist.mk \ mk/rabbitmq-early-test.mk \ mk/rabbitmq-hexpm.mk \ - mk/rabbitmq-macros.mk \ mk/rabbitmq-test.mk \ mk/rabbitmq-tools.mk diff --git a/deps/rabbit_common/app.bzl b/deps/rabbit_common/app.bzl index ac112ef6043c..66bd9371fdb4 100644 --- a/deps/rabbit_common/app.bzl +++ b/deps/rabbit_common/app.bzl @@ -29,7 +29,6 @@ def all_beam_files(name = "all_beam_files"): "src/delegate.erl", "src/delegate_sup.erl", "src/file_handle_cache.erl", - "src/file_handle_cache_stats.erl", "src/mirrored_supervisor_locks.erl", "src/mnesia_sync.erl", "src/pmon.erl", @@ -59,7 +58,6 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_json.erl", "src/rabbit_log.erl", "src/rabbit_misc.erl", - "src/rabbit_msg_store_index.erl", "src/rabbit_net.erl", "src/rabbit_nodes_common.erl", "src/rabbit_numerical.erl", @@ -73,6 +71,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_queue_collector.erl", "src/rabbit_registry.erl", "src/rabbit_resource_monitor_misc.erl", + "src/rabbit_routing_parser.erl", "src/rabbit_runtime.erl", "src/rabbit_runtime_parameter.erl", "src/rabbit_semver.erl", @@ -124,7 +123,6 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/delegate.erl", "src/delegate_sup.erl", "src/file_handle_cache.erl", - "src/file_handle_cache_stats.erl", "src/mirrored_supervisor_locks.erl", "src/mnesia_sync.erl", "src/pmon.erl", @@ -154,7 +152,6 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_json.erl", "src/rabbit_log.erl", "src/rabbit_misc.erl", - "src/rabbit_msg_store_index.erl", "src/rabbit_net.erl", "src/rabbit_nodes_common.erl", "src/rabbit_numerical.erl", @@ -168,6 +165,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_queue_collector.erl", "src/rabbit_registry.erl", "src/rabbit_resource_monitor_misc.erl", + "src/rabbit_routing_parser.erl", "src/rabbit_runtime.erl", "src/rabbit_runtime_parameter.erl", "src/rabbit_semver.erl", @@ -211,7 +209,6 @@ def all_srcs(name = "all_srcs"): "src/delegate.erl", "src/delegate_sup.erl", "src/file_handle_cache.erl", - "src/file_handle_cache_stats.erl", "src/gen_server2.erl", "src/mirrored_supervisor_locks.erl", "src/mnesia_sync.erl", @@ -244,7 +241,6 @@ def all_srcs(name = "all_srcs"): "src/rabbit_json.erl", "src/rabbit_log.erl", "src/rabbit_misc.erl", - "src/rabbit_msg_store_index.erl", "src/rabbit_net.erl", "src/rabbit_nodes_common.erl", "src/rabbit_numerical.erl", @@ -260,6 +256,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_registry.erl", "src/rabbit_registry_class.erl", "src/rabbit_resource_monitor_misc.erl", + "src/rabbit_routing_parser.erl", "src/rabbit_runtime.erl", "src/rabbit_runtime_parameter.erl", "src/rabbit_semver.erl", @@ -283,7 +280,6 @@ def all_srcs(name = "all_srcs"): "include/rabbit_framing.hrl", "include/rabbit_memory.hrl", "include/rabbit_misc.hrl", - "include/rabbit_msg_store.hrl", "include/resource.hrl", ], ) diff --git a/deps/rabbit_common/codegen.py b/deps/rabbit_common/codegen.py index 2e7bad69e915..8d7c539d83ec 100755 --- a/deps/rabbit_common/codegen.py +++ b/deps/rabbit_common/codegen.py @@ -4,7 +4,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
## from __future__ import nested_scopes @@ -95,7 +95,7 @@ def printFileHeader(): %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %%""") def genErl(spec): diff --git a/deps/rabbit_common/include/rabbit.hrl b/deps/rabbit_common/include/rabbit.hrl index 443283a37f30..a3837106787a 100644 --- a/deps/rabbit_common/include/rabbit.hrl +++ b/deps/rabbit_common/include/rabbit.hrl @@ -2,13 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -include("resource.hrl"). %% Passed around most places --record(user, {username, +-record(user, {username :: rabbit_types:option(rabbit_types:username()), tags, authz_backends}). %% List of {Module, AuthUserImpl} pairs @@ -210,7 +210,7 @@ }). %%---------------------------------------------------------------------------- --define(COPYRIGHT_MESSAGE, "Copyright (c) 2007-2023 VMware, Inc. or its affiliates."). +-define(COPYRIGHT_MESSAGE, "Copyright (c) 2007-2024 Broadcom Inc and/or its subsidiaries"). -define(INFORMATION_MESSAGE, "Licensed under the MPL 2.0. Website: https://rabbitmq.com"). %% EMPTY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1 @@ -254,7 +254,7 @@ %% Max message size is hard limited to 512 MiB. %% If user configures a greater rabbit.max_message_size, %% this value is used instead. --define(MAX_MSG_SIZE, 536870912). +-define(MAX_MSG_SIZE, 536_870_912). -define(store_proc_name(N), rabbit_misc:store_proc_name(?MODULE, N)). diff --git a/deps/rabbit_common/include/rabbit_core_metrics.hrl b/deps/rabbit_common/include/rabbit_core_metrics.hrl index 47111701434c..d0d189139eb8 100644 --- a/deps/rabbit_common/include/rabbit_core_metrics.hrl +++ b/deps/rabbit_common/include/rabbit_core_metrics.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% These tables contain the raw metrics as stored by RabbitMQ core @@ -28,6 +28,14 @@ {auth_attempt_metrics, set}, {auth_attempt_detailed_metrics, set}]). +% `CORE_NON_CHANNEL_TABLES` are tables that store counters representing the +% same info as some of the channel_queue_metrics, channel_exchange_metrics and +% channel_queue_exchange_metrics but without including the channel ID in the +% key. +-define(CORE_NON_CHANNEL_TABLES, [{queue_delivery_metrics, set}, + {exchange_metrics, set}, + {queue_exchange_metrics, set}]). + -define(CONNECTION_CHURN_METRICS, {node(), 0, 0, 0, 0, 0, 0, 0}). 
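The new `?CORE_NON_CHANNEL_TABLES` macro above names ETS tables that hold the same counters as the `channel_*_metrics` tables but keyed without the channel. A hedged illustration of what a write to such a table could look like; the `{Key, CountersMap}` layout is assumed for this sketch and is not necessarily what `rabbit_core_metrics` actually uses:

```erlang
%% Illustration only: bump a publish counter in a table keyed by the exchange
%% resource alone (no channel pid), per the ?CORE_NON_CHANNEL_TABLES comment.
bump_exchange_publish(XResource, N) ->
    case ets:lookup(exchange_metrics, XResource) of
        [] ->
            ets:insert(exchange_metrics, {XResource, #{publish => N}});
        [{XResource, Counters}] ->
            Publish = maps:get(publish, Counters, 0) + N,
            ets:insert(exchange_metrics, {XResource, Counters#{publish => Publish}})
    end,
    ok.
```

Because the key no longer contains the channel, the counters survive channel churn, which is presumably the point of keeping these channel-agnostic tables alongside the per-channel ones.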
%% connection_created :: {connection_id, proplist} diff --git a/deps/rabbit_common/include/rabbit_framing.hrl b/deps/rabbit_common/include/rabbit_framing.hrl new file mode 100644 index 000000000000..6ce1096b1438 --- /dev/null +++ b/deps/rabbit_common/include/rabbit_framing.hrl @@ -0,0 +1,161 @@ +%% Autogenerated code. Do not edit. +%% +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-define(PROTOCOL_PORT, 5672). +-define(FRAME_METHOD, 1). +-define(FRAME_HEADER, 2). +-define(FRAME_BODY, 3). +-define(FRAME_HEARTBEAT, 8). +-define(FRAME_MIN_SIZE, 4096). +-define(FRAME_END, 206). +-define(REPLY_SUCCESS, 200). +-define(CONTENT_TOO_LARGE, 311). +-define(NO_ROUTE, 312). +-define(NO_CONSUMERS, 313). +-define(ACCESS_REFUSED, 403). +-define(NOT_FOUND, 404). +-define(RESOURCE_LOCKED, 405). +-define(PRECONDITION_FAILED, 406). +-define(CONNECTION_FORCED, 320). +-define(INVALID_PATH, 402). +-define(FRAME_ERROR, 501). +-define(SYNTAX_ERROR, 502). +-define(COMMAND_INVALID, 503). +-define(CHANNEL_ERROR, 504). +-define(UNEXPECTED_FRAME, 505). +-define(RESOURCE_ERROR, 506). +-define(NOT_ALLOWED, 530). +-define(NOT_IMPLEMENTED, 540). +-define(INTERNAL_ERROR, 541). +-define(FRAME_OOB_METHOD, 4). +-define(FRAME_OOB_HEADER, 5). +-define(FRAME_OOB_BODY, 6). +-define(FRAME_TRACE, 7). +-define(NOT_DELIVERED, 310). +%% Method field records. +-record('basic.qos', {prefetch_size = 0, prefetch_count = 0, global = false}). +-record('basic.qos_ok', {}). +-record('basic.consume', {ticket = 0, queue = <<"">>, consumer_tag = <<"">>, no_local = false, no_ack = false, exclusive = false, nowait = false, arguments = []}). +-record('basic.consume_ok', {consumer_tag}). +-record('basic.cancel', {consumer_tag, nowait = false}). +-record('basic.cancel_ok', {consumer_tag}). +-record('basic.publish', {ticket = 0, exchange = <<"">>, routing_key = <<"">>, mandatory = false, immediate = false}). +-record('basic.return', {reply_code, reply_text = <<"">>, exchange, routing_key}). +-record('basic.deliver', {consumer_tag, delivery_tag, redelivered = false, exchange, routing_key}). +-record('basic.get', {ticket = 0, queue = <<"">>, no_ack = false}). +-record('basic.get_ok', {delivery_tag, redelivered = false, exchange, routing_key, message_count}). +-record('basic.get_empty', {cluster_id = <<"">>}). +-record('basic.ack', {delivery_tag = 0, multiple = false}). +-record('basic.reject', {delivery_tag, requeue = true}). +-record('basic.recover_async', {requeue = false}). +-record('basic.recover', {requeue = false}). +-record('basic.recover_ok', {}). +-record('basic.nack', {delivery_tag = 0, multiple = false, requeue = true}). +-record('basic.credit', {consumer_tag = <<"">>, credit, drain}). +-record('basic.credit_ok', {available}). +-record('basic.credit_drained', {consumer_tag = <<"">>, credit_drained}). +-record('connection.start', {version_major = 0, version_minor = 9, server_properties, mechanisms = <<"PLAIN">>, locales = <<"en_US">>}). +-record('connection.start_ok', {client_properties, mechanism = <<"PLAIN">>, response, locale = <<"en_US">>}). +-record('connection.secure', {challenge}). +-record('connection.secure_ok', {response}). +-record('connection.tune', {channel_max = 0, frame_max = 0, heartbeat = 0}). 
+-record('connection.tune_ok', {channel_max = 0, frame_max = 0, heartbeat = 0}). +-record('connection.open', {virtual_host = <<"/">>, capabilities = <<"">>, insist = false}). +-record('connection.open_ok', {known_hosts = <<"">>}). +-record('connection.close', {reply_code, reply_text = <<"">>, class_id, method_id}). +-record('connection.close_ok', {}). +-record('connection.blocked', {reason = <<"">>}). +-record('connection.unblocked', {}). +-record('connection.update_secret', {new_secret, reason}). +-record('connection.update_secret_ok', {}). +-record('connection.redirect', {host, known_hosts = <<"">>}). +-record('channel.open', {out_of_band = <<"">>}). +-record('channel.open_ok', {channel_id = <<"">>}). +-record('channel.flow', {active}). +-record('channel.flow_ok', {active}). +-record('channel.close', {reply_code, reply_text = <<"">>, class_id, method_id}). +-record('channel.close_ok', {}). +-record('channel.alert', {reply_code, reply_text = <<"">>, details = []}). +-record('access.request', {realm = <<"/data">>, exclusive = false, passive = true, active = true, write = true, read = true}). +-record('access.request_ok', {ticket = 1}). +-record('exchange.declare', {ticket = 0, exchange, type = <<"direct">>, passive = false, durable = false, auto_delete = false, internal = false, nowait = false, arguments = []}). +-record('exchange.declare_ok', {}). +-record('exchange.delete', {ticket = 0, exchange, if_unused = false, nowait = false}). +-record('exchange.delete_ok', {}). +-record('exchange.bind', {ticket = 0, destination, source, routing_key = <<"">>, nowait = false, arguments = []}). +-record('exchange.bind_ok', {}). +-record('exchange.unbind', {ticket = 0, destination, source, routing_key = <<"">>, nowait = false, arguments = []}). +-record('exchange.unbind_ok', {}). +-record('queue.declare', {ticket = 0, queue = <<"">>, passive = false, durable = false, exclusive = false, auto_delete = false, nowait = false, arguments = []}). +-record('queue.declare_ok', {queue, message_count, consumer_count}). +-record('queue.bind', {ticket = 0, queue = <<"">>, exchange, routing_key = <<"">>, nowait = false, arguments = []}). +-record('queue.bind_ok', {}). +-record('queue.purge', {ticket = 0, queue = <<"">>, nowait = false}). +-record('queue.purge_ok', {message_count}). +-record('queue.delete', {ticket = 0, queue = <<"">>, if_unused = false, if_empty = false, nowait = false}). +-record('queue.delete_ok', {message_count}). +-record('queue.unbind', {ticket = 0, queue = <<"">>, exchange, routing_key = <<"">>, arguments = []}). +-record('queue.unbind_ok', {}). +-record('tx.select', {}). +-record('tx.select_ok', {}). +-record('tx.commit', {}). +-record('tx.commit_ok', {}). +-record('tx.rollback', {}). +-record('tx.rollback_ok', {}). +-record('confirm.select', {nowait = false}). +-record('confirm.select_ok', {}). +-record('file.qos', {prefetch_size = 0, prefetch_count = 0, global = false}). +-record('file.qos_ok', {}). +-record('file.consume', {ticket = 1, queue = <<"">>, consumer_tag = <<"">>, no_local = false, no_ack = false, exclusive = false, nowait = false}). +-record('file.consume_ok', {consumer_tag}). +-record('file.cancel', {consumer_tag, nowait = false}). +-record('file.cancel_ok', {consumer_tag}). +-record('file.open', {identifier, content_size}). +-record('file.open_ok', {staged_size}). +-record('file.stage', {}). +-record('file.publish', {ticket = 1, exchange = <<"">>, routing_key = <<"">>, mandatory = false, immediate = false, identifier}). 
+-record('file.return', {reply_code = 200, reply_text = <<"">>, exchange, routing_key}). +-record('file.deliver', {consumer_tag, delivery_tag, redelivered = false, exchange, routing_key, identifier}). +-record('file.ack', {delivery_tag = 0, multiple = false}). +-record('file.reject', {delivery_tag, requeue = true}). +-record('stream.qos', {prefetch_size = 0, prefetch_count = 0, consume_rate = 0, global = false}). +-record('stream.qos_ok', {}). +-record('stream.consume', {ticket = 1, queue = <<"">>, consumer_tag = <<"">>, no_local = false, exclusive = false, nowait = false}). +-record('stream.consume_ok', {consumer_tag}). +-record('stream.cancel', {consumer_tag, nowait = false}). +-record('stream.cancel_ok', {consumer_tag}). +-record('stream.publish', {ticket = 1, exchange = <<"">>, routing_key = <<"">>, mandatory = false, immediate = false}). +-record('stream.return', {reply_code = 200, reply_text = <<"">>, exchange, routing_key}). +-record('stream.deliver', {consumer_tag, delivery_tag, exchange, queue}). +-record('dtx.select', {}). +-record('dtx.select_ok', {}). +-record('dtx.start', {dtx_identifier}). +-record('dtx.start_ok', {}). +-record('tunnel.request', {meta_data}). +-record('test.integer', {integer_1, integer_2, integer_3, integer_4, operation}). +-record('test.integer_ok', {result}). +-record('test.string', {string_1, string_2, operation}). +-record('test.string_ok', {result}). +-record('test.table', {table, integer_op, string_op}). +-record('test.table_ok', {integer_result, string_result}). +-record('test.content', {}). +-record('test.content_ok', {content_checksum}). +%% Class property records. +-record('P_basic', {content_type, content_encoding, headers, delivery_mode, priority, correlation_id, reply_to, expiration, message_id, timestamp, type, user_id, app_id, cluster_id}). +-record('P_connection', {}). +-record('P_channel', {}). +-record('P_access', {}). +-record('P_exchange', {}). +-record('P_queue', {}). +-record('P_tx', {}). +-record('P_confirm', {}). +-record('P_file', {content_type, content_encoding, headers, priority, reply_to, message_id, filename, timestamp, cluster_id}). +-record('P_stream', {content_type, content_encoding, headers, priority, timestamp}). +-record('P_dtx', {}). +-record('P_tunnel', {headers, proxy_name, data_name, durable, broadcast}). +-record('P_test', {}). diff --git a/deps/rabbit_common/include/rabbit_memory.hrl b/deps/rabbit_common/include/rabbit_memory.hrl index a9a14f5e5e76..ccc2974b9c44 100644 --- a/deps/rabbit_common/include/rabbit_memory.hrl +++ b/deps/rabbit_common/include/rabbit_memory.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(DEFAULT_MEMORY_CHECK_INTERVAL, 1000). diff --git a/deps/rabbit_common/include/rabbit_misc.hrl b/deps/rabbit_common/include/rabbit_misc.hrl index dfc0708087c3..20dbcfd0b4d3 100644 --- a/deps/rabbit_common/include/rabbit_misc.hrl +++ b/deps/rabbit_common/include/rabbit_misc.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(RPC_TIMEOUT, 15000). diff --git a/deps/rabbit_common/include/rabbit_msg_store.hrl b/deps/rabbit_common/include/rabbit_msg_store.hrl deleted file mode 100644 index 854b71819c01..000000000000 --- a/deps/rabbit_common/include/rabbit_msg_store.hrl +++ /dev/null @@ -1,12 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --include("rabbit.hrl"). - --type(msg() :: any()). - --record(msg_location, {msg_id, ref_count, file, offset, total_size}). diff --git a/deps/rabbit_common/include/resource.hrl b/deps/rabbit_common/include/resource.hrl index 15e10ae7e161..b5e1fd3e88f5 100644 --- a/deps/rabbit_common/include/resource.hrl +++ b/deps/rabbit_common/include/resource.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -record(resource, { diff --git a/deps/rabbit_common/mk/rabbitmq-build.mk b/deps/rabbit_common/mk/rabbitmq-build.mk index 9e17a5badca8..010045f5c37a 100644 --- a/deps/rabbit_common/mk/rabbitmq-build.mk +++ b/deps/rabbit_common/mk/rabbitmq-build.mk @@ -2,10 +2,6 @@ # Compiler flags. # -------------------------------------------------------------------- -ifeq ($(filter rabbitmq-macros.mk,$(notdir $(MAKEFILE_LIST))),) -include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-macros.mk -endif - # NOTE: This plugin is loaded twice because Erlang.mk recurses. That's # why ERL_LIBS may contain twice the path to Elixir libraries or # ERLC_OPTS may contain duplicated flags. diff --git a/deps/rabbit_common/mk/rabbitmq-dist.mk b/deps/rabbit_common/mk/rabbitmq-dist.mk index 35b0dda529b3..1d0254452fec 100644 --- a/deps/rabbit_common/mk/rabbitmq-dist.mk +++ b/deps/rabbit_common/mk/rabbitmq-dist.mk @@ -134,14 +134,10 @@ ZIP_V = $(ZIP_V_$(V)) $(ERLANGMK_DIST_EZS): $(verbose) rm -rf $(EZ_DIR) $(EZ) $(verbose) mkdir -p $(EZ_DIR) - $(dist_verbose) $(RSYNC) -a $(RSYNC_V) \ - --exclude '/ebin/dep_built' \ - --exclude '/ebin/test' \ - --include '/ebin/***' \ - --include '/include/***' \ - --include '/priv/***' \ - --exclude '*' \ - $(call core_unix_path,$(SRC_DIR))/ $(call core_unix_path,$(EZ_DIR))/ + $(eval SRC_DIR_UNIX := $(call core_unix_path,$(SRC_DIR))) + $(eval EZ_DIR_UNIX := $(call core_unix_path,$(EZ_DIR))) + $(dist_verbose) cp -a $(SRC_DIR_UNIX)/ebin $(wildcard $(SRC_DIR_UNIX)/include) $(wildcard $(SRC_DIR_UNIX)/priv) $(EZ_DIR_UNIX)/ + $(verbose) rm -f $(EZ_DIR_UNIX)/ebin/dep_built $(EZ_DIR_UNIX)/ebin/test @# Give a chance to the application to make any modification it @# wants to the tree before we make an archive. 
ifneq ($(RABBITMQ_COMPONENTS),) @@ -214,77 +210,32 @@ do-dist:: $(DIST_EZS) CLI_SCRIPTS_LOCK = $(CLI_SCRIPTS_DIR).lock CLI_ESCRIPTS_LOCK = $(CLI_ESCRIPTS_DIR).lock +ifeq ($(MAKELEVEL),0) ifneq ($(filter-out rabbit_common amqp10_common rabbitmq_stream_common,$(PROJECT)),) -dist:: install-cli +app:: install-cli test-build:: install-cli endif +endif install-cli: install-cli-scripts install-cli-escripts @: -ifeq ($(PROJECT),rabbit) -install-cli-scripts: - $(gen_verbose) \ - if command -v flock >/dev/null; then \ - flock $(CLI_SCRIPTS_LOCK) \ - sh -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \ - for file in scripts/*; do \ - cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \ - cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \ - done'; \ - elif command -v lockf >/dev/null; then \ - lockf $(CLI_SCRIPTS_LOCK) \ - sh -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \ - for file in scripts/*; do \ - cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \ - cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \ - done'; \ - else \ - mkdir -p "$(CLI_SCRIPTS_DIR)" && \ - for file in scripts/*; do \ - cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \ - cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \ - done; \ - fi -else - install-cli-scripts: $(gen_verbose) \ set -e; \ - if test -d "$(DEPS_DIR)/rabbit/scripts"; then \ - rabbit_scripts_dir='$(DEPS_DIR)/rabbit/scripts'; \ - elif test -d "$(DEPS_DIR)/../scripts"; then \ - rabbit_scripts_dir='$(DEPS_DIR)/../scripts'; \ - else \ - echo 'rabbit/scripts directory not found' 1>&2; \ - exit 1; \ - fi; \ - test -d "$$rabbit_scripts_dir"; \ + test -d "$(DEPS_DIR)/rabbit/scripts"; \ if command -v flock >/dev/null; then \ flock $(CLI_SCRIPTS_LOCK) \ sh -e -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \ - for file in "'$$rabbit_scripts_dir'"/*; do \ - test -f "$$file"; \ - cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \ - cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \ - done'; \ + cp -a $(DEPS_DIR)/rabbit/scripts/* $(CLI_SCRIPTS_DIR)/'; \ elif command -v lockf >/dev/null; then \ lockf $(CLI_SCRIPTS_LOCK) \ sh -e -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \ - for file in "'$$rabbit_scripts_dir'"/*; do \ - test -f "$$file"; \ - cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \ - cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \ - done'; \ + cp -a $(DEPS_DIR)/rabbit/scripts/* $(CLI_SCRIPTS_DIR)/'; \ else \ mkdir -p "$(CLI_SCRIPTS_DIR)" && \ - for file in "$$rabbit_scripts_dir"/*; do \ - test -f "$$file"; \ - cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \ - cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \ - done; \ + cp -a $(DEPS_DIR)/rabbit/scripts/* $(CLI_SCRIPTS_DIR)/; \ fi -endif install-cli-escripts: $(gen_verbose) \ diff --git a/deps/rabbit_common/mk/rabbitmq-early-test.mk b/deps/rabbit_common/mk/rabbitmq-early-test.mk index 4bdd20b0c4d6..3779bd4a2fe7 100644 --- a/deps/rabbit_common/mk/rabbitmq-early-test.mk +++ b/deps/rabbit_common/mk/rabbitmq-early-test.mk @@ -1,20 +1,10 @@ -# -------------------------------------------------------------------- -# xref -# -------------------------------------------------------------------- - -ifeq ($(filter distclean distclean-xref,$(MAKECMDGOALS)),) -ifneq ($(PROJECT),rabbit_common) -XREFR := $(DEPS_DIR)/rabbit_common/mk/xrefr -else -XREFR := mk/xrefr -endif -endif - # -------------------------------------------------------------------- # dialyzer # -------------------------------------------------------------------- 
-DIALYZER_OPTS ?= -Werror_handling +DIALYZER_OPTS ?= -Werror_handling -Wunmatched_returns -Wunknown + +dialyze: ERL_LIBS = $(APPS_DIR):$(DEPS_DIR):$(DEPS_DIR)/rabbitmq_cli/_build/dev/lib:$(dir $(shell elixir --eval ":io.format '~s~n', [:code.lib_dir :elixir ]")) # -------------------------------------------------------------------- # %-on-concourse dependencies. @@ -39,7 +29,7 @@ endif CT_OPTS += -hidden -# Enable the following common_test hooks on Travis and Concourse: +# Enable the following common_test hooks on GH and Concourse: # # cth_fail_fast # This hook will make sure the first failure puts an end to the @@ -54,15 +44,8 @@ CT_OPTS += -hidden # from its UI. Furthermore, it displays a graph showing evolution of the # results over time. -ifndef TRAVIS CT_HOOKS ?= cth_styledout TEST_DEPS += cth_styledout -endif - -ifdef TRAVIS -FAIL_FAST = 1 -SKIP_AS_ERROR = 1 -endif ifdef CONCOURSE FAIL_FAST = 1 @@ -81,49 +64,9 @@ dep_cth_styledout = git https://github.com/rabbitmq/cth_styledout.git master CT_HOOKS_PARAM_VALUE = $(patsubst %,and %,$(CT_HOOKS)) CT_OPTS += -ct_hooks $(wordlist 2,$(words $(CT_HOOKS_PARAM_VALUE)),$(CT_HOOKS_PARAM_VALUE)) -# Disable most messages on Travis because it might exceed the limit -# set by Travis. -# -# CAUTION: All arguments after -erl_args are passed to the emulator and -# common_test doesn't interpret them! Therefore, all common_test flags -# *MUST* appear before. - -CT_QUIET_FLAGS = -verbosity 50 \ - -erl_args \ - -kernel error_logger silent - -ifdef TRAVIS -CT_OPTS += $(CT_QUIET_FLAGS) -endif - # On CI, set $RABBITMQ_CT_SKIP_AS_ERROR so that any skipped # testsuite/testgroup/testcase is considered an error. ifeq ($(SKIP_AS_ERROR),1) export RABBITMQ_CT_SKIP_AS_ERROR = true endif - -# -------------------------------------------------------------------- -# Looking Glass rules. -# -------------------------------------------------------------------- - -ifneq ("$(RABBITMQ_TRACER)","") -BUILD_DEPS += looking_glass -ERL_LIBS := "$(ERL_LIBS):../looking_glass:../lz4" -export RABBITMQ_TRACER -endif - -define lg_callgrind.erl -lg_callgrind:profile_many("traces.lz4.*", "callgrind.out", #{running => true}), -halt(). -endef - -.PHONY: profile clean-profile - -profile: - $(gen_verbose) $(call erlang,$(call lg_callgrind.erl)) - -clean:: clean-profile - -clean-profile: - $(gen_verbose) rm -f traces.lz4.* callgrind.out.* diff --git a/deps/rabbit_common/mk/rabbitmq-macros.mk b/deps/rabbit_common/mk/rabbitmq-macros.mk deleted file mode 100644 index 048745a7f02c..000000000000 --- a/deps/rabbit_common/mk/rabbitmq-macros.mk +++ /dev/null @@ -1,22 +0,0 @@ -# Macro to compare two x.y.z versions. 
-# -# Usage: -# ifeq ($(call compare_version,$(ERTS_VER),$(MAX_ERTS_VER),<),true) -# # Only evaluated if $(ERTS_VER) < $(MAX_ERTS_VER) -# endif - -define compare_version -$(shell awk 'BEGIN { - split("$(1)", v1, "."); - version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4]; - - split("$(2)", v2, "."); - version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4]; - - if (version1 $(3) version2) { - print "true"; - } else { - print "false"; - } -}') -endef diff --git a/deps/rabbit_common/mk/rabbitmq-run.mk b/deps/rabbit_common/mk/rabbitmq-run.mk index d685d3c008bf..c7c322110897 100644 --- a/deps/rabbit_common/mk/rabbitmq-run.mk +++ b/deps/rabbit_common/mk/rabbitmq-run.mk @@ -25,13 +25,15 @@ ifeq ($(PLATFORM),msys2) RABBITMQ_PLUGINS ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-plugins.bat RABBITMQ_SERVER ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-server.bat RABBITMQCTL ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmqctl.bat +RABBITMQ_UPGRADE ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-upgrade.bat else RABBITMQ_PLUGINS ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-plugins RABBITMQ_SERVER ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-server RABBITMQCTL ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmqctl +RABBITMQ_UPGRADE ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-upgrade endif -export RABBITMQ_SCRIPTS_DIR RABBITMQCTL RABBITMQ_PLUGINS RABBITMQ_SERVER +export RABBITMQ_SCRIPTS_DIR RABBITMQCTL RABBITMQ_PLUGINS RABBITMQ_SERVER RABBITMQ_UPGRADE # We export MAKE to be sure scripts and tests use the proper command. export MAKE @@ -42,18 +44,6 @@ CODEGEN_DIR = $(DEPS_DIR)/rabbitmq_codegen PYTHONPATH = $(CODEGEN_DIR) export PYTHONPATH -ANT ?= ant -ANT_FLAGS += -Dmake.bin=$(MAKE) \ - -DUMBRELLA_AVAILABLE=true \ - -Drabbitmqctl.bin=$(RABBITMQCTL) \ - -Dsibling.codegen.dir=$(CODEGEN_DIR) -ifeq ($(PROJECT),rabbitmq_test) -ANT_FLAGS += -Dsibling.rabbitmq_test.dir=$(CURDIR) -else -ANT_FLAGS += -Dsibling.rabbitmq_test.dir=$(DEPS_DIR)/rabbitmq_test -endif -export ANT ANT_FLAGS - node_tmpdir = $(TEST_TMPDIR)/$(1) node_pid_file = $(call node_tmpdir,$(1))/$(1).pid node_log_base = $(call node_tmpdir,$(1))/log @@ -96,16 +86,15 @@ RABBITMQ_ENABLED_PLUGINS_FILE ?= $(call node_enabled_plugins_file,$(RABBITMQ_NOD RABBITMQ_LOG ?= debug,+color export RABBITMQ_LOG -# erlang.mk adds dependencies' ebin directory to ERL_LIBS. This is -# a sane default, but we prefer to rely on the .ez archives in the -# `plugins` directory so the plugin code is executed. The `plugins` -# directory is added to ERL_LIBS by rabbitmq-env. -DIST_ERL_LIBS = $(patsubst :%,%,$(patsubst %:,%,$(subst :$(APPS_DIR):,:,$(subst :$(DEPS_DIR):,:,:$(ERL_LIBS):)))) - ifdef PLUGINS_FROM_DEPS_DIR -RMQ_PLUGINS_DIR=$(DEPS_DIR) +RMQ_PLUGINS_DIR = $(DEPS_DIR) +DIST_ERL_LIBS = $(ERL_LIBS) else -RMQ_PLUGINS_DIR=$(CURDIR)/$(DIST_DIR) +RMQ_PLUGINS_DIR = $(CURDIR)/$(DIST_DIR) +# We do not want to add apps/ or deps/ to ERL_LIBS +# when running the release from dist. The `plugins` +# directory is added to ERL_LIBS by rabbitmq-env. 
+DIST_ERL_LIBS = $(patsubst :%,%,$(patsubst %:,%,$(subst :$(APPS_DIR):,:,$(subst :$(DEPS_DIR):,:,:$(ERL_LIBS):)))) endif node_plugins_dir = $(if $(RABBITMQ_PLUGINS_DIR),$(RABBITMQ_PLUGINS_DIR),$(if $(EXTRA_PLUGINS_DIR),$(EXTRA_PLUGINS_DIR):$(RMQ_PLUGINS_DIR),$(RMQ_PLUGINS_DIR))) @@ -369,7 +358,18 @@ stop-node: NODES ?= 3 start-brokers start-cluster: $(DIST_TARGET) - @for n in $$(seq $(NODES)); do \ + @if test '$@' = 'start-cluster'; then \ + for n in $$(seq $(NODES)); do \ + nodename="rabbit-$$n@$(HOSTNAME)"; \ + if test "$$nodeslist"; then \ + nodeslist="$$nodeslist,'$$nodename'"; \ + else \ + nodeslist="'$$nodename'"; \ + fi; \ + done; \ + cluster_nodes_arg="-rabbit cluster_nodes [$$nodeslist]"; \ + fi; \ + for n in $$(seq $(NODES)); do \ nodename="rabbit-$$n@$(HOSTNAME)"; \ $(MAKE) start-background-broker \ NOBUILD=1 \ @@ -386,22 +386,10 @@ start-brokers start-cluster: $(DIST_TARGET) -rabbitmq_web_stomp_examples listener [{port,$$((61633 + $$n - 1))}] \ -rabbitmq_prometheus tcp_config [{port,$$((15692 + $$n - 1))}] \ -rabbitmq_stream tcp_listeners [$$((5552 + $$n - 1))] \ + $$cluster_nodes_arg \ " & \ done; \ - wait && \ - for n in $$(seq $(NODES)); do \ - nodename="rabbit-$$n@$(HOSTNAME)"; \ - if test '$@' = 'start-cluster' && test "$$nodename1"; then \ - ERL_LIBS="$(DIST_ERL_LIBS)" \ - $(RABBITMQCTL) -n "$$nodename" stop_app; \ - ERL_LIBS="$(DIST_ERL_LIBS)" \ - $(RABBITMQCTL) -n "$$nodename" join_cluster "$$nodename1"; \ - ERL_LIBS="$(DIST_ERL_LIBS)" \ - $(RABBITMQCTL) -n "$$nodename" start_app; \ - else \ - nodename1=$$nodename; \ - fi; \ - done + wait stop-brokers stop-cluster: @for n in $$(seq $(NODES) -1 1); do \ @@ -411,6 +399,71 @@ stop-brokers stop-cluster: done; \ wait +NODES ?= 3 + +# Rolling restart similar to what the Kubernetes Operator does +restart-cluster: + @for n in $$(seq $(NODES) -1 1); do \ + nodename="rabbit-$$n@$(HOSTNAME)"; \ + $(RABBITMQ_UPGRADE) -n "$$nodename" await_online_quorum_plus_one -t 604800 && \ + $(RABBITMQ_UPGRADE) -n "$$nodename" drain; \ + $(MAKE) stop-node \ + RABBITMQ_NODENAME="$$nodename"; \ + $(MAKE) start-background-broker \ + NOBUILD=1 \ + RABBITMQ_NODENAME="$$nodename" \ + RABBITMQ_NODE_PORT="$$((5672 + $$n - 1))" \ + RABBITMQ_SERVER_START_ARGS=" \ + -rabbit loopback_users [] \ + -rabbitmq_management listener [{port,$$((15672 + $$n - 1))}] \ + -rabbitmq_mqtt tcp_listeners [$$((1883 + $$n - 1))] \ + -rabbitmq_web_mqtt tcp_config [{port,$$((1893 + $$n - 1))}] \ + -rabbitmq_web_mqtt_examples listener [{port,$$((1903 + $$n - 1))}] \ + -rabbitmq_stomp tcp_listeners [$$((61613 + $$n - 1))] \ + -rabbitmq_web_stomp tcp_config [{port,$$((61623 + $$n - 1))}] \ + -rabbitmq_web_stomp_examples listener [{port,$$((61633 + $$n - 1))}] \ + -rabbitmq_prometheus tcp_config [{port,$$((15692 + $$n - 1))}] \ + -rabbitmq_stream tcp_listeners [$$((5552 + $$n - 1))] \ + " & \ + done; \ + wait + +# -------------------------------------------------------------------- +# Code reloading. +# +# For `make run-broker` either do: +# * make RELOAD=1 +# * make all reload-broker (can't do this alongside -j flag) +# * make && make reload-broker (fine with -j flag) +# +# Or if recompiling a specific application: +# * make -C deps/rabbit RELOAD=1 +# +# For `make start-cluster` use the `reload-cluster` target. 
+# Same constraints apply as with `reload-broker`: +# * make all reload-cluster +# * make && make reload-cluster +# -------------------------------------------------------------------- + +reload-broker: + $(exec_verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \ + $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) \ + eval "io:format(\"~p~n\", [c:lm()])." + +ifeq ($(MAKELEVEL),0) +ifdef RELOAD +all:: reload-broker +endif +endif + +reload-cluster: + @for n in $$(seq $(NODES) -1 1); do \ + nodename="rabbit-$$n@$(HOSTNAME)"; \ + $(MAKE) reload-broker \ + RABBITMQ_NODENAME="$$nodename" & \ + done; \ + wait + # -------------------------------------------------------------------- # Used by testsuites. # -------------------------------------------------------------------- diff --git a/deps/rabbit_common/mk/rabbitmq-test.mk b/deps/rabbit_common/mk/rabbitmq-test.mk index 931f072125bb..16cf2dc8f6bc 100644 --- a/deps/rabbit_common/mk/rabbitmq-test.mk +++ b/deps/rabbit_common/mk/rabbitmq-test.mk @@ -3,20 +3,6 @@ ct-slow ct-fast: $(MAKE) ct CT_SUITES='$(CT_SUITES)' -# -------------------------------------------------------------------- -# xref -# -------------------------------------------------------------------- - -# We need the list of dependencies of the current project. We use it in -# xrefr(1) to scan for Elixir-based projects. For those, we need to add -# the path inside `_build` to the xref code path. - -ifneq ($(filter xref,$(MAKECMDGOALS)),) -export ERLANG_MK_RECURSIVE_DEPS_LIST -endif - -xref: $(ERLANG_MK_RECURSIVE_DEPS_LIST) - # -------------------------------------------------------------------- # Helpers to run Make targets on Concourse. # -------------------------------------------------------------------- diff --git a/deps/rabbit_common/mk/rabbitmq-tools.mk b/deps/rabbit_common/mk/rabbitmq-tools.mk index 5ecb39175c9e..0e5ca370a8e4 100644 --- a/deps/rabbit_common/mk/rabbitmq-tools.mk +++ b/deps/rabbit_common/mk/rabbitmq-tools.mk @@ -35,78 +35,6 @@ update-contributor-code-of-conduct: cp $(DEPS_DIR)/rabbit_common/CONTRIBUTING.md $$repo/CONTRIBUTING.md; \ done -ifdef CREDS -define replace_aws_creds - set -e; \ - if test -f "$(CREDS)"; then \ - key_id=$(shell travis encrypt --no-interactive \ - "AWS_ACCESS_KEY_ID=$$(awk '/^rabbitmq-s3-access-key-id/ { print $$2; }' < "$(CREDS)")"); \ - access_key=$(shell travis encrypt --no-interactive \ - "AWS_SECRET_ACCESS_KEY=$$(awk '/^rabbitmq-s3-secret-access-key/ { print $$2; }' < "$(CREDS)")"); \ - mv .travis.yml .travis.yml.orig; \ - awk "\ - /^ global:/ { \ - print; \ - print \" - secure: $$key_id\"; \ - print \" - secure: $$access_key\"; \ - next; \ - } \ - /- secure:/ { next; } \ - { print; }" < .travis.yml.orig > .travis.yml; \ - rm -f .travis.yml.orig; \ - else \ - echo " INFO: CREDS file missing; not setting/updating AWS credentials"; \ - fi -endef -else -define replace_aws_creds - echo " INFO: CREDS not set; not setting/updating AWS credentials" -endef -endif - -ifeq ($(PROJECT),rabbit_common) -travis-yml: - $(gen_verbose) $(replace_aws_creds) -else -travis-yml: - $(gen_verbose) \ - set -e; \ - if test -d .git && test -d $(DEPS_DIR)/rabbit_common/.git; then \ - upstream_branch=$$(LANG=C git -C $(DEPS_DIR)/rabbit_common branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \ - local_branch=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / 
{ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \ - test "$$local_branch" = "$$upstream_branch" || exit 0; \ - fi; \ - test -f .travis.yml || exit 0; \ - (grep -E -- '- secure:' .travis.yml || :) > .travis.yml.creds; \ - cp -a $(DEPS_DIR)/rabbit_common/.travis.yml .travis.yml.orig; \ - awk ' \ - /^ global:/ { \ - print; \ - system("test -f .travis.yml.creds && cat .travis.yml.creds"); \ - next; \ - } \ - /- secure:/ { next; } \ - { print; } \ - ' < .travis.yml.orig > .travis.yml; \ - rm -f .travis.yml.orig .travis.yml.creds; \ - if test -f .travis.yml.patch; then \ - patch -p0 < .travis.yml.patch; \ - rm -f .travis.yml.orig; \ - fi; \ - $(replace_aws_creds) -ifeq ($(DO_COMMIT),yes) - $(verbose) ! test -f .travis.yml || \ - git diff --quiet .travis.yml \ - || git commit -m 'Travis CI: Update config from rabbitmq-common' .travis.yml -endif -endif - -update-travis-yml: travis-yml - $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \ - ! test -f $$repo/rabbitmq-components.mk \ - || $(MAKE) -C $$repo travis-yml; \ - done - ifneq ($(wildcard .git),) .PHONY: sync-gitremote sync-gituser diff --git a/deps/rabbit_common/mk/xrefr b/deps/rabbit_common/mk/xrefr deleted file mode 100755 index 03c408fcb43f..000000000000 --- a/deps/rabbit_common/mk/xrefr +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/env escript -%% vim:ft=erlang: - -%% The code is copied from xref_runner. -%% https://github.com/inaka/xref_runner -%% -%% The only change is the support of our erlang_version_support -%% attribute: we don't want any warnings about functions which will be -%% dropped at load time. -%% -%% It's also a plain text escript instead of a compiled one because we -%% want to support Erlang R16B03 and the version of xref_runner uses -%% maps and is built with something like Erlang 18. - -%% This mode allows us to reference local function. For instance: -%% lists:map(fun generate_comment/1, Comments) --mode(compile). - --define(DIRS, ["ebin", "test"]). - --define(CHECKS, [undefined_function_calls, - undefined_functions, - locals_not_used]). - -main(_) -> - Checks = ?CHECKS, - ElixirDeps = get_elixir_deps_paths(), - [true = code:add_path(P) || P <- ElixirDeps], - XrefWarnings = lists:append([check(Check) || Check <- Checks]), - warnings_prn(XrefWarnings), - case XrefWarnings of - [] -> ok; - _ -> halt(1) - end. - -get_elixir_deps_paths() -> - case os:getenv("ERLANG_MK_RECURSIVE_DEPS_LIST") of - false -> - []; - Filename -> - {ok, Fd} = file:open(Filename, [read]), - get_elixir_deps_paths1(Fd, []) - end. - -get_elixir_deps_paths1(Fd, Paths) -> - case file:read_line(Fd) of - {ok, Line0} -> - Line = Line0 -- [$\r, $\n], - RootPath = case os:type() of - {unix, _} -> - Line; - {win32, _} -> - case os:find_executable("cygpath.exe") of - false -> - Line; - Cygpath -> - os:cmd( - io_lib:format("~s --windows \"~s\"", - [Cygpath, Line])) - -- [$\r, $\n] - end - end, - Glob = filename:join([RootPath, "_build", "dev", "lib", "*", "ebin"]), - NewPaths = filelib:wildcard(Glob), - get_elixir_deps_paths1(Fd, Paths ++ NewPaths); - eof -> - add_elixir_stdlib_path(Paths) - end. - -add_elixir_stdlib_path(Paths) -> - case find_elixir_home() of - false -> Paths; - ElixirLibDir -> [ElixirLibDir | Paths] - end. - -find_elixir_home() -> - ElixirExe = case os:type() of - {unix, _} -> "elixir"; - {win32, _} -> "elixir.bat" - end, - case os:find_executable(ElixirExe) of - false -> false; - ExePath -> resolve_symlink(ExePath) - end. 
- -resolve_symlink(ExePath) -> - case file:read_link_all(ExePath) of - {error, einval} -> - determine_elixir_home(ExePath); - {ok, ResolvedLink} -> - ExePath1 = filename:absname(ResolvedLink, - filename:dirname(ExePath)), - resolve_symlink(ExePath1); - {error, _} -> - false - end. - -determine_elixir_home(ExePath) -> - LibPath = filename:join([filename:dirname(filename:dirname(ExePath)), - "lib", - "elixir", - "ebin"]), - case filelib:is_dir(LibPath) of - true -> LibPath; - false -> {skip, "Failed to locate Elixir lib dir"} - end. -check(Check) -> - Dirs = ?DIRS, - lists:foreach(fun code:add_path/1, Dirs), - - {ok, Xref} = xref:start([]), - try - ok = xref:set_library_path(Xref, code:get_path()), - - lists:foreach( - fun(Dir) -> - case filelib:is_dir(Dir) of - true -> {ok, _} = xref:add_directory(Xref, Dir); - false -> ok - end - end, Dirs), - - {ok, Results} = xref:analyze(Xref, Check), - - FilteredResults = filter_xref_results(Check, Results), - - [result_to_warning(Check, Result) || Result <- FilteredResults] - after - stopped = xref:stop(Xref) - end. - -%% ------------------------------------------------------------------- -%% Filtering results. -%% ------------------------------------------------------------------- - -filter_xref_results(Check, Results) -> - SourceModules = - lists:usort([source_module(Result) || Result <- Results]), - - Ignores = lists:flatmap( - fun(Module) -> get_ignorelist(Module, Check) end, SourceModules), - - UnusedFunctions = lists:flatmap( - fun(Mod) -> get_unused_compat_functions(Mod) end, - SourceModules), - - ToIgnore = case get(results_to_ignore) of - undefined -> []; - RTI -> RTI - end, - NewToIgnore = [parse_xref_target(Result) - || Result <- Results, - lists:member(parse_xref_source(Result), UnusedFunctions)], - AllToIgnore = ToIgnore ++ NewToIgnore ++ [mfa(M, {F, A}) - || {_, {M, F, A}} <- Ignores], - put(results_to_ignore, AllToIgnore), - - [Result || Result <- Results, - not lists:member(parse_xref_result(Result), Ignores) andalso - not lists:member(parse_xref_result(Result), AllToIgnore) andalso - not lists:member(parse_xref_source(Result), UnusedFunctions)]. - -source_module({Mt, _Ft, _At}) -> Mt; -source_module({{Ms, _Fs, _As}, _Target}) -> Ms. - -%% -%% Ignore behaviour functions, and explicitly marked functions -%% -%% Functions can be ignored by using -%% -ignore_xref([{F, A}, {M, F, A}...]). -get_ignorelist(Mod, Check) -> - %% Get ignore_xref attribute and combine them in one list - Attributes = - try - Mod:module_info(attributes) - catch - _Class:_Error -> [] - end, - - IgnoreXref = - [mfa(Mod, Value) || {ignore_xref, Values} <- Attributes, Value <- Values], - - BehaviourCallbacks = get_behaviour_callbacks(Check, Mod, Attributes), - - %% And create a flat {M, F, A} list - IgnoreXref ++ BehaviourCallbacks. - -get_behaviour_callbacks(exports_not_used, Mod, Attributes) -> - Behaviours = [Value || {behaviour, Values} <- Attributes, Value <- Values], - [{Mod, {Mod, F, A}} - || B <- Behaviours, {F, A} <- B:behaviour_info(callbacks)]; -get_behaviour_callbacks(_Check, _Mod, _Attributes) -> - []. - -get_unused_compat_functions(Module) -> - OTPVersion = code_version:get_otp_version(), - Attributes = try - Module:module_info(attributes) - catch - _Class:_Error -> [] - end, - CompatTuples = [Tuple - || {erlang_version_support, Tuples} <- Attributes, - Tuple <- Tuples], - get_unused_compat_functions(Module, OTPVersion, CompatTuples, []). 
- -get_unused_compat_functions(_, _, [], Result) -> - Result; -get_unused_compat_functions(Module, - OTPVersion, - [{MinOTPVersion, Choices} | Rest], - Result) -> - Functions = lists:map( - fun({_, Arity, Pre, Post}) -> - if - OTPVersion >= MinOTPVersion -> - %% We ignore the "pre" function. - mfa(Module, {Pre, Arity}); - true -> - %% We ignore the "post" function. - mfa(Module, {Post, Arity}) - end - end, Choices), - get_unused_compat_functions(Module, OTPVersion, Rest, - Result ++ Functions). - -mfa(M, {F, A}) -> {M, {M, F, A}}; -mfa(M, MFA) -> {M, MFA}. - -parse_xref_result({{SM, _, _}, MFAt}) -> {SM, MFAt}; -parse_xref_result({TM, _, _} = MFAt) -> {TM, MFAt}. - -parse_xref_source({{SM, _, _} = MFAt, _}) -> {SM, MFAt}; -parse_xref_source({TM, _, _} = MFAt) -> {TM, MFAt}. - -parse_xref_target({_, {TM, _, _} = MFAt}) -> {TM, MFAt}; -parse_xref_target({TM, _, _} = MFAt) -> {TM, MFAt}. - -%% ------------------------------------------------------------------- -%% Preparing results. -%% ------------------------------------------------------------------- - -result_to_warning(Check, {MFASource, MFATarget}) -> - {Filename, Line} = get_source(MFASource), - [{filename, Filename}, - {line, Line}, - {source, MFASource}, - {target, MFATarget}, - {check, Check}]; -result_to_warning(Check, MFA) -> - {Filename, Line} = get_source(MFA), - [{filename, Filename}, - {line, Line}, - {source, MFA}, - {check, Check}]. - -%% -%% Given a MFA, find the file and LOC where it's defined. Note that -%% xref doesn't work if there is no abstract_code, so we can avoid -%% being too paranoid here. -%% -get_source({M, F, A}) -> - case code:get_object_code(M) of - error -> {"", 0}; - {M, Bin, _} -> find_function_source(M, F, A, Bin) - end. - -find_function_source(M, F, A, Bin) -> - AbstractCode = beam_lib:chunks(Bin, [abstract_code]), - {ok, {M, [{abstract_code, {raw_abstract_v1, Code}}]}} = AbstractCode, - - %% Extract the original source filename from the abstract code - [Source|_] = [S || {attribute, _, file, {S, _}} <- Code], - - %% Extract the line number for a given function def - Fn = [E || E <- Code, - element(1, E) == function, - element(3, E) == F, - element(4, E) == A], - - case Fn of - [{function, Line, F, _, _}] when is_integer(Line) -> - {Source, Line}; - [{function, Line, F, _, _}] -> - {Source, erl_anno:line(Line)}; - %% do not crash if functions are exported, even though they - %% are not in the source. - %% parameterized modules add new/1 and instance/1 for example. - [] -> {Source, 0} - end. - -%% ------------------------------------------------------------------- -%% Reporting results. -%% ------------------------------------------------------------------- - -warnings_prn([]) -> - ok; -warnings_prn(Comments) -> - Messages = lists:map(fun generate_comment/1, Comments), - lists:foreach(fun warning_prn/1, Messages). - -warning_prn(Message) -> - FullMessage = Message ++ "~n", - io:format(FullMessage, []). - -generate_comment(XrefWarning) -> - Filename = proplists:get_value(filename, XrefWarning), - Line = proplists:get_value(line, XrefWarning), - Source = proplists:get_value(source, XrefWarning), - Check = proplists:get_value(check, XrefWarning), - Target = proplists:get_value(target, XrefWarning), - Position = case {Filename, Line} of - {"", _} -> ""; - {Filename, 0} -> [Filename, " "]; - {Filename, Line} -> [Filename, ":", - integer_to_list(Line), " "] - end, - [Position, generate_comment_text(Check, Source, Target)]. 
- -generate_comment_text(Check, {SM, SF, SA}, TMFA) -> - SMFA = io_lib:format("`~p:~p/~p`", [SM, SF, SA]), - generate_comment_text(Check, SMFA, TMFA); -generate_comment_text(Check, SMFA, {TM, TF, TA}) -> - TMFA = io_lib:format("`~p:~p/~p`", [TM, TF, TA]), - generate_comment_text(Check, SMFA, TMFA); - -generate_comment_text(undefined_function_calls, SMFA, TMFA) -> - io_lib:format("~s calls undefined function ~s", [SMFA, TMFA]); -generate_comment_text(undefined_functions, SMFA, _TMFA) -> - io_lib:format("~s is not defined as a function", [SMFA]); -generate_comment_text(locals_not_used, SMFA, _TMFA) -> - io_lib:format("~s is an unused local function", [SMFA]); -generate_comment_text(exports_not_used, SMFA, _TMFA) -> - io_lib:format("~s is an unused export", [SMFA]); -generate_comment_text(deprecated_function_calls, SMFA, TMFA) -> - io_lib:format("~s calls deprecated function ~s", [SMFA, TMFA]); -generate_comment_text(deprecated_functions, SMFA, _TMFA) -> - io_lib:format("~s is deprecated", [SMFA]). diff --git a/deps/rabbit_common/src/app_utils.erl b/deps/rabbit_common/src/app_utils.erl index 146708e8b728..f0c68cd6bc5f 100644 --- a/deps/rabbit_common/src/app_utils.erl +++ b/deps/rabbit_common/src/app_utils.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(app_utils). diff --git a/deps/rabbit_common/src/code_version.erl b/deps/rabbit_common/src/code_version.erl index 9073de66425c..568a6e7c439a 100644 --- a/deps/rabbit_common/src/code_version.erl +++ b/deps/rabbit_common/src/code_version.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(code_version). diff --git a/deps/rabbit_common/src/credit_flow.erl b/deps/rabbit_common/src/credit_flow.erl index b77b5116267b..5681f0ed1fa9 100644 --- a/deps/rabbit_common/src/credit_flow.erl +++ b/deps/rabbit_common/src/credit_flow.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(credit_flow). @@ -37,19 +37,7 @@ %% synchronization has not been documented, since this doesn't affect %% client publishes. --define(DEFAULT_INITIAL_CREDIT, 200). --define(DEFAULT_MORE_CREDIT_AFTER, 100). - --define(DEFAULT_CREDIT, - case get(credit_flow_default_credit) of - undefined -> - Val = rabbit_misc:get_env(rabbit, credit_flow_default_credit, - {?DEFAULT_INITIAL_CREDIT, - ?DEFAULT_MORE_CREDIT_AFTER}), - put(credit_flow_default_credit, Val), - Val; - Val -> Val - end). +-define(DEFAULT_CREDIT, persistent_term:get(credit_flow_default_credit)). -export([send/1, send/2, ack/1, ack/2, handle_bump_msg/1, blocked/0, state/0, state_delayed/1]). 
-export([peer_down/1]). @@ -99,13 +87,13 @@ {from_info, erlang:process_info(FROM)}, {timestamp, os:system_time( - milliseconds)}])). + millisecond)}])). -define(TRACE_UNBLOCKED(SELF, FROM), rabbit_event:notify(credit_flow_unblocked, [{process, SELF}, {from, FROM}, {timestamp, os:system_time( - milliseconds)}])). + millisecond)}])). -else. -define(TRACE_BLOCKED(SELF, FROM), ok). -define(TRACE_UNBLOCKED(SELF, FROM), ok). @@ -168,7 +156,7 @@ state_delayed(BlockedAt) -> B -> Now = erlang:monotonic_time(), Diff = erlang:convert_time_unit(Now - B, native, - micro_seconds), + microsecond), case Diff < ?STATE_CHANGE_INTERVAL of true -> flow; false -> running diff --git a/deps/rabbit_common/src/delegate.erl b/deps/rabbit_common/src/delegate.erl index 98a0d780e34c..be5cf57622a5 100644 --- a/deps/rabbit_common/src/delegate.erl +++ b/deps/rabbit_common/src/delegate.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(delegate). @@ -227,12 +227,12 @@ group_pids_by_node(Pids) -> group_local_call_pids_by_node(Pids) -> {LocalPids0, Grouped0} = group_pids_by_node(Pids), - maps:fold(fun(K, V, {AccIn, MapsIn}) -> + maps:fold(fun(K, V, {AccIn, MapsIn}) -> case V of %% just one Pid for the node [SinglePid] -> {[SinglePid | AccIn], MapsIn}; - %% If the value is a list of more than one pid, - %% the (K,V) will be put into the new map which will be called + %% If the value is a list of more than one pid, + %% the (K,V) will be put into the new map which will be called %% through delegate to reduce inter-node communication. _ -> {AccIn, maps:update_with(K, fun(V1) -> V1 end, V, MapsIn)} end diff --git a/deps/rabbit_common/src/delegate_sup.erl b/deps/rabbit_common/src/delegate_sup.erl index 4c299c78bb82..6bf7dea1de3e 100644 --- a/deps/rabbit_common/src/delegate_sup.erl +++ b/deps/rabbit_common/src/delegate_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(delegate_sup). diff --git a/deps/rabbit_common/src/file_handle_cache.erl b/deps/rabbit_common/src/file_handle_cache.erl index 4b0470362cb3..4e5c7901a30c 100644 --- a/deps/rabbit_common/src/file_handle_cache.erl +++ b/deps/rabbit_common/src/file_handle_cache.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(file_handle_cache). @@ -660,19 +660,16 @@ get_client_state(Pid) -> %%---------------------------------------------------------------------------- prim_file_read(Hdl, Size) -> - file_handle_cache_stats:update( - io_read, Size, fun() -> prim_file:read(Hdl, Size) end). + prim_file:read(Hdl, Size). 
prim_file_write(Hdl, Bytes) -> - file_handle_cache_stats:update( - io_write, iolist_size(Bytes), fun() -> prim_file:write(Hdl, Bytes) end). + prim_file:write(Hdl, Bytes). prim_file_sync(Hdl) -> - file_handle_cache_stats:update(io_sync, fun() -> prim_file:sync(Hdl) end). + prim_file:sync(Hdl). prim_file_position(Hdl, NewOffset) -> - file_handle_cache_stats:update( - io_seek, fun() -> prim_file:position(Hdl, NewOffset) end). + prim_file:position(Hdl, NewOffset). is_reader(Mode) -> lists:member(read, Mode). @@ -766,8 +763,7 @@ reopen([{Ref, NewOrReopen, Handle = #handle { hdl = closed, RefNewOrReopenHdls] = ToOpen, Tree, RefHdls) -> Mode = case NewOrReopen of new -> Mode0; - reopen -> file_handle_cache_stats:update(io_reopen), - [read | Mode0] + reopen -> [read | Mode0] end, case prim_file:open(Path, Mode) of {ok, Hdl} -> @@ -1087,9 +1083,8 @@ infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items]. i(total_limit, #fhc_state{limit = Limit}) -> Limit; i(total_used, State) -> used(State); -i(sockets_limit, #fhc_state{obtain_limit = Limit}) -> Limit; -i(sockets_used, #fhc_state{obtain_count_socket = Count, - reserve_count_socket = RCount}) -> Count + RCount; +i(sockets_limit, _) -> 0; +i(sockets_used, _) -> 0; i(files_reserved, #fhc_state{reserve_count_file = RCount}) -> RCount; i(Item, _) -> throw({bad_argument, Item}). @@ -1104,7 +1099,6 @@ used(#fhc_state{open_count = C1, %%---------------------------------------------------------------------------- init([AlarmSet, AlarmClear]) -> - _ = file_handle_cache_stats:init(), Limit = case application:get_env(file_handles_high_watermark) of {ok, Watermark} when (is_integer(Watermark) andalso Watermark > 0) -> diff --git a/deps/rabbit_common/src/file_handle_cache_stats.erl b/deps/rabbit_common/src/file_handle_cache_stats.erl deleted file mode 100644 index 672872a37aee..000000000000 --- a/deps/rabbit_common/src/file_handle_cache_stats.erl +++ /dev/null @@ -1,63 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(file_handle_cache_stats). - -%% stats about read / write operations that go through the fhc. - --export([init/0, update/3, update/2, update/1, inc/2, get/0]). - --define(TABLE, ?MODULE). - --define(COUNT, - [io_reopen, mnesia_ram_tx, mnesia_disk_tx, - msg_store_read, msg_store_write, - queue_index_write, queue_index_read]). --define(COUNT_TIME, [io_sync, io_seek]). --define(COUNT_TIME_BYTES, [io_read, io_write]). - --import(rabbit_misc, [safe_ets_update_counter/3, safe_ets_update_counter/4]). - -init() -> - _ = ets:new(?TABLE, [public, named_table, {write_concurrency,true}]), - [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op <- ?COUNT_TIME_BYTES, - Counter <- [count, bytes, time]], - [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op <- ?COUNT_TIME, - Counter <- [count, time]], - [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op <- ?COUNT, - Counter <- [count]]. - -update(Op, Bytes, Thunk) -> - {Time, Res} = timer_tc(Thunk), - _ = safe_ets_update_counter(?TABLE, {Op, count}, 1), - _ = safe_ets_update_counter(?TABLE, {Op, bytes}, Bytes), - _ = safe_ets_update_counter(?TABLE, {Op, time}, Time), - Res. - -update(Op, Thunk) -> - {Time, Res} = timer_tc(Thunk), - _ = safe_ets_update_counter(?TABLE, {Op, count}, 1), - _ = safe_ets_update_counter(?TABLE, {Op, time}, Time), - Res. 
- -update(Op) -> - _ = safe_ets_update_counter(?TABLE, {Op, count}, 1), - ok. - -inc(Op, Count) -> - _ = safe_ets_update_counter(?TABLE, {Op, count}, Count), - ok. - -get() -> - lists:sort(ets:tab2list(?TABLE)). - -timer_tc(Thunk) -> - T1 = erlang:monotonic_time(), - Res = Thunk(), - T2 = erlang:monotonic_time(), - Diff = erlang:convert_time_unit(T2 - T1, native, micro_seconds), - {Diff, Res}. diff --git a/deps/rabbit_common/src/gen_server2.erl b/deps/rabbit_common/src/gen_server2.erl index 442476bd2108..80dd79bb4502 100644 --- a/deps/rabbit_common/src/gen_server2.erl +++ b/deps/rabbit_common/src/gen_server2.erl @@ -93,7 +93,7 @@ %% %% 11) Internal buffer length is emitted as a core [RabbitMQ] metric. -%% All modifications are (C) 2009-2023 VMware, Inc. or its affiliates. +%% All modifications are (C) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% ``The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -1113,7 +1113,7 @@ system_code_change(GS2State = #gs2_state { mod = Mod, {ok, NewState} -> NewGS2State = find_prioritisers( GS2State #gs2_state { state = NewState }), - {ok, [NewGS2State]}; + {ok, NewGS2State}; Else -> Else end. diff --git a/deps/rabbit_common/src/mirrored_supervisor_locks.erl b/deps/rabbit_common/src/mirrored_supervisor_locks.erl index 60db79ca486f..ff6550a3fbcb 100644 --- a/deps/rabbit_common/src/mirrored_supervisor_locks.erl +++ b/deps/rabbit_common/src/mirrored_supervisor_locks.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(mirrored_supervisor_locks). diff --git a/deps/rabbit_common/src/mnesia_sync.erl b/deps/rabbit_common/src/mnesia_sync.erl index 2d48140160cc..3bb58d51e2c7 100644 --- a/deps/rabbit_common/src/mnesia_sync.erl +++ b/deps/rabbit_common/src/mnesia_sync.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(mnesia_sync). diff --git a/deps/rabbit_common/src/pmon.erl b/deps/rabbit_common/src/pmon.erl index 0fe439f2526e..c438f52ab359 100644 --- a/deps/rabbit_common/src/pmon.erl +++ b/deps/rabbit_common/src/pmon.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(pmon). diff --git a/deps/rabbit_common/src/priority_queue.erl b/deps/rabbit_common/src/priority_queue.erl index cdbef5f31dd4..19fe0941225f 100644 --- a/deps/rabbit_common/src/priority_queue.erl +++ b/deps/rabbit_common/src/priority_queue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Priority queues have essentially the same interface as ordinary diff --git a/deps/rabbit_common/src/rabbit_amqp_connection.erl b/deps/rabbit_common/src/rabbit_amqp_connection.erl index 4f1488e2c8e7..5f80cba2e1e3 100644 --- a/deps/rabbit_common/src/rabbit_amqp_connection.erl +++ b/deps/rabbit_common/src/rabbit_amqp_connection.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_amqp_connection). diff --git a/deps/rabbit_common/src/rabbit_amqqueue_common.erl b/deps/rabbit_common/src/rabbit_amqqueue_common.erl index 331772165fc0..7d3bbe6b9663 100644 --- a/deps/rabbit_common/src/rabbit_amqqueue_common.erl +++ b/deps/rabbit_common/src/rabbit_amqqueue_common.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_amqqueue_common). diff --git a/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl b/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl index 60ff35f1d436..e7f9f67bec75 100644 --- a/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl +++ b/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_dummy). @@ -14,7 +14,7 @@ -export([user/0]). -export([user_login_authentication/2, user_login_authorization/2, check_vhost_access/3, check_resource_access/4, check_topic_access/4]). --export([state_can_expire/0]). +-export([expiry_timestamp/1]). -spec user() -> rabbit_types:user(). @@ -36,4 +36,4 @@ check_vhost_access(#auth_user{}, _VHostPath, _AuthzData) -> true. check_resource_access(#auth_user{}, #resource{}, _Permission, _Context) -> true. check_topic_access(#auth_user{}, #resource{}, _Permission, _Context) -> true. -state_can_expire() -> false. +expiry_timestamp(_) -> never. diff --git a/deps/rabbit_common/src/rabbit_auth_mechanism.erl b/deps/rabbit_common/src/rabbit_auth_mechanism.erl index d950769b5046..a84e01b286d7 100644 --- a/deps/rabbit_common/src/rabbit_auth_mechanism.erl +++ b/deps/rabbit_common/src/rabbit_auth_mechanism.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. 
All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module defines the interface for SASL mechanisms. diff --git a/deps/rabbit_common/src/rabbit_authn_backend.erl b/deps/rabbit_common/src/rabbit_authn_backend.erl index 2ed7e7d19689..c657bc82f0b6 100644 --- a/deps/rabbit_common/src/rabbit_authn_backend.erl +++ b/deps/rabbit_common/src/rabbit_authn_backend.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_authn_backend). --include("rabbit.hrl"). - %% Check a user can log in, given a username and a proplist of %% authentication information (e.g. [{password, Password}]). If your %% backend is not to be used for authentication, this should always diff --git a/deps/rabbit_common/src/rabbit_authz_backend.erl b/deps/rabbit_common/src/rabbit_authz_backend.erl index 60456c518bbe..eda44d117604 100644 --- a/deps/rabbit_common/src/rabbit_authz_backend.erl +++ b/deps/rabbit_common/src/rabbit_authz_backend.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_authz_backend). --include("rabbit.hrl"). - %% Check that a user can log in, when this backend is being used for %% authorisation only. Authentication has already taken place %% successfully, but we need to check that the user exists in this @@ -67,9 +65,6 @@ rabbit_types:topic_access_context()) -> boolean() | {'error', any()}. -%% Returns true for backends that support state or credential expiration (e.g. use JWTs). --callback state_can_expire() -> boolean(). - %% Updates backend state that has expired. %% %% Possible responses: @@ -85,4 +80,14 @@ {'refused', string(), [any()]} | {'error', any()}. +%% Get expiry timestamp for the user. +%% +%% Possible responses: +%% never +%% The user token/credentials never expire. +%% Timestamp +%% The expiry time (POSIX) in seconds of the token/credentials. +-callback expiry_timestamp(AuthUser :: rabbit_types:auth_user()) -> + integer() | never. + -optional_callbacks([update_state/2]). diff --git a/deps/rabbit_common/src/rabbit_basic_common.erl b/deps/rabbit_common/src/rabbit_basic_common.erl index 18f1f9bb813f..fec8d3fea2a8 100644 --- a/deps/rabbit_common/src/rabbit_basic_common.erl +++ b/deps/rabbit_common/src/rabbit_basic_common.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_basic_common). 
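For illustration only, and not part of this patch: one way an authorisation backend could implement the new expiry_timestamp/1 callback introduced in rabbit_authz_backend above. The module name and the assumption that #auth_user.impl holds a decoded token map with an <<"exp">> claim are hypothetical; a backend whose credentials never expire simply returns the atom never, while a token-based backend returns the token's expiry as a POSIX timestamp in seconds.

-module(example_authz_expiry).

-include_lib("rabbit_common/include/rabbit.hrl").

-export([expiry_timestamp/1]).

%% Hypothetical: assumes the backend stored a decoded token map with an
%% <<"exp">> claim (seconds since the epoch) in #auth_user.impl at login.
expiry_timestamp(#auth_user{impl = #{<<"exp">> := Exp}}) when is_integer(Exp) ->
    Exp;
%% Anything else is treated as credentials that never expire.
expiry_timestamp(_AuthUser) ->
    never.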
diff --git a/deps/rabbit_common/src/rabbit_binary_generator.erl b/deps/rabbit_common/src/rabbit_binary_generator.erl index fb94f9967481..eb256052c0b6 100644 --- a/deps/rabbit_common/src/rabbit_binary_generator.erl +++ b/deps/rabbit_common/src/rabbit_binary_generator.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_binary_generator). diff --git a/deps/rabbit_common/src/rabbit_binary_parser.erl b/deps/rabbit_common/src/rabbit_binary_parser.erl index 1b420196887d..b14763aa87c0 100644 --- a/deps/rabbit_common/src/rabbit_binary_parser.erl +++ b/deps/rabbit_common/src/rabbit_binary_parser.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_binary_parser). diff --git a/deps/rabbit_common/src/rabbit_cert_info.erl b/deps/rabbit_common/src/rabbit_cert_info.erl index 1d370c4d6b79..d70117793112 100644 --- a/deps/rabbit_common/src/rabbit_cert_info.erl +++ b/deps/rabbit_common/src/rabbit_cert_info.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_cert_info). diff --git a/deps/rabbit_common/src/rabbit_channel_common.erl b/deps/rabbit_common/src/rabbit_channel_common.erl index 6c4e3e67a2af..fb63c2da8866 100644 --- a/deps/rabbit_common/src/rabbit_channel_common.erl +++ b/deps/rabbit_common/src/rabbit_channel_common.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_channel_common). diff --git a/deps/rabbit_common/src/rabbit_command_assembler.erl b/deps/rabbit_common/src/rabbit_command_assembler.erl index 5ad16b3e060d..ceba7291f3ce 100644 --- a/deps/rabbit_common/src/rabbit_command_assembler.erl +++ b/deps/rabbit_common/src/rabbit_command_assembler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_command_assembler). 
diff --git a/deps/rabbit_common/src/rabbit_control_misc.erl b/deps/rabbit_common/src/rabbit_control_misc.erl index 6b7b09c64deb..ef880088b87c 100644 --- a/deps/rabbit_common/src/rabbit_control_misc.erl +++ b/deps/rabbit_common/src/rabbit_control_misc.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_control_misc). diff --git a/deps/rabbit_common/src/rabbit_core_metrics.erl b/deps/rabbit_common/src/rabbit_core_metrics.erl index 8ce229954bab..c06b73bc457d 100644 --- a/deps/rabbit_common/src/rabbit_core_metrics.erl +++ b/deps/rabbit_common/src/rabbit_core_metrics.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_core_metrics). @@ -111,13 +111,15 @@ create_table({Table, Type}) -> {read_concurrency, true}]). init() -> - _ = [create_table({Table, Type}) - || {Table, Type} <- ?CORE_TABLES ++ ?CORE_EXTRA_TABLES], + Tables = ?CORE_TABLES ++ ?CORE_EXTRA_TABLES ++ ?CORE_NON_CHANNEL_TABLES, + _ = [create_table({Table, Type}) + || {Table, Type} <- Tables], ok. terminate() -> + Tables = ?CORE_TABLES ++ ?CORE_EXTRA_TABLES ++ ?CORE_NON_CHANNEL_TABLES, [ets:delete(Table) - || {Table, _Type} <- ?CORE_TABLES ++ ?CORE_EXTRA_TABLES], + || {Table, _Type} <- Tables], ok. connection_created(Pid, Infos) -> @@ -166,53 +168,65 @@ channel_stats(reductions, Id, Value) -> ets:insert(channel_process_metrics, {Id, Value}), ok. 
-channel_stats(exchange_stats, publish, Id, Value) -> +channel_stats(exchange_stats, publish, {_ChannelPid, XName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_exchange_metrics, Id, {2, Value}, {Id, 0, 0, 0, 0, 0}), + _ = ets:update_counter(exchange_metrics, XName, {2, Value}, {XName, 0, 0, 0, 0, 0}), ok; -channel_stats(exchange_stats, confirm, Id, Value) -> +channel_stats(exchange_stats, confirm, {_ChannelPid, XName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_exchange_metrics, Id, {3, Value}, {Id, 0, 0, 0, 0, 0}), + _ = ets:update_counter(exchange_metrics, XName, {3, Value}, {XName, 0, 0, 0, 0, 0}), ok; -channel_stats(exchange_stats, return_unroutable, Id, Value) -> +channel_stats(exchange_stats, return_unroutable, {_ChannelPid, XName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_exchange_metrics, Id, {4, Value}, {Id, 0, 0, 0, 0, 0}), + _ = ets:update_counter(exchange_metrics, XName, {4, Value}, {XName, 0, 0, 0, 0, 0}), ok; -channel_stats(exchange_stats, drop_unroutable, Id, Value) -> +channel_stats(exchange_stats, drop_unroutable, {_ChannelPid, XName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_exchange_metrics, Id, {5, Value}, {Id, 0, 0, 0, 0, 0}), + _ = ets:update_counter(exchange_metrics, XName, {5, Value}, {XName, 0, 0, 0, 0, 0}), ok; -channel_stats(queue_exchange_stats, publish, Id, Value) -> +channel_stats(queue_exchange_stats, publish, {_ChannelPid, QueueExchange} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_queue_exchange_metrics, Id, Value, {Id, 0, 0}), + _ = ets:update_counter(queue_exchange_metrics, QueueExchange, Value, {QueueExchange, 0, 0}), ok; -channel_stats(queue_stats, get, Id, Value) -> +channel_stats(queue_stats, get, {_ChannelPid, QName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_queue_metrics, Id, {2, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}), + _ = ets:update_counter(queue_delivery_metrics, QName, {2, Value}, {QName, 0, 0, 0, 0, 0, 0, 0, 0}), ok; -channel_stats(queue_stats, get_no_ack, Id, Value) -> +channel_stats(queue_stats, get_no_ack, {_ChannelPid, QName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_queue_metrics, Id, {3, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}), + _ = ets:update_counter(queue_delivery_metrics, QName, {3, Value}, {QName, 0, 0, 0, 0, 0, 0, 0, 0}), ok; -channel_stats(queue_stats, deliver, Id, Value) -> +channel_stats(queue_stats, deliver, {_ChannelPid, QName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_queue_metrics, Id, {4, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}), + _ = ets:update_counter(queue_delivery_metrics, QName, {4, Value}, {QName, 0, 0, 0, 0, 0, 0, 0, 0}), ok; -channel_stats(queue_stats, deliver_no_ack, Id, Value) -> +channel_stats(queue_stats, deliver_no_ack, {_ChannelPid, QName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_queue_metrics, Id, {5, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}), + _ = ets:update_counter(queue_delivery_metrics, QName, {5, Value}, {QName, 0, 0, 0, 0, 0, 0, 0, 0}), ok; -channel_stats(queue_stats, redeliver, Id, Value) -> +channel_stats(queue_stats, redeliver, {_ChannelPid, QName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_queue_metrics, Id, {6, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}), + _ = ets:update_counter(queue_delivery_metrics, QName, {6, Value}, {QName, 0, 0, 0, 0, 0, 0, 0, 0}), ok; -channel_stats(queue_stats, ack, Id, Value) -> 
+channel_stats(queue_stats, ack, {_ChannelPid, QName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_queue_metrics, Id, {7, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}), + _ = ets:update_counter(queue_delivery_metrics, QName, {7, Value}, {QName, 0, 0, 0, 0, 0, 0, 0, 0}), ok; -channel_stats(queue_stats, get_empty, Id, Value) -> +channel_stats(queue_stats, get_empty, {_ChannelPid, QName} = Id, Value) -> %% Includes delete marker _ = ets:update_counter(channel_queue_metrics, Id, {8, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}), + _ = ets:update_counter(queue_delivery_metrics, QName, {8, Value}, {QName, 0, 0, 0, 0, 0, 0, 0, 0}), ok. delete(Table, Key) -> diff --git a/deps/rabbit_common/src/rabbit_data_coercion.erl b/deps/rabbit_common/src/rabbit_data_coercion.erl index d8c79969c91a..da498b1afac8 100644 --- a/deps/rabbit_common/src/rabbit_data_coercion.erl +++ b/deps/rabbit_common/src/rabbit_data_coercion.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_data_coercion). @@ -65,9 +65,9 @@ to_map(Val) when is_list(Val) -> maps:from_list(Val). -spec atomize_keys(Val :: map() | list()) -> map() | list(). atomize_keys(Val) when is_list(Val) -> - [{to_atom(K), V} || {K, V} <- Val]; + [{to_atom(K), V} || {K, V} <- Val]; atomize_keys(Val) when is_map(Val) -> - maps:from_list(atomize_keys(maps:to_list(Val))). + #{to_atom(K) => V || K := V <- Val}. -spec to_list_of_binaries(Val :: undefined | [atom() | list() | binary() | integer()]) -> [binary()]. to_list_of_binaries(Value) -> diff --git a/deps/rabbit_common/src/rabbit_date_time.erl b/deps/rabbit_common/src/rabbit_date_time.erl index 38c51eab4be9..f3c5dc0d27f7 100644 --- a/deps/rabbit_common/src/rabbit_date_time.erl +++ b/deps/rabbit_common/src/rabbit_date_time.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_date_time). diff --git a/deps/rabbit_common/src/rabbit_env.erl b/deps/rabbit_common/src/rabbit_env.erl index 2b7d0f8f2102..4f222ab707f4 100644 --- a/deps/rabbit_common/src/rabbit_env.erl +++ b/deps/rabbit_common/src/rabbit_env.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_env). @@ -194,7 +194,7 @@ context_base(TakeFromRemoteNode) -> Timeout >= 0 -> update_context(Context, from_remote_node, - {TakeFromRemoteNode, Timeout}) + TakeFromRemoteNode) end. -ifdef(TEST). @@ -2146,7 +2146,8 @@ maybe_stop_dist_for_remote_query( maybe_stop_dist_for_remote_query(Context) -> Context. 
-query_remote({RemoteNode, Timeout}, Mod, Func, Args) -> +query_remote({RemoteNode, Timeout}, Mod, Func, Args) + when is_atom(RemoteNode) -> Ret = rpc:call(RemoteNode, Mod, Func, Args, Timeout), case Ret of {badrpc, nodedown} = Error -> Error; diff --git a/deps/rabbit_common/src/rabbit_error_logger_handler.erl b/deps/rabbit_common/src/rabbit_error_logger_handler.erl index 19a26d061730..70e079d7f3e4 100644 --- a/deps/rabbit_common/src/rabbit_error_logger_handler.erl +++ b/deps/rabbit_common/src/rabbit_error_logger_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_error_logger_handler). diff --git a/deps/rabbit_common/src/rabbit_event.erl b/deps/rabbit_common/src/rabbit_event.erl index a6760b238858..ac584ed0819f 100644 --- a/deps/rabbit_common/src/rabbit_event.erl +++ b/deps/rabbit_common/src/rabbit_event.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_event). diff --git a/deps/rabbit_common/src/rabbit_framing.erl b/deps/rabbit_common/src/rabbit_framing.erl index e0f638e618a6..486e795bd85b 100644 --- a/deps/rabbit_common/src/rabbit_framing.erl +++ b/deps/rabbit_common/src/rabbit_framing.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% TODO auto-generate diff --git a/deps/rabbit_common/src/rabbit_framing_amqp_0_8.erl b/deps/rabbit_common/src/rabbit_framing_amqp_0_8.erl new file mode 100644 index 000000000000..3376a0aec604 --- /dev/null +++ b/deps/rabbit_common/src/rabbit_framing_amqp_0_8.erl @@ -0,0 +1,1661 @@ +%% Autogenerated code. Do not edit. +%% +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_framing_amqp_0_8). +-include("rabbit_framing.hrl"). + +-export([version/0]). +-export([lookup_method_name/1]). +-export([lookup_class_name/1]). + +-export([method_id/1]). +-export([method_has_content/1]). +-export([is_method_synchronous/1]). +-export([method_record/1]). +-export([method_fieldnames/1]). +-export([decode_method_fields/2]). +-export([decode_properties/2]). +-export([encode_method_fields/1]). +-export([encode_properties/1]). +-export([lookup_amqp_exception/1]). +-export([amqp_exception/1]). 
+ + +%% Various types +-export_type([amqp_field_type/0, amqp_property_type/0, + amqp_table/0, amqp_array/0, amqp_value/0, + amqp_method_name/0, amqp_method/0, amqp_method_record/0, + amqp_method_field_name/0, amqp_property_record/0, + amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]). + +-type amqp_field_type() :: + 'longstr' | 'signedint' | 'decimal' | 'timestamp' | + 'unsignedbyte' | 'unsignedshort' | 'unsignedint' | + 'table' | 'byte' | 'double' | 'float' | 'long' | + 'short' | 'bool' | 'binary' | 'void' | 'array'. +-type amqp_property_type() :: + 'shortstr' | 'longstr' | 'octet' | 'short' | 'long' | + 'longlong' | 'timestamp' | 'bit' | 'table'. + +-type amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}]. +-type amqp_array() :: [{amqp_field_type(), amqp_value()}]. +-type amqp_value() :: binary() | % longstr + integer() | % signedint + {non_neg_integer(), non_neg_integer()} | % decimal + amqp_table() | + amqp_array() | + byte() | % byte + float() | % double + integer() | % long + integer() | % short + boolean() | % bool + binary() | % binary + 'undefined' | % void + non_neg_integer(). % timestamp + +-type amqp_method_name() :: + ( 'connection.start' | 'connection.start_ok' | 'connection.secure' | 'connection.secure_ok' + | 'connection.tune' | 'connection.tune_ok' | 'connection.open' | 'connection.open_ok' + | 'connection.redirect' | 'connection.close' | 'connection.close_ok' | 'channel.open' + | 'channel.open_ok' | 'channel.flow' | 'channel.flow_ok' | 'channel.alert' + | 'channel.close' | 'channel.close_ok' | 'access.request' | 'access.request_ok' + | 'exchange.declare' | 'exchange.declare_ok' | 'exchange.delete' | 'exchange.delete_ok' + | 'queue.declare' | 'queue.declare_ok' | 'queue.bind' | 'queue.bind_ok' + | 'queue.purge' | 'queue.purge_ok' | 'queue.delete' | 'queue.delete_ok' + | 'queue.unbind' | 'queue.unbind_ok' | 'basic.qos' | 'basic.qos_ok' + | 'basic.consume' | 'basic.consume_ok' | 'basic.cancel' | 'basic.cancel_ok' + | 'basic.publish' | 'basic.return' | 'basic.deliver' | 'basic.get' + | 'basic.get_ok' | 'basic.get_empty' | 'basic.ack' | 'basic.reject' + | 'basic.recover_async' | 'basic.recover' | 'basic.recover_ok' | 'file.qos' + | 'file.qos_ok' | 'file.consume' | 'file.consume_ok' | 'file.cancel' + | 'file.cancel_ok' | 'file.open' | 'file.open_ok' | 'file.stage' + | 'file.publish' | 'file.return' | 'file.deliver' | 'file.ack' + | 'file.reject' | 'stream.qos' | 'stream.qos_ok' | 'stream.consume' + | 'stream.consume_ok' | 'stream.cancel' | 'stream.cancel_ok' | 'stream.publish' + | 'stream.return' | 'stream.deliver' | 'tx.select' | 'tx.select_ok' + | 'tx.commit' | 'tx.commit_ok' | 'tx.rollback' | 'tx.rollback_ok' + | 'dtx.select' | 'dtx.select_ok' | 'dtx.start' | 'dtx.start_ok' + | 'tunnel.request' | 'test.integer' | 'test.integer_ok' | 'test.string' + | 'test.string_ok' | 'test.table' | 'test.table_ok' | 'test.content' + | 'test.content_ok' ). 
+-type amqp_method() :: + ( {10, 10} | {10, 11} | {10, 20} | {10, 21} | {10, 30} | {10, 31} + | {10, 40} | {10, 41} | {10, 50} | {10, 60} | {10, 61} | {20, 10} + | {20, 11} | {20, 20} | {20, 21} | {20, 30} | {20, 40} | {20, 41} + | {30, 10} | {30, 11} | {40, 10} | {40, 11} | {40, 20} | {40, 21} + | {50, 10} | {50, 11} | {50, 20} | {50, 21} | {50, 30} | {50, 31} + | {50, 40} | {50, 41} | {50, 50} | {50, 51} | {60, 10} | {60, 11} + | {60, 20} | {60, 21} | {60, 30} | {60, 31} | {60, 40} | {60, 50} + | {60, 60} | {60, 70} | {60, 71} | {60, 72} | {60, 80} | {60, 90} + | {60, 100} | {60, 110} | {60, 111} | {70, 10} | {70, 11} | {70, 20} + | {70, 21} | {70, 30} | {70, 31} | {70, 40} | {70, 41} | {70, 50} + | {70, 60} | {70, 70} | {70, 80} | {70, 90} | {70, 100} | {80, 10} + | {80, 11} | {80, 20} | {80, 21} | {80, 30} | {80, 31} | {80, 40} + | {80, 50} | {80, 60} | {90, 10} | {90, 11} | {90, 20} | {90, 21} + | {90, 30} | {90, 31} | {100, 10} | {100, 11} | {100, 20} | {100, 21} + | {110, 10} | {120, 10} | {120, 11} | {120, 20} | {120, 21} | {120, 30} + | {120, 31} | {120, 40} | {120, 41} ). +-type amqp_method_record() :: + ( #'connection.start'{} | #'connection.start_ok'{} | #'connection.secure'{} | #'connection.secure_ok'{} + | #'connection.tune'{} | #'connection.tune_ok'{} | #'connection.open'{} | #'connection.open_ok'{} + | #'connection.redirect'{} | #'connection.close'{} | #'connection.close_ok'{} | #'channel.open'{} + | #'channel.open_ok'{} | #'channel.flow'{} | #'channel.flow_ok'{} | #'channel.alert'{} + | #'channel.close'{} | #'channel.close_ok'{} | #'access.request'{} | #'access.request_ok'{} + | #'exchange.declare'{} | #'exchange.declare_ok'{} | #'exchange.delete'{} | #'exchange.delete_ok'{} + | #'queue.declare'{} | #'queue.declare_ok'{} | #'queue.bind'{} | #'queue.bind_ok'{} + | #'queue.purge'{} | #'queue.purge_ok'{} | #'queue.delete'{} | #'queue.delete_ok'{} + | #'queue.unbind'{} | #'queue.unbind_ok'{} | #'basic.qos'{} | #'basic.qos_ok'{} + | #'basic.consume'{} | #'basic.consume_ok'{} | #'basic.cancel'{} | #'basic.cancel_ok'{} + | #'basic.publish'{} | #'basic.return'{} | #'basic.deliver'{} | #'basic.get'{} + | #'basic.get_ok'{} | #'basic.get_empty'{} | #'basic.ack'{} | #'basic.reject'{} + | #'basic.recover_async'{} | #'basic.recover'{} | #'basic.recover_ok'{} | #'file.qos'{} + | #'file.qos_ok'{} | #'file.consume'{} | #'file.consume_ok'{} | #'file.cancel'{} + | #'file.cancel_ok'{} | #'file.open'{} | #'file.open_ok'{} | #'file.stage'{} + | #'file.publish'{} | #'file.return'{} | #'file.deliver'{} | #'file.ack'{} + | #'file.reject'{} | #'stream.qos'{} | #'stream.qos_ok'{} | #'stream.consume'{} + | #'stream.consume_ok'{} | #'stream.cancel'{} | #'stream.cancel_ok'{} | #'stream.publish'{} + | #'stream.return'{} | #'stream.deliver'{} | #'tx.select'{} | #'tx.select_ok'{} + | #'tx.commit'{} | #'tx.commit_ok'{} | #'tx.rollback'{} | #'tx.rollback_ok'{} + | #'dtx.select'{} | #'dtx.select_ok'{} | #'dtx.start'{} | #'dtx.start_ok'{} + | #'tunnel.request'{} | #'test.integer'{} | #'test.integer_ok'{} | #'test.string'{} + | #'test.string_ok'{} | #'test.table'{} | #'test.table_ok'{} | #'test.content'{} + | #'test.content_ok'{} ). 
+-type amqp_method_field_name() :: + ( active | arguments | auto_delete | capabilities + | challenge | channel_max | class_id | client_properties + | cluster_id | consume_rate | consumer_count | consumer_tag + | content_checksum | content_size | delivery_tag | details + | dtx_identifier | durable | exchange | exclusive + | frame_max | global | heartbeat | host + | identifier | if_empty | if_unused | immediate + | insist | integer_1 | integer_2 | integer_3 + | integer_4 | integer_op | integer_result | internal + | known_hosts | locale | locales | mandatory + | mechanism | mechanisms | message_count | meta_data + | method_id | multiple | no_ack | no_local + | nowait | operation | out_of_band | passive + | prefetch_count | prefetch_size | queue | read + | realm | redelivered | reply_code | reply_text + | requeue | response | result | routing_key + | server_properties | staged_size | string_1 | string_2 + | string_op | string_result | table | ticket + | type | version_major | version_minor | virtual_host + | write ). +-type amqp_property_record() :: + ( #'P_connection'{} | #'P_channel'{} | #'P_access'{} | #'P_exchange'{} + | #'P_queue'{} | #'P_basic'{} | #'P_file'{} | #'P_stream'{} + | #'P_tx'{} | #'P_dtx'{} | #'P_tunnel'{} | #'P_test'{} ). +-type amqp_exception() :: + ( 'frame_method' | 'frame_header' | 'frame_body' | 'frame_oob_method' + | 'frame_oob_header' | 'frame_oob_body' | 'frame_trace' | 'frame_heartbeat' + | 'frame_min_size' | 'frame_end' | 'reply_success' | 'not_delivered' + | 'content_too_large' | 'no_route' | 'no_consumers' | 'access_refused' + | 'not_found' | 'resource_locked' | 'precondition_failed' | 'connection_forced' + | 'invalid_path' | 'frame_error' | 'syntax_error' | 'command_invalid' + | 'channel_error' | 'unexpected_frame' | 'resource_error' | 'not_allowed' + | 'not_implemented' | 'internal_error' ). +-type amqp_exception_code() :: + ( 1 | 2 | 3 | 4 + | 5 | 6 | 7 | 8 + | 4096 | 206 | 200 | 310 + | 311 | 312 | 313 | 403 + | 404 | 405 | 406 | 320 + | 402 | 501 | 502 | 503 + | 504 | 505 | 506 | 530 + | 540 | 541 ). +-type amqp_class_id() :: + ( 100 | 70 | 40 | 10 + | 110 | 80 | 50 | 20 + | 120 | 90 | 60 | 30 ). +-type amqp_class_name() :: + ( 'connection' | 'channel' | 'access' | 'exchange' + | 'queue' | 'basic' | 'file' | 'stream' + | 'tx' | 'dtx' | 'tunnel' | 'test' ). + +%% Method signatures +-spec version() -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}. +-spec lookup_method_name(amqp_method()) -> amqp_method_name(). +-spec lookup_class_name(amqp_class_id()) -> amqp_class_name(). +-spec method_id(amqp_method_name()) -> amqp_method(). +-spec method_has_content(amqp_method_name()) -> boolean(). +-spec is_method_synchronous(amqp_method_record()) -> boolean(). +-spec method_record(amqp_method_name()) -> amqp_method_record(). +-spec method_fieldnames(amqp_method_name()) -> [amqp_method_field_name()]. +-spec decode_method_fields(amqp_method_name(), binary()) -> + amqp_method_record() | rabbit_types:connection_exit(). +-spec decode_properties(non_neg_integer(), binary()) -> amqp_property_record(). +-spec encode_method_fields(amqp_method_record()) -> binary(). +-spec encode_properties(amqp_property_record()) -> binary(). +-spec lookup_amqp_exception(amqp_exception()) -> + {boolean(), amqp_exception_code(), binary()}. +-spec amqp_exception(amqp_exception_code()) -> amqp_exception(). + +bitvalue(true) -> 1; +bitvalue(false) -> 0; +bitvalue(undefined) -> 0. 
+
+shortstr_size(S) ->
+    case size(S) of
+        Len when Len =< 255 -> Len;
+        _                   -> exit(method_field_shortstr_overflow)
+    end.
+
+-define(SHORTSTR_VAL(R, L, V, X),
+        begin
+            <<L:8/unsigned, V:L/binary, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(LONGSTR_VAL(R, L, V, X),
+        begin
+            <<L:32/unsigned, V:L/binary, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(SHORT_VAL(R, L, V, X),
+        begin
+            <<V:16/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(LONG_VAL(R, L, V, X),
+        begin
+            <<V:32/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(LONGLONG_VAL(R, L, V, X),
+        begin
+            <<V:64/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(OCTET_VAL(R, L, V, X),
+        begin
+            <<V:8/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(TABLE_VAL(R, L, V, X),
+        begin
+            <<L:32/unsigned, V:L/binary, X/binary>> = R,
+            {rabbit_binary_parser:parse_table(V), X}
+        end).
+
+-define(TIMESTAMP_VAL(R, L, V, X),
+        begin
+            <<V:64/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(SHORTSTR_PROP(X, L),
+        begin
+            L = size(X),
+            if L < 256 -> <<L:8, X:L/binary>>;
+               true    -> exit(content_properties_shortstr_overflow)
+            end
+        end).
+
+-define(LONGSTR_PROP(X, L),
+        begin
+            L = size(X),
+            <<L:32, X:L/binary>>
+        end).
+
+-define(OCTET_PROP(X, L), <<X:8/unsigned>>).
+-define(SHORT_PROP(X, L), <<X:16/unsigned>>).
+-define(LONG_PROP(X, L), <<X:32/unsigned>>).
+-define(LONGLONG_PROP(X, L), <<X:64/unsigned>>).
+-define(TIMESTAMP_PROP(X, L), <<X:64/unsigned>>).
+
+-define(TABLE_PROP(X, T),
+        begin
+            T = rabbit_binary_generator:generate_table(X),
+            <<(size(T)):32, T/binary>>
+        end).
+
+version() -> {0, 8, 0}.
+lookup_method_name({10, 10}) -> 'connection.start';
+lookup_method_name({10, 11}) -> 'connection.start_ok';
+lookup_method_name({10, 20}) -> 'connection.secure';
+lookup_method_name({10, 21}) -> 'connection.secure_ok';
+lookup_method_name({10, 30}) -> 'connection.tune';
+lookup_method_name({10, 31}) -> 'connection.tune_ok';
+lookup_method_name({10, 40}) -> 'connection.open';
+lookup_method_name({10, 41}) -> 'connection.open_ok';
+lookup_method_name({10, 50}) -> 'connection.redirect';
+lookup_method_name({10, 60}) -> 'connection.close';
+lookup_method_name({10, 61}) -> 'connection.close_ok';
+lookup_method_name({20, 10}) -> 'channel.open';
+lookup_method_name({20, 11}) -> 'channel.open_ok';
+lookup_method_name({20, 20}) -> 'channel.flow';
+lookup_method_name({20, 21}) -> 'channel.flow_ok';
+lookup_method_name({20, 30}) -> 'channel.alert';
+lookup_method_name({20, 40}) -> 'channel.close';
+lookup_method_name({20, 41}) -> 'channel.close_ok';
+lookup_method_name({30, 10}) -> 'access.request';
+lookup_method_name({30, 11}) -> 'access.request_ok';
+lookup_method_name({40, 10}) -> 'exchange.declare';
+lookup_method_name({40, 11}) -> 'exchange.declare_ok';
+lookup_method_name({40, 20}) -> 'exchange.delete';
+lookup_method_name({40, 21}) -> 'exchange.delete_ok';
+lookup_method_name({50, 10}) -> 'queue.declare';
+lookup_method_name({50, 11}) -> 'queue.declare_ok';
+lookup_method_name({50, 20}) -> 'queue.bind';
+lookup_method_name({50, 21}) -> 'queue.bind_ok';
+lookup_method_name({50, 30}) -> 'queue.purge';
+lookup_method_name({50, 31}) -> 'queue.purge_ok';
+lookup_method_name({50, 40}) -> 'queue.delete';
+lookup_method_name({50, 41}) -> 'queue.delete_ok';
+lookup_method_name({50, 50}) -> 'queue.unbind';
+lookup_method_name({50, 51}) -> 'queue.unbind_ok';
+lookup_method_name({60, 10}) -> 'basic.qos';
+lookup_method_name({60, 11}) -> 'basic.qos_ok';
+lookup_method_name({60, 20}) -> 'basic.consume';
+lookup_method_name({60, 21}) -> 'basic.consume_ok';
+lookup_method_name({60, 30}) -> 'basic.cancel';
+lookup_method_name({60, 31}) -> 'basic.cancel_ok';
+lookup_method_name({60, 40}) -> 'basic.publish';
+lookup_method_name({60, 50}) -> 'basic.return';
+lookup_method_name({60, 60}) -> 'basic.deliver';
+lookup_method_name({60, 70}) -> 'basic.get';
+lookup_method_name({60, 71}) ->
'basic.get_ok'; +lookup_method_name({60, 72}) -> 'basic.get_empty'; +lookup_method_name({60, 80}) -> 'basic.ack'; +lookup_method_name({60, 90}) -> 'basic.reject'; +lookup_method_name({60, 100}) -> 'basic.recover_async'; +lookup_method_name({60, 110}) -> 'basic.recover'; +lookup_method_name({60, 111}) -> 'basic.recover_ok'; +lookup_method_name({70, 10}) -> 'file.qos'; +lookup_method_name({70, 11}) -> 'file.qos_ok'; +lookup_method_name({70, 20}) -> 'file.consume'; +lookup_method_name({70, 21}) -> 'file.consume_ok'; +lookup_method_name({70, 30}) -> 'file.cancel'; +lookup_method_name({70, 31}) -> 'file.cancel_ok'; +lookup_method_name({70, 40}) -> 'file.open'; +lookup_method_name({70, 41}) -> 'file.open_ok'; +lookup_method_name({70, 50}) -> 'file.stage'; +lookup_method_name({70, 60}) -> 'file.publish'; +lookup_method_name({70, 70}) -> 'file.return'; +lookup_method_name({70, 80}) -> 'file.deliver'; +lookup_method_name({70, 90}) -> 'file.ack'; +lookup_method_name({70, 100}) -> 'file.reject'; +lookup_method_name({80, 10}) -> 'stream.qos'; +lookup_method_name({80, 11}) -> 'stream.qos_ok'; +lookup_method_name({80, 20}) -> 'stream.consume'; +lookup_method_name({80, 21}) -> 'stream.consume_ok'; +lookup_method_name({80, 30}) -> 'stream.cancel'; +lookup_method_name({80, 31}) -> 'stream.cancel_ok'; +lookup_method_name({80, 40}) -> 'stream.publish'; +lookup_method_name({80, 50}) -> 'stream.return'; +lookup_method_name({80, 60}) -> 'stream.deliver'; +lookup_method_name({90, 10}) -> 'tx.select'; +lookup_method_name({90, 11}) -> 'tx.select_ok'; +lookup_method_name({90, 20}) -> 'tx.commit'; +lookup_method_name({90, 21}) -> 'tx.commit_ok'; +lookup_method_name({90, 30}) -> 'tx.rollback'; +lookup_method_name({90, 31}) -> 'tx.rollback_ok'; +lookup_method_name({100, 10}) -> 'dtx.select'; +lookup_method_name({100, 11}) -> 'dtx.select_ok'; +lookup_method_name({100, 20}) -> 'dtx.start'; +lookup_method_name({100, 21}) -> 'dtx.start_ok'; +lookup_method_name({110, 10}) -> 'tunnel.request'; +lookup_method_name({120, 10}) -> 'test.integer'; +lookup_method_name({120, 11}) -> 'test.integer_ok'; +lookup_method_name({120, 20}) -> 'test.string'; +lookup_method_name({120, 21}) -> 'test.string_ok'; +lookup_method_name({120, 30}) -> 'test.table'; +lookup_method_name({120, 31}) -> 'test.table_ok'; +lookup_method_name({120, 40}) -> 'test.content'; +lookup_method_name({120, 41}) -> 'test.content_ok'; +lookup_method_name({_ClassId, _MethodId} = Id) -> exit({unknown_method_id, Id}). +lookup_class_name(10) -> 'connection'; +lookup_class_name(20) -> 'channel'; +lookup_class_name(30) -> 'access'; +lookup_class_name(40) -> 'exchange'; +lookup_class_name(50) -> 'queue'; +lookup_class_name(60) -> 'basic'; +lookup_class_name(70) -> 'file'; +lookup_class_name(80) -> 'stream'; +lookup_class_name(90) -> 'tx'; +lookup_class_name(100) -> 'dtx'; +lookup_class_name(110) -> 'tunnel'; +lookup_class_name(120) -> 'test'; +lookup_class_name(ClassId) -> exit({unknown_class_id, ClassId}). 
+method_id('connection.start') -> {10, 10}; +method_id('connection.start_ok') -> {10, 11}; +method_id('connection.secure') -> {10, 20}; +method_id('connection.secure_ok') -> {10, 21}; +method_id('connection.tune') -> {10, 30}; +method_id('connection.tune_ok') -> {10, 31}; +method_id('connection.open') -> {10, 40}; +method_id('connection.open_ok') -> {10, 41}; +method_id('connection.redirect') -> {10, 50}; +method_id('connection.close') -> {10, 60}; +method_id('connection.close_ok') -> {10, 61}; +method_id('channel.open') -> {20, 10}; +method_id('channel.open_ok') -> {20, 11}; +method_id('channel.flow') -> {20, 20}; +method_id('channel.flow_ok') -> {20, 21}; +method_id('channel.alert') -> {20, 30}; +method_id('channel.close') -> {20, 40}; +method_id('channel.close_ok') -> {20, 41}; +method_id('access.request') -> {30, 10}; +method_id('access.request_ok') -> {30, 11}; +method_id('exchange.declare') -> {40, 10}; +method_id('exchange.declare_ok') -> {40, 11}; +method_id('exchange.delete') -> {40, 20}; +method_id('exchange.delete_ok') -> {40, 21}; +method_id('queue.declare') -> {50, 10}; +method_id('queue.declare_ok') -> {50, 11}; +method_id('queue.bind') -> {50, 20}; +method_id('queue.bind_ok') -> {50, 21}; +method_id('queue.purge') -> {50, 30}; +method_id('queue.purge_ok') -> {50, 31}; +method_id('queue.delete') -> {50, 40}; +method_id('queue.delete_ok') -> {50, 41}; +method_id('queue.unbind') -> {50, 50}; +method_id('queue.unbind_ok') -> {50, 51}; +method_id('basic.qos') -> {60, 10}; +method_id('basic.qos_ok') -> {60, 11}; +method_id('basic.consume') -> {60, 20}; +method_id('basic.consume_ok') -> {60, 21}; +method_id('basic.cancel') -> {60, 30}; +method_id('basic.cancel_ok') -> {60, 31}; +method_id('basic.publish') -> {60, 40}; +method_id('basic.return') -> {60, 50}; +method_id('basic.deliver') -> {60, 60}; +method_id('basic.get') -> {60, 70}; +method_id('basic.get_ok') -> {60, 71}; +method_id('basic.get_empty') -> {60, 72}; +method_id('basic.ack') -> {60, 80}; +method_id('basic.reject') -> {60, 90}; +method_id('basic.recover_async') -> {60, 100}; +method_id('basic.recover') -> {60, 110}; +method_id('basic.recover_ok') -> {60, 111}; +method_id('file.qos') -> {70, 10}; +method_id('file.qos_ok') -> {70, 11}; +method_id('file.consume') -> {70, 20}; +method_id('file.consume_ok') -> {70, 21}; +method_id('file.cancel') -> {70, 30}; +method_id('file.cancel_ok') -> {70, 31}; +method_id('file.open') -> {70, 40}; +method_id('file.open_ok') -> {70, 41}; +method_id('file.stage') -> {70, 50}; +method_id('file.publish') -> {70, 60}; +method_id('file.return') -> {70, 70}; +method_id('file.deliver') -> {70, 80}; +method_id('file.ack') -> {70, 90}; +method_id('file.reject') -> {70, 100}; +method_id('stream.qos') -> {80, 10}; +method_id('stream.qos_ok') -> {80, 11}; +method_id('stream.consume') -> {80, 20}; +method_id('stream.consume_ok') -> {80, 21}; +method_id('stream.cancel') -> {80, 30}; +method_id('stream.cancel_ok') -> {80, 31}; +method_id('stream.publish') -> {80, 40}; +method_id('stream.return') -> {80, 50}; +method_id('stream.deliver') -> {80, 60}; +method_id('tx.select') -> {90, 10}; +method_id('tx.select_ok') -> {90, 11}; +method_id('tx.commit') -> {90, 20}; +method_id('tx.commit_ok') -> {90, 21}; +method_id('tx.rollback') -> {90, 30}; +method_id('tx.rollback_ok') -> {90, 31}; +method_id('dtx.select') -> {100, 10}; +method_id('dtx.select_ok') -> {100, 11}; +method_id('dtx.start') -> {100, 20}; +method_id('dtx.start_ok') -> {100, 21}; +method_id('tunnel.request') -> {110, 10}; 
+method_id('test.integer') -> {120, 10}; +method_id('test.integer_ok') -> {120, 11}; +method_id('test.string') -> {120, 20}; +method_id('test.string_ok') -> {120, 21}; +method_id('test.table') -> {120, 30}; +method_id('test.table_ok') -> {120, 31}; +method_id('test.content') -> {120, 40}; +method_id('test.content_ok') -> {120, 41}; +method_id(Name) -> exit({unknown_method_name, Name}). +method_has_content('connection.start') -> false; +method_has_content('connection.start_ok') -> false; +method_has_content('connection.secure') -> false; +method_has_content('connection.secure_ok') -> false; +method_has_content('connection.tune') -> false; +method_has_content('connection.tune_ok') -> false; +method_has_content('connection.open') -> false; +method_has_content('connection.open_ok') -> false; +method_has_content('connection.redirect') -> false; +method_has_content('connection.close') -> false; +method_has_content('connection.close_ok') -> false; +method_has_content('channel.open') -> false; +method_has_content('channel.open_ok') -> false; +method_has_content('channel.flow') -> false; +method_has_content('channel.flow_ok') -> false; +method_has_content('channel.alert') -> false; +method_has_content('channel.close') -> false; +method_has_content('channel.close_ok') -> false; +method_has_content('access.request') -> false; +method_has_content('access.request_ok') -> false; +method_has_content('exchange.declare') -> false; +method_has_content('exchange.declare_ok') -> false; +method_has_content('exchange.delete') -> false; +method_has_content('exchange.delete_ok') -> false; +method_has_content('queue.declare') -> false; +method_has_content('queue.declare_ok') -> false; +method_has_content('queue.bind') -> false; +method_has_content('queue.bind_ok') -> false; +method_has_content('queue.purge') -> false; +method_has_content('queue.purge_ok') -> false; +method_has_content('queue.delete') -> false; +method_has_content('queue.delete_ok') -> false; +method_has_content('queue.unbind') -> false; +method_has_content('queue.unbind_ok') -> false; +method_has_content('basic.qos') -> false; +method_has_content('basic.qos_ok') -> false; +method_has_content('basic.consume') -> false; +method_has_content('basic.consume_ok') -> false; +method_has_content('basic.cancel') -> false; +method_has_content('basic.cancel_ok') -> false; +method_has_content('basic.publish') -> true; +method_has_content('basic.return') -> true; +method_has_content('basic.deliver') -> true; +method_has_content('basic.get') -> false; +method_has_content('basic.get_ok') -> true; +method_has_content('basic.get_empty') -> false; +method_has_content('basic.ack') -> false; +method_has_content('basic.reject') -> false; +method_has_content('basic.recover_async') -> false; +method_has_content('basic.recover') -> false; +method_has_content('basic.recover_ok') -> false; +method_has_content('file.qos') -> false; +method_has_content('file.qos_ok') -> false; +method_has_content('file.consume') -> false; +method_has_content('file.consume_ok') -> false; +method_has_content('file.cancel') -> false; +method_has_content('file.cancel_ok') -> false; +method_has_content('file.open') -> false; +method_has_content('file.open_ok') -> false; +method_has_content('file.stage') -> true; +method_has_content('file.publish') -> false; +method_has_content('file.return') -> true; +method_has_content('file.deliver') -> false; +method_has_content('file.ack') -> false; +method_has_content('file.reject') -> false; +method_has_content('stream.qos') -> false; 
+method_has_content('stream.qos_ok') -> false; +method_has_content('stream.consume') -> false; +method_has_content('stream.consume_ok') -> false; +method_has_content('stream.cancel') -> false; +method_has_content('stream.cancel_ok') -> false; +method_has_content('stream.publish') -> true; +method_has_content('stream.return') -> true; +method_has_content('stream.deliver') -> true; +method_has_content('tx.select') -> false; +method_has_content('tx.select_ok') -> false; +method_has_content('tx.commit') -> false; +method_has_content('tx.commit_ok') -> false; +method_has_content('tx.rollback') -> false; +method_has_content('tx.rollback_ok') -> false; +method_has_content('dtx.select') -> false; +method_has_content('dtx.select_ok') -> false; +method_has_content('dtx.start') -> false; +method_has_content('dtx.start_ok') -> false; +method_has_content('tunnel.request') -> true; +method_has_content('test.integer') -> false; +method_has_content('test.integer_ok') -> false; +method_has_content('test.string') -> false; +method_has_content('test.string_ok') -> false; +method_has_content('test.table') -> false; +method_has_content('test.table_ok') -> false; +method_has_content('test.content') -> true; +method_has_content('test.content_ok') -> true; +method_has_content(Name) -> exit({unknown_method_name, Name}). +is_method_synchronous(#'connection.start'{}) -> true; +is_method_synchronous(#'connection.start_ok'{}) -> false; +is_method_synchronous(#'connection.secure'{}) -> true; +is_method_synchronous(#'connection.secure_ok'{}) -> false; +is_method_synchronous(#'connection.tune'{}) -> true; +is_method_synchronous(#'connection.tune_ok'{}) -> false; +is_method_synchronous(#'connection.open'{}) -> true; +is_method_synchronous(#'connection.open_ok'{}) -> false; +is_method_synchronous(#'connection.redirect'{}) -> false; +is_method_synchronous(#'connection.close'{}) -> true; +is_method_synchronous(#'connection.close_ok'{}) -> false; +is_method_synchronous(#'channel.open'{}) -> true; +is_method_synchronous(#'channel.open_ok'{}) -> false; +is_method_synchronous(#'channel.flow'{}) -> true; +is_method_synchronous(#'channel.flow_ok'{}) -> false; +is_method_synchronous(#'channel.alert'{}) -> false; +is_method_synchronous(#'channel.close'{}) -> true; +is_method_synchronous(#'channel.close_ok'{}) -> false; +is_method_synchronous(#'access.request'{}) -> true; +is_method_synchronous(#'access.request_ok'{}) -> false; +is_method_synchronous(#'exchange.declare'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'exchange.declare_ok'{}) -> false; +is_method_synchronous(#'exchange.delete'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'exchange.delete_ok'{}) -> false; +is_method_synchronous(#'queue.declare'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'queue.declare_ok'{}) -> false; +is_method_synchronous(#'queue.bind'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'queue.bind_ok'{}) -> false; +is_method_synchronous(#'queue.purge'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'queue.purge_ok'{}) -> false; +is_method_synchronous(#'queue.delete'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'queue.delete_ok'{}) -> false; +is_method_synchronous(#'queue.unbind'{}) -> true; +is_method_synchronous(#'queue.unbind_ok'{}) -> false; +is_method_synchronous(#'basic.qos'{}) -> true; +is_method_synchronous(#'basic.qos_ok'{}) -> false; +is_method_synchronous(#'basic.consume'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'basic.consume_ok'{}) -> false; 
+is_method_synchronous(#'basic.cancel'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'basic.cancel_ok'{}) -> false; +is_method_synchronous(#'basic.publish'{}) -> false; +is_method_synchronous(#'basic.return'{}) -> false; +is_method_synchronous(#'basic.deliver'{}) -> false; +is_method_synchronous(#'basic.get'{}) -> true; +is_method_synchronous(#'basic.get_ok'{}) -> false; +is_method_synchronous(#'basic.get_empty'{}) -> false; +is_method_synchronous(#'basic.ack'{}) -> false; +is_method_synchronous(#'basic.reject'{}) -> false; +is_method_synchronous(#'basic.recover_async'{}) -> false; +is_method_synchronous(#'basic.recover'{}) -> true; +is_method_synchronous(#'basic.recover_ok'{}) -> false; +is_method_synchronous(#'file.qos'{}) -> true; +is_method_synchronous(#'file.qos_ok'{}) -> false; +is_method_synchronous(#'file.consume'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'file.consume_ok'{}) -> false; +is_method_synchronous(#'file.cancel'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'file.cancel_ok'{}) -> false; +is_method_synchronous(#'file.open'{}) -> true; +is_method_synchronous(#'file.open_ok'{}) -> false; +is_method_synchronous(#'file.stage'{}) -> false; +is_method_synchronous(#'file.publish'{}) -> false; +is_method_synchronous(#'file.return'{}) -> false; +is_method_synchronous(#'file.deliver'{}) -> false; +is_method_synchronous(#'file.ack'{}) -> false; +is_method_synchronous(#'file.reject'{}) -> false; +is_method_synchronous(#'stream.qos'{}) -> true; +is_method_synchronous(#'stream.qos_ok'{}) -> false; +is_method_synchronous(#'stream.consume'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'stream.consume_ok'{}) -> false; +is_method_synchronous(#'stream.cancel'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'stream.cancel_ok'{}) -> false; +is_method_synchronous(#'stream.publish'{}) -> false; +is_method_synchronous(#'stream.return'{}) -> false; +is_method_synchronous(#'stream.deliver'{}) -> false; +is_method_synchronous(#'tx.select'{}) -> true; +is_method_synchronous(#'tx.select_ok'{}) -> false; +is_method_synchronous(#'tx.commit'{}) -> true; +is_method_synchronous(#'tx.commit_ok'{}) -> false; +is_method_synchronous(#'tx.rollback'{}) -> true; +is_method_synchronous(#'tx.rollback_ok'{}) -> false; +is_method_synchronous(#'dtx.select'{}) -> true; +is_method_synchronous(#'dtx.select_ok'{}) -> false; +is_method_synchronous(#'dtx.start'{}) -> true; +is_method_synchronous(#'dtx.start_ok'{}) -> false; +is_method_synchronous(#'tunnel.request'{}) -> false; +is_method_synchronous(#'test.integer'{}) -> true; +is_method_synchronous(#'test.integer_ok'{}) -> false; +is_method_synchronous(#'test.string'{}) -> true; +is_method_synchronous(#'test.string_ok'{}) -> false; +is_method_synchronous(#'test.table'{}) -> true; +is_method_synchronous(#'test.table_ok'{}) -> false; +is_method_synchronous(#'test.content'{}) -> true; +is_method_synchronous(#'test.content_ok'{}) -> false; +is_method_synchronous(Name) -> exit({unknown_method_name, Name}). 
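A minimal sketch of how a caller might combine the generated lookup tables above when sending a method (assuming the module being patched here is rabbit_framing_amqp_0_8; handle_outbound/1 and the send_*/await_* helpers are hypothetical, not part of the generated code):

%% Illustrative only. Shows the intended use of method_id/1,
%% method_has_content/1 and is_method_synchronous/1 together;
%% the helper functions called below do not exist in this module.
handle_outbound(Method) ->
    Name = element(1, Method),
    {ClassId, MethodId} = rabbit_framing_amqp_0_8:method_id(Name),
    ok = send_method_frame(ClassId, MethodId,
                           rabbit_framing_amqp_0_8:encode_method_fields(Method)),
    case rabbit_framing_amqp_0_8:method_has_content(Name) of
        true  -> ok = send_content_frames(Method);
        false -> ok
    end,
    case rabbit_framing_amqp_0_8:is_method_synchronous(Method) of
        true  -> await_reply();
        false -> ok
    end.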
+method_record('connection.start') -> #'connection.start'{}; +method_record('connection.start_ok') -> #'connection.start_ok'{}; +method_record('connection.secure') -> #'connection.secure'{}; +method_record('connection.secure_ok') -> #'connection.secure_ok'{}; +method_record('connection.tune') -> #'connection.tune'{}; +method_record('connection.tune_ok') -> #'connection.tune_ok'{}; +method_record('connection.open') -> #'connection.open'{}; +method_record('connection.open_ok') -> #'connection.open_ok'{}; +method_record('connection.redirect') -> #'connection.redirect'{}; +method_record('connection.close') -> #'connection.close'{}; +method_record('connection.close_ok') -> #'connection.close_ok'{}; +method_record('channel.open') -> #'channel.open'{}; +method_record('channel.open_ok') -> #'channel.open_ok'{}; +method_record('channel.flow') -> #'channel.flow'{}; +method_record('channel.flow_ok') -> #'channel.flow_ok'{}; +method_record('channel.alert') -> #'channel.alert'{}; +method_record('channel.close') -> #'channel.close'{}; +method_record('channel.close_ok') -> #'channel.close_ok'{}; +method_record('access.request') -> #'access.request'{}; +method_record('access.request_ok') -> #'access.request_ok'{}; +method_record('exchange.declare') -> #'exchange.declare'{}; +method_record('exchange.declare_ok') -> #'exchange.declare_ok'{}; +method_record('exchange.delete') -> #'exchange.delete'{}; +method_record('exchange.delete_ok') -> #'exchange.delete_ok'{}; +method_record('queue.declare') -> #'queue.declare'{}; +method_record('queue.declare_ok') -> #'queue.declare_ok'{}; +method_record('queue.bind') -> #'queue.bind'{}; +method_record('queue.bind_ok') -> #'queue.bind_ok'{}; +method_record('queue.purge') -> #'queue.purge'{}; +method_record('queue.purge_ok') -> #'queue.purge_ok'{}; +method_record('queue.delete') -> #'queue.delete'{}; +method_record('queue.delete_ok') -> #'queue.delete_ok'{}; +method_record('queue.unbind') -> #'queue.unbind'{}; +method_record('queue.unbind_ok') -> #'queue.unbind_ok'{}; +method_record('basic.qos') -> #'basic.qos'{}; +method_record('basic.qos_ok') -> #'basic.qos_ok'{}; +method_record('basic.consume') -> #'basic.consume'{}; +method_record('basic.consume_ok') -> #'basic.consume_ok'{}; +method_record('basic.cancel') -> #'basic.cancel'{}; +method_record('basic.cancel_ok') -> #'basic.cancel_ok'{}; +method_record('basic.publish') -> #'basic.publish'{}; +method_record('basic.return') -> #'basic.return'{}; +method_record('basic.deliver') -> #'basic.deliver'{}; +method_record('basic.get') -> #'basic.get'{}; +method_record('basic.get_ok') -> #'basic.get_ok'{}; +method_record('basic.get_empty') -> #'basic.get_empty'{}; +method_record('basic.ack') -> #'basic.ack'{}; +method_record('basic.reject') -> #'basic.reject'{}; +method_record('basic.recover_async') -> #'basic.recover_async'{}; +method_record('basic.recover') -> #'basic.recover'{}; +method_record('basic.recover_ok') -> #'basic.recover_ok'{}; +method_record('file.qos') -> #'file.qos'{}; +method_record('file.qos_ok') -> #'file.qos_ok'{}; +method_record('file.consume') -> #'file.consume'{}; +method_record('file.consume_ok') -> #'file.consume_ok'{}; +method_record('file.cancel') -> #'file.cancel'{}; +method_record('file.cancel_ok') -> #'file.cancel_ok'{}; +method_record('file.open') -> #'file.open'{}; +method_record('file.open_ok') -> #'file.open_ok'{}; +method_record('file.stage') -> #'file.stage'{}; +method_record('file.publish') -> #'file.publish'{}; +method_record('file.return') -> #'file.return'{}; +method_record('file.deliver') 
-> #'file.deliver'{}; +method_record('file.ack') -> #'file.ack'{}; +method_record('file.reject') -> #'file.reject'{}; +method_record('stream.qos') -> #'stream.qos'{}; +method_record('stream.qos_ok') -> #'stream.qos_ok'{}; +method_record('stream.consume') -> #'stream.consume'{}; +method_record('stream.consume_ok') -> #'stream.consume_ok'{}; +method_record('stream.cancel') -> #'stream.cancel'{}; +method_record('stream.cancel_ok') -> #'stream.cancel_ok'{}; +method_record('stream.publish') -> #'stream.publish'{}; +method_record('stream.return') -> #'stream.return'{}; +method_record('stream.deliver') -> #'stream.deliver'{}; +method_record('tx.select') -> #'tx.select'{}; +method_record('tx.select_ok') -> #'tx.select_ok'{}; +method_record('tx.commit') -> #'tx.commit'{}; +method_record('tx.commit_ok') -> #'tx.commit_ok'{}; +method_record('tx.rollback') -> #'tx.rollback'{}; +method_record('tx.rollback_ok') -> #'tx.rollback_ok'{}; +method_record('dtx.select') -> #'dtx.select'{}; +method_record('dtx.select_ok') -> #'dtx.select_ok'{}; +method_record('dtx.start') -> #'dtx.start'{}; +method_record('dtx.start_ok') -> #'dtx.start_ok'{}; +method_record('tunnel.request') -> #'tunnel.request'{}; +method_record('test.integer') -> #'test.integer'{}; +method_record('test.integer_ok') -> #'test.integer_ok'{}; +method_record('test.string') -> #'test.string'{}; +method_record('test.string_ok') -> #'test.string_ok'{}; +method_record('test.table') -> #'test.table'{}; +method_record('test.table_ok') -> #'test.table_ok'{}; +method_record('test.content') -> #'test.content'{}; +method_record('test.content_ok') -> #'test.content_ok'{}; +method_record(Name) -> exit({unknown_method_name, Name}). +method_fieldnames('connection.start') -> [version_major, version_minor, server_properties, mechanisms, locales]; +method_fieldnames('connection.start_ok') -> [client_properties, mechanism, response, locale]; +method_fieldnames('connection.secure') -> [challenge]; +method_fieldnames('connection.secure_ok') -> [response]; +method_fieldnames('connection.tune') -> [channel_max, frame_max, heartbeat]; +method_fieldnames('connection.tune_ok') -> [channel_max, frame_max, heartbeat]; +method_fieldnames('connection.open') -> [virtual_host, capabilities, insist]; +method_fieldnames('connection.open_ok') -> [known_hosts]; +method_fieldnames('connection.redirect') -> [host, known_hosts]; +method_fieldnames('connection.close') -> [reply_code, reply_text, class_id, method_id]; +method_fieldnames('connection.close_ok') -> []; +method_fieldnames('channel.open') -> [out_of_band]; +method_fieldnames('channel.open_ok') -> []; +method_fieldnames('channel.flow') -> [active]; +method_fieldnames('channel.flow_ok') -> [active]; +method_fieldnames('channel.alert') -> [reply_code, reply_text, details]; +method_fieldnames('channel.close') -> [reply_code, reply_text, class_id, method_id]; +method_fieldnames('channel.close_ok') -> []; +method_fieldnames('access.request') -> [realm, exclusive, passive, active, write, read]; +method_fieldnames('access.request_ok') -> [ticket]; +method_fieldnames('exchange.declare') -> [ticket, exchange, type, passive, durable, auto_delete, internal, nowait, arguments]; +method_fieldnames('exchange.declare_ok') -> []; +method_fieldnames('exchange.delete') -> [ticket, exchange, if_unused, nowait]; +method_fieldnames('exchange.delete_ok') -> []; +method_fieldnames('queue.declare') -> [ticket, queue, passive, durable, exclusive, auto_delete, nowait, arguments]; +method_fieldnames('queue.declare_ok') -> [queue, message_count, 
consumer_count]; +method_fieldnames('queue.bind') -> [ticket, queue, exchange, routing_key, nowait, arguments]; +method_fieldnames('queue.bind_ok') -> []; +method_fieldnames('queue.purge') -> [ticket, queue, nowait]; +method_fieldnames('queue.purge_ok') -> [message_count]; +method_fieldnames('queue.delete') -> [ticket, queue, if_unused, if_empty, nowait]; +method_fieldnames('queue.delete_ok') -> [message_count]; +method_fieldnames('queue.unbind') -> [ticket, queue, exchange, routing_key, arguments]; +method_fieldnames('queue.unbind_ok') -> []; +method_fieldnames('basic.qos') -> [prefetch_size, prefetch_count, global]; +method_fieldnames('basic.qos_ok') -> []; +method_fieldnames('basic.consume') -> [ticket, queue, consumer_tag, no_local, no_ack, exclusive, nowait]; +method_fieldnames('basic.consume_ok') -> [consumer_tag]; +method_fieldnames('basic.cancel') -> [consumer_tag, nowait]; +method_fieldnames('basic.cancel_ok') -> [consumer_tag]; +method_fieldnames('basic.publish') -> [ticket, exchange, routing_key, mandatory, immediate]; +method_fieldnames('basic.return') -> [reply_code, reply_text, exchange, routing_key]; +method_fieldnames('basic.deliver') -> [consumer_tag, delivery_tag, redelivered, exchange, routing_key]; +method_fieldnames('basic.get') -> [ticket, queue, no_ack]; +method_fieldnames('basic.get_ok') -> [delivery_tag, redelivered, exchange, routing_key, message_count]; +method_fieldnames('basic.get_empty') -> [cluster_id]; +method_fieldnames('basic.ack') -> [delivery_tag, multiple]; +method_fieldnames('basic.reject') -> [delivery_tag, requeue]; +method_fieldnames('basic.recover_async') -> [requeue]; +method_fieldnames('basic.recover') -> [requeue]; +method_fieldnames('basic.recover_ok') -> []; +method_fieldnames('file.qos') -> [prefetch_size, prefetch_count, global]; +method_fieldnames('file.qos_ok') -> []; +method_fieldnames('file.consume') -> [ticket, queue, consumer_tag, no_local, no_ack, exclusive, nowait]; +method_fieldnames('file.consume_ok') -> [consumer_tag]; +method_fieldnames('file.cancel') -> [consumer_tag, nowait]; +method_fieldnames('file.cancel_ok') -> [consumer_tag]; +method_fieldnames('file.open') -> [identifier, content_size]; +method_fieldnames('file.open_ok') -> [staged_size]; +method_fieldnames('file.stage') -> []; +method_fieldnames('file.publish') -> [ticket, exchange, routing_key, mandatory, immediate, identifier]; +method_fieldnames('file.return') -> [reply_code, reply_text, exchange, routing_key]; +method_fieldnames('file.deliver') -> [consumer_tag, delivery_tag, redelivered, exchange, routing_key, identifier]; +method_fieldnames('file.ack') -> [delivery_tag, multiple]; +method_fieldnames('file.reject') -> [delivery_tag, requeue]; +method_fieldnames('stream.qos') -> [prefetch_size, prefetch_count, consume_rate, global]; +method_fieldnames('stream.qos_ok') -> []; +method_fieldnames('stream.consume') -> [ticket, queue, consumer_tag, no_local, exclusive, nowait]; +method_fieldnames('stream.consume_ok') -> [consumer_tag]; +method_fieldnames('stream.cancel') -> [consumer_tag, nowait]; +method_fieldnames('stream.cancel_ok') -> [consumer_tag]; +method_fieldnames('stream.publish') -> [ticket, exchange, routing_key, mandatory, immediate]; +method_fieldnames('stream.return') -> [reply_code, reply_text, exchange, routing_key]; +method_fieldnames('stream.deliver') -> [consumer_tag, delivery_tag, exchange, queue]; +method_fieldnames('tx.select') -> []; +method_fieldnames('tx.select_ok') -> []; +method_fieldnames('tx.commit') -> []; +method_fieldnames('tx.commit_ok') 
-> []; +method_fieldnames('tx.rollback') -> []; +method_fieldnames('tx.rollback_ok') -> []; +method_fieldnames('dtx.select') -> []; +method_fieldnames('dtx.select_ok') -> []; +method_fieldnames('dtx.start') -> [dtx_identifier]; +method_fieldnames('dtx.start_ok') -> []; +method_fieldnames('tunnel.request') -> [meta_data]; +method_fieldnames('test.integer') -> [integer_1, integer_2, integer_3, integer_4, operation]; +method_fieldnames('test.integer_ok') -> [result]; +method_fieldnames('test.string') -> [string_1, string_2, operation]; +method_fieldnames('test.string_ok') -> [result]; +method_fieldnames('test.table') -> [table, integer_op, string_op]; +method_fieldnames('test.table_ok') -> [integer_result, string_result]; +method_fieldnames('test.content') -> []; +method_fieldnames('test.content_ok') -> [content_checksum]; +method_fieldnames(Name) -> exit({unknown_method_name, Name}). +decode_method_fields('connection.start', <>) -> + F2 = rabbit_binary_parser:parse_table(F2Tab), + #'connection.start'{version_major = F0, version_minor = F1, server_properties = F2, mechanisms = F3, locales = F4}; +decode_method_fields('connection.start_ok', <>) -> + F0 = rabbit_binary_parser:parse_table(F0Tab), + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F3), + #'connection.start_ok'{client_properties = F0, mechanism = F1, response = F2, locale = F3}; +decode_method_fields('connection.secure', <>) -> + #'connection.secure'{challenge = F0}; +decode_method_fields('connection.secure_ok', <>) -> + #'connection.secure_ok'{response = F0}; +decode_method_fields('connection.tune', <>) -> + #'connection.tune'{channel_max = F0, frame_max = F1, heartbeat = F2}; +decode_method_fields('connection.tune_ok', <>) -> + #'connection.tune_ok'{channel_max = F0, frame_max = F1, heartbeat = F2}; +decode_method_fields('connection.open', <>) -> + rabbit_binary_parser:assert_utf8(F0), + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + #'connection.open'{virtual_host = F0, capabilities = F1, insist = F2}; +decode_method_fields('connection.open_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'connection.open_ok'{known_hosts = F0}; +decode_method_fields('connection.redirect', <>) -> + rabbit_binary_parser:assert_utf8(F0), + rabbit_binary_parser:assert_utf8(F1), + #'connection.redirect'{host = F0, known_hosts = F1}; +decode_method_fields('connection.close', <>) -> + rabbit_binary_parser:assert_utf8(F1), + #'connection.close'{reply_code = F0, reply_text = F1, class_id = F2, method_id = F3}; +decode_method_fields('connection.close_ok', <<>>) -> + #'connection.close_ok'{}; +decode_method_fields('channel.open', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'channel.open'{out_of_band = F0}; +decode_method_fields('channel.open_ok', <<>>) -> + #'channel.open_ok'{}; +decode_method_fields('channel.flow', <>) -> + F0 = ((F0Bits band 1) /= 0), + #'channel.flow'{active = F0}; +decode_method_fields('channel.flow_ok', <>) -> + F0 = ((F0Bits band 1) /= 0), + #'channel.flow_ok'{active = F0}; +decode_method_fields('channel.alert', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = rabbit_binary_parser:parse_table(F2Tab), + #'channel.alert'{reply_code = F0, reply_text = F1, details = F2}; +decode_method_fields('channel.close', <>) -> + rabbit_binary_parser:assert_utf8(F1), + #'channel.close'{reply_code = F0, reply_text = F1, class_id = F2, method_id = F3}; +decode_method_fields('channel.close_ok', <<>>) -> + #'channel.close_ok'{}; +decode_method_fields('access.request', <>) -> + 
rabbit_binary_parser:assert_utf8(F0), + F1 = ((F1Bits band 1) /= 0), + F2 = ((F1Bits band 2) /= 0), + F3 = ((F1Bits band 4) /= 0), + F4 = ((F1Bits band 8) /= 0), + F5 = ((F1Bits band 16) /= 0), + #'access.request'{realm = F0, exclusive = F1, passive = F2, active = F3, write = F4, read = F5}; +decode_method_fields('access.request_ok', <>) -> + #'access.request_ok'{ticket = F0}; +decode_method_fields('exchange.declare', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + F5 = ((F3Bits band 4) /= 0), + F6 = ((F3Bits band 8) /= 0), + F7 = ((F3Bits band 16) /= 0), + F8 = rabbit_binary_parser:parse_table(F8Tab), + #'exchange.declare'{ticket = F0, exchange = F1, type = F2, passive = F3, durable = F4, auto_delete = F5, internal = F6, nowait = F7, arguments = F8}; +decode_method_fields('exchange.declare_ok', <<>>) -> + #'exchange.declare_ok'{}; +decode_method_fields('exchange.delete', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + F3 = ((F2Bits band 2) /= 0), + #'exchange.delete'{ticket = F0, exchange = F1, if_unused = F2, nowait = F3}; +decode_method_fields('exchange.delete_ok', <<>>) -> + #'exchange.delete_ok'{}; +decode_method_fields('queue.declare', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + F3 = ((F2Bits band 2) /= 0), + F4 = ((F2Bits band 4) /= 0), + F5 = ((F2Bits band 8) /= 0), + F6 = ((F2Bits band 16) /= 0), + F7 = rabbit_binary_parser:parse_table(F7Tab), + #'queue.declare'{ticket = F0, queue = F1, passive = F2, durable = F3, exclusive = F4, auto_delete = F5, nowait = F6, arguments = F7}; +decode_method_fields('queue.declare_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'queue.declare_ok'{queue = F0, message_count = F1, consumer_count = F2}; +decode_method_fields('queue.bind', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + rabbit_binary_parser:assert_utf8(F3), + F4 = ((F4Bits band 1) /= 0), + F5 = rabbit_binary_parser:parse_table(F5Tab), + #'queue.bind'{ticket = F0, queue = F1, exchange = F2, routing_key = F3, nowait = F4, arguments = F5}; +decode_method_fields('queue.bind_ok', <<>>) -> + #'queue.bind_ok'{}; +decode_method_fields('queue.purge', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + #'queue.purge'{ticket = F0, queue = F1, nowait = F2}; +decode_method_fields('queue.purge_ok', <>) -> + #'queue.purge_ok'{message_count = F0}; +decode_method_fields('queue.delete', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + F3 = ((F2Bits band 2) /= 0), + F4 = ((F2Bits band 4) /= 0), + #'queue.delete'{ticket = F0, queue = F1, if_unused = F2, if_empty = F3, nowait = F4}; +decode_method_fields('queue.delete_ok', <>) -> + #'queue.delete_ok'{message_count = F0}; +decode_method_fields('queue.unbind', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + rabbit_binary_parser:assert_utf8(F3), + F4 = rabbit_binary_parser:parse_table(F4Tab), + #'queue.unbind'{ticket = F0, queue = F1, exchange = F2, routing_key = F3, arguments = F4}; +decode_method_fields('queue.unbind_ok', <<>>) -> + #'queue.unbind_ok'{}; +decode_method_fields('basic.qos', <>) -> + F2 = ((F2Bits band 1) /= 0), + #'basic.qos'{prefetch_size = F0, prefetch_count = F1, global = F2}; +decode_method_fields('basic.qos_ok', <<>>) -> + #'basic.qos_ok'{}; +decode_method_fields('basic.consume', <>) -> + rabbit_binary_parser:assert_utf8(F1), 
+ rabbit_binary_parser:assert_utf8(F2), + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + F5 = ((F3Bits band 4) /= 0), + F6 = ((F3Bits band 8) /= 0), + #'basic.consume'{ticket = F0, queue = F1, consumer_tag = F2, no_local = F3, no_ack = F4, exclusive = F5, nowait = F6}; +decode_method_fields('basic.consume_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'basic.consume_ok'{consumer_tag = F0}; +decode_method_fields('basic.cancel', <>) -> + rabbit_binary_parser:assert_utf8(F0), + F1 = ((F1Bits band 1) /= 0), + #'basic.cancel'{consumer_tag = F0, nowait = F1}; +decode_method_fields('basic.cancel_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'basic.cancel_ok'{consumer_tag = F0}; +decode_method_fields('basic.publish', <>) -> + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + #'basic.publish'{ticket = F0, exchange = F1, routing_key = F2, mandatory = F3, immediate = F4}; +decode_method_fields('basic.return', <>) -> + #'basic.return'{reply_code = F0, reply_text = F1, exchange = F2, routing_key = F3}; +decode_method_fields('basic.deliver', <>) -> + F2 = ((F2Bits band 1) /= 0), + #'basic.deliver'{consumer_tag = F0, delivery_tag = F1, redelivered = F2, exchange = F3, routing_key = F4}; +decode_method_fields('basic.get', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + #'basic.get'{ticket = F0, queue = F1, no_ack = F2}; +decode_method_fields('basic.get_ok', <>) -> + F1 = ((F1Bits band 1) /= 0), + #'basic.get_ok'{delivery_tag = F0, redelivered = F1, exchange = F2, routing_key = F3, message_count = F4}; +decode_method_fields('basic.get_empty', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'basic.get_empty'{cluster_id = F0}; +decode_method_fields('basic.ack', <>) -> + F1 = ((F1Bits band 1) /= 0), + #'basic.ack'{delivery_tag = F0, multiple = F1}; +decode_method_fields('basic.reject', <>) -> + F1 = ((F1Bits band 1) /= 0), + #'basic.reject'{delivery_tag = F0, requeue = F1}; +decode_method_fields('basic.recover_async', <>) -> + F0 = ((F0Bits band 1) /= 0), + #'basic.recover_async'{requeue = F0}; +decode_method_fields('basic.recover', <>) -> + F0 = ((F0Bits band 1) /= 0), + #'basic.recover'{requeue = F0}; +decode_method_fields('basic.recover_ok', <<>>) -> + #'basic.recover_ok'{}; +decode_method_fields('file.qos', <>) -> + F2 = ((F2Bits band 1) /= 0), + #'file.qos'{prefetch_size = F0, prefetch_count = F1, global = F2}; +decode_method_fields('file.qos_ok', <<>>) -> + #'file.qos_ok'{}; +decode_method_fields('file.consume', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + F5 = ((F3Bits band 4) /= 0), + F6 = ((F3Bits band 8) /= 0), + #'file.consume'{ticket = F0, queue = F1, consumer_tag = F2, no_local = F3, no_ack = F4, exclusive = F5, nowait = F6}; +decode_method_fields('file.consume_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'file.consume_ok'{consumer_tag = F0}; +decode_method_fields('file.cancel', <>) -> + rabbit_binary_parser:assert_utf8(F0), + F1 = ((F1Bits band 1) /= 0), + #'file.cancel'{consumer_tag = F0, nowait = F1}; +decode_method_fields('file.cancel_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'file.cancel_ok'{consumer_tag = F0}; +decode_method_fields('file.open', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'file.open'{identifier = F0, content_size = F1}; +decode_method_fields('file.open_ok', <>) -> + #'file.open_ok'{staged_size = F0}; +decode_method_fields('file.stage', <<>>) -> + #'file.stage'{}; 
+decode_method_fields('file.publish', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + rabbit_binary_parser:assert_utf8(F5), + #'file.publish'{ticket = F0, exchange = F1, routing_key = F2, mandatory = F3, immediate = F4, identifier = F5}; +decode_method_fields('file.return', <>) -> + #'file.return'{reply_code = F0, reply_text = F1, exchange = F2, routing_key = F3}; +decode_method_fields('file.deliver', <>) -> + rabbit_binary_parser:assert_utf8(F0), + F2 = ((F2Bits band 1) /= 0), + rabbit_binary_parser:assert_utf8(F3), + rabbit_binary_parser:assert_utf8(F4), + rabbit_binary_parser:assert_utf8(F5), + #'file.deliver'{consumer_tag = F0, delivery_tag = F1, redelivered = F2, exchange = F3, routing_key = F4, identifier = F5}; +decode_method_fields('file.ack', <>) -> + F1 = ((F1Bits band 1) /= 0), + #'file.ack'{delivery_tag = F0, multiple = F1}; +decode_method_fields('file.reject', <>) -> + F1 = ((F1Bits band 1) /= 0), + #'file.reject'{delivery_tag = F0, requeue = F1}; +decode_method_fields('stream.qos', <>) -> + F3 = ((F3Bits band 1) /= 0), + #'stream.qos'{prefetch_size = F0, prefetch_count = F1, consume_rate = F2, global = F3}; +decode_method_fields('stream.qos_ok', <<>>) -> + #'stream.qos_ok'{}; +decode_method_fields('stream.consume', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + F5 = ((F3Bits band 4) /= 0), + #'stream.consume'{ticket = F0, queue = F1, consumer_tag = F2, no_local = F3, exclusive = F4, nowait = F5}; +decode_method_fields('stream.consume_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'stream.consume_ok'{consumer_tag = F0}; +decode_method_fields('stream.cancel', <>) -> + rabbit_binary_parser:assert_utf8(F0), + F1 = ((F1Bits band 1) /= 0), + #'stream.cancel'{consumer_tag = F0, nowait = F1}; +decode_method_fields('stream.cancel_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'stream.cancel_ok'{consumer_tag = F0}; +decode_method_fields('stream.publish', <>) -> + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + #'stream.publish'{ticket = F0, exchange = F1, routing_key = F2, mandatory = F3, immediate = F4}; +decode_method_fields('stream.return', <>) -> + #'stream.return'{reply_code = F0, reply_text = F1, exchange = F2, routing_key = F3}; +decode_method_fields('stream.deliver', <>) -> + #'stream.deliver'{consumer_tag = F0, delivery_tag = F1, exchange = F2, queue = F3}; +decode_method_fields('tx.select', <<>>) -> + #'tx.select'{}; +decode_method_fields('tx.select_ok', <<>>) -> + #'tx.select_ok'{}; +decode_method_fields('tx.commit', <<>>) -> + #'tx.commit'{}; +decode_method_fields('tx.commit_ok', <<>>) -> + #'tx.commit_ok'{}; +decode_method_fields('tx.rollback', <<>>) -> + #'tx.rollback'{}; +decode_method_fields('tx.rollback_ok', <<>>) -> + #'tx.rollback_ok'{}; +decode_method_fields('dtx.select', <<>>) -> + #'dtx.select'{}; +decode_method_fields('dtx.select_ok', <<>>) -> + #'dtx.select_ok'{}; +decode_method_fields('dtx.start', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'dtx.start'{dtx_identifier = F0}; +decode_method_fields('dtx.start_ok', <<>>) -> + #'dtx.start_ok'{}; +decode_method_fields('tunnel.request', <>) -> + F0 = rabbit_binary_parser:parse_table(F0Tab), + #'tunnel.request'{meta_data = F0}; +decode_method_fields('test.integer', <>) -> + #'test.integer'{integer_1 = F0, integer_2 = F1, integer_3 = F2, integer_4 = F3, operation = F4}; 
+decode_method_fields('test.integer_ok', <>) -> + #'test.integer_ok'{result = F0}; +decode_method_fields('test.string', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'test.string'{string_1 = F0, string_2 = F1, operation = F2}; +decode_method_fields('test.string_ok', <>) -> + #'test.string_ok'{result = F0}; +decode_method_fields('test.table', <>) -> + F0 = rabbit_binary_parser:parse_table(F0Tab), + #'test.table'{table = F0, integer_op = F1, string_op = F2}; +decode_method_fields('test.table_ok', <>) -> + #'test.table_ok'{integer_result = F0, string_result = F1}; +decode_method_fields('test.content', <<>>) -> + #'test.content'{}; +decode_method_fields('test.content_ok', <>) -> + #'test.content_ok'{content_checksum = F0}; +decode_method_fields(Name, BinaryFields) -> + rabbit_misc:frame_error(Name, BinaryFields). +decode_properties(10, <<>>) -> + #'P_connection'{}; +decode_properties(20, <<>>) -> + #'P_channel'{}; +decode_properties(30, <<>>) -> + #'P_access'{}; +decode_properties(40, <<>>) -> + #'P_exchange'{}; +decode_properties(50, <<>>) -> + #'P_queue'{}; +decode_properties(60, <>) -> + {F0, R1} = if P0 =:= 0 -> {undefined, R0}; true -> ?SHORTSTR_VAL(R0, L0, V0, X0) end, + {F1, R2} = if P1 =:= 0 -> {undefined, R1}; true -> ?SHORTSTR_VAL(R1, L1, V1, X1) end, + {F2, R3} = if P2 =:= 0 -> {undefined, R2}; true -> ?TABLE_VAL(R2, L2, V2, X2) end, + {F3, R4} = if P3 =:= 0 -> {undefined, R3}; true -> ?OCTET_VAL(R3, L3, V3, X3) end, + {F4, R5} = if P4 =:= 0 -> {undefined, R4}; true -> ?OCTET_VAL(R4, L4, V4, X4) end, + {F5, R6} = if P5 =:= 0 -> {undefined, R5}; true -> ?SHORTSTR_VAL(R5, L5, V5, X5) end, + {F6, R7} = if P6 =:= 0 -> {undefined, R6}; true -> ?SHORTSTR_VAL(R6, L6, V6, X6) end, + {F7, R8} = if P7 =:= 0 -> {undefined, R7}; true -> ?SHORTSTR_VAL(R7, L7, V7, X7) end, + {F8, R9} = if P8 =:= 0 -> {undefined, R8}; true -> ?SHORTSTR_VAL(R8, L8, V8, X8) end, + {F9, R10} = if P9 =:= 0 -> {undefined, R9}; true -> ?TIMESTAMP_VAL(R9, L9, V9, X9) end, + {F10, R11} = if P10 =:= 0 -> {undefined, R10}; true -> ?SHORTSTR_VAL(R10, L10, V10, X10) end, + {F11, R12} = if P11 =:= 0 -> {undefined, R11}; true -> ?SHORTSTR_VAL(R11, L11, V11, X11) end, + {F12, R13} = if P12 =:= 0 -> {undefined, R12}; true -> ?SHORTSTR_VAL(R12, L12, V12, X12) end, + {F13, R14} = if P13 =:= 0 -> {undefined, R13}; true -> ?SHORTSTR_VAL(R13, L13, V13, X13) end, + <<>> = R14, + #'P_basic'{content_type = F0, content_encoding = F1, headers = F2, delivery_mode = F3, priority = F4, correlation_id = F5, reply_to = F6, expiration = F7, message_id = F8, timestamp = F9, type = F10, user_id = F11, app_id = F12, cluster_id = F13}; +decode_properties(70, <>) -> + {F0, R1} = if P0 =:= 0 -> {undefined, R0}; true -> ?SHORTSTR_VAL(R0, L0, V0, X0) end, + {F1, R2} = if P1 =:= 0 -> {undefined, R1}; true -> ?SHORTSTR_VAL(R1, L1, V1, X1) end, + {F2, R3} = if P2 =:= 0 -> {undefined, R2}; true -> ?TABLE_VAL(R2, L2, V2, X2) end, + {F3, R4} = if P3 =:= 0 -> {undefined, R3}; true -> ?OCTET_VAL(R3, L3, V3, X3) end, + {F4, R5} = if P4 =:= 0 -> {undefined, R4}; true -> ?SHORTSTR_VAL(R4, L4, V4, X4) end, + {F5, R6} = if P5 =:= 0 -> {undefined, R5}; true -> ?SHORTSTR_VAL(R5, L5, V5, X5) end, + {F6, R7} = if P6 =:= 0 -> {undefined, R6}; true -> ?SHORTSTR_VAL(R6, L6, V6, X6) end, + {F7, R8} = if P7 =:= 0 -> {undefined, R7}; true -> ?TIMESTAMP_VAL(R7, L7, V7, X7) end, + {F8, R9} = if P8 =:= 0 -> {undefined, R8}; true -> ?SHORTSTR_VAL(R8, L8, V8, X8) end, + <<>> = R9, + #'P_file'{content_type = F0, content_encoding = F1, headers = F2, priority = F3, reply_to = F4, 
message_id = F5, filename = F6, timestamp = F7, cluster_id = F8}; +decode_properties(80, <>) -> + {F0, R1} = if P0 =:= 0 -> {undefined, R0}; true -> ?SHORTSTR_VAL(R0, L0, V0, X0) end, + {F1, R2} = if P1 =:= 0 -> {undefined, R1}; true -> ?SHORTSTR_VAL(R1, L1, V1, X1) end, + {F2, R3} = if P2 =:= 0 -> {undefined, R2}; true -> ?TABLE_VAL(R2, L2, V2, X2) end, + {F3, R4} = if P3 =:= 0 -> {undefined, R3}; true -> ?OCTET_VAL(R3, L3, V3, X3) end, + {F4, R5} = if P4 =:= 0 -> {undefined, R4}; true -> ?TIMESTAMP_VAL(R4, L4, V4, X4) end, + <<>> = R5, + #'P_stream'{content_type = F0, content_encoding = F1, headers = F2, priority = F3, timestamp = F4}; +decode_properties(90, <<>>) -> + #'P_tx'{}; +decode_properties(100, <<>>) -> + #'P_dtx'{}; +decode_properties(110, <>) -> + {F0, R1} = if P0 =:= 0 -> {undefined, R0}; true -> ?TABLE_VAL(R0, L0, V0, X0) end, + {F1, R2} = if P1 =:= 0 -> {undefined, R1}; true -> ?SHORTSTR_VAL(R1, L1, V1, X1) end, + {F2, R3} = if P2 =:= 0 -> {undefined, R2}; true -> ?SHORTSTR_VAL(R2, L2, V2, X2) end, + {F3, R4} = if P3 =:= 0 -> {undefined, R3}; true -> ?OCTET_VAL(R3, L3, V3, X3) end, + {F4, R5} = if P4 =:= 0 -> {undefined, R4}; true -> ?OCTET_VAL(R4, L4, V4, X4) end, + <<>> = R5, + #'P_tunnel'{headers = F0, proxy_name = F1, data_name = F2, durable = F3, broadcast = F4}; +decode_properties(120, <<>>) -> + #'P_test'{}; +decode_properties(ClassId, _BinaryFields) -> exit({unknown_class_id, ClassId}). +encode_method_fields(#'connection.start'{version_major = F0, version_minor = F1, server_properties = F2, mechanisms = F3, locales = F4}) -> + F2Tab = rabbit_binary_generator:generate_table(F2), + F2Len = size(F2Tab), + F3Len = size(F3), + F4Len = size(F4), + <>; +encode_method_fields(#'connection.start_ok'{client_properties = F0, mechanism = F1, response = F2, locale = F3}) -> + F0Tab = rabbit_binary_generator:generate_table(F0), + F0Len = size(F0Tab), + F1Len = shortstr_size(F1), + F2Len = size(F2), + F3Len = shortstr_size(F3), + <>; +encode_method_fields(#'connection.secure'{challenge = F0}) -> + F0Len = size(F0), + <>; +encode_method_fields(#'connection.secure_ok'{response = F0}) -> + F0Len = size(F0), + <>; +encode_method_fields(#'connection.tune'{channel_max = F0, frame_max = F1, heartbeat = F2}) -> + <>; +encode_method_fields(#'connection.tune_ok'{channel_max = F0, frame_max = F1, heartbeat = F2}) -> + <>; +encode_method_fields(#'connection.open'{virtual_host = F0, capabilities = F1, insist = F2}) -> + F0Len = shortstr_size(F0), + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'connection.open_ok'{known_hosts = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'connection.redirect'{host = F0, known_hosts = F1}) -> + F0Len = shortstr_size(F0), + F1Len = shortstr_size(F1), + <>; +encode_method_fields(#'connection.close'{reply_code = F0, reply_text = F1, class_id = F2, method_id = F3}) -> + F1Len = shortstr_size(F1), + <>; +encode_method_fields(#'connection.close_ok'{}) -> + <<>>; +encode_method_fields(#'channel.open'{out_of_band = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'channel.open_ok'{}) -> + <<>>; +encode_method_fields(#'channel.flow'{active = F0}) -> + F0Bits = ((bitvalue(F0) bsl 0)), + <>; +encode_method_fields(#'channel.flow_ok'{active = F0}) -> + F0Bits = ((bitvalue(F0) bsl 0)), + <>; +encode_method_fields(#'channel.alert'{reply_code = F0, reply_text = F1, details = F2}) -> + F1Len = shortstr_size(F1), + F2Tab = rabbit_binary_generator:generate_table(F2), + F2Len = size(F2Tab), + <>; 
+encode_method_fields(#'channel.close'{reply_code = F0, reply_text = F1, class_id = F2, method_id = F3}) -> + F1Len = shortstr_size(F1), + <>; +encode_method_fields(#'channel.close_ok'{}) -> + <<>>; +encode_method_fields(#'access.request'{realm = F0, exclusive = F1, passive = F2, active = F3, write = F4, read = F5}) -> + F0Len = shortstr_size(F0), + F1Bits = ((bitvalue(F1) bsl 0) bor (bitvalue(F2) bsl 1) bor (bitvalue(F3) bsl 2) bor (bitvalue(F4) bsl 3) bor (bitvalue(F5) bsl 4)), + <>; +encode_method_fields(#'access.request_ok'{ticket = F0}) -> + <>; +encode_method_fields(#'exchange.declare'{ticket = F0, exchange = F1, type = F2, passive = F3, durable = F4, auto_delete = F5, internal = F6, nowait = F7, arguments = F8}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1) bor (bitvalue(F5) bsl 2) bor (bitvalue(F6) bsl 3) bor (bitvalue(F7) bsl 4)), + F8Tab = rabbit_binary_generator:generate_table(F8), + F8Len = size(F8Tab), + <>; +encode_method_fields(#'exchange.declare_ok'{}) -> + <<>>; +encode_method_fields(#'exchange.delete'{ticket = F0, exchange = F1, if_unused = F2, nowait = F3}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0) bor (bitvalue(F3) bsl 1)), + <>; +encode_method_fields(#'exchange.delete_ok'{}) -> + <<>>; +encode_method_fields(#'queue.declare'{ticket = F0, queue = F1, passive = F2, durable = F3, exclusive = F4, auto_delete = F5, nowait = F6, arguments = F7}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0) bor (bitvalue(F3) bsl 1) bor (bitvalue(F4) bsl 2) bor (bitvalue(F5) bsl 3) bor (bitvalue(F6) bsl 4)), + F7Tab = rabbit_binary_generator:generate_table(F7), + F7Len = size(F7Tab), + <>; +encode_method_fields(#'queue.declare_ok'{queue = F0, message_count = F1, consumer_count = F2}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'queue.bind'{ticket = F0, queue = F1, exchange = F2, routing_key = F3, nowait = F4, arguments = F5}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + F4Bits = ((bitvalue(F4) bsl 0)), + F5Tab = rabbit_binary_generator:generate_table(F5), + F5Len = size(F5Tab), + <>; +encode_method_fields(#'queue.bind_ok'{}) -> + <<>>; +encode_method_fields(#'queue.purge'{ticket = F0, queue = F1, nowait = F2}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'queue.purge_ok'{message_count = F0}) -> + <>; +encode_method_fields(#'queue.delete'{ticket = F0, queue = F1, if_unused = F2, if_empty = F3, nowait = F4}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0) bor (bitvalue(F3) bsl 1) bor (bitvalue(F4) bsl 2)), + <>; +encode_method_fields(#'queue.delete_ok'{message_count = F0}) -> + <>; +encode_method_fields(#'queue.unbind'{ticket = F0, queue = F1, exchange = F2, routing_key = F3, arguments = F4}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + F4Tab = rabbit_binary_generator:generate_table(F4), + F4Len = size(F4Tab), + <>; +encode_method_fields(#'queue.unbind_ok'{}) -> + <<>>; +encode_method_fields(#'basic.qos'{prefetch_size = F0, prefetch_count = F1, global = F2}) -> + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'basic.qos_ok'{}) -> + <<>>; +encode_method_fields(#'basic.consume'{ticket = F0, queue = F1, consumer_tag = F2, no_local = F3, no_ack = F4, exclusive = F5, nowait = F6}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1) 
bor (bitvalue(F5) bsl 2) bor (bitvalue(F6) bsl 3)), + <>; +encode_method_fields(#'basic.consume_ok'{consumer_tag = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'basic.cancel'{consumer_tag = F0, nowait = F1}) -> + F0Len = shortstr_size(F0), + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'basic.cancel_ok'{consumer_tag = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'basic.publish'{ticket = F0, exchange = F1, routing_key = F2, mandatory = F3, immediate = F4}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1)), + <>; +encode_method_fields(#'basic.return'{reply_code = F0, reply_text = F1, exchange = F2, routing_key = F3}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + <>; +encode_method_fields(#'basic.deliver'{consumer_tag = F0, delivery_tag = F1, redelivered = F2, exchange = F3, routing_key = F4}) -> + F0Len = shortstr_size(F0), + F2Bits = ((bitvalue(F2) bsl 0)), + F3Len = shortstr_size(F3), + F4Len = shortstr_size(F4), + <>; +encode_method_fields(#'basic.get'{ticket = F0, queue = F1, no_ack = F2}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'basic.get_ok'{delivery_tag = F0, redelivered = F1, exchange = F2, routing_key = F3, message_count = F4}) -> + F1Bits = ((bitvalue(F1) bsl 0)), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + <>; +encode_method_fields(#'basic.get_empty'{cluster_id = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'basic.ack'{delivery_tag = F0, multiple = F1}) -> + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'basic.reject'{delivery_tag = F0, requeue = F1}) -> + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'basic.recover_async'{requeue = F0}) -> + F0Bits = ((bitvalue(F0) bsl 0)), + <>; +encode_method_fields(#'basic.recover'{requeue = F0}) -> + F0Bits = ((bitvalue(F0) bsl 0)), + <>; +encode_method_fields(#'basic.recover_ok'{}) -> + <<>>; +encode_method_fields(#'file.qos'{prefetch_size = F0, prefetch_count = F1, global = F2}) -> + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'file.qos_ok'{}) -> + <<>>; +encode_method_fields(#'file.consume'{ticket = F0, queue = F1, consumer_tag = F2, no_local = F3, no_ack = F4, exclusive = F5, nowait = F6}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1) bor (bitvalue(F5) bsl 2) bor (bitvalue(F6) bsl 3)), + <>; +encode_method_fields(#'file.consume_ok'{consumer_tag = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'file.cancel'{consumer_tag = F0, nowait = F1}) -> + F0Len = shortstr_size(F0), + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'file.cancel_ok'{consumer_tag = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'file.open'{identifier = F0, content_size = F1}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'file.open_ok'{staged_size = F0}) -> + <>; +encode_method_fields(#'file.stage'{}) -> + <<>>; +encode_method_fields(#'file.publish'{ticket = F0, exchange = F1, routing_key = F2, mandatory = F3, immediate = F4, identifier = F5}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1)), + F5Len = shortstr_size(F5), + <>; +encode_method_fields(#'file.return'{reply_code = F0, reply_text = F1, exchange = F2, routing_key = F3}) -> + F1Len = 
shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + <>; +encode_method_fields(#'file.deliver'{consumer_tag = F0, delivery_tag = F1, redelivered = F2, exchange = F3, routing_key = F4, identifier = F5}) -> + F0Len = shortstr_size(F0), + F2Bits = ((bitvalue(F2) bsl 0)), + F3Len = shortstr_size(F3), + F4Len = shortstr_size(F4), + F5Len = shortstr_size(F5), + <>; +encode_method_fields(#'file.ack'{delivery_tag = F0, multiple = F1}) -> + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'file.reject'{delivery_tag = F0, requeue = F1}) -> + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'stream.qos'{prefetch_size = F0, prefetch_count = F1, consume_rate = F2, global = F3}) -> + F3Bits = ((bitvalue(F3) bsl 0)), + <>; +encode_method_fields(#'stream.qos_ok'{}) -> + <<>>; +encode_method_fields(#'stream.consume'{ticket = F0, queue = F1, consumer_tag = F2, no_local = F3, exclusive = F4, nowait = F5}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1) bor (bitvalue(F5) bsl 2)), + <>; +encode_method_fields(#'stream.consume_ok'{consumer_tag = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'stream.cancel'{consumer_tag = F0, nowait = F1}) -> + F0Len = shortstr_size(F0), + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'stream.cancel_ok'{consumer_tag = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'stream.publish'{ticket = F0, exchange = F1, routing_key = F2, mandatory = F3, immediate = F4}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1)), + <>; +encode_method_fields(#'stream.return'{reply_code = F0, reply_text = F1, exchange = F2, routing_key = F3}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + <>; +encode_method_fields(#'stream.deliver'{consumer_tag = F0, delivery_tag = F1, exchange = F2, queue = F3}) -> + F0Len = shortstr_size(F0), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + <>; +encode_method_fields(#'tx.select'{}) -> + <<>>; +encode_method_fields(#'tx.select_ok'{}) -> + <<>>; +encode_method_fields(#'tx.commit'{}) -> + <<>>; +encode_method_fields(#'tx.commit_ok'{}) -> + <<>>; +encode_method_fields(#'tx.rollback'{}) -> + <<>>; +encode_method_fields(#'tx.rollback_ok'{}) -> + <<>>; +encode_method_fields(#'dtx.select'{}) -> + <<>>; +encode_method_fields(#'dtx.select_ok'{}) -> + <<>>; +encode_method_fields(#'dtx.start'{dtx_identifier = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'dtx.start_ok'{}) -> + <<>>; +encode_method_fields(#'tunnel.request'{meta_data = F0}) -> + F0Tab = rabbit_binary_generator:generate_table(F0), + F0Len = size(F0Tab), + <>; +encode_method_fields(#'test.integer'{integer_1 = F0, integer_2 = F1, integer_3 = F2, integer_4 = F3, operation = F4}) -> + <>; +encode_method_fields(#'test.integer_ok'{result = F0}) -> + <>; +encode_method_fields(#'test.string'{string_1 = F0, string_2 = F1, operation = F2}) -> + F0Len = shortstr_size(F0), + F1Len = size(F1), + <>; +encode_method_fields(#'test.string_ok'{result = F0}) -> + F0Len = size(F0), + <>; +encode_method_fields(#'test.table'{table = F0, integer_op = F1, string_op = F2}) -> + F0Tab = rabbit_binary_generator:generate_table(F0), + F0Len = size(F0Tab), + <>; +encode_method_fields(#'test.table_ok'{integer_result = F0, string_result = F1}) -> + F1Len = size(F1), + <>; +encode_method_fields(#'test.content'{}) -> + <<>>; 
+encode_method_fields(#'test.content_ok'{content_checksum = F0}) -> + <>; +encode_method_fields(Record) -> exit({unknown_method_name, element(1, Record)}). +encode_properties(#'P_connection'{}) -> + <<>>; +encode_properties(#'P_channel'{}) -> + <<>>; +encode_properties(#'P_access'{}) -> + <<>>; +encode_properties(#'P_exchange'{}) -> + <<>>; +encode_properties(#'P_queue'{}) -> + <<>>; +encode_properties(#'P_basic'{content_type = F0, content_encoding = F1, headers = F2, delivery_mode = F3, priority = F4, correlation_id = F5, reply_to = F6, expiration = F7, message_id = F8, timestamp = F9, type = F10, user_id = F11, app_id = F12, cluster_id = F13}) -> + R0 = [<<>>], + {P0, R1} = if F0 =:= undefined -> {0, R0}; true -> {1, [?SHORTSTR_PROP(F0, L0) | R0]} end, + {P1, R2} = if F1 =:= undefined -> {0, R1}; true -> {1, [?SHORTSTR_PROP(F1, L1) | R1]} end, + {P2, R3} = if F2 =:= undefined -> {0, R2}; true -> {1, [?TABLE_PROP(F2, L2) | R2]} end, + {P3, R4} = if F3 =:= undefined -> {0, R3}; true -> {1, [?OCTET_PROP(F3, L3) | R3]} end, + {P4, R5} = if F4 =:= undefined -> {0, R4}; true -> {1, [?OCTET_PROP(F4, L4) | R4]} end, + {P5, R6} = if F5 =:= undefined -> {0, R5}; true -> {1, [?SHORTSTR_PROP(F5, L5) | R5]} end, + {P6, R7} = if F6 =:= undefined -> {0, R6}; true -> {1, [?SHORTSTR_PROP(F6, L6) | R6]} end, + {P7, R8} = if F7 =:= undefined -> {0, R7}; true -> {1, [?SHORTSTR_PROP(F7, L7) | R7]} end, + {P8, R9} = if F8 =:= undefined -> {0, R8}; true -> {1, [?SHORTSTR_PROP(F8, L8) | R8]} end, + {P9, R10} = if F9 =:= undefined -> {0, R9}; true -> {1, [?TIMESTAMP_PROP(F9, L9) | R9]} end, + {P10, R11} = if F10 =:= undefined -> {0, R10}; true -> {1, [?SHORTSTR_PROP(F10, L10) | R10]} end, + {P11, R12} = if F11 =:= undefined -> {0, R11}; true -> {1, [?SHORTSTR_PROP(F11, L11) | R11]} end, + {P12, R13} = if F12 =:= undefined -> {0, R12}; true -> {1, [?SHORTSTR_PROP(F12, L12) | R12]} end, + {P13, R14} = if F13 =:= undefined -> {0, R13}; true -> {1, [?SHORTSTR_PROP(F13, L13) | R13]} end, + list_to_binary([<> | lists:reverse(R14)]); +encode_properties(#'P_file'{content_type = F0, content_encoding = F1, headers = F2, priority = F3, reply_to = F4, message_id = F5, filename = F6, timestamp = F7, cluster_id = F8}) -> + R0 = [<<>>], + {P0, R1} = if F0 =:= undefined -> {0, R0}; true -> {1, [?SHORTSTR_PROP(F0, L0) | R0]} end, + {P1, R2} = if F1 =:= undefined -> {0, R1}; true -> {1, [?SHORTSTR_PROP(F1, L1) | R1]} end, + {P2, R3} = if F2 =:= undefined -> {0, R2}; true -> {1, [?TABLE_PROP(F2, L2) | R2]} end, + {P3, R4} = if F3 =:= undefined -> {0, R3}; true -> {1, [?OCTET_PROP(F3, L3) | R3]} end, + {P4, R5} = if F4 =:= undefined -> {0, R4}; true -> {1, [?SHORTSTR_PROP(F4, L4) | R4]} end, + {P5, R6} = if F5 =:= undefined -> {0, R5}; true -> {1, [?SHORTSTR_PROP(F5, L5) | R5]} end, + {P6, R7} = if F6 =:= undefined -> {0, R6}; true -> {1, [?SHORTSTR_PROP(F6, L6) | R6]} end, + {P7, R8} = if F7 =:= undefined -> {0, R7}; true -> {1, [?TIMESTAMP_PROP(F7, L7) | R7]} end, + {P8, R9} = if F8 =:= undefined -> {0, R8}; true -> {1, [?SHORTSTR_PROP(F8, L8) | R8]} end, + list_to_binary([<> | lists:reverse(R9)]); +encode_properties(#'P_stream'{content_type = F0, content_encoding = F1, headers = F2, priority = F3, timestamp = F4}) -> + R0 = [<<>>], + {P0, R1} = if F0 =:= undefined -> {0, R0}; true -> {1, [?SHORTSTR_PROP(F0, L0) | R0]} end, + {P1, R2} = if F1 =:= undefined -> {0, R1}; true -> {1, [?SHORTSTR_PROP(F1, L1) | R1]} end, + {P2, R3} = if F2 =:= undefined -> {0, R2}; true -> {1, [?TABLE_PROP(F2, L2) | R2]} end, + {P3, R4} = if F3 =:= 
undefined -> {0, R3}; true -> {1, [?OCTET_PROP(F3, L3) | R3]} end, + {P4, R5} = if F4 =:= undefined -> {0, R4}; true -> {1, [?TIMESTAMP_PROP(F4, L4) | R4]} end, + list_to_binary([<> | lists:reverse(R5)]); +encode_properties(#'P_tx'{}) -> + <<>>; +encode_properties(#'P_dtx'{}) -> + <<>>; +encode_properties(#'P_tunnel'{headers = F0, proxy_name = F1, data_name = F2, durable = F3, broadcast = F4}) -> + R0 = [<<>>], + {P0, R1} = if F0 =:= undefined -> {0, R0}; true -> {1, [?TABLE_PROP(F0, L0) | R0]} end, + {P1, R2} = if F1 =:= undefined -> {0, R1}; true -> {1, [?SHORTSTR_PROP(F1, L1) | R1]} end, + {P2, R3} = if F2 =:= undefined -> {0, R2}; true -> {1, [?SHORTSTR_PROP(F2, L2) | R2]} end, + {P3, R4} = if F3 =:= undefined -> {0, R3}; true -> {1, [?OCTET_PROP(F3, L3) | R3]} end, + {P4, R5} = if F4 =:= undefined -> {0, R4}; true -> {1, [?OCTET_PROP(F4, L4) | R4]} end, + list_to_binary([<> | lists:reverse(R5)]); +encode_properties(#'P_test'{}) -> + <<>>; +encode_properties(Record) -> exit({unknown_properties_record, Record}). +lookup_amqp_exception(not_delivered) -> {false, ?NOT_DELIVERED, <<"NOT_DELIVERED">>}; +lookup_amqp_exception(content_too_large) -> {false, ?CONTENT_TOO_LARGE, <<"CONTENT_TOO_LARGE">>}; +lookup_amqp_exception(no_route) -> {false, ?NO_ROUTE, <<"NO_ROUTE">>}; +lookup_amqp_exception(no_consumers) -> {false, ?NO_CONSUMERS, <<"NO_CONSUMERS">>}; +lookup_amqp_exception(access_refused) -> {false, ?ACCESS_REFUSED, <<"ACCESS_REFUSED">>}; +lookup_amqp_exception(not_found) -> {false, ?NOT_FOUND, <<"NOT_FOUND">>}; +lookup_amqp_exception(resource_locked) -> {false, ?RESOURCE_LOCKED, <<"RESOURCE_LOCKED">>}; +lookup_amqp_exception(precondition_failed) -> {false, ?PRECONDITION_FAILED, <<"PRECONDITION_FAILED">>}; +lookup_amqp_exception(connection_forced) -> {true, ?CONNECTION_FORCED, <<"CONNECTION_FORCED">>}; +lookup_amqp_exception(invalid_path) -> {true, ?INVALID_PATH, <<"INVALID_PATH">>}; +lookup_amqp_exception(frame_error) -> {true, ?FRAME_ERROR, <<"FRAME_ERROR">>}; +lookup_amqp_exception(syntax_error) -> {true, ?SYNTAX_ERROR, <<"SYNTAX_ERROR">>}; +lookup_amqp_exception(command_invalid) -> {true, ?COMMAND_INVALID, <<"COMMAND_INVALID">>}; +lookup_amqp_exception(channel_error) -> {true, ?CHANNEL_ERROR, <<"CHANNEL_ERROR">>}; +lookup_amqp_exception(unexpected_frame) -> {true, ?UNEXPECTED_FRAME, <<"UNEXPECTED_FRAME">>}; +lookup_amqp_exception(resource_error) -> {true, ?RESOURCE_ERROR, <<"RESOURCE_ERROR">>}; +lookup_amqp_exception(not_allowed) -> {true, ?NOT_ALLOWED, <<"NOT_ALLOWED">>}; +lookup_amqp_exception(not_implemented) -> {true, ?NOT_IMPLEMENTED, <<"NOT_IMPLEMENTED">>}; +lookup_amqp_exception(internal_error) -> {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}; +lookup_amqp_exception(Code) -> + rabbit_log:warning("Unknown AMQP error code '~p'~n", [Code]), + {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}. 
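The triple returned by lookup_amqp_exception/1 is {HardError, Code, Text}, where the boolean distinguishes connection-level (hard) errors from channel-level (soft) ones. A minimal sketch of how a caller could turn that triple into the matching close method (close_method/2 is a hypothetical helper, not generated code):

%% Illustrative only: pick connection.close for hard errors,
%% channel.close for soft errors, reusing the looked-up code and text.
close_method(ErrorName, {ClassId, MethodId}) ->
    {HardError, Code, Text} = lookup_amqp_exception(ErrorName),
    case HardError of
        true  -> #'connection.close'{reply_code = Code, reply_text = Text,
                                     class_id = ClassId, method_id = MethodId};
        false -> #'channel.close'{reply_code = Code, reply_text = Text,
                                  class_id = ClassId, method_id = MethodId}
    end.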
+amqp_exception(?FRAME_METHOD) -> frame_method; +amqp_exception(?FRAME_HEADER) -> frame_header; +amqp_exception(?FRAME_BODY) -> frame_body; +amqp_exception(?FRAME_OOB_METHOD) -> frame_oob_method; +amqp_exception(?FRAME_OOB_HEADER) -> frame_oob_header; +amqp_exception(?FRAME_OOB_BODY) -> frame_oob_body; +amqp_exception(?FRAME_TRACE) -> frame_trace; +amqp_exception(?FRAME_HEARTBEAT) -> frame_heartbeat; +amqp_exception(?FRAME_MIN_SIZE) -> frame_min_size; +amqp_exception(?FRAME_END) -> frame_end; +amqp_exception(?REPLY_SUCCESS) -> reply_success; +amqp_exception(?NOT_DELIVERED) -> not_delivered; +amqp_exception(?CONTENT_TOO_LARGE) -> content_too_large; +amqp_exception(?NO_ROUTE) -> no_route; +amqp_exception(?NO_CONSUMERS) -> no_consumers; +amqp_exception(?ACCESS_REFUSED) -> access_refused; +amqp_exception(?NOT_FOUND) -> not_found; +amqp_exception(?RESOURCE_LOCKED) -> resource_locked; +amqp_exception(?PRECONDITION_FAILED) -> precondition_failed; +amqp_exception(?CONNECTION_FORCED) -> connection_forced; +amqp_exception(?INVALID_PATH) -> invalid_path; +amqp_exception(?FRAME_ERROR) -> frame_error; +amqp_exception(?SYNTAX_ERROR) -> syntax_error; +amqp_exception(?COMMAND_INVALID) -> command_invalid; +amqp_exception(?CHANNEL_ERROR) -> channel_error; +amqp_exception(?UNEXPECTED_FRAME) -> unexpected_frame; +amqp_exception(?RESOURCE_ERROR) -> resource_error; +amqp_exception(?NOT_ALLOWED) -> not_allowed; +amqp_exception(?NOT_IMPLEMENTED) -> not_implemented; +amqp_exception(?INTERNAL_ERROR) -> internal_error; +amqp_exception(_Code) -> undefined. diff --git a/deps/rabbit_common/src/rabbit_framing_amqp_0_9_1.erl b/deps/rabbit_common/src/rabbit_framing_amqp_0_9_1.erl new file mode 100644 index 000000000000..7e6b921b43a5 --- /dev/null +++ b/deps/rabbit_common/src/rabbit_framing_amqp_0_9_1.erl @@ -0,0 +1,1270 @@ +%% Autogenerated code. Do not edit. +%% +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_framing_amqp_0_9_1). +-include("rabbit_framing.hrl"). + +-export([version/0]). +-export([lookup_method_name/1]). +-export([lookup_class_name/1]). + +-export([method_id/1]). +-export([method_has_content/1]). +-export([is_method_synchronous/1]). +-export([method_record/1]). +-export([method_fieldnames/1]). +-export([decode_method_fields/2]). +-export([decode_properties/2]). +-export([encode_method_fields/1]). +-export([encode_properties/1]). +-export([lookup_amqp_exception/1]). +-export([amqp_exception/1]). + + +%% Various types +-export_type([amqp_field_type/0, amqp_property_type/0, + amqp_table/0, amqp_array/0, amqp_value/0, + amqp_method_name/0, amqp_method/0, amqp_method_record/0, + amqp_method_field_name/0, amqp_property_record/0, + amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]). + +-type amqp_field_type() :: + 'longstr' | 'signedint' | 'decimal' | 'timestamp' | + 'unsignedbyte' | 'unsignedshort' | 'unsignedint' | + 'table' | 'byte' | 'double' | 'float' | 'long' | + 'short' | 'bool' | 'binary' | 'void' | 'array'. +-type amqp_property_type() :: + 'shortstr' | 'longstr' | 'octet' | 'short' | 'long' | + 'longlong' | 'timestamp' | 'bit' | 'table'. + +-type amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}]. +-type amqp_array() :: [{amqp_field_type(), amqp_value()}]. 
+-type amqp_value() :: binary() | % longstr + integer() | % signedint + {non_neg_integer(), non_neg_integer()} | % decimal + amqp_table() | + amqp_array() | + byte() | % byte + float() | % double + integer() | % long + integer() | % short + boolean() | % bool + binary() | % binary + 'undefined' | % void + non_neg_integer(). % timestamp + +-type amqp_method_name() :: + ( 'basic.qos' | 'basic.qos_ok' | 'basic.consume' | 'basic.consume_ok' + | 'basic.cancel' | 'basic.cancel_ok' | 'basic.publish' | 'basic.return' + | 'basic.deliver' | 'basic.get' | 'basic.get_ok' | 'basic.get_empty' + | 'basic.ack' | 'basic.reject' | 'basic.recover_async' | 'basic.recover' + | 'basic.recover_ok' | 'basic.nack' | 'basic.credit' | 'basic.credit_ok' + | 'basic.credit_drained' | 'connection.start' | 'connection.start_ok' | 'connection.secure' + | 'connection.secure_ok' | 'connection.tune' | 'connection.tune_ok' | 'connection.open' + | 'connection.open_ok' | 'connection.close' | 'connection.close_ok' | 'connection.blocked' + | 'connection.unblocked' | 'connection.update_secret' | 'connection.update_secret_ok' | 'channel.open' + | 'channel.open_ok' | 'channel.flow' | 'channel.flow_ok' | 'channel.close' + | 'channel.close_ok' | 'access.request' | 'access.request_ok' | 'exchange.declare' + | 'exchange.declare_ok' | 'exchange.delete' | 'exchange.delete_ok' | 'exchange.bind' + | 'exchange.bind_ok' | 'exchange.unbind' | 'exchange.unbind_ok' | 'queue.declare' + | 'queue.declare_ok' | 'queue.bind' | 'queue.bind_ok' | 'queue.purge' + | 'queue.purge_ok' | 'queue.delete' | 'queue.delete_ok' | 'queue.unbind' + | 'queue.unbind_ok' | 'tx.select' | 'tx.select_ok' | 'tx.commit' + | 'tx.commit_ok' | 'tx.rollback' | 'tx.rollback_ok' | 'confirm.select' + | 'confirm.select_ok' ). +-type amqp_method() :: + ( {60, 10} | {60, 11} | {60, 20} | {60, 21} | {60, 30} | {60, 31} + | {60, 40} | {60, 50} | {60, 60} | {60, 70} | {60, 71} | {60, 72} + | {60, 80} | {60, 90} | {60, 100} | {60, 110} | {60, 111} | {60, 120} + | {60, 200} | {60, 201} | {60, 202} | {10, 10} | {10, 11} | {10, 20} + | {10, 21} | {10, 30} | {10, 31} | {10, 40} | {10, 41} | {10, 50} + | {10, 51} | {10, 60} | {10, 61} | {10, 70} | {10, 71} | {20, 10} + | {20, 11} | {20, 20} | {20, 21} | {20, 40} | {20, 41} | {30, 10} + | {30, 11} | {40, 10} | {40, 11} | {40, 20} | {40, 21} | {40, 30} + | {40, 31} | {40, 40} | {40, 51} | {50, 10} | {50, 11} | {50, 20} + | {50, 21} | {50, 30} | {50, 31} | {50, 40} | {50, 41} | {50, 50} + | {50, 51} | {90, 10} | {90, 11} | {90, 20} | {90, 21} | {90, 30} + | {90, 31} | {85, 10} | {85, 11} ). 
+-type amqp_method_record() :: + ( #'basic.qos'{} | #'basic.qos_ok'{} | #'basic.consume'{} | #'basic.consume_ok'{} + | #'basic.cancel'{} | #'basic.cancel_ok'{} | #'basic.publish'{} | #'basic.return'{} + | #'basic.deliver'{} | #'basic.get'{} | #'basic.get_ok'{} | #'basic.get_empty'{} + | #'basic.ack'{} | #'basic.reject'{} | #'basic.recover_async'{} | #'basic.recover'{} + | #'basic.recover_ok'{} | #'basic.nack'{} | #'basic.credit'{} | #'basic.credit_ok'{} + | #'basic.credit_drained'{} | #'connection.start'{} | #'connection.start_ok'{} | #'connection.secure'{} + | #'connection.secure_ok'{} | #'connection.tune'{} | #'connection.tune_ok'{} | #'connection.open'{} + | #'connection.open_ok'{} | #'connection.close'{} | #'connection.close_ok'{} | #'connection.blocked'{} + | #'connection.unblocked'{} | #'connection.update_secret'{} | #'connection.update_secret_ok'{} | #'channel.open'{} + | #'channel.open_ok'{} | #'channel.flow'{} | #'channel.flow_ok'{} | #'channel.close'{} + | #'channel.close_ok'{} | #'access.request'{} | #'access.request_ok'{} | #'exchange.declare'{} + | #'exchange.declare_ok'{} | #'exchange.delete'{} | #'exchange.delete_ok'{} | #'exchange.bind'{} + | #'exchange.bind_ok'{} | #'exchange.unbind'{} | #'exchange.unbind_ok'{} | #'queue.declare'{} + | #'queue.declare_ok'{} | #'queue.bind'{} | #'queue.bind_ok'{} | #'queue.purge'{} + | #'queue.purge_ok'{} | #'queue.delete'{} | #'queue.delete_ok'{} | #'queue.unbind'{} + | #'queue.unbind_ok'{} | #'tx.select'{} | #'tx.select_ok'{} | #'tx.commit'{} + | #'tx.commit_ok'{} | #'tx.rollback'{} | #'tx.rollback_ok'{} | #'confirm.select'{} + | #'confirm.select_ok'{} ). +-type amqp_method_field_name() :: + ( active | arguments | auto_delete | available + | capabilities | challenge | channel_id | channel_max + | class_id | client_properties | cluster_id | consumer_count + | consumer_tag | credit | credit_drained | delivery_tag + | destination | drain | durable | exchange + | exclusive | frame_max | global | heartbeat + | if_empty | if_unused | immediate | insist + | internal | known_hosts | locale | locales + | mandatory | mechanism | mechanisms | message_count + | method_id | multiple | new_secret | no_ack + | no_local | nowait | out_of_band | passive + | prefetch_count | prefetch_size | queue | read + | realm | reason | redelivered | reply_code + | reply_text | requeue | response | routing_key + | server_properties | source | ticket | type + | version_major | version_minor | virtual_host | write ). +-type amqp_property_record() :: + ( #'P_basic'{} | #'P_connection'{} | #'P_channel'{} | #'P_access'{} + | #'P_exchange'{} | #'P_queue'{} | #'P_tx'{} | #'P_confirm'{} ). +-type amqp_exception() :: + ( 'frame_method' | 'frame_header' | 'frame_body' | 'frame_heartbeat' + | 'frame_min_size' | 'frame_end' | 'reply_success' | 'content_too_large' + | 'no_route' | 'no_consumers' | 'access_refused' | 'not_found' + | 'resource_locked' | 'precondition_failed' | 'connection_forced' | 'invalid_path' + | 'frame_error' | 'syntax_error' | 'command_invalid' | 'channel_error' + | 'unexpected_frame' | 'resource_error' | 'not_allowed' | 'not_implemented' + | 'internal_error' ). +-type amqp_exception_code() :: + ( 1 | 2 | 3 | 8 + | 4096 | 206 | 200 | 311 + | 312 | 313 | 403 | 404 + | 405 | 406 | 320 | 402 + | 501 | 502 | 503 | 504 + | 505 | 506 | 530 | 540 + | 541 ). +-type amqp_class_id() :: + ( 40 | 10 | 50 | 20 + | 85 | 90 | 60 | 30 ). +-type amqp_class_name() :: + ( 'basic' | 'connection' | 'channel' | 'access' + | 'exchange' | 'queue' | 'tx' | 'confirm' ). 
+
+%% Method signatures
+-spec version() -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}.
+-spec lookup_method_name(amqp_method()) -> amqp_method_name().
+-spec lookup_class_name(amqp_class_id()) -> amqp_class_name().
+-spec method_id(amqp_method_name()) -> amqp_method().
+-spec method_has_content(amqp_method_name()) -> boolean().
+-spec is_method_synchronous(amqp_method_record()) -> boolean().
+-spec method_record(amqp_method_name()) -> amqp_method_record().
+-spec method_fieldnames(amqp_method_name()) -> [amqp_method_field_name()].
+-spec decode_method_fields(amqp_method_name(), binary()) ->
+          amqp_method_record() | rabbit_types:connection_exit().
+-spec decode_properties(non_neg_integer(), binary()) -> amqp_property_record().
+-spec encode_method_fields(amqp_method_record()) -> binary().
+-spec encode_properties(amqp_property_record()) -> binary().
+-spec lookup_amqp_exception(amqp_exception()) ->
+          {boolean(), amqp_exception_code(), binary()}.
+-spec amqp_exception(amqp_exception_code()) -> amqp_exception().
+
+bitvalue(true) -> 1;
+bitvalue(false) -> 0;
+bitvalue(undefined) -> 0.
+
+shortstr_size(S) ->
+    case size(S) of
+        Len when Len =< 255 -> Len;
+        _                   -> exit(method_field_shortstr_overflow)
+    end.
+
+-define(SHORTSTR_VAL(R, L, V, X),
+        begin
+            <<L:8/unsigned, V:L/binary, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(LONGSTR_VAL(R, L, V, X),
+        begin
+            <<L:32/unsigned, V:L/binary, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(SHORT_VAL(R, L, V, X),
+        begin
+            <<V:16/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(LONG_VAL(R, L, V, X),
+        begin
+            <<V:32/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(LONGLONG_VAL(R, L, V, X),
+        begin
+            <<V:64/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(OCTET_VAL(R, L, V, X),
+        begin
+            <<V:8/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(TABLE_VAL(R, L, V, X),
+        begin
+            <<L:32/unsigned, V:L/binary, X/binary>> = R,
+            {rabbit_binary_parser:parse_table(V), X}
+        end).
+
+-define(TIMESTAMP_VAL(R, L, V, X),
+        begin
+            <<V:64/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(SHORTSTR_PROP(X, L),
+        begin
+            L = size(X),
+            if L < 256 -> <<L:8, X:L/binary>>;
+               true    -> exit(content_properties_shortstr_overflow)
+            end
+        end).
+
+-define(LONGSTR_PROP(X, L),
+        begin
+            L = size(X),
+            <<L:32, X:L/binary>>
+        end).
+
+-define(OCTET_PROP(X, L),     <<X:8/unsigned>>).
+-define(SHORT_PROP(X, L),     <<X:16/unsigned>>).
+-define(LONG_PROP(X, L),      <<X:32/unsigned>>).
+-define(LONGLONG_PROP(X, L),  <<X:64/unsigned>>).
+-define(TIMESTAMP_PROP(X, L), <<X:64/unsigned>>).
+
+-define(TABLE_PROP(X, T),
+        begin
+            T = rabbit_binary_generator:generate_table(X),
+            <<(size(T)):32, T/binary>>
+        end).
+
+version() -> {0, 9, 1}.
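+
+%% Usage sketch (illustrative only; assumes the method records defined in
+%% rabbit_framing.hrl are in scope), drawn from the mapping tables below:
+%%
+%%   {60, 40}        = method_id('basic.publish'),
+%%   'basic.publish' = lookup_method_name({60, 40}),
+%%   true            = method_has_content('basic.publish'),
+%%   false           = is_method_synchronous(#'basic.publish'{}).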
+lookup_method_name({60, 10}) -> 'basic.qos'; +lookup_method_name({60, 11}) -> 'basic.qos_ok'; +lookup_method_name({60, 20}) -> 'basic.consume'; +lookup_method_name({60, 21}) -> 'basic.consume_ok'; +lookup_method_name({60, 30}) -> 'basic.cancel'; +lookup_method_name({60, 31}) -> 'basic.cancel_ok'; +lookup_method_name({60, 40}) -> 'basic.publish'; +lookup_method_name({60, 50}) -> 'basic.return'; +lookup_method_name({60, 60}) -> 'basic.deliver'; +lookup_method_name({60, 70}) -> 'basic.get'; +lookup_method_name({60, 71}) -> 'basic.get_ok'; +lookup_method_name({60, 72}) -> 'basic.get_empty'; +lookup_method_name({60, 80}) -> 'basic.ack'; +lookup_method_name({60, 90}) -> 'basic.reject'; +lookup_method_name({60, 100}) -> 'basic.recover_async'; +lookup_method_name({60, 110}) -> 'basic.recover'; +lookup_method_name({60, 111}) -> 'basic.recover_ok'; +lookup_method_name({60, 120}) -> 'basic.nack'; +lookup_method_name({60, 200}) -> 'basic.credit'; +lookup_method_name({60, 201}) -> 'basic.credit_ok'; +lookup_method_name({60, 202}) -> 'basic.credit_drained'; +lookup_method_name({10, 10}) -> 'connection.start'; +lookup_method_name({10, 11}) -> 'connection.start_ok'; +lookup_method_name({10, 20}) -> 'connection.secure'; +lookup_method_name({10, 21}) -> 'connection.secure_ok'; +lookup_method_name({10, 30}) -> 'connection.tune'; +lookup_method_name({10, 31}) -> 'connection.tune_ok'; +lookup_method_name({10, 40}) -> 'connection.open'; +lookup_method_name({10, 41}) -> 'connection.open_ok'; +lookup_method_name({10, 50}) -> 'connection.close'; +lookup_method_name({10, 51}) -> 'connection.close_ok'; +lookup_method_name({10, 60}) -> 'connection.blocked'; +lookup_method_name({10, 61}) -> 'connection.unblocked'; +lookup_method_name({10, 70}) -> 'connection.update_secret'; +lookup_method_name({10, 71}) -> 'connection.update_secret_ok'; +lookup_method_name({20, 10}) -> 'channel.open'; +lookup_method_name({20, 11}) -> 'channel.open_ok'; +lookup_method_name({20, 20}) -> 'channel.flow'; +lookup_method_name({20, 21}) -> 'channel.flow_ok'; +lookup_method_name({20, 40}) -> 'channel.close'; +lookup_method_name({20, 41}) -> 'channel.close_ok'; +lookup_method_name({30, 10}) -> 'access.request'; +lookup_method_name({30, 11}) -> 'access.request_ok'; +lookup_method_name({40, 10}) -> 'exchange.declare'; +lookup_method_name({40, 11}) -> 'exchange.declare_ok'; +lookup_method_name({40, 20}) -> 'exchange.delete'; +lookup_method_name({40, 21}) -> 'exchange.delete_ok'; +lookup_method_name({40, 30}) -> 'exchange.bind'; +lookup_method_name({40, 31}) -> 'exchange.bind_ok'; +lookup_method_name({40, 40}) -> 'exchange.unbind'; +lookup_method_name({40, 51}) -> 'exchange.unbind_ok'; +lookup_method_name({50, 10}) -> 'queue.declare'; +lookup_method_name({50, 11}) -> 'queue.declare_ok'; +lookup_method_name({50, 20}) -> 'queue.bind'; +lookup_method_name({50, 21}) -> 'queue.bind_ok'; +lookup_method_name({50, 30}) -> 'queue.purge'; +lookup_method_name({50, 31}) -> 'queue.purge_ok'; +lookup_method_name({50, 40}) -> 'queue.delete'; +lookup_method_name({50, 41}) -> 'queue.delete_ok'; +lookup_method_name({50, 50}) -> 'queue.unbind'; +lookup_method_name({50, 51}) -> 'queue.unbind_ok'; +lookup_method_name({90, 10}) -> 'tx.select'; +lookup_method_name({90, 11}) -> 'tx.select_ok'; +lookup_method_name({90, 20}) -> 'tx.commit'; +lookup_method_name({90, 21}) -> 'tx.commit_ok'; +lookup_method_name({90, 30}) -> 'tx.rollback'; +lookup_method_name({90, 31}) -> 'tx.rollback_ok'; +lookup_method_name({85, 10}) -> 'confirm.select'; +lookup_method_name({85, 11}) -> 
'confirm.select_ok'; +lookup_method_name({_ClassId, _MethodId} = Id) -> exit({unknown_method_id, Id}). +lookup_class_name(60) -> 'basic'; +lookup_class_name(10) -> 'connection'; +lookup_class_name(20) -> 'channel'; +lookup_class_name(30) -> 'access'; +lookup_class_name(40) -> 'exchange'; +lookup_class_name(50) -> 'queue'; +lookup_class_name(90) -> 'tx'; +lookup_class_name(85) -> 'confirm'; +lookup_class_name(ClassId) -> exit({unknown_class_id, ClassId}). +method_id('basic.qos') -> {60, 10}; +method_id('basic.qos_ok') -> {60, 11}; +method_id('basic.consume') -> {60, 20}; +method_id('basic.consume_ok') -> {60, 21}; +method_id('basic.cancel') -> {60, 30}; +method_id('basic.cancel_ok') -> {60, 31}; +method_id('basic.publish') -> {60, 40}; +method_id('basic.return') -> {60, 50}; +method_id('basic.deliver') -> {60, 60}; +method_id('basic.get') -> {60, 70}; +method_id('basic.get_ok') -> {60, 71}; +method_id('basic.get_empty') -> {60, 72}; +method_id('basic.ack') -> {60, 80}; +method_id('basic.reject') -> {60, 90}; +method_id('basic.recover_async') -> {60, 100}; +method_id('basic.recover') -> {60, 110}; +method_id('basic.recover_ok') -> {60, 111}; +method_id('basic.nack') -> {60, 120}; +method_id('basic.credit') -> {60, 200}; +method_id('basic.credit_ok') -> {60, 201}; +method_id('basic.credit_drained') -> {60, 202}; +method_id('connection.start') -> {10, 10}; +method_id('connection.start_ok') -> {10, 11}; +method_id('connection.secure') -> {10, 20}; +method_id('connection.secure_ok') -> {10, 21}; +method_id('connection.tune') -> {10, 30}; +method_id('connection.tune_ok') -> {10, 31}; +method_id('connection.open') -> {10, 40}; +method_id('connection.open_ok') -> {10, 41}; +method_id('connection.close') -> {10, 50}; +method_id('connection.close_ok') -> {10, 51}; +method_id('connection.blocked') -> {10, 60}; +method_id('connection.unblocked') -> {10, 61}; +method_id('connection.update_secret') -> {10, 70}; +method_id('connection.update_secret_ok') -> {10, 71}; +method_id('channel.open') -> {20, 10}; +method_id('channel.open_ok') -> {20, 11}; +method_id('channel.flow') -> {20, 20}; +method_id('channel.flow_ok') -> {20, 21}; +method_id('channel.close') -> {20, 40}; +method_id('channel.close_ok') -> {20, 41}; +method_id('access.request') -> {30, 10}; +method_id('access.request_ok') -> {30, 11}; +method_id('exchange.declare') -> {40, 10}; +method_id('exchange.declare_ok') -> {40, 11}; +method_id('exchange.delete') -> {40, 20}; +method_id('exchange.delete_ok') -> {40, 21}; +method_id('exchange.bind') -> {40, 30}; +method_id('exchange.bind_ok') -> {40, 31}; +method_id('exchange.unbind') -> {40, 40}; +method_id('exchange.unbind_ok') -> {40, 51}; +method_id('queue.declare') -> {50, 10}; +method_id('queue.declare_ok') -> {50, 11}; +method_id('queue.bind') -> {50, 20}; +method_id('queue.bind_ok') -> {50, 21}; +method_id('queue.purge') -> {50, 30}; +method_id('queue.purge_ok') -> {50, 31}; +method_id('queue.delete') -> {50, 40}; +method_id('queue.delete_ok') -> {50, 41}; +method_id('queue.unbind') -> {50, 50}; +method_id('queue.unbind_ok') -> {50, 51}; +method_id('tx.select') -> {90, 10}; +method_id('tx.select_ok') -> {90, 11}; +method_id('tx.commit') -> {90, 20}; +method_id('tx.commit_ok') -> {90, 21}; +method_id('tx.rollback') -> {90, 30}; +method_id('tx.rollback_ok') -> {90, 31}; +method_id('confirm.select') -> {85, 10}; +method_id('confirm.select_ok') -> {85, 11}; +method_id(Name) -> exit({unknown_method_name, Name}). 
+method_has_content('basic.qos') -> false; +method_has_content('basic.qos_ok') -> false; +method_has_content('basic.consume') -> false; +method_has_content('basic.consume_ok') -> false; +method_has_content('basic.cancel') -> false; +method_has_content('basic.cancel_ok') -> false; +method_has_content('basic.publish') -> true; +method_has_content('basic.return') -> true; +method_has_content('basic.deliver') -> true; +method_has_content('basic.get') -> false; +method_has_content('basic.get_ok') -> true; +method_has_content('basic.get_empty') -> false; +method_has_content('basic.ack') -> false; +method_has_content('basic.reject') -> false; +method_has_content('basic.recover_async') -> false; +method_has_content('basic.recover') -> false; +method_has_content('basic.recover_ok') -> false; +method_has_content('basic.nack') -> false; +method_has_content('basic.credit') -> false; +method_has_content('basic.credit_ok') -> false; +method_has_content('basic.credit_drained') -> false; +method_has_content('connection.start') -> false; +method_has_content('connection.start_ok') -> false; +method_has_content('connection.secure') -> false; +method_has_content('connection.secure_ok') -> false; +method_has_content('connection.tune') -> false; +method_has_content('connection.tune_ok') -> false; +method_has_content('connection.open') -> false; +method_has_content('connection.open_ok') -> false; +method_has_content('connection.close') -> false; +method_has_content('connection.close_ok') -> false; +method_has_content('connection.blocked') -> false; +method_has_content('connection.unblocked') -> false; +method_has_content('connection.update_secret') -> false; +method_has_content('connection.update_secret_ok') -> false; +method_has_content('channel.open') -> false; +method_has_content('channel.open_ok') -> false; +method_has_content('channel.flow') -> false; +method_has_content('channel.flow_ok') -> false; +method_has_content('channel.close') -> false; +method_has_content('channel.close_ok') -> false; +method_has_content('access.request') -> false; +method_has_content('access.request_ok') -> false; +method_has_content('exchange.declare') -> false; +method_has_content('exchange.declare_ok') -> false; +method_has_content('exchange.delete') -> false; +method_has_content('exchange.delete_ok') -> false; +method_has_content('exchange.bind') -> false; +method_has_content('exchange.bind_ok') -> false; +method_has_content('exchange.unbind') -> false; +method_has_content('exchange.unbind_ok') -> false; +method_has_content('queue.declare') -> false; +method_has_content('queue.declare_ok') -> false; +method_has_content('queue.bind') -> false; +method_has_content('queue.bind_ok') -> false; +method_has_content('queue.purge') -> false; +method_has_content('queue.purge_ok') -> false; +method_has_content('queue.delete') -> false; +method_has_content('queue.delete_ok') -> false; +method_has_content('queue.unbind') -> false; +method_has_content('queue.unbind_ok') -> false; +method_has_content('tx.select') -> false; +method_has_content('tx.select_ok') -> false; +method_has_content('tx.commit') -> false; +method_has_content('tx.commit_ok') -> false; +method_has_content('tx.rollback') -> false; +method_has_content('tx.rollback_ok') -> false; +method_has_content('confirm.select') -> false; +method_has_content('confirm.select_ok') -> false; +method_has_content(Name) -> exit({unknown_method_name, Name}). 
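+
+%% Note: per the table above, only 'basic.publish', 'basic.return',
+%% 'basic.deliver' and 'basic.get_ok' carry content (header and body) frames;
+%% all other methods are method-frame only.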
+is_method_synchronous(#'basic.qos'{}) -> true; +is_method_synchronous(#'basic.qos_ok'{}) -> false; +is_method_synchronous(#'basic.consume'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'basic.consume_ok'{}) -> false; +is_method_synchronous(#'basic.cancel'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'basic.cancel_ok'{}) -> false; +is_method_synchronous(#'basic.publish'{}) -> false; +is_method_synchronous(#'basic.return'{}) -> false; +is_method_synchronous(#'basic.deliver'{}) -> false; +is_method_synchronous(#'basic.get'{}) -> true; +is_method_synchronous(#'basic.get_ok'{}) -> false; +is_method_synchronous(#'basic.get_empty'{}) -> false; +is_method_synchronous(#'basic.ack'{}) -> false; +is_method_synchronous(#'basic.reject'{}) -> false; +is_method_synchronous(#'basic.recover_async'{}) -> false; +is_method_synchronous(#'basic.recover'{}) -> true; +is_method_synchronous(#'basic.recover_ok'{}) -> false; +is_method_synchronous(#'basic.nack'{}) -> false; +is_method_synchronous(#'basic.credit'{}) -> true; +is_method_synchronous(#'basic.credit_ok'{}) -> false; +is_method_synchronous(#'basic.credit_drained'{}) -> false; +is_method_synchronous(#'connection.start'{}) -> true; +is_method_synchronous(#'connection.start_ok'{}) -> false; +is_method_synchronous(#'connection.secure'{}) -> true; +is_method_synchronous(#'connection.secure_ok'{}) -> false; +is_method_synchronous(#'connection.tune'{}) -> true; +is_method_synchronous(#'connection.tune_ok'{}) -> false; +is_method_synchronous(#'connection.open'{}) -> true; +is_method_synchronous(#'connection.open_ok'{}) -> false; +is_method_synchronous(#'connection.close'{}) -> true; +is_method_synchronous(#'connection.close_ok'{}) -> false; +is_method_synchronous(#'connection.blocked'{}) -> false; +is_method_synchronous(#'connection.unblocked'{}) -> false; +is_method_synchronous(#'connection.update_secret'{}) -> true; +is_method_synchronous(#'connection.update_secret_ok'{}) -> false; +is_method_synchronous(#'channel.open'{}) -> true; +is_method_synchronous(#'channel.open_ok'{}) -> false; +is_method_synchronous(#'channel.flow'{}) -> true; +is_method_synchronous(#'channel.flow_ok'{}) -> false; +is_method_synchronous(#'channel.close'{}) -> true; +is_method_synchronous(#'channel.close_ok'{}) -> false; +is_method_synchronous(#'access.request'{}) -> true; +is_method_synchronous(#'access.request_ok'{}) -> false; +is_method_synchronous(#'exchange.declare'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'exchange.declare_ok'{}) -> false; +is_method_synchronous(#'exchange.delete'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'exchange.delete_ok'{}) -> false; +is_method_synchronous(#'exchange.bind'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'exchange.bind_ok'{}) -> false; +is_method_synchronous(#'exchange.unbind'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'exchange.unbind_ok'{}) -> false; +is_method_synchronous(#'queue.declare'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'queue.declare_ok'{}) -> false; +is_method_synchronous(#'queue.bind'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'queue.bind_ok'{}) -> false; +is_method_synchronous(#'queue.purge'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'queue.purge_ok'{}) -> false; +is_method_synchronous(#'queue.delete'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'queue.delete_ok'{}) -> false; +is_method_synchronous(#'queue.unbind'{}) -> true; +is_method_synchronous(#'queue.unbind_ok'{}) -> false; 
+is_method_synchronous(#'tx.select'{}) -> true; +is_method_synchronous(#'tx.select_ok'{}) -> false; +is_method_synchronous(#'tx.commit'{}) -> true; +is_method_synchronous(#'tx.commit_ok'{}) -> false; +is_method_synchronous(#'tx.rollback'{}) -> true; +is_method_synchronous(#'tx.rollback_ok'{}) -> false; +is_method_synchronous(#'confirm.select'{nowait = NoWait}) -> not(NoWait); +is_method_synchronous(#'confirm.select_ok'{}) -> false; +is_method_synchronous(Name) -> exit({unknown_method_name, Name}). +method_record('basic.qos') -> #'basic.qos'{}; +method_record('basic.qos_ok') -> #'basic.qos_ok'{}; +method_record('basic.consume') -> #'basic.consume'{}; +method_record('basic.consume_ok') -> #'basic.consume_ok'{}; +method_record('basic.cancel') -> #'basic.cancel'{}; +method_record('basic.cancel_ok') -> #'basic.cancel_ok'{}; +method_record('basic.publish') -> #'basic.publish'{}; +method_record('basic.return') -> #'basic.return'{}; +method_record('basic.deliver') -> #'basic.deliver'{}; +method_record('basic.get') -> #'basic.get'{}; +method_record('basic.get_ok') -> #'basic.get_ok'{}; +method_record('basic.get_empty') -> #'basic.get_empty'{}; +method_record('basic.ack') -> #'basic.ack'{}; +method_record('basic.reject') -> #'basic.reject'{}; +method_record('basic.recover_async') -> #'basic.recover_async'{}; +method_record('basic.recover') -> #'basic.recover'{}; +method_record('basic.recover_ok') -> #'basic.recover_ok'{}; +method_record('basic.nack') -> #'basic.nack'{}; +method_record('basic.credit') -> #'basic.credit'{}; +method_record('basic.credit_ok') -> #'basic.credit_ok'{}; +method_record('basic.credit_drained') -> #'basic.credit_drained'{}; +method_record('connection.start') -> #'connection.start'{}; +method_record('connection.start_ok') -> #'connection.start_ok'{}; +method_record('connection.secure') -> #'connection.secure'{}; +method_record('connection.secure_ok') -> #'connection.secure_ok'{}; +method_record('connection.tune') -> #'connection.tune'{}; +method_record('connection.tune_ok') -> #'connection.tune_ok'{}; +method_record('connection.open') -> #'connection.open'{}; +method_record('connection.open_ok') -> #'connection.open_ok'{}; +method_record('connection.close') -> #'connection.close'{}; +method_record('connection.close_ok') -> #'connection.close_ok'{}; +method_record('connection.blocked') -> #'connection.blocked'{}; +method_record('connection.unblocked') -> #'connection.unblocked'{}; +method_record('connection.update_secret') -> #'connection.update_secret'{}; +method_record('connection.update_secret_ok') -> #'connection.update_secret_ok'{}; +method_record('channel.open') -> #'channel.open'{}; +method_record('channel.open_ok') -> #'channel.open_ok'{}; +method_record('channel.flow') -> #'channel.flow'{}; +method_record('channel.flow_ok') -> #'channel.flow_ok'{}; +method_record('channel.close') -> #'channel.close'{}; +method_record('channel.close_ok') -> #'channel.close_ok'{}; +method_record('access.request') -> #'access.request'{}; +method_record('access.request_ok') -> #'access.request_ok'{}; +method_record('exchange.declare') -> #'exchange.declare'{}; +method_record('exchange.declare_ok') -> #'exchange.declare_ok'{}; +method_record('exchange.delete') -> #'exchange.delete'{}; +method_record('exchange.delete_ok') -> #'exchange.delete_ok'{}; +method_record('exchange.bind') -> #'exchange.bind'{}; +method_record('exchange.bind_ok') -> #'exchange.bind_ok'{}; +method_record('exchange.unbind') -> #'exchange.unbind'{}; +method_record('exchange.unbind_ok') -> #'exchange.unbind_ok'{}; 
+method_record('queue.declare') -> #'queue.declare'{}; +method_record('queue.declare_ok') -> #'queue.declare_ok'{}; +method_record('queue.bind') -> #'queue.bind'{}; +method_record('queue.bind_ok') -> #'queue.bind_ok'{}; +method_record('queue.purge') -> #'queue.purge'{}; +method_record('queue.purge_ok') -> #'queue.purge_ok'{}; +method_record('queue.delete') -> #'queue.delete'{}; +method_record('queue.delete_ok') -> #'queue.delete_ok'{}; +method_record('queue.unbind') -> #'queue.unbind'{}; +method_record('queue.unbind_ok') -> #'queue.unbind_ok'{}; +method_record('tx.select') -> #'tx.select'{}; +method_record('tx.select_ok') -> #'tx.select_ok'{}; +method_record('tx.commit') -> #'tx.commit'{}; +method_record('tx.commit_ok') -> #'tx.commit_ok'{}; +method_record('tx.rollback') -> #'tx.rollback'{}; +method_record('tx.rollback_ok') -> #'tx.rollback_ok'{}; +method_record('confirm.select') -> #'confirm.select'{}; +method_record('confirm.select_ok') -> #'confirm.select_ok'{}; +method_record(Name) -> exit({unknown_method_name, Name}). +method_fieldnames('basic.qos') -> [prefetch_size, prefetch_count, global]; +method_fieldnames('basic.qos_ok') -> []; +method_fieldnames('basic.consume') -> [ticket, queue, consumer_tag, no_local, no_ack, exclusive, nowait, arguments]; +method_fieldnames('basic.consume_ok') -> [consumer_tag]; +method_fieldnames('basic.cancel') -> [consumer_tag, nowait]; +method_fieldnames('basic.cancel_ok') -> [consumer_tag]; +method_fieldnames('basic.publish') -> [ticket, exchange, routing_key, mandatory, immediate]; +method_fieldnames('basic.return') -> [reply_code, reply_text, exchange, routing_key]; +method_fieldnames('basic.deliver') -> [consumer_tag, delivery_tag, redelivered, exchange, routing_key]; +method_fieldnames('basic.get') -> [ticket, queue, no_ack]; +method_fieldnames('basic.get_ok') -> [delivery_tag, redelivered, exchange, routing_key, message_count]; +method_fieldnames('basic.get_empty') -> [cluster_id]; +method_fieldnames('basic.ack') -> [delivery_tag, multiple]; +method_fieldnames('basic.reject') -> [delivery_tag, requeue]; +method_fieldnames('basic.recover_async') -> [requeue]; +method_fieldnames('basic.recover') -> [requeue]; +method_fieldnames('basic.recover_ok') -> []; +method_fieldnames('basic.nack') -> [delivery_tag, multiple, requeue]; +method_fieldnames('basic.credit') -> [consumer_tag, credit, drain]; +method_fieldnames('basic.credit_ok') -> [available]; +method_fieldnames('basic.credit_drained') -> [consumer_tag, credit_drained]; +method_fieldnames('connection.start') -> [version_major, version_minor, server_properties, mechanisms, locales]; +method_fieldnames('connection.start_ok') -> [client_properties, mechanism, response, locale]; +method_fieldnames('connection.secure') -> [challenge]; +method_fieldnames('connection.secure_ok') -> [response]; +method_fieldnames('connection.tune') -> [channel_max, frame_max, heartbeat]; +method_fieldnames('connection.tune_ok') -> [channel_max, frame_max, heartbeat]; +method_fieldnames('connection.open') -> [virtual_host, capabilities, insist]; +method_fieldnames('connection.open_ok') -> [known_hosts]; +method_fieldnames('connection.close') -> [reply_code, reply_text, class_id, method_id]; +method_fieldnames('connection.close_ok') -> []; +method_fieldnames('connection.blocked') -> [reason]; +method_fieldnames('connection.unblocked') -> []; +method_fieldnames('connection.update_secret') -> [new_secret, reason]; +method_fieldnames('connection.update_secret_ok') -> []; +method_fieldnames('channel.open') -> [out_of_band]; 
+method_fieldnames('channel.open_ok') -> [channel_id]; +method_fieldnames('channel.flow') -> [active]; +method_fieldnames('channel.flow_ok') -> [active]; +method_fieldnames('channel.close') -> [reply_code, reply_text, class_id, method_id]; +method_fieldnames('channel.close_ok') -> []; +method_fieldnames('access.request') -> [realm, exclusive, passive, active, write, read]; +method_fieldnames('access.request_ok') -> [ticket]; +method_fieldnames('exchange.declare') -> [ticket, exchange, type, passive, durable, auto_delete, internal, nowait, arguments]; +method_fieldnames('exchange.declare_ok') -> []; +method_fieldnames('exchange.delete') -> [ticket, exchange, if_unused, nowait]; +method_fieldnames('exchange.delete_ok') -> []; +method_fieldnames('exchange.bind') -> [ticket, destination, source, routing_key, nowait, arguments]; +method_fieldnames('exchange.bind_ok') -> []; +method_fieldnames('exchange.unbind') -> [ticket, destination, source, routing_key, nowait, arguments]; +method_fieldnames('exchange.unbind_ok') -> []; +method_fieldnames('queue.declare') -> [ticket, queue, passive, durable, exclusive, auto_delete, nowait, arguments]; +method_fieldnames('queue.declare_ok') -> [queue, message_count, consumer_count]; +method_fieldnames('queue.bind') -> [ticket, queue, exchange, routing_key, nowait, arguments]; +method_fieldnames('queue.bind_ok') -> []; +method_fieldnames('queue.purge') -> [ticket, queue, nowait]; +method_fieldnames('queue.purge_ok') -> [message_count]; +method_fieldnames('queue.delete') -> [ticket, queue, if_unused, if_empty, nowait]; +method_fieldnames('queue.delete_ok') -> [message_count]; +method_fieldnames('queue.unbind') -> [ticket, queue, exchange, routing_key, arguments]; +method_fieldnames('queue.unbind_ok') -> []; +method_fieldnames('tx.select') -> []; +method_fieldnames('tx.select_ok') -> []; +method_fieldnames('tx.commit') -> []; +method_fieldnames('tx.commit_ok') -> []; +method_fieldnames('tx.rollback') -> []; +method_fieldnames('tx.rollback_ok') -> []; +method_fieldnames('confirm.select') -> [nowait]; +method_fieldnames('confirm.select_ok') -> []; +method_fieldnames(Name) -> exit({unknown_method_name, Name}). 
+decode_method_fields('basic.qos', <>) -> + F2 = ((F2Bits band 1) /= 0), + #'basic.qos'{prefetch_size = F0, prefetch_count = F1, global = F2}; +decode_method_fields('basic.qos_ok', <<>>) -> + #'basic.qos_ok'{}; +decode_method_fields('basic.consume', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + F5 = ((F3Bits band 4) /= 0), + F6 = ((F3Bits band 8) /= 0), + F7 = rabbit_binary_parser:parse_table(F7Tab), + #'basic.consume'{ticket = F0, queue = F1, consumer_tag = F2, no_local = F3, no_ack = F4, exclusive = F5, nowait = F6, arguments = F7}; +decode_method_fields('basic.consume_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'basic.consume_ok'{consumer_tag = F0}; +decode_method_fields('basic.cancel', <>) -> + rabbit_binary_parser:assert_utf8(F0), + F1 = ((F1Bits band 1) /= 0), + #'basic.cancel'{consumer_tag = F0, nowait = F1}; +decode_method_fields('basic.cancel_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'basic.cancel_ok'{consumer_tag = F0}; +decode_method_fields('basic.publish', <>) -> + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + #'basic.publish'{ticket = F0, exchange = F1, routing_key = F2, mandatory = F3, immediate = F4}; +decode_method_fields('basic.return', <>) -> + #'basic.return'{reply_code = F0, reply_text = F1, exchange = F2, routing_key = F3}; +decode_method_fields('basic.deliver', <>) -> + F2 = ((F2Bits band 1) /= 0), + #'basic.deliver'{consumer_tag = F0, delivery_tag = F1, redelivered = F2, exchange = F3, routing_key = F4}; +decode_method_fields('basic.get', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + #'basic.get'{ticket = F0, queue = F1, no_ack = F2}; +decode_method_fields('basic.get_ok', <>) -> + F1 = ((F1Bits band 1) /= 0), + #'basic.get_ok'{delivery_tag = F0, redelivered = F1, exchange = F2, routing_key = F3, message_count = F4}; +decode_method_fields('basic.get_empty', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'basic.get_empty'{cluster_id = F0}; +decode_method_fields('basic.ack', <>) -> + F1 = ((F1Bits band 1) /= 0), + #'basic.ack'{delivery_tag = F0, multiple = F1}; +decode_method_fields('basic.reject', <>) -> + F1 = ((F1Bits band 1) /= 0), + #'basic.reject'{delivery_tag = F0, requeue = F1}; +decode_method_fields('basic.recover_async', <>) -> + F0 = ((F0Bits band 1) /= 0), + #'basic.recover_async'{requeue = F0}; +decode_method_fields('basic.recover', <>) -> + F0 = ((F0Bits band 1) /= 0), + #'basic.recover'{requeue = F0}; +decode_method_fields('basic.recover_ok', <<>>) -> + #'basic.recover_ok'{}; +decode_method_fields('basic.nack', <>) -> + F1 = ((F1Bits band 1) /= 0), + F2 = ((F1Bits band 2) /= 0), + #'basic.nack'{delivery_tag = F0, multiple = F1, requeue = F2}; +decode_method_fields('basic.credit', <>) -> + rabbit_binary_parser:assert_utf8(F0), + F2 = ((F2Bits band 1) /= 0), + #'basic.credit'{consumer_tag = F0, credit = F1, drain = F2}; +decode_method_fields('basic.credit_ok', <>) -> + #'basic.credit_ok'{available = F0}; +decode_method_fields('basic.credit_drained', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'basic.credit_drained'{consumer_tag = F0, credit_drained = F1}; +decode_method_fields('connection.start', <>) -> + F2 = rabbit_binary_parser:parse_table(F2Tab), + #'connection.start'{version_major = F0, version_minor = F1, server_properties = F2, mechanisms = F3, locales = F4}; +decode_method_fields('connection.start_ok', <>) -> + F0 = rabbit_binary_parser:parse_table(F0Tab), + 
rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F3), + #'connection.start_ok'{client_properties = F0, mechanism = F1, response = F2, locale = F3}; +decode_method_fields('connection.secure', <>) -> + #'connection.secure'{challenge = F0}; +decode_method_fields('connection.secure_ok', <>) -> + #'connection.secure_ok'{response = F0}; +decode_method_fields('connection.tune', <>) -> + #'connection.tune'{channel_max = F0, frame_max = F1, heartbeat = F2}; +decode_method_fields('connection.tune_ok', <>) -> + #'connection.tune_ok'{channel_max = F0, frame_max = F1, heartbeat = F2}; +decode_method_fields('connection.open', <>) -> + rabbit_binary_parser:assert_utf8(F0), + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + #'connection.open'{virtual_host = F0, capabilities = F1, insist = F2}; +decode_method_fields('connection.open_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'connection.open_ok'{known_hosts = F0}; +decode_method_fields('connection.close', <>) -> + rabbit_binary_parser:assert_utf8(F1), + #'connection.close'{reply_code = F0, reply_text = F1, class_id = F2, method_id = F3}; +decode_method_fields('connection.close_ok', <<>>) -> + #'connection.close_ok'{}; +decode_method_fields('connection.blocked', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'connection.blocked'{reason = F0}; +decode_method_fields('connection.unblocked', <<>>) -> + #'connection.unblocked'{}; +decode_method_fields('connection.update_secret', <>) -> + rabbit_binary_parser:assert_utf8(F1), + #'connection.update_secret'{new_secret = F0, reason = F1}; +decode_method_fields('connection.update_secret_ok', <<>>) -> + #'connection.update_secret_ok'{}; +decode_method_fields('channel.open', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'channel.open'{out_of_band = F0}; +decode_method_fields('channel.open_ok', <>) -> + #'channel.open_ok'{channel_id = F0}; +decode_method_fields('channel.flow', <>) -> + F0 = ((F0Bits band 1) /= 0), + #'channel.flow'{active = F0}; +decode_method_fields('channel.flow_ok', <>) -> + F0 = ((F0Bits band 1) /= 0), + #'channel.flow_ok'{active = F0}; +decode_method_fields('channel.close', <>) -> + rabbit_binary_parser:assert_utf8(F1), + #'channel.close'{reply_code = F0, reply_text = F1, class_id = F2, method_id = F3}; +decode_method_fields('channel.close_ok', <<>>) -> + #'channel.close_ok'{}; +decode_method_fields('access.request', <>) -> + rabbit_binary_parser:assert_utf8(F0), + F1 = ((F1Bits band 1) /= 0), + F2 = ((F1Bits band 2) /= 0), + F3 = ((F1Bits band 4) /= 0), + F4 = ((F1Bits band 8) /= 0), + F5 = ((F1Bits band 16) /= 0), + #'access.request'{realm = F0, exclusive = F1, passive = F2, active = F3, write = F4, read = F5}; +decode_method_fields('access.request_ok', <>) -> + #'access.request_ok'{ticket = F0}; +decode_method_fields('exchange.declare', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + F3 = ((F3Bits band 1) /= 0), + F4 = ((F3Bits band 2) /= 0), + F5 = ((F3Bits band 4) /= 0), + F6 = ((F3Bits band 8) /= 0), + F7 = ((F3Bits band 16) /= 0), + F8 = rabbit_binary_parser:parse_table(F8Tab), + #'exchange.declare'{ticket = F0, exchange = F1, type = F2, passive = F3, durable = F4, auto_delete = F5, internal = F6, nowait = F7, arguments = F8}; +decode_method_fields('exchange.declare_ok', <<>>) -> + #'exchange.declare_ok'{}; +decode_method_fields('exchange.delete', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + F3 = ((F2Bits band 2) /= 0), + #'exchange.delete'{ticket = F0, 
exchange = F1, if_unused = F2, nowait = F3}; +decode_method_fields('exchange.delete_ok', <<>>) -> + #'exchange.delete_ok'{}; +decode_method_fields('exchange.bind', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + rabbit_binary_parser:assert_utf8(F3), + F4 = ((F4Bits band 1) /= 0), + F5 = rabbit_binary_parser:parse_table(F5Tab), + #'exchange.bind'{ticket = F0, destination = F1, source = F2, routing_key = F3, nowait = F4, arguments = F5}; +decode_method_fields('exchange.bind_ok', <<>>) -> + #'exchange.bind_ok'{}; +decode_method_fields('exchange.unbind', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + rabbit_binary_parser:assert_utf8(F3), + F4 = ((F4Bits band 1) /= 0), + F5 = rabbit_binary_parser:parse_table(F5Tab), + #'exchange.unbind'{ticket = F0, destination = F1, source = F2, routing_key = F3, nowait = F4, arguments = F5}; +decode_method_fields('exchange.unbind_ok', <<>>) -> + #'exchange.unbind_ok'{}; +decode_method_fields('queue.declare', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + F3 = ((F2Bits band 2) /= 0), + F4 = ((F2Bits band 4) /= 0), + F5 = ((F2Bits band 8) /= 0), + F6 = ((F2Bits band 16) /= 0), + F7 = rabbit_binary_parser:parse_table(F7Tab), + #'queue.declare'{ticket = F0, queue = F1, passive = F2, durable = F3, exclusive = F4, auto_delete = F5, nowait = F6, arguments = F7}; +decode_method_fields('queue.declare_ok', <>) -> + rabbit_binary_parser:assert_utf8(F0), + #'queue.declare_ok'{queue = F0, message_count = F1, consumer_count = F2}; +decode_method_fields('queue.bind', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + rabbit_binary_parser:assert_utf8(F3), + F4 = ((F4Bits band 1) /= 0), + F5 = rabbit_binary_parser:parse_table(F5Tab), + #'queue.bind'{ticket = F0, queue = F1, exchange = F2, routing_key = F3, nowait = F4, arguments = F5}; +decode_method_fields('queue.bind_ok', <<>>) -> + #'queue.bind_ok'{}; +decode_method_fields('queue.purge', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + #'queue.purge'{ticket = F0, queue = F1, nowait = F2}; +decode_method_fields('queue.purge_ok', <>) -> + #'queue.purge_ok'{message_count = F0}; +decode_method_fields('queue.delete', <>) -> + rabbit_binary_parser:assert_utf8(F1), + F2 = ((F2Bits band 1) /= 0), + F3 = ((F2Bits band 2) /= 0), + F4 = ((F2Bits band 4) /= 0), + #'queue.delete'{ticket = F0, queue = F1, if_unused = F2, if_empty = F3, nowait = F4}; +decode_method_fields('queue.delete_ok', <>) -> + #'queue.delete_ok'{message_count = F0}; +decode_method_fields('queue.unbind', <>) -> + rabbit_binary_parser:assert_utf8(F1), + rabbit_binary_parser:assert_utf8(F2), + rabbit_binary_parser:assert_utf8(F3), + F4 = rabbit_binary_parser:parse_table(F4Tab), + #'queue.unbind'{ticket = F0, queue = F1, exchange = F2, routing_key = F3, arguments = F4}; +decode_method_fields('queue.unbind_ok', <<>>) -> + #'queue.unbind_ok'{}; +decode_method_fields('tx.select', <<>>) -> + #'tx.select'{}; +decode_method_fields('tx.select_ok', <<>>) -> + #'tx.select_ok'{}; +decode_method_fields('tx.commit', <<>>) -> + #'tx.commit'{}; +decode_method_fields('tx.commit_ok', <<>>) -> + #'tx.commit_ok'{}; +decode_method_fields('tx.rollback', <<>>) -> + #'tx.rollback'{}; +decode_method_fields('tx.rollback_ok', <<>>) -> + #'tx.rollback_ok'{}; +decode_method_fields('confirm.select', <>) -> + F0 = ((F0Bits band 1) /= 0), + #'confirm.select'{nowait = F0}; +decode_method_fields('confirm.select_ok', 
<<>>) -> + #'confirm.select_ok'{}; +decode_method_fields(Name, BinaryFields) -> + rabbit_misc:frame_error(Name, BinaryFields). +decode_properties(60, <>) -> + {F0, R1} = if P0 =:= 0 -> {undefined, R0}; true -> ?SHORTSTR_VAL(R0, L0, V0, X0) end, + {F1, R2} = if P1 =:= 0 -> {undefined, R1}; true -> ?SHORTSTR_VAL(R1, L1, V1, X1) end, + {F2, R3} = if P2 =:= 0 -> {undefined, R2}; true -> ?TABLE_VAL(R2, L2, V2, X2) end, + {F3, R4} = if P3 =:= 0 -> {undefined, R3}; true -> ?OCTET_VAL(R3, L3, V3, X3) end, + {F4, R5} = if P4 =:= 0 -> {undefined, R4}; true -> ?OCTET_VAL(R4, L4, V4, X4) end, + {F5, R6} = if P5 =:= 0 -> {undefined, R5}; true -> ?SHORTSTR_VAL(R5, L5, V5, X5) end, + {F6, R7} = if P6 =:= 0 -> {undefined, R6}; true -> ?SHORTSTR_VAL(R6, L6, V6, X6) end, + {F7, R8} = if P7 =:= 0 -> {undefined, R7}; true -> ?SHORTSTR_VAL(R7, L7, V7, X7) end, + {F8, R9} = if P8 =:= 0 -> {undefined, R8}; true -> ?SHORTSTR_VAL(R8, L8, V8, X8) end, + {F9, R10} = if P9 =:= 0 -> {undefined, R9}; true -> ?TIMESTAMP_VAL(R9, L9, V9, X9) end, + {F10, R11} = if P10 =:= 0 -> {undefined, R10}; true -> ?SHORTSTR_VAL(R10, L10, V10, X10) end, + {F11, R12} = if P11 =:= 0 -> {undefined, R11}; true -> ?SHORTSTR_VAL(R11, L11, V11, X11) end, + {F12, R13} = if P12 =:= 0 -> {undefined, R12}; true -> ?SHORTSTR_VAL(R12, L12, V12, X12) end, + {F13, R14} = if P13 =:= 0 -> {undefined, R13}; true -> ?SHORTSTR_VAL(R13, L13, V13, X13) end, + <<>> = R14, + #'P_basic'{content_type = F0, content_encoding = F1, headers = F2, delivery_mode = F3, priority = F4, correlation_id = F5, reply_to = F6, expiration = F7, message_id = F8, timestamp = F9, type = F10, user_id = F11, app_id = F12, cluster_id = F13}; +decode_properties(10, <<>>) -> + #'P_connection'{}; +decode_properties(20, <<>>) -> + #'P_channel'{}; +decode_properties(30, <<>>) -> + #'P_access'{}; +decode_properties(40, <<>>) -> + #'P_exchange'{}; +decode_properties(50, <<>>) -> + #'P_queue'{}; +decode_properties(90, <<>>) -> + #'P_tx'{}; +decode_properties(85, <<>>) -> + #'P_confirm'{}; +decode_properties(ClassId, _BinaryFields) -> exit({unknown_class_id, ClassId}). 
+encode_method_fields(#'basic.qos'{prefetch_size = F0, prefetch_count = F1, global = F2}) -> + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'basic.qos_ok'{}) -> + <<>>; +encode_method_fields(#'basic.consume'{ticket = F0, queue = F1, consumer_tag = F2, no_local = F3, no_ack = F4, exclusive = F5, nowait = F6, arguments = F7}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1) bor (bitvalue(F5) bsl 2) bor (bitvalue(F6) bsl 3)), + F7Tab = rabbit_binary_generator:generate_table(F7), + F7Len = size(F7Tab), + <>; +encode_method_fields(#'basic.consume_ok'{consumer_tag = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'basic.cancel'{consumer_tag = F0, nowait = F1}) -> + F0Len = shortstr_size(F0), + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'basic.cancel_ok'{consumer_tag = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'basic.publish'{ticket = F0, exchange = F1, routing_key = F2, mandatory = F3, immediate = F4}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1)), + <>; +encode_method_fields(#'basic.return'{reply_code = F0, reply_text = F1, exchange = F2, routing_key = F3}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + <>; +encode_method_fields(#'basic.deliver'{consumer_tag = F0, delivery_tag = F1, redelivered = F2, exchange = F3, routing_key = F4}) -> + F0Len = shortstr_size(F0), + F2Bits = ((bitvalue(F2) bsl 0)), + F3Len = shortstr_size(F3), + F4Len = shortstr_size(F4), + <>; +encode_method_fields(#'basic.get'{ticket = F0, queue = F1, no_ack = F2}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'basic.get_ok'{delivery_tag = F0, redelivered = F1, exchange = F2, routing_key = F3, message_count = F4}) -> + F1Bits = ((bitvalue(F1) bsl 0)), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + <>; +encode_method_fields(#'basic.get_empty'{cluster_id = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'basic.ack'{delivery_tag = F0, multiple = F1}) -> + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'basic.reject'{delivery_tag = F0, requeue = F1}) -> + F1Bits = ((bitvalue(F1) bsl 0)), + <>; +encode_method_fields(#'basic.recover_async'{requeue = F0}) -> + F0Bits = ((bitvalue(F0) bsl 0)), + <>; +encode_method_fields(#'basic.recover'{requeue = F0}) -> + F0Bits = ((bitvalue(F0) bsl 0)), + <>; +encode_method_fields(#'basic.recover_ok'{}) -> + <<>>; +encode_method_fields(#'basic.nack'{delivery_tag = F0, multiple = F1, requeue = F2}) -> + F1Bits = ((bitvalue(F1) bsl 0) bor (bitvalue(F2) bsl 1)), + <>; +encode_method_fields(#'basic.credit'{consumer_tag = F0, credit = F1, drain = F2}) -> + F0Len = shortstr_size(F0), + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'basic.credit_ok'{available = F0}) -> + <>; +encode_method_fields(#'basic.credit_drained'{consumer_tag = F0, credit_drained = F1}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'connection.start'{version_major = F0, version_minor = F1, server_properties = F2, mechanisms = F3, locales = F4}) -> + F2Tab = rabbit_binary_generator:generate_table(F2), + F2Len = size(F2Tab), + F3Len = size(F3), + F4Len = size(F4), + <>; +encode_method_fields(#'connection.start_ok'{client_properties = F0, mechanism = F1, response = F2, locale = F3}) -> + F0Tab = rabbit_binary_generator:generate_table(F0), + F0Len 
= size(F0Tab), + F1Len = shortstr_size(F1), + F2Len = size(F2), + F3Len = shortstr_size(F3), + <>; +encode_method_fields(#'connection.secure'{challenge = F0}) -> + F0Len = size(F0), + <>; +encode_method_fields(#'connection.secure_ok'{response = F0}) -> + F0Len = size(F0), + <>; +encode_method_fields(#'connection.tune'{channel_max = F0, frame_max = F1, heartbeat = F2}) -> + <>; +encode_method_fields(#'connection.tune_ok'{channel_max = F0, frame_max = F1, heartbeat = F2}) -> + <>; +encode_method_fields(#'connection.open'{virtual_host = F0, capabilities = F1, insist = F2}) -> + F0Len = shortstr_size(F0), + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'connection.open_ok'{known_hosts = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'connection.close'{reply_code = F0, reply_text = F1, class_id = F2, method_id = F3}) -> + F1Len = shortstr_size(F1), + <>; +encode_method_fields(#'connection.close_ok'{}) -> + <<>>; +encode_method_fields(#'connection.blocked'{reason = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'connection.unblocked'{}) -> + <<>>; +encode_method_fields(#'connection.update_secret'{new_secret = F0, reason = F1}) -> + F0Len = size(F0), + F1Len = shortstr_size(F1), + <>; +encode_method_fields(#'connection.update_secret_ok'{}) -> + <<>>; +encode_method_fields(#'channel.open'{out_of_band = F0}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'channel.open_ok'{channel_id = F0}) -> + F0Len = size(F0), + <>; +encode_method_fields(#'channel.flow'{active = F0}) -> + F0Bits = ((bitvalue(F0) bsl 0)), + <>; +encode_method_fields(#'channel.flow_ok'{active = F0}) -> + F0Bits = ((bitvalue(F0) bsl 0)), + <>; +encode_method_fields(#'channel.close'{reply_code = F0, reply_text = F1, class_id = F2, method_id = F3}) -> + F1Len = shortstr_size(F1), + <>; +encode_method_fields(#'channel.close_ok'{}) -> + <<>>; +encode_method_fields(#'access.request'{realm = F0, exclusive = F1, passive = F2, active = F3, write = F4, read = F5}) -> + F0Len = shortstr_size(F0), + F1Bits = ((bitvalue(F1) bsl 0) bor (bitvalue(F2) bsl 1) bor (bitvalue(F3) bsl 2) bor (bitvalue(F4) bsl 3) bor (bitvalue(F5) bsl 4)), + <>; +encode_method_fields(#'access.request_ok'{ticket = F0}) -> + <>; +encode_method_fields(#'exchange.declare'{ticket = F0, exchange = F1, type = F2, passive = F3, durable = F4, auto_delete = F5, internal = F6, nowait = F7, arguments = F8}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Bits = ((bitvalue(F3) bsl 0) bor (bitvalue(F4) bsl 1) bor (bitvalue(F5) bsl 2) bor (bitvalue(F6) bsl 3) bor (bitvalue(F7) bsl 4)), + F8Tab = rabbit_binary_generator:generate_table(F8), + F8Len = size(F8Tab), + <>; +encode_method_fields(#'exchange.declare_ok'{}) -> + <<>>; +encode_method_fields(#'exchange.delete'{ticket = F0, exchange = F1, if_unused = F2, nowait = F3}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0) bor (bitvalue(F3) bsl 1)), + <>; +encode_method_fields(#'exchange.delete_ok'{}) -> + <<>>; +encode_method_fields(#'exchange.bind'{ticket = F0, destination = F1, source = F2, routing_key = F3, nowait = F4, arguments = F5}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + F4Bits = ((bitvalue(F4) bsl 0)), + F5Tab = rabbit_binary_generator:generate_table(F5), + F5Len = size(F5Tab), + <>; +encode_method_fields(#'exchange.bind_ok'{}) -> + <<>>; +encode_method_fields(#'exchange.unbind'{ticket = F0, destination = F1, source = F2, routing_key = F3, nowait = 
F4, arguments = F5}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + F4Bits = ((bitvalue(F4) bsl 0)), + F5Tab = rabbit_binary_generator:generate_table(F5), + F5Len = size(F5Tab), + <>; +encode_method_fields(#'exchange.unbind_ok'{}) -> + <<>>; +encode_method_fields(#'queue.declare'{ticket = F0, queue = F1, passive = F2, durable = F3, exclusive = F4, auto_delete = F5, nowait = F6, arguments = F7}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0) bor (bitvalue(F3) bsl 1) bor (bitvalue(F4) bsl 2) bor (bitvalue(F5) bsl 3) bor (bitvalue(F6) bsl 4)), + F7Tab = rabbit_binary_generator:generate_table(F7), + F7Len = size(F7Tab), + <>; +encode_method_fields(#'queue.declare_ok'{queue = F0, message_count = F1, consumer_count = F2}) -> + F0Len = shortstr_size(F0), + <>; +encode_method_fields(#'queue.bind'{ticket = F0, queue = F1, exchange = F2, routing_key = F3, nowait = F4, arguments = F5}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + F4Bits = ((bitvalue(F4) bsl 0)), + F5Tab = rabbit_binary_generator:generate_table(F5), + F5Len = size(F5Tab), + <>; +encode_method_fields(#'queue.bind_ok'{}) -> + <<>>; +encode_method_fields(#'queue.purge'{ticket = F0, queue = F1, nowait = F2}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0)), + <>; +encode_method_fields(#'queue.purge_ok'{message_count = F0}) -> + <>; +encode_method_fields(#'queue.delete'{ticket = F0, queue = F1, if_unused = F2, if_empty = F3, nowait = F4}) -> + F1Len = shortstr_size(F1), + F2Bits = ((bitvalue(F2) bsl 0) bor (bitvalue(F3) bsl 1) bor (bitvalue(F4) bsl 2)), + <>; +encode_method_fields(#'queue.delete_ok'{message_count = F0}) -> + <>; +encode_method_fields(#'queue.unbind'{ticket = F0, queue = F1, exchange = F2, routing_key = F3, arguments = F4}) -> + F1Len = shortstr_size(F1), + F2Len = shortstr_size(F2), + F3Len = shortstr_size(F3), + F4Tab = rabbit_binary_generator:generate_table(F4), + F4Len = size(F4Tab), + <>; +encode_method_fields(#'queue.unbind_ok'{}) -> + <<>>; +encode_method_fields(#'tx.select'{}) -> + <<>>; +encode_method_fields(#'tx.select_ok'{}) -> + <<>>; +encode_method_fields(#'tx.commit'{}) -> + <<>>; +encode_method_fields(#'tx.commit_ok'{}) -> + <<>>; +encode_method_fields(#'tx.rollback'{}) -> + <<>>; +encode_method_fields(#'tx.rollback_ok'{}) -> + <<>>; +encode_method_fields(#'confirm.select'{nowait = F0}) -> + F0Bits = ((bitvalue(F0) bsl 0)), + <>; +encode_method_fields(#'confirm.select_ok'{}) -> + <<>>; +encode_method_fields(Record) -> exit({unknown_method_name, element(1, Record)}). 
+encode_properties(#'P_basic'{content_type = F0, content_encoding = F1, headers = F2, delivery_mode = F3, priority = F4, correlation_id = F5, reply_to = F6, expiration = F7, message_id = F8, timestamp = F9, type = F10, user_id = F11, app_id = F12, cluster_id = F13}) -> + R0 = [<<>>], + {P0, R1} = if F0 =:= undefined -> {0, R0}; true -> {1, [?SHORTSTR_PROP(F0, L0) | R0]} end, + {P1, R2} = if F1 =:= undefined -> {0, R1}; true -> {1, [?SHORTSTR_PROP(F1, L1) | R1]} end, + {P2, R3} = if F2 =:= undefined -> {0, R2}; true -> {1, [?TABLE_PROP(F2, L2) | R2]} end, + {P3, R4} = if F3 =:= undefined -> {0, R3}; true -> {1, [?OCTET_PROP(F3, L3) | R3]} end, + {P4, R5} = if F4 =:= undefined -> {0, R4}; true -> {1, [?OCTET_PROP(F4, L4) | R4]} end, + {P5, R6} = if F5 =:= undefined -> {0, R5}; true -> {1, [?SHORTSTR_PROP(F5, L5) | R5]} end, + {P6, R7} = if F6 =:= undefined -> {0, R6}; true -> {1, [?SHORTSTR_PROP(F6, L6) | R6]} end, + {P7, R8} = if F7 =:= undefined -> {0, R7}; true -> {1, [?SHORTSTR_PROP(F7, L7) | R7]} end, + {P8, R9} = if F8 =:= undefined -> {0, R8}; true -> {1, [?SHORTSTR_PROP(F8, L8) | R8]} end, + {P9, R10} = if F9 =:= undefined -> {0, R9}; true -> {1, [?TIMESTAMP_PROP(F9, L9) | R9]} end, + {P10, R11} = if F10 =:= undefined -> {0, R10}; true -> {1, [?SHORTSTR_PROP(F10, L10) | R10]} end, + {P11, R12} = if F11 =:= undefined -> {0, R11}; true -> {1, [?SHORTSTR_PROP(F11, L11) | R11]} end, + {P12, R13} = if F12 =:= undefined -> {0, R12}; true -> {1, [?SHORTSTR_PROP(F12, L12) | R12]} end, + {P13, R14} = if F13 =:= undefined -> {0, R13}; true -> {1, [?SHORTSTR_PROP(F13, L13) | R13]} end, + list_to_binary([<> | lists:reverse(R14)]); +encode_properties(#'P_connection'{}) -> + <<>>; +encode_properties(#'P_channel'{}) -> + <<>>; +encode_properties(#'P_access'{}) -> + <<>>; +encode_properties(#'P_exchange'{}) -> + <<>>; +encode_properties(#'P_queue'{}) -> + <<>>; +encode_properties(#'P_tx'{}) -> + <<>>; +encode_properties(#'P_confirm'{}) -> + <<>>; +encode_properties(Record) -> exit({unknown_properties_record, Record}). 
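+
+%% Illustrative round trip (sketch): only class 60 ('basic') defines content
+%% properties; every other class encodes to, and decodes from, an empty
+%% binary, e.g.:
+%%
+%%   #'P_tx'{} = decode_properties(90, encode_properties(#'P_tx'{})).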
+lookup_amqp_exception(content_too_large) -> {false, ?CONTENT_TOO_LARGE, <<"CONTENT_TOO_LARGE">>}; +lookup_amqp_exception(no_route) -> {false, ?NO_ROUTE, <<"NO_ROUTE">>}; +lookup_amqp_exception(no_consumers) -> {false, ?NO_CONSUMERS, <<"NO_CONSUMERS">>}; +lookup_amqp_exception(access_refused) -> {false, ?ACCESS_REFUSED, <<"ACCESS_REFUSED">>}; +lookup_amqp_exception(not_found) -> {false, ?NOT_FOUND, <<"NOT_FOUND">>}; +lookup_amqp_exception(resource_locked) -> {false, ?RESOURCE_LOCKED, <<"RESOURCE_LOCKED">>}; +lookup_amqp_exception(precondition_failed) -> {false, ?PRECONDITION_FAILED, <<"PRECONDITION_FAILED">>}; +lookup_amqp_exception(connection_forced) -> {true, ?CONNECTION_FORCED, <<"CONNECTION_FORCED">>}; +lookup_amqp_exception(invalid_path) -> {true, ?INVALID_PATH, <<"INVALID_PATH">>}; +lookup_amqp_exception(frame_error) -> {true, ?FRAME_ERROR, <<"FRAME_ERROR">>}; +lookup_amqp_exception(syntax_error) -> {true, ?SYNTAX_ERROR, <<"SYNTAX_ERROR">>}; +lookup_amqp_exception(command_invalid) -> {true, ?COMMAND_INVALID, <<"COMMAND_INVALID">>}; +lookup_amqp_exception(channel_error) -> {true, ?CHANNEL_ERROR, <<"CHANNEL_ERROR">>}; +lookup_amqp_exception(unexpected_frame) -> {true, ?UNEXPECTED_FRAME, <<"UNEXPECTED_FRAME">>}; +lookup_amqp_exception(resource_error) -> {true, ?RESOURCE_ERROR, <<"RESOURCE_ERROR">>}; +lookup_amqp_exception(not_allowed) -> {true, ?NOT_ALLOWED, <<"NOT_ALLOWED">>}; +lookup_amqp_exception(not_implemented) -> {true, ?NOT_IMPLEMENTED, <<"NOT_IMPLEMENTED">>}; +lookup_amqp_exception(internal_error) -> {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}; +lookup_amqp_exception(Code) -> + rabbit_log:warning("Unknown AMQP error code '~p'~n", [Code]), + {true, ?INTERNAL_ERROR, <<"INTERNAL_ERROR">>}. +amqp_exception(?FRAME_METHOD) -> frame_method; +amqp_exception(?FRAME_HEADER) -> frame_header; +amqp_exception(?FRAME_BODY) -> frame_body; +amqp_exception(?FRAME_HEARTBEAT) -> frame_heartbeat; +amqp_exception(?FRAME_MIN_SIZE) -> frame_min_size; +amqp_exception(?FRAME_END) -> frame_end; +amqp_exception(?REPLY_SUCCESS) -> reply_success; +amqp_exception(?CONTENT_TOO_LARGE) -> content_too_large; +amqp_exception(?NO_ROUTE) -> no_route; +amqp_exception(?NO_CONSUMERS) -> no_consumers; +amqp_exception(?ACCESS_REFUSED) -> access_refused; +amqp_exception(?NOT_FOUND) -> not_found; +amqp_exception(?RESOURCE_LOCKED) -> resource_locked; +amqp_exception(?PRECONDITION_FAILED) -> precondition_failed; +amqp_exception(?CONNECTION_FORCED) -> connection_forced; +amqp_exception(?INVALID_PATH) -> invalid_path; +amqp_exception(?FRAME_ERROR) -> frame_error; +amqp_exception(?SYNTAX_ERROR) -> syntax_error; +amqp_exception(?COMMAND_INVALID) -> command_invalid; +amqp_exception(?CHANNEL_ERROR) -> channel_error; +amqp_exception(?UNEXPECTED_FRAME) -> unexpected_frame; +amqp_exception(?RESOURCE_ERROR) -> resource_error; +amqp_exception(?NOT_ALLOWED) -> not_allowed; +amqp_exception(?NOT_IMPLEMENTED) -> not_implemented; +amqp_exception(?INTERNAL_ERROR) -> internal_error; +amqp_exception(_Code) -> undefined. diff --git a/deps/rabbit_common/src/rabbit_heartbeat.erl b/deps/rabbit_common/src/rabbit_heartbeat.erl index cb8137140164..90bc45c0b933 100644 --- a/deps/rabbit_common/src/rabbit_heartbeat.erl +++ b/deps/rabbit_common/src/rabbit_heartbeat.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. 
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_heartbeat). @@ -19,7 +19,7 @@ -export_type([heartbeaters/0]). --type heartbeaters() :: {rabbit_types:maybe(pid()), rabbit_types:maybe(pid())}. +-type heartbeaters() :: {rabbit_types:'maybe'(pid()), rabbit_types:'maybe'(pid())}. -type heartbeat_callback() :: fun (() -> any()). diff --git a/deps/rabbit_common/src/rabbit_http_util.erl b/deps/rabbit_common/src/rabbit_http_util.erl index 0bc8221b3d28..76b138ad33d5 100644 --- a/deps/rabbit_common/src/rabbit_http_util.erl +++ b/deps/rabbit_common/src/rabbit_http_util.erl @@ -697,32 +697,32 @@ safe_relative_path_test() -> parse_qvalues_test() -> [] = parse_qvalues(""), - [{"identity", 0.0}] = parse_qvalues("identity;q=0"), - [{"identity", 0.0}] = parse_qvalues("identity ;q=0"), - [{"identity", 0.0}] = parse_qvalues(" identity; q =0 "), - [{"identity", 0.0}] = parse_qvalues("identity ; q = 0"), - [{"identity", 0.0}] = parse_qvalues("identity ; q= 0.0"), - [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues( + [{"identity", +0.0}] = parse_qvalues("identity;q=0"), + [{"identity", +0.0}] = parse_qvalues("identity ;q=0"), + [{"identity", +0.0}] = parse_qvalues(" identity; q =0 "), + [{"identity", +0.0}] = parse_qvalues("identity ; q = 0"), + [{"identity", +0.0}] = parse_qvalues("identity ; q= 0.0"), + [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", +0.0}] = parse_qvalues( "gzip,deflate,identity;q=0.0" ), - [{"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] = parse_qvalues( + [{"deflate", 1.0}, {"gzip", 1.0}, {"identity", +0.0}] = parse_qvalues( "deflate,gzip,identity;q=0.0" ), - [{"gzip", 1.0}, {"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] = + [{"gzip", 1.0}, {"deflate", 1.0}, {"gzip", 1.0}, {"identity", +0.0}] = parse_qvalues("gzip,deflate,gzip,identity;q=0"), - [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues( + [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", +0.0}] = parse_qvalues( "gzip, deflate , identity; q=0.0" ), - [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues( + [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", +0.0}] = parse_qvalues( "gzip; q=1, deflate;q=1.0, identity;q=0.0" ), - [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues( + [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", +0.0}] = parse_qvalues( "gzip; q=0.5, deflate;q=1.0, identity;q=0" ), - [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues( + [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", +0.0}] = parse_qvalues( "gzip; q=0.5, deflate , identity;q=0.0" ), - [{"gzip", 0.5}, {"deflate", 0.8}, {"identity", 0.0}] = parse_qvalues( + [{"gzip", 0.5}, {"deflate", 0.8}, {"identity", +0.0}] = parse_qvalues( "gzip; q=0.5, deflate;q=0.8, identity;q=0.0" ), [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}] = parse_qvalues( diff --git a/deps/rabbit_common/src/rabbit_json.erl b/deps/rabbit_common/src/rabbit_json.erl index adec32df38cd..858face301e4 100644 --- a/deps/rabbit_common/src/rabbit_json.erl +++ b/deps/rabbit_common/src/rabbit_json.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_json). 
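The +0.0 rewrites in the rabbit_http_util test above are most likely a forward-compatibility cleanup for Erlang/OTP 27, where +0.0 and -0.0 are no longer the same term and matching against a bare 0.0 literal raises a compiler warning; spelling the sign out keeps the assertions explicit. A small shell illustration (behaviour as of OTP 27, stated here as an assumption rather than part of the patch):

%% Exact (term) equality distinguishes the signed float zeros on OTP 27+,
%% while arithmetic comparison still treats them as equal.
1> +0.0 =:= -0.0.
false
2> +0.0 == -0.0.
true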
diff --git a/deps/rabbit_common/src/rabbit_log.erl b/deps/rabbit_common/src/rabbit_log.erl index 81f4593f425e..20d4a9c0e9c3 100644 --- a/deps/rabbit_common/src/rabbit_log.erl +++ b/deps/rabbit_common/src/rabbit_log.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_log). diff --git a/deps/rabbit_common/src/rabbit_misc.erl b/deps/rabbit_common/src/rabbit_misc.erl index 08c288a66ba6..c67d36adc8fe 100644 --- a/deps/rabbit_common/src/rabbit_misc.erl +++ b/deps/rabbit_common/src/rabbit_misc.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_misc). @@ -24,7 +24,8 @@ precondition_failed/1, precondition_failed/2]). -export([type_class/1, assert_args_equivalence/4, assert_field_equivalence/4]). -export([table_lookup/2, set_table_value/4, amqp_table/1, to_amqp_table/1]). --export([r/3, r/2, r_arg/4, rs/1]). +-export([r/3, r/2, r_arg/4, rs/1, + queue_resource/2, exchange_resource/2]). -export([enable_cover/0, report_cover/0]). -export([enable_cover/1, report_cover/1]). -export([start_cover/1]). @@ -82,13 +83,13 @@ -export([safe_ets_update_counter/3, safe_ets_update_counter/4, safe_ets_update_counter/5, safe_ets_update_element/3, safe_ets_update_element/4, safe_ets_update_element/5]). -export([is_even/1, is_odd/1]). --export([is_valid_shortstr/1]). -export([maps_any/2, maps_put_truthy/3, maps_put_falsy/3 ]). -export([remote_sup_child/2]). +-export([for_each_while_ok/2, fold_while_ok/3]). %% Horrible macro to use in guards -define(IS_BENIGN_EXIT(R), @@ -438,6 +439,16 @@ rs(#resource{virtual_host = VHostPath, kind = topic, name = Name}) -> rs(#resource{virtual_host = VHostPath, kind = Kind, name = Name}) -> format("~ts '~ts' in vhost '~ts'", [Kind, Name, VHostPath]). +-spec queue_resource(rabbit_types:vhost(), resource_name()) -> + rabbit_types:r(queue). +queue_resource(VHostPath, Name) -> + r(VHostPath, queue, Name). + +-spec exchange_resource(rabbit_types:vhost(), resource_name()) -> + rabbit_types:r(exchange). +exchange_resource(VHostPath, Name) -> + r(VHostPath, exchange, Name). + enable_cover() -> enable_cover(["."]). enable_cover(Dirs) -> @@ -1100,8 +1111,8 @@ rabbitmq_and_erlang_versions() -> which_applications() -> try application:which_applications(10000) - catch - exit:{timeout, _} -> [] + catch _:_:_Stacktrace -> + [] end. sequence_error([T]) -> T; @@ -1180,12 +1191,9 @@ get_proc_name() -> {ok, Name} end. -%% application:get_env/3 is only available in R16B01 or later. +%% application:get_env/3 is available in R16B01 or later. get_env(Application, Key, Def) -> - case application:get_env(Application, Key) of - {ok, Val} -> Val; - undefined -> Def - end. + application:get_env(Application, Key, Def). get_channel_operation_timeout() -> %% Default channel_operation_timeout set to net_ticktime + 10s to @@ -1600,21 +1608,6 @@ is_even(N) -> is_odd(N) -> (N band 1) =:= 1. 
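The queue_resource/2 and exchange_resource/2 helpers introduced above are thin wrappers over r/3 that fix the resource kind, so call sites no longer repeat the kind atom. A hedged usage sketch (the vhost and names are made up):

%% Both calls return #resource{} records scoped to the given vhost.
QRes = rabbit_misc:queue_resource(<<"/">>, <<"orders">>),
XRes = rabbit_misc:exchange_resource(<<"/">>, <<"amq.topic">>),
%% rs/1 renders them, e.g. "queue 'orders' in vhost '/'".
io:format("~ts / ~ts~n", [rabbit_misc:rs(QRes), rabbit_misc:rs(XRes)]).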
--spec is_valid_shortstr(term()) -> boolean(). -is_valid_shortstr(Bin) when byte_size(Bin) < 256 -> - is_utf8_no_null(Bin); -is_valid_shortstr(_) -> - false. - -is_utf8_no_null(<<>>) -> - true; -is_utf8_no_null(<<0, _/binary>>) -> - false; -is_utf8_no_null(<<_/utf8, Rem/binary>>) -> - is_utf8_no_null(Rem); -is_utf8_no_null(_) -> - false. - -spec maps_put_truthy(Key, Value, Map) -> Map when Map :: #{Key => Value}. maps_put_truthy(_K, undefined, M) -> @@ -1640,3 +1633,46 @@ remote_sup_child(Node, Sup) -> [] -> {error, no_child}; {badrpc, {'EXIT', {noproc, _}}} -> {error, no_sup} end. + +-spec for_each_while_ok(ForEachFun, List) -> Ret when + ForEachFun :: fun((Element) -> ok | {error, ErrReason}), + ErrReason :: any(), + Element :: any(), + List :: [Element], + Ret :: ok | {error, ErrReason}. +%% @doc Calls the given `ForEachFun' for each element in the given `List', +%% short-circuiting if the function returns `{error,_}'. +%% +%% @returns the first `{error,_}' returned by `ForEachFun' or `ok' if +%% `ForEachFun' never returns an error tuple. + +for_each_while_ok(Fun, [Elem | Rest]) -> + case Fun(Elem) of + ok -> + for_each_while_ok(Fun, Rest); + {error, _} = Error -> + Error + end; +for_each_while_ok(_, []) -> + ok. + +-spec fold_while_ok(FoldFun, Acc, List) -> Ret when + FoldFun :: fun((Element, Acc) -> {ok, Acc} | {error, ErrReason}), + Element :: any(), + List :: Element, + Ret :: {ok, Acc} | {error, ErrReason}. +%% @doc Calls the given `FoldFun' on each element of the given `List' and the +%% accumulator value, short-circuiting if the function returns `{error,_}'. +%% +%% @returns the first `{error,_}' returned by `FoldFun' or `{ok,Acc}' if +%% `FoldFun' never returns an error tuple. + +fold_while_ok(Fun, Acc0, [Elem | Rest]) -> + case Fun(Elem, Acc0) of + {ok, Acc} -> + fold_while_ok(Fun, Acc, Rest); + {error, _} = Error -> + Error + end; +fold_while_ok(_Fun, Acc, []) -> + {ok, Acc}. diff --git a/deps/rabbit_common/src/rabbit_msg_store_index.erl b/deps/rabbit_common/src/rabbit_msg_store_index.erl deleted file mode 100644 index 8031229b86b2..000000000000 --- a/deps/rabbit_common/src/rabbit_msg_store_index.erl +++ /dev/null @@ -1,89 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_msg_store_index). - --include("rabbit_msg_store.hrl"). - -%% Behaviour module to provide pluggable message store index. -%% The index is used to locate message on disk and for reference-counting. - -%% Message store have several additional assumptions about performance and -%% atomicity of some operations. See comments for each callback. - --type(dir() :: string()). --type(index_state() :: any()). --type(fieldpos() :: non_neg_integer()). --type(fieldvalue() :: any()). --type(msg_location() :: #msg_location{}). - - -%% There are two ways of starting an index: -%% - `new` - starts a clean index -%% - `recover` - attempts to read a saved index -%% In both cases the old saved state should be deleted from directory. - -%% Initialize a fresh index state for msg store directory. --callback new(dir()) -> index_state(). -%% Try to recover gracefully stopped index state. --callback recover(dir()) -> rabbit_types:ok_or_error2(index_state(), any()). -%% Gracefully shutdown the index. 
-%% Should save the index state, which will be loaded by the 'recover' function. --callback terminate(index_state()) -> any(). - -%% Lookup an entry in the index. -%% Is called concurrently by msg_store, it's clients and GC processes. -%% This function is called multiple times for each message store operation. -%% Message store tries to avoid writing messages on disk if consumers can -%% process them fast, so there will be a lot of lookups for non-existent -%% entries, which should be as fast as possible. --callback lookup(rabbit_types:msg_id(), index_state()) -> ('not_found' | msg_location()). - -%% Insert an entry into the index. -%% Is called by a msg_store process only. -%% This function can exit if there is already an entry with the same ID --callback insert(msg_location(), index_state()) -> 'ok'. - -%% Update an entry in the index. -%% Is called by a msg_store process only. -%% The function is called during message store recovery after crash. -%% The difference between update and insert functions, is that update -%% should not fail if entry already exist, and should be atomic. --callback update(msg_location(), index_state()) -> 'ok'. - -%% Update positional fields in the entry tuple. -%% Is called by msg_store and GC processes concurrently. -%% This function can exit if there is no entry with specified ID -%% This function is called to update reference-counters and file locations. -%% File locations are updated from a GC process, reference-counters are -%% updated from a message store process. -%% This function should be atomic. --callback update_fields(rabbit_types:msg_id(), ({fieldpos(), fieldvalue()} | - [{fieldpos(), fieldvalue()}]), - index_state()) -> 'ok'. - -%% Delete an entry from the index by ID. -%% Is called from a msg_store process only. -%% This function should be atomic. --callback delete(rabbit_types:msg_id(), index_state()) -> 'ok'. - -%% Delete an exactly matching entry from the index. -%% Is called by GC process only. -%% This function should match exact object to avoid deleting a zero-reference -%% object, which reference-counter is being concurrently updated. -%% This function should be atomic. --callback delete_object(msg_location(), index_state()) -> 'ok'. - -%% Delete temporary reference count entries with the 'file' record field equal to 'undefined'. -%% Is called during index rebuild from scratch (e.g. after non-clean stop) -%% During recovery after non-clean stop or file corruption, reference-counters -%% are added to the index with `undefined` value for the `file` field. -%% If message is found in a message store file, it's file field is updated. -%% If some reference-counters miss the message location after recovery - they -%% should be deleted. --callback clean_up_temporary_reference_count_entries_without_file(index_state()) -> 'ok'. - diff --git a/deps/rabbit_common/src/rabbit_net.erl b/deps/rabbit_common/src/rabbit_net.erl index bce19dda453f..494126795740 100644 --- a/deps/rabbit_common/src/rabbit_net.erl +++ b/deps/rabbit_common/src/rabbit_net.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_net). @@ -11,7 +11,7 @@ -include_lib("kernel/include/net_address.hrl"). 
-export([is_ssl/1, ssl_info/1, controlling_process/2, getstat/2, - recv/1, sync_recv/2, async_recv/3, port_command/2, getopts/2, + recv/1, sync_recv/2, async_recv/3, getopts/2, setopts/2, send/2, close/1, fast_close/1, sockname/1, peername/1, peercert/1, connection_string/2, socket_ends/2, is_loopback/1, tcp_host/1, unwrap_socket/1, maybe_get_proxy_socket/1, @@ -32,11 +32,11 @@ {raw, non_neg_integer(), non_neg_integer(), binary()}]. -type hostname() :: inet:hostname(). -type ip_port() :: inet:port_number(). --type rabbit_proxy_socket() :: {'rabbit_proxy_socket', ranch_transport:socket(), ranch_proxy_header:proxy_info()}. +-type proxy_socket() :: {'rabbit_proxy_socket', ranch_transport:socket(), ranch_proxy_header:proxy_info()}. % -type host_or_ip() :: binary() | inet:ip_address(). -spec is_ssl(socket()) -> boolean(). -spec ssl_info(socket()) -> 'nossl' | ok_val_or_error([{atom(), any()}]). --spec proxy_ssl_info(socket(), rabbit_proxy_socket() | 'undefined') -> 'nossl' | ok_val_or_error([{atom(), any()}]). +-spec proxy_ssl_info(socket(), proxy_socket() | 'undefined') -> 'nossl' | ok_val_or_error([{atom(), any()}]). -spec controlling_process(socket(), pid()) -> ok_or_any_error(). -spec getstat(socket(), [stat_option()]) -> ok_val_or_error([{stat_option(), integer()}]). @@ -50,7 +50,6 @@ rabbit_types:error(any()). -spec async_recv(socket(), integer(), timeout()) -> rabbit_types:ok(any()). --spec port_command(socket(), iolist()) -> 'true'. -spec getopts (socket(), [atom() | @@ -58,7 +57,7 @@ non_neg_integer() | binary()}]) -> ok_val_or_error(opts()). -spec setopts(socket(), opts()) -> ok_or_any_error(). --spec send(socket(), binary() | iolist()) -> ok_or_any_error(). +-spec send(socket(), iodata()) -> ok_or_any_error(). -spec close(socket()) -> ok_or_any_error(). -spec fast_close(socket()) -> ok_or_any_error(). -spec sockname(socket()) -> @@ -161,40 +160,6 @@ async_recv(Sock, Length, infinity) when is_port(Sock) -> async_recv(Sock, Length, Timeout) when is_port(Sock) -> prim_inet:async_recv(Sock, Length, Timeout). -port_command(Sock, Data) when ?IS_SSL(Sock) -> - case ssl:send(Sock, Data) of - ok -> self() ! {inet_reply, Sock, ok}, - true; - {error, Reason} -> erlang:error(Reason) - end; -port_command(Sock, Data) when is_port(Sock) -> - Fun = case persistent_term:get(rabbit_net_tcp_send, undefined) of - undefined -> - Rel = list_to_integer(erlang:system_info(otp_release)), - %% gen_tcp:send/2 does a selective receive of - %% {inet_reply, Sock, Status[, CallerTag]} - F = if Rel >= 26 -> - %% Selective receive is optimised: - %% https://github.com/erlang/otp/issues/6455 - fun gen_tcp_send/2; - Rel < 26 -> - %% Avoid costly selective receive. - fun erlang:port_command/2 - end, - ok = persistent_term:put(rabbit_net_tcp_send, F), - F; - F -> - F - end, - Fun(Sock, Data). - -gen_tcp_send(Sock, Data) -> - case gen_tcp:send(Sock, Data) of - ok -> self() ! {inet_reply, Sock, ok}, - true; - {error, Reason} -> erlang:error(Reason) - end. - getopts(Sock, Options) when ?IS_SSL(Sock) -> ssl:getopts(Sock, Options); getopts(Sock, Options) when is_port(Sock) -> diff --git a/deps/rabbit_common/src/rabbit_nodes_common.erl b/deps/rabbit_common/src/rabbit_nodes_common.erl index ceecb59ac2b0..7b4b6e2e9c82 100644 --- a/deps/rabbit_common/src/rabbit_nodes_common.erl +++ b/deps/rabbit_common/src/rabbit_nodes_common.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_nodes_common). @@ -107,6 +107,14 @@ epmd_port() -> end. ensure_epmd() -> + case net_adm:names() of + {ok, _Names} -> + ok; + _ -> + start_epmd() + end. + +start_epmd() -> Exe = rabbit_runtime:get_erl_path(), ID = rabbit_misc:random(1000000000), Port = open_port( diff --git a/deps/rabbit_common/src/rabbit_numerical.erl b/deps/rabbit_common/src/rabbit_numerical.erl index c94a46769a4f..6ae1f8db0ec2 100644 --- a/deps/rabbit_common/src/rabbit_numerical.erl +++ b/deps/rabbit_common/src/rabbit_numerical.erl @@ -30,7 +30,7 @@ %% human-readable output, or compact ASCII serializations for floats. digits(N) when is_integer(N) -> integer_to_list(N); -digits(N) when N =:= 0.0 -> +digits(N) when N =:= +0.0 orelse N =:= -0.0 -> "0.0"; digits(Float) -> {Frac1, Exp1} = frexp_int(Float), diff --git a/deps/rabbit_common/src/rabbit_password.erl b/deps/rabbit_common/src/rabbit_password.erl index 21c3357472f2..b54e3c4b64d7 100644 --- a/deps/rabbit_common/src/rabbit_password.erl +++ b/deps/rabbit_common/src/rabbit_password.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_password). diff --git a/deps/rabbit_common/src/rabbit_password_hashing.erl b/deps/rabbit_common/src/rabbit_password_hashing.erl index dd291b1a9d85..898094c663ab 100644 --- a/deps/rabbit_common/src/rabbit_password_hashing.erl +++ b/deps/rabbit_common/src/rabbit_password_hashing.erl @@ -2,10 +2,8 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_password_hashing). --include("rabbit.hrl"). - -callback hash(rabbit_types:password()) -> rabbit_types:password_hash(). diff --git a/deps/rabbit_common/src/rabbit_password_hashing_md5.erl b/deps/rabbit_common/src/rabbit_password_hashing_md5.erl index 1ee6e2ffe4fd..d7972884415d 100644 --- a/deps/rabbit_common/src/rabbit_password_hashing_md5.erl +++ b/deps/rabbit_common/src/rabbit_password_hashing_md5.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Legacy hashing implementation, only used as a last resort when diff --git a/deps/rabbit_common/src/rabbit_password_hashing_sha256.erl b/deps/rabbit_common/src/rabbit_password_hashing_sha256.erl index e8314abda142..75c5f4b3a768 100644 --- a/deps/rabbit_common/src/rabbit_password_hashing_sha256.erl +++ b/deps/rabbit_common/src/rabbit_password_hashing_sha256.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_password_hashing_sha256). diff --git a/deps/rabbit_common/src/rabbit_password_hashing_sha512.erl b/deps/rabbit_common/src/rabbit_password_hashing_sha512.erl index a4f794a81409..7e5bf4b82901 100644 --- a/deps/rabbit_common/src/rabbit_password_hashing_sha512.erl +++ b/deps/rabbit_common/src/rabbit_password_hashing_sha512.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_password_hashing_sha512). diff --git a/deps/rabbit_common/src/rabbit_pbe.erl b/deps/rabbit_common/src/rabbit_pbe.erl index 3508f8f1654f..5d1fbc00be9e 100644 --- a/deps/rabbit_common/src/rabbit_pbe.erl +++ b/deps/rabbit_common/src/rabbit_pbe.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_pbe). @@ -10,8 +10,11 @@ -export([supported_ciphers/0, supported_hashes/0, default_cipher/0, default_hash/0, default_iterations/0]). -export([encrypt_term/5, decrypt_term/5]). -export([encrypt/5, decrypt/5]). +-export([encrypt/2, decrypt/2]). --export_type([encryption_result/0]). +-type encryptable_input() :: iodata() | '$pending-secret'. + +-export_type([encryptable_input/0, encryption_result/0]). supported_ciphers() -> credentials_obfuscation_pbe:supported_ciphers(). @@ -46,7 +49,7 @@ decrypt_term(Cipher, Hash, Iterations, PassPhrase, {encrypted, _Base64Binary}=En -type crypto_hash_algorithm() :: atom(). -spec encrypt(crypto_cipher(), crypto_hash_algorithm(), - pos_integer(), iodata() | '$pending-secret', binary()) -> encryption_result(). + pos_integer(), encryptable_input(), binary()) -> encryption_result(). encrypt(Cipher, Hash, Iterations, PassPhrase, ClearText) -> credentials_obfuscation_pbe:encrypt(Cipher, Hash, Iterations, PassPhrase, ClearText). @@ -57,3 +60,15 @@ decrypt(_Cipher, _Hash, _Iterations, _PassPhrase, {plaintext, Term}) -> Term; decrypt(Cipher, Hash, Iterations, PassPhrase, {encrypted, _Base64Binary}=Encrypted) -> credentials_obfuscation_pbe:decrypt(Cipher, Hash, Iterations, PassPhrase, Encrypted). + + +-spec encrypt(encryptable_input(), binary()) -> encryption_result(). +encrypt(PassPhrase, ClearText) -> + credentials_obfuscation_pbe:encrypt(default_cipher(), default_hash(), default_iterations(), PassPhrase, ClearText). + + +-spec decrypt(iodata(), encryption_result()) -> any(). +decrypt(_PassPhrase, {plaintext, Term}) -> + Term; +decrypt(PassPhrase, {encrypted, _Base64Binary}=Encrypted) -> + credentials_obfuscation_pbe:decrypt(default_cipher(), default_hash(), default_iterations(), PassPhrase, Encrypted). 
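The new rabbit_pbe:encrypt/2 and decrypt/2 above simply apply the module's default cipher, hash and iteration count, sparing callers from threading those three arguments through. A usage sketch (the passphrase and value are illustrative; the round-trip result is the expected behaviour, not something asserted by the patch):

%% Encrypt with the defaults, then decrypt; decrypt/2 also passes
%% {plaintext, Term} values through unchanged, mirroring decrypt/5.
Pass  = <<"s3kr3t">>,
Enc   = rabbit_pbe:encrypt(Pass, <<"amqp-password">>),
Clear = rabbit_pbe:decrypt(Pass, Enc),
%% Clear is expected to equal the original <<"amqp-password">> binary.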
diff --git a/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl b/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl index b347ed2d7481..4ed3882620ff 100644 --- a/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl +++ b/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl @@ -2,7 +2,7 @@ %% from rabbitmq-autocluster by Gavin Roy. %% %% Copyright (c) 2014-2015 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% All rights reserved. %% %% Redistribution and use in source and binary forms, with or without modification, @@ -32,16 +32,14 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2014-2015 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_peer_discovery_backend). --include("rabbit.hrl"). - -callback init() -> ok | {error, Reason :: string()}. --callback list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} | +-callback list_nodes() -> {ok, {Nodes :: [node()] | node(), NodeType :: rabbit_types:node_type()}} | {error, Reason :: string()}. -callback supports_registration() -> boolean(). @@ -52,8 +50,13 @@ -callback post_registration() -> ok | {error, Reason :: string()}. --callback lock(Node :: atom()) -> {ok, Data :: term()} | not_supported | {error, Reason :: string()}. +-callback lock(Nodes :: [node()]) -> {ok, Data :: term()} | not_supported | {error, Reason :: string()}. -callback unlock(Data :: term()) -> ok. -optional_callbacks([init/0]). + +-export([api_version/0]). + +api_version() -> + 2. diff --git a/deps/rabbit_common/src/rabbit_policy_validator.erl b/deps/rabbit_common/src/rabbit_policy_validator.erl index cd9034b40c0e..be0642a1de6c 100644 --- a/deps/rabbit_common/src/rabbit_policy_validator.erl +++ b/deps/rabbit_common/src/rabbit_policy_validator.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_policy_validator). diff --git a/deps/rabbit_common/src/rabbit_queue_collector.erl b/deps/rabbit_common/src/rabbit_queue_collector.erl index 08da6acc3037..f69057a295cc 100644 --- a/deps/rabbit_common/src/rabbit_queue_collector.erl +++ b/deps/rabbit_common/src/rabbit_queue_collector.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_queue_collector). diff --git a/deps/rabbit_common/src/rabbit_registry.erl b/deps/rabbit_common/src/rabbit_registry.erl index 50c437410995..3cd5b344b28f 100644 --- a/deps/rabbit_common/src/rabbit_registry.erl +++ b/deps/rabbit_common/src/rabbit_registry.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_registry). @@ -46,9 +46,11 @@ unregister(Class, TypeName) -> %% can throw a badarg, indicating that the type cannot have been %% registered. binary_to_type(TypeBin) when is_binary(TypeBin) -> - case catch list_to_existing_atom(binary_to_list(TypeBin)) of - {'EXIT', {badarg, _}} -> {error, not_found}; - TypeAtom -> TypeAtom + case catch binary_to_existing_atom(TypeBin) of + {'EXIT', {badarg, _}} -> + {error, not_found}; + TypeAtom -> + TypeAtom end. lookup_module(Class, T) when is_atom(T) -> @@ -65,7 +67,7 @@ lookup_all(Class) -> %%--------------------------------------------------------------------------- internal_binary_to_type(TypeBin) when is_binary(TypeBin) -> - list_to_atom(binary_to_list(TypeBin)). + binary_to_atom(TypeBin). internal_register(Class, TypeName, ModuleName) when is_atom(Class), is_binary(TypeName), is_atom(ModuleName) -> @@ -133,8 +135,7 @@ class_module(policy_validator) -> rabbit_policy_validator; class_module(operator_policy_validator) -> rabbit_policy_validator; class_module(policy_merge_strategy) -> rabbit_policy_merge_strategy; class_module(ha_mode) -> rabbit_mirror_queue_mode; -class_module(channel_interceptor) -> rabbit_channel_interceptor; -class_module(queue_master_locator) -> rabbit_queue_master_locator. +class_module(channel_interceptor) -> rabbit_channel_interceptor. %%--------------------------------------------------------------------------- diff --git a/deps/rabbit_common/src/rabbit_registry_class.erl b/deps/rabbit_common/src/rabbit_registry_class.erl index 09605d916893..8e05abcf016c 100644 --- a/deps/rabbit_common/src/rabbit_registry_class.erl +++ b/deps/rabbit_common/src/rabbit_registry_class.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_registry_class). diff --git a/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl b/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl index 9d21878376ea..4edcc3dafe6c 100644 --- a/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl +++ b/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% @@ -17,20 +17,29 @@ parse_information_unit(Value) when is_integer(Value) -> {ok, Value}; parse_information_unit(Value0) -> Value = rabbit_data_coercion:to_list(Value0), case re:run(Value, - "^(?<VAL>[0-9]+)(?<UNIT>kB|KB|MB|GB|kb|mb|gb|Kb|Mb|Gb|kiB|KiB|MiB|GiB|kib|mib|gib|KIB|MIB|GIB|k|K|m|M|g|G)?$", + "^(?<VAL>[0-9]+)(?<UNIT>kB|Ki|Mi|Gi|Ti|Pi|KB|MB|GB|TB|PB|kb|ki|mb|gb|tb|pb|Kb|Mb|Gb|Tb|Pb|kiB|KiB|MiB|GiB|TiB|PiB|kib|mib|gib|tib|pib|KIB|MIB|GIB|TIB|PIB|k|K|m|M|g|G|p|P)?$", [{capture, all_but_first, list}]) of {match, [[], _]} -> {ok, list_to_integer(Value)}; {match, [Num]} -> {ok, list_to_integer(Num)}; {match, [Num, Unit]} -> + %% Note: there is no industry standard on what K, M, G, T, P mean (is G a gigabyte or a gibibyte?), so + %% starting with 3.13 we treat those the same way as Kubernetes does [1]. + %% + %% 1. https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory Multiplier = case Unit of - KiB when KiB =:= "k"; KiB =:= "kiB"; KiB =:= "K"; KiB =:= "KIB"; KiB =:= "kib" -> 1024; - MiB when MiB =:= "m"; MiB =:= "MiB"; MiB =:= "M"; MiB =:= "MIB"; MiB =:= "mib" -> 1024*1024; - GiB when GiB =:= "g"; GiB =:= "GiB"; GiB =:= "G"; GiB =:= "GIB"; GiB =:= "gib" -> 1024*1024*1024; - KB when KB =:= "KB"; KB =:= "kB"; KB =:= "kb"; KB =:= "Kb" -> 1000; - MB when MB =:= "MB"; MB =:= "mB"; MB =:= "mb"; MB =:= "Mb" -> 1000000; - GB when GB =:= "GB"; GB =:= "gB"; GB =:= "gb"; GB =:= "Gb" -> 1000000000 + KiB when KiB =:= "kiB"; KiB =:= "KIB"; KiB =:= "kib"; KiB =:= "ki"; KiB =:= "Ki" -> 1024; + MiB when MiB =:= "MiB"; MiB =:= "MIB"; MiB =:= "mib"; MiB =:= "mi"; MiB =:= "Mi" -> 1024 * 1024; + GiB when GiB =:= "GiB"; GiB =:= "GIB"; GiB =:= "gib"; GiB =:= "gi"; GiB =:= "Gi" -> 1024 * 1024 * 1024; + TiB when TiB =:= "TiB"; TiB =:= "TIB"; TiB =:= "tib"; TiB =:= "ti"; TiB =:= "Ti" -> 1024 * 1024 * 1024 * 1024; + PiB when PiB =:= "PiB"; PiB =:= "PIB"; PiB =:= "pib"; PiB =:= "pi"; PiB =:= "Pi" -> 1024 * 1024 * 1024 * 1024 * 1024; + + KB when KB =:= "k"; KB =:= "K"; KB =:= "KB"; KB =:= "kB"; KB =:= "kb"; KB =:= "Kb" -> 1000; + MB when MB =:= "m"; MB =:= "M"; MB =:= "MB"; MB =:= "mB"; MB =:= "mb"; MB =:= "Mb" -> 1000_000; + GB when GB =:= "g"; GB =:= "G"; GB =:= "GB"; GB =:= "gB"; GB =:= "gb"; GB =:= "Gb" -> 1000_000_000; + TB when TB =:= "t"; TB =:= "T"; TB =:= "TB"; TB =:= "tB"; TB =:= "tb"; TB =:= "Tb" -> 1000_000_000_000; + PB when PB =:= "p"; PB =:= "P"; PB =:= "PB"; PB =:= "pB"; PB =:= "pb"; PB =:= "Pb" -> 1000_000_000_000_000 end, {ok, list_to_integer(Num) * Multiplier}; nomatch -> diff --git a/deps/rabbit_common/src/rabbit_routing_parser.erl b/deps/rabbit_common/src/rabbit_routing_parser.erl new file mode 100644 index 000000000000..81b26d4a913b --- /dev/null +++ b/deps/rabbit_common/src/rabbit_routing_parser.erl @@ -0,0 +1,77 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2013-2023 VMware, Inc. or its affiliates. All rights reserved. +%% + +-module(rabbit_routing_parser). + +-export([parse_endpoint/1, + parse_endpoint/2, + parse_routing/1]). + +parse_endpoint(Destination) -> + parse_endpoint(Destination, false).
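To make the revised unit table above concrete: suffixes ending in "i" (Ki, Mi, Gi, Ti, Pi and their *iB spellings) are binary multiples, everything else is decimal, matching the Kubernetes convention cited in the comment. Two worked values, assuming parse_information_unit/1 stays exported as before:

%% Decimal megabytes: 100 * 1000_000
{ok, 100000000} = rabbit_resource_monitor_misc:parse_information_unit("100MB"),
%% Binary mebibytes: 100 * 1024 * 1024
{ok, 104857600} = rabbit_resource_monitor_misc:parse_information_unit("100MiB").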
+ +parse_endpoint(undefined, AllowAnonymousQueue) -> + parse_endpoint("/queue", AllowAnonymousQueue); +parse_endpoint(Destination, AllowAnonymousQueue) + when is_binary(Destination) -> + List = unicode:characters_to_list(Destination), + parse_endpoint(List, AllowAnonymousQueue); +parse_endpoint(Destination, AllowAnonymousQueue) + when is_list(Destination) -> + case re:split(Destination, "/", [unicode, {return, list}]) of + [Name] -> + {ok, {queue, unescape(Name)}}; + ["", Type | Rest] + when Type =:= "exchange" orelse Type =:= "queue" orelse + Type =:= "topic" orelse Type =:= "temp-queue" -> + parse_endpoint0(atomise(Type), Rest, AllowAnonymousQueue); + ["", "amq", "queue" | Rest] -> + parse_endpoint0(amqqueue, Rest, AllowAnonymousQueue); + ["", "reply-queue" = Prefix | [_|_]] -> + parse_endpoint0(reply_queue, + [lists:nthtail(2 + length(Prefix), Destination)], + AllowAnonymousQueue); + _ -> + {error, {unknown_destination, Destination}} + end. + +parse_endpoint0(exchange, ["" | _] = Rest, _) -> + {error, {invalid_destination, exchange, to_url(Rest)}}; +parse_endpoint0(exchange, [Name], _) -> + {ok, {exchange, {unescape(Name), undefined}}}; +parse_endpoint0(exchange, [Name, Pattern], _) -> + {ok, {exchange, {unescape(Name), unescape(Pattern)}}}; +parse_endpoint0(queue, [], false) -> + {error, {invalid_destination, queue, []}}; +parse_endpoint0(queue, [], true) -> + {ok, {queue, undefined}}; +parse_endpoint0(Type, [[_|_]] = [Name], _) -> + {ok, {Type, unescape(Name)}}; +parse_endpoint0(Type, Rest, _) -> + {error, {invalid_destination, Type, to_url(Rest)}}. + +parse_routing({exchange, {Name, undefined}}) -> + {Name, ""}; +parse_routing({exchange, {Name, Pattern}}) -> + {Name, Pattern}; +parse_routing({topic, Name}) -> + {"amq.topic", Name}; +parse_routing({Type, Name}) + when Type =:= queue orelse Type =:= reply_queue orelse Type =:= amqqueue -> + {"", Name}. + +atomise(Name) when is_list(Name) -> + list_to_atom(re:replace(Name, "-", "_", [{return,list}, global])). + +to_url([]) -> []; +to_url(Lol) -> "/" ++ string:join(Lol, "/"). + +unescape(Str) -> unescape(Str, []). + +unescape("%2F" ++ Str, Acc) -> unescape(Str, [$/ | Acc]); +unescape([C | Str], Acc) -> unescape(Str, [C | Acc]); +unescape([], Acc) -> lists:reverse(Acc). diff --git a/deps/rabbit_common/src/rabbit_runtime.erl b/deps/rabbit_common/src/rabbit_runtime.erl index a3f05be7bbf1..c6682cd56226 100644 --- a/deps/rabbit_common/src/rabbit_runtime.erl +++ b/deps/rabbit_common/src/rabbit_runtime.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module provides access to runtime metrics that are exposed @@ -56,11 +56,10 @@ msacc_stats(TimeInMs) -> % get the full path to the erl executable used to start this VM -spec get_erl_path() -> file:filename_all(). get_erl_path() -> - ERTSDir = rabbit_misc:format("erts-~ts", [erlang:system_info(version)]), - Bin = filename:join([code:root_dir(), ERTSDir, "bin"]), + {ok, [[BinDir]]} = init:get_argument(bindir), case os:type() of {win32, _} -> - filename:join(Bin, "erl.exe"); + filename:join(BinDir, "erl.exe"); _ -> - filename:join(Bin, "erl") + filename:join(BinDir, "erl") end. 
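The rabbit_routing_parser module added above maps textual destinations to {Type, Name} endpoints and then, via parse_routing/1, to {Exchange, RoutingKey} pairs. A few results read directly off the clauses above (illustrative values, not an exhaustive test):

%% Destinations are split on "/"; "%2F" unescapes to a literal slash.
{ok, {queue, "orders"}}                    = rabbit_routing_parser:parse_endpoint(<<"/queue/orders">>),
{ok, {exchange, {"amq.topic", undefined}}} = rabbit_routing_parser:parse_endpoint(<<"/exchange/amq.topic">>),
{ok, {topic, "events.a.b"}}                = rabbit_routing_parser:parse_endpoint(<<"/topic/events.a.b">>),
%% A topic endpoint routes through the default "amq.topic" exchange.
{"amq.topic", "events.a.b"} = rabbit_routing_parser:parse_routing({topic, "events.a.b"}).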
diff --git a/deps/rabbit_common/src/rabbit_runtime_parameter.erl b/deps/rabbit_common/src/rabbit_runtime_parameter.erl index e51b4c6da620..25e4a5ece588 100644 --- a/deps/rabbit_common/src/rabbit_runtime_parameter.erl +++ b/deps/rabbit_common/src/rabbit_runtime_parameter.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_runtime_parameter). diff --git a/deps/rabbit_common/src/rabbit_ssl_options.erl b/deps/rabbit_common/src/rabbit_ssl_options.erl index 636bfed73a3c..ee0d1b4a3260 100644 --- a/deps/rabbit_common/src/rabbit_ssl_options.erl +++ b/deps/rabbit_common/src/rabbit_ssl_options.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ssl_options). @@ -18,7 +18,9 @@ -spec fix(rabbit_types:infos()) -> rabbit_types:infos(). fix(Config) -> - fix_verify_fun(fix_ssl_protocol_versions(Config)). + fix_verify_fun( + fix_ssl_protocol_versions( + hibernate_after(Config))). fix_verify_fun(SslOptsConfig) -> %% Starting with ssl 4.0.1 in Erlang R14B, the verify_fun function @@ -84,3 +86,12 @@ fix_ssl_protocol_versions(Config) -> end, rabbit_misc:pset(versions, Configured -- ?BAD_SSL_PROTOCOL_VERSIONS, Config) end. + +hibernate_after(Config) -> + Key = hibernate_after, + case proplists:is_defined(Key, Config) of + true -> + Config; + false -> + [{Key, 6_000} | Config] + end. diff --git a/deps/rabbit_common/src/rabbit_types.erl b/deps/rabbit_common/src/rabbit_types.erl index e58811c6c58d..6c3a78e84d64 100644 --- a/deps/rabbit_common/src/rabbit_types.erl +++ b/deps/rabbit_common/src/rabbit_types.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_types). @@ -11,7 +11,7 @@ -export_type([ %% deprecated - maybe/1, + 'maybe'/1, option/1, info/0, infos/0, info_key/0, info_keys/0, message/0, msg_id/0, basic_message/0, @@ -35,10 +35,12 @@ -type(option(T) :: T | 'none' | 'undefined'). %% Deprecated, 'maybe' is a keyword in modern Erlang --type(maybe(T) :: T | 'none'). +-type('maybe'(T) :: T | 'none'). -type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}). -type(vhost() :: binary()). +%% An arbitrary binary tag used to distinguish between different consumers +%% set up by the same process. -type(ctag() :: binary()). %% TODO: make this more precise by tying specific class_ids to diff --git a/deps/rabbit_common/src/rabbit_writer.erl b/deps/rabbit_common/src/rabbit_writer.erl index 7e67cab6692b..43da810bfb62 100644 --- a/deps/rabbit_common/src/rabbit_writer.erl +++ b/deps/rabbit_common/src/rabbit_writer.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_writer). +-behavior(gen_server). + %% This module backs writer processes ("writers"). The responsibility of %% a writer is to serialise protocol methods and write them to the socket. %% Every writer is associated with a channel and normally it's the channel @@ -25,11 +27,14 @@ %% When a socket write fails, writer will exit. -include("rabbit.hrl"). --include("rabbit_framing.hrl"). - -export([start/6, start_link/6, start/7, start_link/7, start/8, start_link/8]). --export([system_continue/3, system_terminate/4, system_code_change/4]). +-export([init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3]). -export([send_command/2, send_command/3, send_command_sync/2, send_command_sync/3, @@ -39,9 +44,6 @@ -export([internal_send_command/4, internal_send_command/6]). -export([msg_size/1, maybe_gc_large_msg/1, maybe_gc_large_msg/2]). -%% internal --export([enter_mainloop/2, mainloop/2, mainloop1/2]). - -record(wstate, { %% socket (port) sock, @@ -99,16 +101,9 @@ rabbit_types:proc_name(), boolean(), undefined|non_neg_integer()) -> rabbit_types:ok(pid()). --spec system_code_change(_,_,_,_) -> {'ok',_}. --spec system_continue(_,_,#wstate{}) -> any(). --spec system_terminate(_,_,_,_) -> no_return(). - -spec send_command(pid(), rabbit_framing:amqp_method_record()) -> 'ok'. -spec send_command - (pid(), rabbit_framing:amqp_method_record(), - rabbit_types:content() | - {integer(), rabbit_types:content()} %% publishing sequence for AMQP 1.0 return callback - ) -> + (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) -> 'ok'. -spec send_command_sync(pid(), rabbit_framing:amqp_method_record()) -> 'ok'. -spec send_command_sync @@ -166,13 +161,15 @@ start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, ReaderWantsStats, GCThreshold) -> State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats, GCThreshold), - {ok, proc_lib:spawn(?MODULE, enter_mainloop, [Identity, State])}. + Options = [{hibernate_after, ?HIBERNATE_AFTER}], + gen_server:start(?MODULE, [Identity, State], Options). start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, ReaderWantsStats, GCThreshold) -> State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats, GCThreshold), - {ok, proc_lib:spawn_link(?MODULE, enter_mainloop, [Identity, State])}. + Options = [{hibernate_after, ?HIBERNATE_AFTER}], + gen_server:start_link(?MODULE, [Identity, State], Options). initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats, GCThreshold) -> (case ReaderWantsStats of @@ -187,49 +184,57 @@ initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats, GC writer_gc_threshold = GCThreshold}, #wstate.stats_timer). -system_continue(Parent, Deb, State) -> - mainloop(Deb, State#wstate{reader = Parent}). - -system_terminate(Reason, _Parent, _Deb, _State) -> - exit(Reason). - -system_code_change(Misc, _Module, _OldVsn, _Extra) -> - {ok, Misc}. - -enter_mainloop(Identity, State) -> +init([Identity, State]) -> ?LG_PROCESS_TYPE(writer), - Deb = sys:debug_options([]), ?store_proc_name(Identity), - mainloop(Deb, State). + {ok, State}. 
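With the hand-rolled mainloop replaced by a gen_server above, the old hibernation-after-idle behaviour is preserved through the standard {hibernate_after, Timeout} start option rather than an explicit receive ... after clause. A minimal sketch of that pattern (the module name and timeout are placeholders, not taken from the patch):

%% A gen_server that hibernates automatically after 1 second of inactivity.
-module(hibernating_server).
-behaviour(gen_server).
-export([start_link/0, init/1, handle_call/3, handle_cast/2]).

start_link() ->
    gen_server:start_link(?MODULE, [], [{hibernate_after, 1000}]).

init([]) -> {ok, #{}}.
handle_call(_Req, _From, State) -> {reply, ok, State}.
handle_cast(_Msg, State) -> {noreply, State}.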
-mainloop(Deb, State) -> +handle_call({send_command_sync, MethodRecord}, _From, State) -> try - mainloop1(Deb, State) + State1 = internal_flush( + internal_send_command_async(MethodRecord, State)), + {reply, ok, State1, 0} catch - exit:Error -> #wstate{reader = ReaderPid, channel = Channel} = State, - ReaderPid ! {channel_exit, Channel, Error} - end, - done. - -mainloop1(Deb, State = #wstate{pending = []}) -> - receive - Message -> {Deb1, State1} = handle_message(Deb, Message, State), - ?MODULE:mainloop1(Deb1, State1) - after ?HIBERNATE_AFTER -> - erlang:hibernate(?MODULE, mainloop, [Deb, State]) + _Class:Reason -> + {stop, {shutdown, Reason}, State} end; -mainloop1(Deb, State) -> - receive - Message -> {Deb1, State1} = handle_message(Deb, Message, State), - ?MODULE:mainloop1(Deb1, State1) - after 0 -> - ?MODULE:mainloop1(Deb, internal_flush(State)) +handle_call({send_command_sync, MethodRecord, Content}, _From, State) -> + try + State1 = internal_flush( + internal_send_command_async(MethodRecord, Content, State)), + {reply, ok, State1, 0} + catch + _Class:Reason -> + {stop, {shutdown, Reason}, State} + end; +handle_call(flush, _From, State) -> + try + State1 = internal_flush(State), + {reply, ok, State1, 0} + catch + _Class:Reason -> + {stop, {shutdown, Reason}, State} end. -handle_message(Deb, {system, From, Req}, State = #wstate{reader = Parent}) -> - sys:handle_system_msg(Req, From, Parent, ?MODULE, Deb, State); -handle_message(Deb, Message, State) -> - {Deb, handle_message(Message, State)}. +handle_cast(_Message, State) -> + {noreply, State, 0}. + +handle_info(timeout, State) -> + try + State1 = internal_flush(State), + {noreply, State1} + catch + _Class:Reason -> + {stop, {shutdown, Reason}, State} + end; +handle_info(Message, State) -> + try + State1 = handle_message(Message, State), + {noreply, State1, 0} + catch + _Class:Reason -> + {stop, {shutdown, Reason}, State} + end. handle_message({send_command, MethodRecord}, State) -> internal_send_command_async(MethodRecord, State); @@ -241,21 +246,6 @@ handle_message({send_command_flow, MethodRecord, Sender}, State) -> handle_message({send_command_flow, MethodRecord, Content, Sender}, State) -> credit_flow:ack(Sender), internal_send_command_async(MethodRecord, Content, State); -handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) -> - State1 = internal_flush( - internal_send_command_async(MethodRecord, State)), - gen_server:reply(From, ok), - State1; -handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}}, - State) -> - State1 = internal_flush( - internal_send_command_async(MethodRecord, Content, State)), - gen_server:reply(From, ok), - State1; -handle_message({'$gen_call', From, flush}, State) -> - State1 = internal_flush(State), - gen_server:reply(From, ok), - State1; handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) -> State1 = internal_send_command_async(MethodRecord, State), rabbit_amqqueue_common:notify_sent(QPid, ChPid), @@ -268,16 +258,28 @@ handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content}, handle_message({'DOWN', _MRef, process, QPid, _Reason}, State) -> rabbit_amqqueue_common:notify_sent_queue_down(QPid), State; -handle_message({inet_reply, _, ok}, State) -> - rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats); -handle_message({inet_reply, _, Status}, _State) -> - exit({writer, send_failed, Status}); handle_message(emit_stats, State = #wstate{reader = ReaderPid}) -> ReaderPid ! 
ensure_stats, rabbit_event:reset_stats_timer(State, #wstate.stats_timer); +handle_message(ok, State) -> + State; +handle_message({_Ref, ok} = Msg, State) -> + rabbit_log:warning("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]), + State; +handle_message({ok, _Ref} = Msg, State) -> + rabbit_log:warning("AMQP 0-9-1 channel writer has received a message it does not support: ~p", [Msg]), + State; handle_message(Message, _State) -> exit({writer, message_not_understood, Message}). +terminate(Reason, State) -> + #wstate{reader = ReaderPid, channel = Channel} = State, + ReaderPid ! {channel_exit, Channel, Reason}, + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + %%--------------------------------------------------------------------------- send_command(W, MethodRecord) -> @@ -317,8 +319,7 @@ flush(W) -> call(W, flush). %%--------------------------------------------------------------------------- call(Pid, Msg) -> - {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity), - Res. + gen_server:call(Pid, Msg, infinity). %%--------------------------------------------------------------------------- @@ -384,33 +385,15 @@ maybe_flush(State = #wstate{pending = Pending}) -> internal_flush(State = #wstate{pending = []}) -> State; -internal_flush(State = #wstate{sock = Sock, pending = Pending}) -> - ok = port_cmd(Sock, lists:reverse(Pending)), - State#wstate{pending = []}. - -%% gen_tcp:send/2 does a selective receive of {inet_reply, Sock, -%% Status} to obtain the result. That is bad when it is called from -%% the writer since it requires scanning of the writers possibly quite -%% large message queue. -%% -%% So instead we lift the code from prim_inet:send/2, which is what -%% gen_tcp:send/2 calls, do the first half here and then just process -%% the result code in handle_message/2 as and when it arrives. -%% -%% This means we may end up happily sending data down a closed/broken -%% socket, but that's ok since a) data in the buffers will be lost in -%% any case (so qualitatively we are no worse off than if we used -%% gen_tcp:send/2), and b) we do detect the changed socket status -%% eventually, i.e. when we get round to handling the result code. -%% -%% Also note that the port has bounded buffers and port_command blocks -%% when these are full. So the fact that we process the result -%% asynchronously does not impact flow control. -port_cmd(Sock, Data) -> - true = try rabbit_net:port_command(Sock, Data) - catch error:Error -> exit({writer, send_failed, Error}) - end, - ok. +internal_flush(State0 = #wstate{sock = Sock, pending = Pending}) -> + case rabbit_net:send(Sock, lists:reverse(Pending)) of + ok -> + ok; + {error, Reason} -> + exit({writer, send_failed, Reason}) + end, + State = State0#wstate{pending = []}, + rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats). %% Some processes (channel, writer) can get huge amounts of binary %% garbage when processing huge messages at high speed (since we only diff --git a/deps/rabbit_common/src/supervisor2.erl b/deps/rabbit_common/src/supervisor2.erl index c3ad436cb27b..46dae1c0a56b 100644 --- a/deps/rabbit_common/src/supervisor2.erl +++ b/deps/rabbit_common/src/supervisor2.erl @@ -38,7 +38,7 @@ %% 4) normal, and {shutdown, _} exit reasons are all treated the same %% (i.e. are regarded as normal exits) %% -%% All modifications are (C) 2010-2023 VMware, Inc. or its affiliates. +%% All modifications are (C) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. %% %% %CopyrightBegin% %% @@ -71,7 +71,7 @@ %% Internal exports -export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3, format_status/2]). + terminate/2, code_change/3]). %% For release_handler only -export([get_callback_module/1]). @@ -878,17 +878,13 @@ do_restart_delay(Reason, maybe_restart(Strategy, Child, State) -> case restart(Strategy, Child, State) of - {{try_again, Reason}, NState2} -> + {{try_again, TryAgainId}, NState2} -> %% Leaving control back to gen_server before %% trying again. This way other incoming requests %% for the supervisor can be handled - e.g. a %% shutdown request for the supervisor or the %% child. - Id = if ?is_simple(State) -> Child#child.pid; - true -> Child#child.id - end, - Args = [self(), Id, Reason], - {ok, _TRef} = timer:apply_after(0, ?MODULE, try_again_restart, Args), + try_again_restart(TryAgainId), {ok, NState2}; Other -> Other @@ -1610,12 +1606,6 @@ report_progress(Child, SupName) -> logger_formatter=>#{title=>"PROGRESS REPORT"}, error_logger=>#{tag=>info_report,type=>progress}}). -format_status(terminate, [_PDict, State]) -> - State; -format_status(_, [_PDict, State]) -> - [{data, [{"State", State}]}, - {supervisor, [{"Callback", State#state.module}]}]. - %%%----------------------------------------------------------------- %%% Dynamics database access dyn_size(#state{dynamics = {Mod,Db}}) -> diff --git a/deps/rabbit_common/src/vm_memory_monitor.erl b/deps/rabbit_common/src/vm_memory_monitor.erl index 82ec6c915898..007cbf2cc1fe 100644 --- a/deps/rabbit_common/src/vm_memory_monitor.erl +++ b/deps/rabbit_common/src/vm_memory_monitor.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% In practice Erlang shouldn't be allowed to grow to more than a half diff --git a/deps/rabbit_common/src/worker_pool.erl b/deps/rabbit_common/src/worker_pool.erl index 4770fd87a4c6..cd27ca346e71 100644 --- a/deps/rabbit_common/src/worker_pool.erl +++ b/deps/rabbit_common/src/worker_pool.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(worker_pool). diff --git a/deps/rabbit_common/src/worker_pool_sup.erl b/deps/rabbit_common/src/worker_pool_sup.erl index 86d30e087140..8a01154f3676 100644 --- a/deps/rabbit_common/src/worker_pool_sup.erl +++ b/deps/rabbit_common/src/worker_pool_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(worker_pool_sup). 
diff --git a/deps/rabbit_common/src/worker_pool_worker.erl b/deps/rabbit_common/src/worker_pool_worker.erl index 13834981b8bc..1444063ffb30 100644 --- a/deps/rabbit_common/src/worker_pool_worker.erl +++ b/deps/rabbit_common/src/worker_pool_worker.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(worker_pool_worker). @@ -17,7 +17,6 @@ -export([start_link/1, next_job_from/2, submit/3, submit_async/2, run/1]). --export([set_maximum_since_use/2]). -export([set_timeout/2, set_timeout/3, clear_timeout/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, @@ -32,7 +31,6 @@ -spec submit(pid(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A. -spec submit_async(pid(), fun (() -> any()) | mfargs()) -> 'ok'. -spec run(fun (() -> A)) -> A; (mfargs()) -> any(). --spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'. %%---------------------------------------------------------------------------- @@ -53,9 +51,6 @@ submit(Pid, Fun, ProcessModel) -> submit_async(Pid, Fun) -> gen_server2:cast(Pid, {submit_async, Fun, self()}). -set_maximum_since_use(Pid, Age) -> - gen_server2:cast(Pid, {set_maximum_since_use, Age}). - run({M, F, A}) -> apply(M, F, A); run(Fun) -> Fun(). @@ -76,15 +71,12 @@ run(Fun, single) -> %%---------------------------------------------------------------------------- init([PoolName]) -> - ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use, - [self()]), ok = worker_pool:ready(PoolName, self()), put(worker_pool_worker, true), put(worker_pool_name, PoolName), {ok, undefined, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. -prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8; prioritise_cast({next_job_from, _CPid}, _Len, _State) -> 7; prioritise_cast(_Msg, _Len, _State) -> 0. @@ -120,10 +112,6 @@ handle_cast({submit_async, Fun, CPid}, {from, CPid, MRef}) -> ok = worker_pool:idle(get(worker_pool_name), self()), {noreply, undefined, hibernate}; -handle_cast({set_maximum_since_use, Age}, State) -> - ok = file_handle_cache:set_maximum_since_use(Age), - {noreply, State, hibernate}; - handle_cast(Msg, State) -> {stop, {unexpected_cast, Msg}, State}. diff --git a/deps/rabbit_common/test/gen_server2_test_server.erl b/deps/rabbit_common/test/gen_server2_test_server.erl index f93403cfb735..97041a22ee8d 100644 --- a/deps/rabbit_common/test/gen_server2_test_server.erl +++ b/deps/rabbit_common/test/gen_server2_test_server.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(gen_server2_test_server). diff --git a/deps/rabbit_common/test/rabbit_env_SUITE.erl b/deps/rabbit_common/test/rabbit_env_SUITE.erl index d28117da9b08..0961a37a1855 100644 --- a/deps/rabbit_common/test/rabbit_env_SUITE.erl +++ b/deps/rabbit_common/test/rabbit_env_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_env_SUITE). @@ -383,7 +383,8 @@ check_values_from_reachable_remote_node(Config) -> try persistent_term:put({rabbit_env, os_type}, {unix, undefined}), - UnixContext = rabbit_env:get_context(Node), + TakeFromRemoteNode = {Node, 120000}, + UnixContext = rabbit_env:get_context(TakeFromRemoteNode), persistent_term:erase({rabbit_env, os_type}), @@ -447,7 +448,7 @@ check_values_from_reachable_remote_node(Config) -> erlang_dist_tcp_port => 25672, feature_flags_file => FeatureFlagsFile, forced_feature_flags_on_init => RFFValue, - from_remote_node => {Node, 10000}, + from_remote_node => TakeFromRemoteNode, interactive_shell => false, keep_pid_file_on_exit => false, log_base_dir => "/var/log/rabbitmq", @@ -492,9 +493,19 @@ consume_stdout(Port, Nodename) -> wait_for_remote_node(Nodename) -> case net_adm:ping(Nodename) of - pong -> ok; - pang -> timer:sleep(200), - wait_for_remote_node(Nodename) + pong -> + Ret = erpc:call( + Nodename, application, get_env, [rabbit, plugins_dir]), + case Ret of + {ok, Val} when is_list(Val) -> + ok; + _ -> + timer:sleep(200), + wait_for_remote_node(Nodename) + end; + pang -> + timer:sleep(200), + wait_for_remote_node(Nodename) end. check_values_from_offline_remote_node(_) -> diff --git a/deps/rabbit_common/test/supervisor2_SUITE.erl b/deps/rabbit_common/test/supervisor2_SUITE.erl index 5e59337b1cee..24a1f256302d 100644 --- a/deps/rabbit_common/test/supervisor2_SUITE.erl +++ b/deps/rabbit_common/test/supervisor2_SUITE.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(supervisor2_SUITE). -behaviour(supervisor2). --include_lib("common_test/include/ct.hrl"). -include("rabbit.hrl"). -compile(export_all). diff --git a/deps/rabbit_common/test/test_event_handler.erl b/deps/rabbit_common/test/test_event_handler.erl index b4747f85efe5..b1a60b36826f 100644 --- a/deps/rabbit_common/test/test_event_handler.erl +++ b/deps/rabbit_common/test/test_event_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(test_event_handler). diff --git a/deps/rabbit_common/test/unit_SUITE.erl b/deps/rabbit_common/test/unit_SUITE.erl index b7331b089607..0336bdb7fb57 100644 --- a/deps/rabbit_common/test/unit_SUITE.erl +++ b/deps/rabbit_common/test/unit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_SUITE). diff --git a/deps/rabbit_common/test/unit_password_hashing_SUITE.erl b/deps/rabbit_common/test/unit_password_hashing_SUITE.erl index ea73e01e2d2c..f661cff317ec 100644 --- a/deps/rabbit_common/test/unit_password_hashing_SUITE.erl +++ b/deps/rabbit_common/test/unit_password_hashing_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_password_hashing_SUITE). diff --git a/deps/rabbit_common/test/unit_priority_queue_SUITE.erl b/deps/rabbit_common/test/unit_priority_queue_SUITE.erl index fb8655525844..24f98e21a0eb 100644 --- a/deps/rabbit_common/test/unit_priority_queue_SUITE.erl +++ b/deps/rabbit_common/test/unit_priority_queue_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_priority_queue_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbit_common/test/worker_pool_SUITE.erl b/deps/rabbit_common/test/worker_pool_SUITE.erl index 12c36eeb59ba..36cfa307a705 100644 --- a/deps/rabbit_common/test/worker_pool_SUITE.erl +++ b/deps/rabbit_common/test/worker_pool_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(worker_pool_SUITE). diff --git a/deps/rabbitmq_amqp1_0/.gitignore b/deps/rabbitmq_amqp1_0/.gitignore deleted file mode 100644 index faa711d4f6eb..000000000000 --- a/deps/rabbitmq_amqp1_0/.gitignore +++ /dev/null @@ -1,35 +0,0 @@ -.sw? -.*.sw? 
-*.beam -*.plt -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr - -rabbitmq_amqp1_0.d - -[Dd]ebug/ -[Rr]elease/ -x64/ -build/ -[Bb]in/ -[Oo]bj/ -*.lock.json - -[Dd]ebug/ -[Rr]elease/ -x64/ -build/ -[Bb]in/ -[Oo]bj/ -*.lock.json diff --git a/deps/rabbitmq_amqp1_0/BUILD.bazel b/deps/rabbitmq_amqp1_0/BUILD.bazel index 0b7218ab6c20..3c5a1d767c07 100644 --- a/deps/rabbitmq_amqp1_0/BUILD.bazel +++ b/deps/rabbitmq_amqp1_0/BUILD.bazel @@ -1,15 +1,9 @@ -load("@rules_erlang//:eunit2.bzl", "eunit") load("@rules_erlang//:xref2.bzl", "xref") load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") load( "//:rabbitmq.bzl", - "BROKER_VERSION_REQUIREMENTS_ANY", - "RABBITMQ_DIALYZER_OPTS", "assert_suites", - "broker_for_integration_suites", "rabbitmq_app", - "rabbitmq_integration_suite", - "rabbitmq_suite", ) load( ":app.bzl", @@ -19,22 +13,14 @@ load( "test_suite_beam_files", ) -APP_ENV = """[ - {default_user, "guest"}, - {default_vhost, <<"/">>}, - {protocol_strict_mode, false} - ]""" - APP_NAME = "rabbitmq_amqp1_0" -APP_DESCRIPTION = "AMQP 1.0 support for RabbitMQ" +APP_DESCRIPTION = "Deprecated no-op AMQP 1.0 plugin" all_beam_files(name = "all_beam_files") all_test_beam_files(name = "all_test_beam_files") -all_srcs(name = "all_srcs") - test_suite_beam_files(name = "test_suite_beam_files") rabbitmq_app( @@ -42,107 +28,38 @@ rabbitmq_app( srcs = [":all_srcs"], hdrs = [":public_hdrs"], app_description = APP_DESCRIPTION, - app_env = APP_ENV, - app_extra_keys = BROKER_VERSION_REQUIREMENTS_ANY, app_name = APP_NAME, beam_files = [":beam_files"], license_files = [":license_files"], priv = [":priv"], deps = [ - "//deps/amqp10_common:erlang_app", - "//deps/amqp_client:erlang_app", - "//deps/rabbit:erlang_app", - "//deps/rabbit_common:erlang_app", + "//deps/rabbit:erlang_app", # keep ], ) +all_srcs(name = "all_srcs") + +alias( + name = "rabbitmq_amqp1_0", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) + xref( name = "xref", - additional_libs = [ - "//deps/rabbitmq_cli:erlang_app", # keep - ], target = ":erlang_app", ) plt( name = "deps_plt", - apps = [ - "ssl", # keep - ], for_target = ":erlang_app", - ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep plt = "//:base_plt", - deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) dialyze( name = "dialyze", - size = "medium", - dialyzer_opts = RABBITMQ_DIALYZER_OPTS, plt = ":deps_plt", target = ":erlang_app", ) -eunit( - name = "eunit", - target = ":test_erlang_app", -) - -broker_for_integration_suites() - -rabbitmq_integration_suite( - name = "amqp10_client_SUITE", - size = "medium", - runtime_deps = [ - "//deps/amqp10_client:erlang_app", - ], -) - -rabbitmq_integration_suite( - name = "command_SUITE", - size = "medium", - runtime_deps = [ - "//deps/amqp10_client:erlang_app", - ], - deps = [ - "//deps/amqp10_common:erlang_app", - ], -) - -rabbitmq_integration_suite( - name = "proxy_protocol_SUITE", - size = "medium", -) - -rabbitmq_integration_suite( - name = "system_SUITE", - flaky = True, - shard_count = 2, - tags = [ - "dotnet", - ], - test_env = { - "TMPDIR": "$TEST_TMPDIR", - }, -) - -rabbitmq_integration_suite( - name = "config_schema_SUITE", -) - -rabbitmq_suite( - name = "unit_SUITE", - size = "small", - deps = [ - "//deps/amqp10_common:erlang_app", - ], -) - assert_suites() - -alias( - name = "rabbitmq_amqp1_0", - actual = ":erlang_app", - visibility = ["//visibility:public"], -) diff --git a/deps/rabbitmq_amqp1_0/Makefile 
b/deps/rabbitmq_amqp1_0/Makefile index 11f08ca22b93..30dc3ed18824 100644 --- a/deps/rabbitmq_amqp1_0/Makefile +++ b/deps/rabbitmq_amqp1_0/Makefile @@ -1,46 +1,12 @@ PROJECT = rabbitmq_amqp1_0 -PROJECT_DESCRIPTION = AMQP 1.0 support for RabbitMQ +PROJECT_DESCRIPTION = Deprecated no-op AMQP 1.0 plugin -define PROJECT_ENV -[ - {default_user, "guest"}, - {default_vhost, <<"/">>}, - {protocol_strict_mode, false} - ] -endef - -define PROJECT_APP_EXTRA_KEYS - {broker_version_requirements, []} -endef - -BUILD_DEPS = rabbitmq_codegen -DEPS = rabbit_common rabbit amqp_client amqp10_common -TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp10_client +LOCAL_DEPS = rabbit DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk .DEFAULT_GOAL = all -$(PROJECT).d:: $(EXTRA_SOURCES) include ../../rabbitmq-components.mk include ../../erlang.mk - -# -------------------------------------------------------------------- -# Framing sources generation. -# -------------------------------------------------------------------- - -clean:: clean-extra-sources - -clean-extra-sources: - $(gen_verbose) rm -f $(EXTRA_SOURCES) - -distclean:: distclean-dotnet-tests distclean-java-tests - -distclean-dotnet-tests: - $(gen_verbose) cd test/system_SUITE_data/dotnet-tests && \ - rm -rf bin obj && \ - rm -f project.lock.json TestResult.xml - -distclean-java-tests: - $(gen_verbose) cd test/system_SUITE_data/java-tests && mvn clean diff --git a/deps/rabbitmq_amqp1_0/README.md b/deps/rabbitmq_amqp1_0/README.md index 1000f76e3728..14f65b94c0e0 100644 --- a/deps/rabbitmq_amqp1_0/README.md +++ b/deps/rabbitmq_amqp1_0/README.md @@ -1,234 +1,11 @@ -# AMQP 1.0 support for RabbitMQ +This AMQP 1.0 plugin is deprecated and exists only for backward compatibility. -This plugin adds AMQP 1.0 support to RabbitMQ. - -Despite the name, -AMQP 0-9-1 and 1.0 are very much different protocols and thus -1.0 is treated as a separate protocol supported by RabbitMQ, -not a revision of the original protocol that will eventually supersede it. - -This plugin is several years old and is moderately mature. It may have certain -limitations with its current architecture but most major AMQP 1.0 features should be in place. - -This plugin supports 0-9-1 and 1.0 client interoperability with certain limitations. - -# Configuration - -This plugin ships with modern versions of RabbitMQ. - -It will listen on the standard AMQP port, 5672. To reconfigure this, -do so [as you would for 0-9-1](http://www.rabbitmq.com/configure.html). Clients connecting with 0-9-1 -will continue to work on the same port. - -The following two configuration options (which are specific to the AMQP 1.0 adapter) -are accepted in the `rabbitmq_amqp1_0` section of the configuration file. - -AMQP 1.0 conceptually allows connections that are not authenticated -with SASL (i.e. where no username and password is supplied). By -default these will connect as the "guest" user. To change this, set -`default_user` to a string with the name of the user to use, or the -atom `none` to prevent unauthenticated connections. - - {default_user, "guest"} - -The default virtual host can be specified using the `default_vhost` setting. -See the "Virtual Hosts" section below for a description. - - {default_vhost, <<"/">>} - -The `protocol_strict_mode` setting controls how strictly peers must conform -to the specification. The default is not to enforce strictness, which allows -non-fatal byte-counts in frames and inaccuracies in flow-control from peers. 
- - {protocol_strict_mode, false} - - -Configuration example using [sysctl config format](https://rabbitmq.com/configure.html#config-file-formats): - - amqp1_0.default_user = guest - amqp1_0.default_vhost = / - amqp1_0.protocol_strict_mode = false - - -Configuration for interoperability between AMQP 0.9.1 and AMQP 1.0. +From RabbitMQ `v4.x` onwards, AMQP 1.0 is supported natively by RabbitMQ and all AMQP 1.0 code was moved from this directory to the core [rabbit](../rabbit/) app. +This no-op plugin exists only such that deployment tools can continue to enable and disable this plugin without erroring: ``` -# Conversion only handles simple types, such as strings, ints and booleans. -# Convert AMQP 0.9.1 message headers to application properties for an AMQP 1.0 consumer -amqp1_0.convert_amqp091_headers_to_app_props = false | true (default false) -# Convert AMQP 1.0 Application Properties to AMQP 0.9.1 headers -amqp1_0.convert_app_props_to_amqp091_headers = false | true (default false) - +rabbitmq-plugins enable rabbitmq_amqp1_0 +rabbitmq-plugins disable rabbitmq_amqp1_0 ``` - -## Clients we have tested - -The current field of AMQP 1.0 clients is somewhat limited. Therefore -we have not achieved as much interoperability as we might like. - -We have tested against: - - * SwiftMQ Java client [1] - We have done most of our testing against this client and things seem - to work. - - * QPid / Proton C client [2] - We have successfully tested against the "proton" command line tool - this client ships with. - - * QPid / Proton Java client [2] - We have not been able to get this client to get as far as opening a - network connection (tested against 0.2 and 0.4). - - * Windows Azure Service Bus [3] - It seems that the URI scheme used by this client assumes that it is - connecting to Azure; it does not seem to be possible to get it to - connect to another server. - -[1] http://www.swiftmq.com/products/router/swiftlets/sys_amqp/client/index.html - -[2] http://qpid.apache.org/proton/ - -[3] http://www.windowsazure.com/en-us/develop/net/how-to-guides/service-bus-amqp/ - -As new clients appear we will of course work on interoperability with them. - -# Interoperability with AMQP 0-9-1 - -## Message payloads - -This implementation as a plugin aims for useful interoperability with -AMQP 0-9-1 clients. AMQP 1.0 messages can be far more structured than -AMQP 0-9-1 messages, which simply have a payload of bytes. - -The way we deal with this is that an AMQP 1.0 message with a single -data section will be transcoded to an AMQP 0-9-1 message with just the -bytes from that section, and vice versa. An AMQP 1.0 with any other -payload will keep exactly that payload (i.e., encoded AMQP 1.0 -sections, concatenated), and for AMQP 0-9-1 clients the `type` field -of the `basic.properties` will contain the value `"amqp-1.0"`. - -Thus, AMQP 0-9-1 clients may receive messages that they cannot -understand (if they don't have an AMQP 1.0 codec handy, anyway); -however, these will at least be labelled. AMQP 1.0 clients shall -receive exactly what they expect. - -## Message properties, annotations, headers, etc. 
- -The headers and properties map as follows: - - AMQP 1.0 AMQP 0-9-1 - Header Properties - durable <---------------> delivery-mode [1] - priority <---------------> priority - ttl <---------------> expiration [2] - first-acquirer [3] - delivery-count [4] - Properties - message-id <---------------> message-id [5] - user-id <---------------> user-id - to [6] - subject [6] - reply-to <---------------> reply-to [6] - correlation-id <---------------> correlation-id - content-type <---------------> content-type - content-encoding <---------------> content-encoding - absolute-expiry-time [7] - creation-time <---------------> timestamp - Application Properties <-------/-------> headers [8] - -[1] `durable` is `true` if and only if `delivery-mode` is `2`. - -[2] `expiration` is a shortstr; since RabbitMQ will expect this to be -an encoded string, we translate a `ttl` to the string representation -of its integer value. - -[3] `first-acquirer` is true if and only if the `basic.deliver` field -`redelivered` is false. - -[4] `delivery-count` is left null. - -[5] AMQP 0-9-1 expects this to be a shortstr. - -[6] See Routing and Addressing below. - -[7] `absolute-expiry-time` has no corresponding field in AMQP 0-9-1, -and is not supported in RabbitMQ in any case. - -[8] The application properties section and the `basic.properties` field -`headers` are natural analogues. However, rather than try to transcode -an AMQP 1.0 map to an AMQP 0-9-1 field-table, currently we discard -application properties (of AMQP 1.0 messages) and headers (of AMQP 0-9-1 -messages sent through to AMQP 1.0). In other words, the (AMQP 1.0) -application properties section is only available to AMQP 1.0 clients, and -the (AMQP 0-9-1) headers field is only available to AMQP 0-9-1 -clients. - -Note that properties (in both AMQP 1.0 and AMQP 0-9-1) and application -properties (in AMQP 1.0) are immutable; however, this can only apply -when the sending and receiving clients are using the same protocol. - -## Routing and Addressing - -In AMQP 1.0 source and destination addresses are opaque values, and -each message may have a `subject` field value. - -For targets, addresses are: - - = "/exchange/" X "/" RK Publish to exchange X with routing key RK - | "/exchange/" X Publish to exchange X with message subject as routing key - | "/topic/" RK Publish to amq.topic with routing key RK - | "/amq/queue/" Q Publish to default exchange with routing key Q - | "/queue/" Q Publish to default exchange with routing key Q - | Q (no leading slash) Publish to default exchange with routing key Q - | "/queue" Publish to default exchange with message subj as routing key - -For sources, addresses are: - - = "/exchange/" X "/" BK Consume from temp queue bound to X with binding key BK - | "/topic/" BK Consume from temp queue bound to amq.topic with binding key BK - | "/amq/queue/" Q Consume from Q - | "/queue/" Q Consume from Q - | Q (no leading slash) Consume from Q - -The intent is that the source and destination address formats should be -mostly the same as those supported by the STOMP plugin, to the extent -permitted by AMQP 1.0 semantics. - -## Virtual Hosts - -AMQP 1.0 has no equivalent of AMQP 0-9-1 virtual hosts. A virtual host -on the broker may be addressed when opening an AMQP 1.0 connection by setting -the `hostname` field, prefixing with "vhost:". Setting the `hostname` field -to "vhost:/" addresses the default virtual host. If the `hostname` field -does not start with "vhost:" then the `default_vhost` configuration -setting will be consulted. 
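The address grammar and the `vhost:`-prefixed `hostname` convention described above can be exercised from Erlang with the `amqp10_client` application that ships alongside RabbitMQ. The snippet below is only an illustrative sketch: the queue name, container id and credentials are hypothetical, and a real caller would wait for the link to be granted credit before sending.

```erlang
%% Illustrative only: connect to the default virtual host ("vhost:/")
%% and publish one message to the "/queue/<name>" target address form.
{ok, Conn} = amqp10_client:open_connection(
               #{address => "localhost",
                 port => 5672,
                 hostname => <<"vhost:/">>,
                 container_id => <<"readme-example">>,
                 sasl => {plain, <<"guest">>, <<"guest">>}}),
{ok, Session} = amqp10_client:begin_session(Conn),
{ok, Sender} = amqp10_client:attach_sender_link(
                 Session, <<"example-sender">>, <<"/queue/example-queue">>),
%% In practice, wait for {amqp10_event, {link, Sender, credited}} here.
ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"dtag-1">>, <<"hello">>, true)),
ok = amqp10_client:close_connection(Conn).
```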
- -# Limitations and unsupported features - -At the minute, the RabbitMQ AMQP 1.0 adapter does not support: - - - "Exactly once" delivery [9] - - Link recovery [9] - - Full message fragmentation [10] - - Resuming messages - - "Modified" outcome - - Filters [11] - - Transactions - - Source/target expiry-policy other than link-detach and timeout - other than 0 - - Max message size for links - - Aborted transfers - - TLS negotiation via the AMQP2100 handshake (although SSL is supported) - -[9] We do not deduplicate as a target, though we may resend as a -source (messages that have no settled outcome when an outgoing link is -detached will be requeued). - -[10] We do fragment messages over multiple frames; however, if this -would overflow the session window we may discard or requeue messages. - -[11] In principle, filters for consuming from an exchange could -translate to AMQP 0-9-1 bindings. This is not implemented, so -effectively only consuming from fanout exchanges and queues is useful -currently. +Enabling or disabling this plugin has no effect. +RabbitMQ `v4.x` supports AMQP 1.0 by default. diff --git a/deps/rabbitmq_amqp1_0/app.bzl b/deps/rabbitmq_amqp1_0/app.bzl index 2191da43104b..78f6ada247e1 100644 --- a/deps/rabbitmq_amqp1_0/app.bzl +++ b/deps/rabbitmq_amqp1_0/app.bzl @@ -8,171 +8,46 @@ def all_beam_files(name = "all_beam_files"): ) erlang_bytecode( name = "other_beam", - srcs = [ - "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl", - "src/rabbit_amqp1_0.erl", - "src/rabbit_amqp1_0_channel.erl", - "src/rabbit_amqp1_0_incoming_link.erl", - "src/rabbit_amqp1_0_link_util.erl", - "src/rabbit_amqp1_0_message.erl", - "src/rabbit_amqp1_0_outgoing_link.erl", - "src/rabbit_amqp1_0_reader.erl", - "src/rabbit_amqp1_0_session.erl", - "src/rabbit_amqp1_0_session_process.erl", - "src/rabbit_amqp1_0_session_sup.erl", - "src/rabbit_amqp1_0_session_sup_sup.erl", - "src/rabbit_amqp1_0_util.erl", - "src/rabbit_amqp1_0_writer.erl", - ], + srcs = ["src/rabbitmq_amqp1_0_noop.erl"], hdrs = [":public_and_private_hdrs"], app_name = "rabbitmq_amqp1_0", dest = "ebin", erlc_opts = "//:erlc_opts", - deps = [ - "//deps/amqp10_common:erlang_app", - "//deps/amqp_client:erlang_app", - "//deps/rabbit_common:erlang_app", - "//deps/rabbitmq_cli:erlang_app", - ], - ) - -def all_test_beam_files(name = "all_test_beam_files"): - filegroup( - name = "test_beam_files", - testonly = True, - srcs = [":test_other_beam"], - ) - erlang_bytecode( - name = "test_other_beam", - testonly = True, - srcs = [ - "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl", - "src/rabbit_amqp1_0.erl", - "src/rabbit_amqp1_0_channel.erl", - "src/rabbit_amqp1_0_incoming_link.erl", - "src/rabbit_amqp1_0_link_util.erl", - "src/rabbit_amqp1_0_message.erl", - "src/rabbit_amqp1_0_outgoing_link.erl", - "src/rabbit_amqp1_0_reader.erl", - "src/rabbit_amqp1_0_session.erl", - "src/rabbit_amqp1_0_session_process.erl", - "src/rabbit_amqp1_0_session_sup.erl", - "src/rabbit_amqp1_0_session_sup_sup.erl", - "src/rabbit_amqp1_0_util.erl", - "src/rabbit_amqp1_0_writer.erl", - ], - hdrs = [":public_and_private_hdrs"], - app_name = "rabbitmq_amqp1_0", - dest = "test", - erlc_opts = "//:test_erlc_opts", - deps = [ - "//deps/amqp10_common:erlang_app", - "//deps/amqp_client:erlang_app", - "//deps/rabbit_common:erlang_app", - "//deps/rabbitmq_cli:erlang_app", - ], ) def all_srcs(name = "all_srcs"): filegroup( - name = "all_srcs", - srcs = [":public_and_private_hdrs", ":srcs"], + name = "srcs", + srcs = 
["src/rabbitmq_amqp1_0_noop.erl"], ) + filegroup(name = "private_hdrs") + filegroup(name = "public_hdrs") + filegroup(name = "priv") + filegroup(name = "license_files") filegroup( name = "public_and_private_hdrs", srcs = [":private_hdrs", ":public_hdrs"], ) - - filegroup( - name = "priv", - srcs = ["priv/schema/rabbitmq_amqp1_0.schema"], - ) - filegroup( - name = "private_hdrs", - ) - filegroup( - name = "srcs", - srcs = [ - "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl", - "src/rabbit_amqp1_0.erl", - "src/rabbit_amqp1_0_channel.erl", - "src/rabbit_amqp1_0_incoming_link.erl", - "src/rabbit_amqp1_0_link_util.erl", - "src/rabbit_amqp1_0_message.erl", - "src/rabbit_amqp1_0_outgoing_link.erl", - "src/rabbit_amqp1_0_reader.erl", - "src/rabbit_amqp1_0_session.erl", - "src/rabbit_amqp1_0_session_process.erl", - "src/rabbit_amqp1_0_session_sup.erl", - "src/rabbit_amqp1_0_session_sup_sup.erl", - "src/rabbit_amqp1_0_util.erl", - "src/rabbit_amqp1_0_writer.erl", - ], - ) - filegroup( - name = "public_hdrs", - srcs = ["include/rabbit_amqp1_0.hrl"], - ) filegroup( - name = "license_files", - srcs = [ - "LICENSE", - "LICENSE-MPL-RabbitMQ", - ], + name = "all_srcs", + srcs = [":public_and_private_hdrs", ":srcs"], ) -def test_suite_beam_files(name = "test_suite_beam_files"): - erlang_bytecode( - name = "amqp10_client_SUITE_beam_files", - testonly = True, - srcs = ["test/amqp10_client_SUITE.erl"], - outs = ["test/amqp10_client_SUITE.beam"], - app_name = "rabbitmq_amqp1_0", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], - ) - erlang_bytecode( - name = "command_SUITE_beam_files", - testonly = True, - srcs = ["test/command_SUITE.erl"], - outs = ["test/command_SUITE.beam"], - hdrs = ["include/rabbit_amqp1_0.hrl"], - app_name = "rabbitmq_amqp1_0", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp10_common:erlang_app", "//deps/amqp_client:erlang_app"], - ) - erlang_bytecode( - name = "config_schema_SUITE_beam_files", - testonly = True, - srcs = ["test/config_schema_SUITE.erl"], - outs = ["test/config_schema_SUITE.beam"], - app_name = "rabbitmq_amqp1_0", - erlc_opts = "//:test_erlc_opts", - ) - erlang_bytecode( - name = "proxy_protocol_SUITE_beam_files", - testonly = True, - srcs = ["test/proxy_protocol_SUITE.erl"], - outs = ["test/proxy_protocol_SUITE.beam"], - app_name = "rabbitmq_amqp1_0", - erlc_opts = "//:test_erlc_opts", - ) +def all_test_beam_files(name = "all_test_beam_files"): erlang_bytecode( - name = "system_SUITE_beam_files", + name = "test_other_beam", testonly = True, - srcs = ["test/system_SUITE.erl"], - outs = ["test/system_SUITE.beam"], + srcs = ["src/rabbitmq_amqp1_0_noop.erl"], + hdrs = [":public_and_private_hdrs"], app_name = "rabbitmq_amqp1_0", + dest = "test", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app"], ) - erlang_bytecode( - name = "unit_SUITE_beam_files", + filegroup( + name = "test_beam_files", testonly = True, - srcs = ["test/unit_SUITE.erl"], - outs = ["test/unit_SUITE.beam"], - hdrs = ["include/rabbit_amqp1_0.hrl"], - app_name = "rabbitmq_amqp1_0", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp10_common:erlang_app"], + srcs = [":test_other_beam"], ) + +def test_suite_beam_files(name = "test_suite_beam_files"): + pass diff --git a/deps/rabbitmq_amqp1_0/include/rabbit_amqp1_0.hrl b/deps/rabbitmq_amqp1_0/include/rabbit_amqp1_0.hrl deleted file mode 100644 index eb361683b985..000000000000 --- a/deps/rabbitmq_amqp1_0/include/rabbit_amqp1_0.hrl +++ /dev/null @@ -1,52 +0,0 @@ 
-%%-define(debug, true). - --ifdef(debug). --define(DEBUG0(F), ?SAFE(io:format(F, []))). --define(DEBUG(F, A), ?SAFE(io:format(F, A))). --else. --define(DEBUG0(F), ok). --define(DEBUG(F, A), ok). --endif. - --define(pprint(F), io:format("~p~n", [amqp10_framing:pprint(F)])). - --define(SAFE(F), - ((fun() -> - try F - catch __T:__E:__ST -> - io:format("~p:~p thrown debugging~n~p~n", - [__T, __E, __ST]) - end - end)())). - -%% General consts - --define(FRAME_1_0_MIN_SIZE, 512). - --define(SEND_ROLE, false). --define(RECV_ROLE, true). - -%% Encoding - --include_lib("amqp10_common/include/amqp10_framing.hrl"). - --define(INFO_ITEMS, [pid, - auth_mechanism, - host, - frame_max, - timeout, - user, - state, - recv_oct, - recv_cnt, - send_oct, - send_cnt, - ssl, - ssl_protocol, - ssl_key_exchange, - ssl_cipher, - ssl_hash, - peer_cert_issuer, - peer_cert_subject, - peer_cert_validity, - node]). diff --git a/deps/rabbitmq_amqp1_0/priv/schema/rabbitmq_amqp1_0.schema b/deps/rabbitmq_amqp1_0/priv/schema/rabbitmq_amqp1_0.schema deleted file mode 100644 index 09d2cd06b224..000000000000 --- a/deps/rabbitmq_amqp1_0/priv/schema/rabbitmq_amqp1_0.schema +++ /dev/null @@ -1,37 +0,0 @@ -%% ---------------------------------------------------------------------------- -%% RabbitMQ AMQP 1.0 Support -%% -%% See https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md -%% for details -%% ---------------------------------------------------------------------------- - -% {rabbitmq_amqp1_0,[ -%% Connections that are not authenticated with SASL will connect as this -%% account. See the README for more information. -%% -%% Please note that setting this will allow clients to connect without -%% authenticating! -%% -%% {default_user, "guest"}, -{mapping, "amqp1_0.default_user", "rabbitmq_amqp1_0.default_user", - [{datatype, [{enum, [none]}, string]}]}. -%% Enable protocol strict mode. See the README for more information. -%% -%% {protocol_strict_mode, false} -% ]}, -{mapping, "amqp1_0.protocol_strict_mode", "rabbitmq_amqp1_0.protocol_strict_mode", - [{datatype, {enum, [true, false]}}]}. - -{mapping, "amqp1_0.default_vhost", "rabbitmq_amqp1_0.default_vhost", - [{datatype, string}]}. - -{translation , "rabbitmq_amqp1_0.default_vhost", -fun(Conf) -> - list_to_binary(cuttlefish:conf_get("amqp1_0.default_vhost", Conf)) -end}. - -{mapping, "amqp1_0.convert_amqp091_headers_to_app_props", "rabbitmq_amqp1_0.convert_amqp091_headers_to_app_props", - [{datatype, {enum, [true, false]}}]}. - -{mapping, "amqp1_0.convert_app_props_to_amqp091_headers", "rabbitmq_amqp1_0.convert_app_props_to_amqp091_headers", - [{datatype, {enum, [true, false]}}]}. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0.erl deleted file mode 100644 index dc8e955c5aaa..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0.erl +++ /dev/null @@ -1,42 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% --module(rabbit_amqp1_0). - --export([connection_info_local/1, - emit_connection_info_local/3, - emit_connection_info_all/4, - list/0]). 
- -emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) -> - Pids = [spawn_link(Node, rabbit_amqp1_0, emit_connection_info_local, - [Items, Ref, AggregatorPid]) - || Node <- Nodes], - rabbit_control_misc:await_emitters_termination(Pids), - ok. - -emit_connection_info_local(Items, Ref, AggregatorPid) -> - rabbit_control_misc:emitting_map_with_exit_handler( - AggregatorPid, Ref, - fun(Pid) -> - rabbit_amqp1_0_reader:info(Pid, Items) - end, - list()). - -connection_info_local(Items) -> - Connections = list(), - [rabbit_amqp1_0_reader:info(Pid, Items) || Pid <- Connections]. - -list() -> - [ReaderPid - || {_, TcpPid, _, [tcp_listener_sup]} <- supervisor:which_children(rabbit_sup), - {_, RanchEPid, _, [ranch_embedded_sup]} <- supervisor:which_children(TcpPid), - {_, RanchLPid, _, [ranch_listener_sup]} <- supervisor:which_children(RanchEPid), - {_, RanchCSPid, _, [ranch_conns_sup_sup]} <- supervisor:which_children(RanchLPid), - {_, RanchCPid, _, [ranch_conns_sup]} <- supervisor:which_children(RanchCSPid), - {rabbit_connection_sup, ConnPid, _, _} <- supervisor:which_children(RanchCPid), - {reader, ReaderPid, _, _} <- supervisor:which_children(ConnPid) - ]. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_channel.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_channel.erl deleted file mode 100644 index b4099e4141ec..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_channel.erl +++ /dev/null @@ -1,61 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_channel). - --include_lib("amqp_client/include/amqp_client.hrl"). --include("rabbit_amqp1_0.hrl"). - --export([call/2, call/3, cast/2, cast/3, cast_flow/3, subscribe/3]). --export([convert_code/1, convert_error/1]). - --import(rabbit_amqp1_0_util, [protocol_error/3]). - -call(Ch, Method) -> - convert_error(fun () -> amqp_channel:call(Ch, Method) end). - -call(Ch, Method, Content) -> - convert_error(fun () -> amqp_channel:call(Ch, Method, Content) end). - -cast(Ch, Method) -> - convert_error(fun () -> amqp_channel:cast(Ch, Method) end). - -cast(Ch, Method, Content) -> - convert_error(fun () -> amqp_channel:cast(Ch, Method, Content) end). - -cast_flow(Ch, Method, Content) -> - convert_error(fun () -> amqp_channel:cast_flow(Ch, Method, Content) end). - -subscribe(Ch, Method, Subscriber) -> - convert_error(fun () -> amqp_channel:subscribe(Ch, Method, Subscriber) end). - -convert_error(Fun) -> - try - Fun() - catch exit:{{shutdown, {server_initiated_close, Code, Msg}}, _} -> - protocol_error(convert_code(Code), Msg, []) - end. - -%% TODO this was completely off the top of my head. Check these make sense. 
-convert_code(?CONTENT_TOO_LARGE) -> ?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL; -convert_code(?NO_ROUTE) -> ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED; -convert_code(?NO_CONSUMERS) -> ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED; -convert_code(?ACCESS_REFUSED) -> ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS; -convert_code(?NOT_FOUND) -> ?V_1_0_AMQP_ERROR_NOT_FOUND; -convert_code(?RESOURCE_LOCKED) -> ?V_1_0_AMQP_ERROR_RESOURCE_LOCKED; -convert_code(?PRECONDITION_FAILED) -> ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED; -convert_code(?CONNECTION_FORCED) -> ?V_1_0_CONNECTION_ERROR_CONNECTION_FORCED; -convert_code(?INVALID_PATH) -> ?V_1_0_AMQP_ERROR_INVALID_FIELD; -convert_code(?FRAME_ERROR) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR; -convert_code(?SYNTAX_ERROR) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR; -convert_code(?COMMAND_INVALID) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR; -convert_code(?CHANNEL_ERROR) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR; -convert_code(?UNEXPECTED_FRAME) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR; -convert_code(?RESOURCE_ERROR) -> ?V_1_0_AMQP_ERROR_RESOURCE_LIMIT_EXCEEDED; -convert_code(?NOT_ALLOWED) -> ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS; -convert_code(?NOT_IMPLEMENTED) -> ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED; -convert_code(?INTERNAL_ERROR) -> ?V_1_0_AMQP_ERROR_INTERNAL_ERROR. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_incoming_link.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_incoming_link.erl deleted file mode 100644 index 635767aa4621..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_incoming_link.erl +++ /dev/null @@ -1,246 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_incoming_link). - --export([attach/3, transfer/4]). - --include_lib("amqp_client/include/amqp_client.hrl"). --include("rabbit_amqp1_0.hrl"). - --import(rabbit_amqp1_0_util, [protocol_error/3]). - -%% Just make these constant for the time being. --define(INCOMING_CREDIT, 65536). - --record(incoming_link, {name, exchange, routing_key, mandatory, - delivery_id = undefined, - delivery_count = 0, - send_settle_mode = undefined, - recv_settle_mode = undefined, - credit_used = ?INCOMING_CREDIT div 2, - msg_acc = [], - route_state}). 
- -attach(#'v1_0.attach'{name = Name, - handle = Handle, - source = Source, - snd_settle_mode = SndSettleMode, - rcv_settle_mode = RcvSettleMode, - target = Target, - initial_delivery_count = {uint, InitTransfer}}, - BCh, DCh) -> - %% TODO associate link name with target - case ensure_target(Target, - #incoming_link{ - name = Name, - route_state = rabbit_routing_util:init_state(), - delivery_count = InitTransfer }, - DCh) of - {ok, ServerTarget, IncomingLink} -> - {_, _Outcomes} = rabbit_amqp1_0_link_util:outcomes(Source), - %% Default is mixed - Confirm = - case SndSettleMode of - ?V_1_0_SENDER_SETTLE_MODE_SETTLED -> - false; - _ when SndSettleMode == undefined; - SndSettleMode == ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED; - SndSettleMode == ?V_1_0_SENDER_SETTLE_MODE_MIXED -> - amqp_channel:register_confirm_handler(BCh, self()), - rabbit_amqp1_0_channel:call(BCh, #'confirm.select'{}), - amqp_channel:register_return_handler(BCh, self()), - true - end, - Flow = #'v1_0.flow'{ handle = Handle, - link_credit = {uint, ?INCOMING_CREDIT}, - drain = false, - echo = false }, - Attach = #'v1_0.attach'{ - name = Name, - handle = Handle, - source = Source, - snd_settle_mode = SndSettleMode, - rcv_settle_mode = RcvSettleMode, - target = ServerTarget, - initial_delivery_count = undefined, % must be, I am the receiver - role = ?RECV_ROLE}, %% server is receiver - IncomingLink1 = - IncomingLink#incoming_link{recv_settle_mode = RcvSettleMode, - mandatory = Confirm}, - {ok, [Attach, Flow], IncomingLink1, Confirm}; - {error, Reason} -> - %% TODO proper link establishment protocol here? - protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD, - "Attach rejected: ~tp", [Reason]) - end. - -set_delivery_id({uint, D}, - #incoming_link{delivery_id = undefined} = Link) -> - Link#incoming_link{delivery_id = D}; -set_delivery_id(DeliveryId, - #incoming_link{delivery_id = D} = Link) - when DeliveryId == {uint, D} orelse DeliveryId == undefined -> - Link. - -effective_send_settle_mode(undefined, undefined) -> - false; -effective_send_settle_mode(undefined, SettleMode) - when is_boolean(SettleMode) -> - SettleMode; -effective_send_settle_mode(SettleMode, undefined) - when is_boolean(SettleMode) -> - SettleMode; -effective_send_settle_mode(SettleMode, SettleMode) - when is_boolean(SettleMode) -> - SettleMode. - -effective_recv_settle_mode(undefined, undefined) -> - ?V_1_0_RECEIVER_SETTLE_MODE_FIRST; -effective_recv_settle_mode(undefined, Mode) -> - Mode; -effective_recv_settle_mode(Mode, _) -> - Mode. 
- -% TODO: validate effective settle modes against -% those declared during attach - -% TODO: handle aborted transfers - -transfer(#'v1_0.transfer'{delivery_id = DeliveryId, - more = true, - settled = Settled}, MsgPart, - #incoming_link{msg_acc = MsgAcc, - send_settle_mode = SSM} = Link, _BCh) -> - {ok, set_delivery_id( - DeliveryId, - Link#incoming_link{msg_acc = [MsgPart | MsgAcc], - send_settle_mode = - effective_send_settle_mode(Settled, SSM)})}; -transfer(#'v1_0.transfer'{delivery_id = DeliveryId0, - settled = Settled, - rcv_settle_mode = RcvSettleMode, - handle = Handle}, - MsgPart, - #incoming_link{exchange = X, - routing_key = LinkRKey, - delivery_count = Count, - credit_used = CreditUsed, - msg_acc = MsgAcc, - send_settle_mode = SSM, - recv_settle_mode = RSM} = Link, BCh) -> - MsgBin = iolist_to_binary(lists:reverse([MsgPart | MsgAcc])), - ?DEBUG("Inbound content:~n ~tp", - [[amqp10_framing:pprint(Section) || - Section <- amqp10_framing:decode_bin(MsgBin)]]), - {MsgRKey, Msg} = rabbit_amqp1_0_message:assemble(MsgBin), - RKey = case LinkRKey of - undefined -> MsgRKey; - _ -> LinkRKey - end, - rabbit_amqp1_0_channel:cast_flow( - BCh, #'basic.publish'{exchange = X, - routing_key = RKey, - mandatory = true}, Msg), - {SendFlow, CreditUsed1} = case CreditUsed - 1 of - C when C =< 0 -> - {true, ?INCOMING_CREDIT div 2}; - D -> - {false, D} - end, - #incoming_link{delivery_id = DeliveryId} = - set_delivery_id(DeliveryId0, Link), - NewLink = Link#incoming_link{ - delivery_id = undefined, - send_settle_mode = undefined, - delivery_count = rabbit_amqp1_0_util:serial_add(Count, 1), - credit_used = CreditUsed1, - msg_acc = []}, - Reply = case SendFlow of - true -> ?DEBUG("sending flow for incoming ~tp", [NewLink]), - [incoming_flow(NewLink, Handle)]; - false -> [] - end, - EffectiveSendSettleMode = effective_send_settle_mode(Settled, SSM), - EffectiveRecvSettleMode = effective_recv_settle_mode(RcvSettleMode, RSM), - case not EffectiveSendSettleMode andalso - EffectiveRecvSettleMode =:= ?V_1_0_RECEIVER_SETTLE_MODE_SECOND of - false -> ok; - true -> protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED, - "rcv-settle-mode second not supported", []) - end, - {message, Reply, NewLink, DeliveryId, - EffectiveSendSettleMode}. - -%% TODO default-outcome and outcomes, dynamic lifetimes - -ensure_target(Target = #'v1_0.target'{address = Address, - dynamic = Dynamic, - durable = Durable, - %% TODO - expiry_policy = _ExpiryPolicy, - %% TODO - timeout = _Timeout}, - Link = #incoming_link{ route_state = RouteState }, DCh) -> - DeclareParams = [{durable, rabbit_amqp1_0_link_util:durable(Durable)}, - {exclusive, false}, - {auto_delete, false}, - {check_exchange, true}, - {nowait, false}], - case Dynamic of - true -> - protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED, - "Dynamic targets not supported", []); - _ -> - ok - end, - case Address of - {utf8, Destination} -> - case rabbit_routing_util:parse_endpoint(Destination, true) of - {ok, Dest} -> - {ok, _Queue, RouteState1} = - rabbit_amqp1_0_channel:convert_error( - fun () -> - rabbit_routing_util:ensure_endpoint( - dest, DCh, Dest, DeclareParams, - RouteState) - end), - maybe_ensure_queue(Dest, DCh), - {XName, RK} = rabbit_routing_util:parse_routing(Dest), - {ok, Target, Link#incoming_link{ - route_state = RouteState1, - exchange = list_to_binary(XName), - routing_key = case RK of - undefined -> undefined; - [] -> undefined; - _ -> list_to_binary(RK) - end}}; - {error, _} = E -> - E - end; - _Else -> - {error, {address_not_utf8_string, Address}} - end. 
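The incoming link above grants the sender a fixed window of link credit (`?INCOMING_CREDIT`) and, as `transfer/4` shows, only emits a fresh `#'v1_0.flow'{}` once half of that window has been consumed, which keeps flow frames off the wire for most transfers. A condensed sketch of that bookkeeping, with a deliberately tiny, hypothetical window:

```erlang
%% Condensed sketch of the credit top-up rule in transfer/4 above;
%% the real window is 65536, here it is shrunk for illustration.
-define(WINDOW, 4).

note_transfer(CreditUsed) ->
    case CreditUsed - 1 of
        C when C =< 0 ->
            %% Half the window has been consumed: emit a flow frame
            %% re-granting the full window and reset the counter.
            {send_flow, ?WINDOW div 2};
        C ->
            {no_flow, C}
    end.
```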
- -maybe_ensure_queue({amqqueue, Q}, Ch) -> - try - rabbit_amqp1_0_channel:convert_error( - fun () -> - Method = #'queue.declare'{queue = list_to_binary(Q), - passive = true}, - amqp_channel:call(Ch, Method) - end) - catch exit:#'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED} -> - ok - end; -maybe_ensure_queue(_, _) -> - ok. - -incoming_flow(#incoming_link{ delivery_count = Count }, Handle) -> - #'v1_0.flow'{handle = Handle, - delivery_count = {uint, Count}, - link_credit = {uint, ?INCOMING_CREDIT}}. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_link_util.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_link_util.erl deleted file mode 100644 index 582a9e6fd043..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_link_util.erl +++ /dev/null @@ -1,64 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_link_util). - --include_lib("amqp_client/include/amqp_client.hrl"). --include("rabbit_amqp1_0.hrl"). - --export([outcomes/1, ctag_to_handle/1, handle_to_ctag/1, durable/1]). - --define(EXCHANGE_SUB_LIFETIME, "delete-on-close"). --define(DEFAULT_OUTCOME, #'v1_0.released'{}). --define(OUTCOMES, [?V_1_0_SYMBOL_ACCEPTED, - ?V_1_0_SYMBOL_REJECTED, - ?V_1_0_SYMBOL_RELEASED, - ?V_1_0_SYMBOL_MODIFIED]). --define(SUPPORTED_OUTCOMES, ?OUTCOMES). - -outcomes(Source) -> - {DefaultOutcome, Outcomes} = - case Source of - #'v1_0.source' { - default_outcome = DO, - outcomes = Os - } -> - DO1 = case DO of - undefined -> ?DEFAULT_OUTCOME; - _ -> DO - end, - Os1 = case Os of - undefined -> ?SUPPORTED_OUTCOMES; - {array, symbol, Syms} -> Syms; - Bad1 -> rabbit_amqp1_0_util:protocol_error( - ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED, - "Outcomes not supported: ~tp", - [Bad1]) - end, - {DO1, Os1}; - _ -> - {?DEFAULT_OUTCOME, ?SUPPORTED_OUTCOMES} - end, - case [O || O <- Outcomes, not lists:member(O, ?OUTCOMES)] of - [] -> {DefaultOutcome, {array, symbol, Outcomes}}; - Bad -> rabbit_amqp1_0_util:protocol_error( - ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED, - "Outcomes not supported: ~tp", [Bad]) - end. - -handle_to_ctag({uint, H}) -> - <<"ctag-", H:32/integer>>. - -ctag_to_handle(<<"ctag-", H:32/integer>>) -> - {uint, H}. - -durable(undefined) -> false; %% default: none -durable(?V_1_0_TERMINUS_DURABILITY_NONE) -> false; -%% This one means "existence of the thing is durable, but unacked msgs -%% aren't". We choose to upgrade that. -durable(?V_1_0_TERMINUS_DURABILITY_CONFIGURATION) -> true; -durable(?V_1_0_TERMINUS_DURABILITY_UNSETTLED_STATE) -> true. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_message.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_message.erl deleted file mode 100644 index c8c07af57a30..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_message.erl +++ /dev/null @@ -1,370 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_message). - --export([assemble/1, annotated_message/3]). - --define(PROPERTIES_HEADER, <<"x-amqp-1.0-properties">>). --define(APP_PROPERTIES_HEADER, <<"x-amqp-1.0-app-properties">>). 
--define(MESSAGE_ANNOTATIONS_HEADER, <<"x-amqp-1.0-message-annotations">>). --define(STREAM_OFFSET_HEADER, <<"x-stream-offset">>). --define(FOOTER, <<"x-amqp-1.0-footer">>). --define(X_DELIVERY_COUNT, <<"x-delivery-count">>). --define(CONVERT_AMQP091_HEADERS_TO_APP_PROPS, application:get_env(rabbitmq_amqp1_0, convert_amqp091_headers_to_app_props, false)). --define(CONVERT_APP_PROPS_TO_AMQP091_HEADERS, application:get_env(rabbitmq_amqp1_0, convert_app_props_to_amqp091_headers, false)). - - --include_lib("amqp_client/include/amqp_client.hrl"). --include("rabbit_amqp1_0.hrl"). - -assemble(MsgBin) -> - {RKey, Props, Content0} = assemble(header, {<<"">>, #'P_basic'{}, []}, - decode_section(MsgBin), MsgBin), - - Content1 = case Content0 of - Sections when is_list(Content0) -> - lists:reverse(Sections); - _ -> - Content0 - end, - {RKey, #amqp_msg{props = Props, payload = Content1}}. - -assemble(header, {R, P, C}, {H = #'v1_0.header'{}, Rest}, _Uneaten) -> - assemble(message_annotations, {R, translate_header(H, P), C}, - decode_section(Rest), Rest); -assemble(header, {R, P, C}, Else, Uneaten) -> - assemble(message_annotations, {R, P, C}, Else, Uneaten); - -%% This clause doesn't get called, and is commented out as not to confuse dialyzer. -%% -%% assemble(delivery_annotations, RPC, {#'v1_0.delivery_annotations'{}, Rest}, -%% Uneaten) -> -%% %% ignore delivery annotations for now -%% %% TODO: handle "rejected" error -%% assemble(message_annotations, RPC, Rest, Uneaten); -%% assemble(delivery_annotations, RPC, Else, Uneaten) -> -%% assemble(message_annotations, RPC, Else, Uneaten); - -assemble(message_annotations, {R, P = #'P_basic'{headers = Headers}, C}, - {#'v1_0.message_annotations'{}, Rest}, Uneaten) -> - MsgAnnoBin = chunk(Rest, Uneaten), - assemble(properties, {R, P#'P_basic'{ - headers = set_header(?MESSAGE_ANNOTATIONS_HEADER, - MsgAnnoBin, Headers)}, C}, - decode_section(Rest), Rest); -assemble(message_annotations, {R, P, C}, Else, Uneaten) -> - assemble(properties, {R, P, C}, Else, Uneaten); - -assemble(properties, {_R, P, C}, {X = #'v1_0.properties'{}, Rest}, Uneaten) -> - PropsBin = chunk(Rest, Uneaten), - assemble(app_properties, {routing_key(X), - translate_properties(X, PropsBin, P), C}, - decode_section(Rest), Rest); -assemble(properties, {R, P, C}, Else, Uneaten) -> - assemble(app_properties, {R, P, C}, Else, Uneaten); - -assemble(app_properties, {R, P = #'P_basic'{headers = Headers}, C}, - {#'v1_0.application_properties'{}, Rest}, Uneaten) -> - AppPropsBin = chunk(Rest, Uneaten), - Amqp091Headers = case ?CONVERT_APP_PROPS_TO_AMQP091_HEADERS of - true -> - amqp10_app_props_to_amqp091_headers(Headers, AppPropsBin); - _ -> - Headers - end, - AppPropsAdded = set_header( - ?APP_PROPERTIES_HEADER, - AppPropsBin, Amqp091Headers), - assemble(body, {R, P#'P_basic'{ - headers = AppPropsAdded}, C}, - decode_section(Rest), Rest); -assemble(app_properties, {R, P, C}, Else, Uneaten) -> - assemble(body, {R, P, C}, Else, Uneaten); - -%% The only 'interoperable' content is a single amqp-data section. -%% Everything else we will leave as-is. We still have to parse the -%% sections one-by-one, however, to see when we hit the footer or -%% whatever comes next. - -%% NB we do not strictly enforce the (slightly random) rules -%% pertaining to body sections, that is: -%% - one amqp-value; OR -%% - one or more amqp-sequence; OR -%% - one or more amqp-data. -%% We allow any number of each kind, in any permutation. 
- -assemble(body, {R, P, _}, {#'v1_0.data'{content = Content}, Rest}, Uneaten) -> - Chunk = chunk(Rest, Uneaten), - assemble(amqp10body, {R, set_1_0_type(<<"binary">>, P), - {data, Content, Chunk}}, - decode_section(Rest), Rest); -assemble(body, {R, P, C}, Else, Uneaten) -> - assemble(amqp10body, {R, P, C}, Else, Uneaten); - -assemble(amqp10body, {R, P, C}, {{Type, _}, Rest}, Uneaten) - when Type =:= 'v1_0.data' orelse - Type =:= 'v1_0.amqp_sequence' orelse - Type =:= 'v1_0.amqp_value' -> - Encoded = chunk(Rest, Uneaten), - assemble(amqp10body, - {R, set_1_0_type(<<"amqp-1.0">>, P), add_body_section(Encoded, C)}, - decode_section(Rest), Rest); -assemble(amqp10body, {R, P, C}, Else, Uneaten) -> - assemble(footer, {R, P, compile_body(C)}, Else, Uneaten); - -assemble(footer, {R, P = #'P_basic'{headers = Headers}, C}, - {#'v1_0.footer'{}, <<>>}, Uneaten) -> - {R, P#'P_basic'{headers = set_header(?FOOTER, Uneaten, Headers)}, C}; -assemble(footer, {R, P, C}, none, _) -> - {R, P, C}; -assemble(footer, _, Else, _) -> - exit({unexpected_trailing_sections, Else}). - -%% Catch-all clause, not needed according to dialyzer -%% assemble(Expected, _, Actual, _) -> -%% exit({expected_section, Expected, Actual}). - -decode_section(<<>>) -> - none; -decode_section(MsgBin) -> - {AmqpValue, Rest} = amqp10_binary_parser:parse(MsgBin), - {amqp10_framing:decode(AmqpValue), Rest}. - -chunk(Rest, Uneaten) -> - ChunkLen = size(Uneaten) - size(Rest), - <> = Uneaten, - Chunk. - -add_body_section(C, {data, _, Bin}) -> - [C, Bin]; -add_body_section(C, Cs) -> - [C | Cs]. - -compile_body({data, Content, _}) -> - Content; -compile_body(Sections) -> - lists:reverse(Sections). - -translate_header(Header10, Props) -> - Props#'P_basic'{ - delivery_mode = case Header10#'v1_0.header'.durable of - true -> 2; - _ -> 1 - end, - priority = unwrap(Header10#'v1_0.header'.priority), - expiration = to_expiration(Header10#'v1_0.header'.ttl), - type = undefined, - app_id = undefined, - cluster_id = undefined}. - -translate_properties(Props10, Props10Bin, - Props = #'P_basic'{headers = Headers}) -> - Props#'P_basic'{ - headers = set_header(?PROPERTIES_HEADER, Props10Bin, - Headers), - content_type = unwrap(Props10#'v1_0.properties'.content_type), - content_encoding = unwrap(Props10#'v1_0.properties'.content_encoding), - correlation_id = unwrap(Props10#'v1_0.properties'.correlation_id), - reply_to = case unwrap(Props10#'v1_0.properties'.reply_to) of - <<"/queue/", Q/binary>> -> Q; - Else -> Else - end, - message_id = unwrap(Props10#'v1_0.properties'.message_id), - user_id = unwrap(Props10#'v1_0.properties'.user_id), - timestamp = unwrap(Props10#'v1_0.properties'.creation_time)}. - -routing_key(Props10) -> - unwrap(Props10#'v1_0.properties'.subject). - -unwrap(undefined) -> undefined; -unwrap({_Type, Thing}) -> Thing. - -to_expiration(undefined) -> - undefined; -to_expiration({uint, Num}) -> - list_to_binary(integer_to_list(Num)). - -from_expiration(PBasic) -> - case rabbit_basic:parse_expiration(PBasic) of - {ok, undefined} -> undefined; - {ok, N} -> {uint, N}; - _ -> undefined - end. - -set_header(Header, Value, undefined) -> - set_header(Header, Value, []); -set_header(Header, Value, Headers) -> - rabbit_misc:set_table_value(Headers, Header, longstr, Value). - -set_1_0_type(Type, Props = #'P_basic'{}) -> - Props#'P_basic'{type = Type}. 
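The "single data section" interoperability rule spelled out in the comments above can be seen with `amqp10_framing` directly: a lone `#'v1_0.data'{}` body encodes and decodes back to the original payload bytes, which is what lets the plugin hand an AMQP 0-9-1 consumer a plain binary body. A minimal round-trip sketch, assuming the `amqp10_common` application is available:

```erlang
%% Minimal sketch: a body consisting of a single data section
%% round-trips through amqp10_framing as the original bytes.
-include_lib("amqp10_common/include/amqp10_framing.hrl").

single_data_roundtrip() ->
    Payload = <<"payload bytes">>,
    Encoded = iolist_to_binary(
                amqp10_framing:encode_bin(#'v1_0.data'{content = Payload})),
    [#'v1_0.data'{content = Payload}] = amqp10_framing:decode_bin(Encoded),
    ok.
```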
- -%%-------------------------------------------------------------------- - -%% TODO create delivery-annotations - -annotated_message(RKey, #'basic.deliver'{redelivered = Redelivered}, - #amqp_msg{props = Props, - payload = Content}) -> - #'P_basic'{ headers = Headers } = Props, - Header10 = #'v1_0.header' - {durable = case Props#'P_basic'.delivery_mode of - 2 -> true; - _ -> false - end, - priority = wrap(ubyte, Props#'P_basic'.priority), - ttl = from_expiration(Props), - first_acquirer = not Redelivered, - delivery_count = case Redelivered of - true -> deliverycount_from_headers(Headers); - false -> undefined - end}, - HeadersBin = amqp10_framing:encode_bin(Header10), - MsgAnnoBin0 = - case table_lookup(Headers, ?MESSAGE_ANNOTATIONS_HEADER) of - undefined -> <<>>; - {_, MABin} -> MABin - end, - MsgAnnoBin = - case table_lookup(Headers, ?STREAM_OFFSET_HEADER) of - undefined -> - MsgAnnoBin0; - {_, StreamOffset} when is_integer(StreamOffset) -> - case amqp10_framing:decode_bin(MsgAnnoBin0) of - [#'v1_0.message_annotations'{content = C0} = MA] -> - Contents = map_add(utf8, ?STREAM_OFFSET_HEADER, - ulong, StreamOffset, C0), - amqp10_framing:encode_bin( - MA#'v1_0.message_annotations'{content = Contents}); - [] -> - Contents = map_add(utf8, ?STREAM_OFFSET_HEADER, - ulong, StreamOffset, []), - amqp10_framing:encode_bin( - #'v1_0.message_annotations'{content = Contents}) - end - end, - PropsBin = - case table_lookup(Headers, ?PROPERTIES_HEADER) of - {_, Props10Bin} -> - Props10Bin; - undefined -> - Props10 = #'v1_0.properties'{ - message_id = wrap(utf8, Props#'P_basic'.message_id), - user_id = wrap(utf8, Props#'P_basic'.user_id), - to = undefined, - subject = wrap(utf8, RKey), - reply_to = case Props#'P_basic'.reply_to of - undefined -> - undefined; - _ -> - wrap(utf8, - <<"/queue/", - (Props#'P_basic'.reply_to)/binary>>) - end, - correlation_id = wrap(utf8, Props#'P_basic'.correlation_id), - content_type = wrap(symbol, Props#'P_basic'.content_type), - content_encoding = wrap(symbol, Props#'P_basic'.content_encoding), - creation_time = wrap(timestamp, Props#'P_basic'.timestamp)}, - amqp10_framing:encode_bin(Props10) - end, - AppPropsBin = - case table_lookup(Headers, ?APP_PROPERTIES_HEADER) of - {_, AppProps10Bin} -> - AppProps10Bin; - undefined -> - case ?CONVERT_AMQP091_HEADERS_TO_APP_PROPS of - true -> - case amqp091_headers_to_amqp10_app_props(Headers) of - undefined -> []; - Other -> - amqp10_framing:encode_bin(Other) - end; - _ -> - [] - end - end, - DataBin = case Props#'P_basic'.type of - <<"amqp-1.0">> -> - Content; - _Else -> % e.g., <<"binary">> if originally from 1.0 - amqp10_framing:encode_bin( - #'v1_0.data'{content = Content}) - end, - FooterBin = - case table_lookup(Headers, ?FOOTER) of - undefined -> <<>>; - {_, FBin} -> FBin - end, - [HeadersBin, MsgAnnoBin, PropsBin, AppPropsBin, DataBin, FooterBin]. - -wrap(_Type, undefined) -> - undefined; -wrap(Type, Val) -> - {Type, Val}. - -table_lookup(undefined, _) -> undefined; -table_lookup(Headers, Header) -> rabbit_misc:table_lookup(Headers, Header). - -map_add(KeyType, Key, Type, Value, Acc) -> - [{wrap(KeyType, Key), wrap(Type, Value)} | Acc]. 
- -amqp10_app_props_to_amqp091_headers(CurrentHeaders, AppPropsBin) -> - case amqp10_framing:decode_bin(AppPropsBin) of - [#'v1_0.application_properties'{ content = AppProps}] when is_list(AppProps) -> - Hs = case CurrentHeaders of - undefined -> []; - Headers -> Headers - end, - lists:foldl(fun(Prop, Acc) -> - case Prop of - {{utf8, Key}, {ValueType, Value}} -> - case type10_to_type091(Key, ValueType, Value) of - undefined -> Acc; - Typed -> [Typed |Acc] - end; - _ -> Acc - end - end, Hs, AppProps); - _ -> CurrentHeaders - end. -type10_to_type091(Key, Type, Value) -> - try - rabbit_msg_record:to_091(Key, {Type, Value}) - catch - _:function_clause -> undefined - end. - -amqp091_headers_to_amqp10_app_props(undefined) -> undefined; -amqp091_headers_to_amqp10_app_props(Headers) when is_list(Headers) -> - AppPropsOut = lists:foldl(fun(H, Acc) -> - case H of - {Key, Type, Value} -> - case type091_to_type10(Type, Value) of - undefined -> Acc; - Typed -> - [{{utf8, Key}, Typed}|Acc] - end; - _ -> Acc - end - end, [], Headers), - #'v1_0.application_properties'{content = AppPropsOut}. - -type091_to_type10(Type, Value) -> - try - rabbit_msg_record:from_091(Type, Value) - catch - _:function_clause -> undefined - end. - -deliverycount_from_headers(Headers) -> - case table_lookup(Headers, ?X_DELIVERY_COUNT) of - undefined -> undefined; - {_, Value} when is_integer(Value) -> wrap(uint,Value); - _ -> undefined - end. - diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_outgoing_link.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_outgoing_link.erl deleted file mode 100644 index be42a0301048..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_outgoing_link.erl +++ /dev/null @@ -1,278 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_outgoing_link). - --export([attach/3, detach/3, delivery/6, transferred/3, credit_drained/3, flow/3]). - --include_lib("amqp_client/include/amqp_client.hrl"). --include("rabbit_amqp1_0.hrl"). - --import(rabbit_amqp1_0_util, [protocol_error/3, serial_add/2]). --import(rabbit_amqp1_0_link_util, [handle_to_ctag/1]). - --define(INIT_TXFR_COUNT, 0). --define(DEFAULT_SEND_SETTLED, false). - --record(outgoing_link, {queue, - delivery_count = 0, - send_settled, - default_outcome, - route_state}). - -detach(#'v1_0.detach'{handle = Handle}, BCh,_Link) -> - CTag = handle_to_ctag(Handle), - rabbit_amqp1_0_channel:call(BCh, #'basic.cancel'{consumer_tag = CTag}), - ok. 
- -attach(#'v1_0.attach'{name = Name, - handle = Handle, - source = Source, - snd_settle_mode = SndSettleMode, - rcv_settle_mode = RcvSettleMode}, BCh, DCh) -> - {DefaultOutcome, Outcomes} = rabbit_amqp1_0_link_util:outcomes(Source), - SndSettled = - case SndSettleMode of - ?V_1_0_SENDER_SETTLE_MODE_SETTLED -> true; - ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED -> false; - _ -> ?DEFAULT_SEND_SETTLED - end, - DOSym = amqp10_framing:symbol_for(DefaultOutcome), - case ensure_source(Source, - #outgoing_link{delivery_count = ?INIT_TXFR_COUNT, - send_settled = SndSettled, - default_outcome = DOSym, - route_state = - rabbit_routing_util:init_state()}, - DCh) of - {ok, Source1, OutgoingLink = #outgoing_link{queue = QueueName}} -> - CTag = handle_to_ctag(Handle), - Args = source_filters_to_consumer_args(Source1), - - case rabbit_amqp1_0_channel:subscribe( - BCh, #'basic.consume'{ - queue = QueueName, - consumer_tag = CTag, - %% we will ack when we've transferred - %% a message, or when we get an ack - %% from the client. - no_ack = false, - %% TODO exclusive? - exclusive = false, - arguments = Args ++ - [{<<"x-credit">>, table, - [{<<"credit">>, long, 0}, - {<<"drain">>, bool, false}]}]}, - self()) of - #'basic.consume_ok'{} -> - %% TODO we should avoid the race by getting the queue to send - %% attach back, but a.t.m. it would use the wrong codec. - {ok, [#'v1_0.attach'{ - name = Name, - handle = Handle, - initial_delivery_count = {uint, ?INIT_TXFR_COUNT}, - snd_settle_mode = - case SndSettled of - true -> ?V_1_0_SENDER_SETTLE_MODE_SETTLED; - false -> ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED - end, - rcv_settle_mode = RcvSettleMode, - source = Source1#'v1_0.source'{ - default_outcome = DefaultOutcome, - outcomes = Outcomes - }, - role = ?SEND_ROLE}], OutgoingLink}; - Fail -> - protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, - "Consume failed: ~tp", [Fail]) - end; - {error, Reason} -> - %% TODO proper link establishment protocol here? - protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD, - "Attach rejected: ~tp", [Reason]) - end. - -credit_drained(#'basic.credit_drained'{credit_drained = CreditDrained}, - Handle, Link = #outgoing_link{delivery_count = Count0}) -> - Count = Count0 + CreditDrained, - %% The transfer count that is given by the queue should be at - %% least that we have locally, since we will either have received - %% all the deliveries and transferred them, or the queue will have - %% advanced it due to drain. So we adopt the queue's idea of the - %% count. - %% TODO account for it not being there any more - F = #'v1_0.flow'{ handle = Handle, - delivery_count = {uint, Count}, - link_credit = {uint, 0}, - available = {uint, 0}, - drain = true }, - {F, Link#outgoing_link{delivery_count = Count}}. - -flow(#outgoing_link{delivery_count = LocalCount}, - #'v1_0.flow'{handle = Handle, - delivery_count = Count0, - link_credit = {uint, RemoteCredit}, - drain = Drain0}, BCh) -> - {uint, RemoteCount} = default(Count0, {uint, LocalCount}), - Drain = default(Drain0, false), - %% See section 2.6.7 - LocalCredit = RemoteCount + RemoteCredit - LocalCount, - CTag = handle_to_ctag(Handle), - #'basic.credit_ok'{available = Available} = - rabbit_amqp1_0_channel:call( - BCh, #'basic.credit'{consumer_tag = CTag, - credit = LocalCredit, - drain = Drain}), - case Available of - -1 -> - {ok, []}; - %% We don't know - probably because this flow relates - %% to a handle that does not yet exist - %% TODO is this an error? 
- _ -> - {ok, [#'v1_0.flow'{ - handle = Handle, - delivery_count = {uint, LocalCount}, - link_credit = {uint, LocalCredit}, - available = {uint, Available}, - drain = Drain}]} - end. - -default(undefined, Default) -> Default; -default(Thing, _Default) -> Thing. - -ensure_source(Source = #'v1_0.source'{address = Address, - dynamic = Dynamic, - durable = Durable, - filter = _Filters, - %% TODO - expiry_policy = _ExpiryPolicy, - %% TODO - timeout = _Timeout}, - Link = #outgoing_link{ route_state = RouteState }, DCh) -> - DeclareParams = [{durable, rabbit_amqp1_0_link_util:durable(Durable)}, - {exclusive, false}, - {auto_delete, false}, - {check_exchange, true}, - {nowait, false}], - case Dynamic of - true -> protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED, - "Dynamic sources not supported", []); - _ -> ok - end, - case Address of - {utf8, Destination} -> - case rabbit_routing_util:parse_endpoint(Destination, false) of - {ok, Dest} -> - {ok, Queue, RouteState1} = - rabbit_amqp1_0_channel:convert_error( - fun() -> - rabbit_routing_util:ensure_endpoint( - source, DCh, Dest, DeclareParams, - RouteState) - end), - ER = rabbit_routing_util:parse_routing(Dest), - ok = rabbit_routing_util:ensure_binding(Queue, ER, DCh), - {ok, Source, Link#outgoing_link{route_state = RouteState1, - queue = Queue}}; - {error, _} = E -> - E - end; - _ -> - {error, {address_not_utf8_string, Address}} - end. - -delivery(Deliver = #'basic.deliver'{delivery_tag = DeliveryTag, - routing_key = RKey}, - Msg, FrameMax, Handle, Session, - #outgoing_link{send_settled = SendSettled, - default_outcome = DefaultOutcome}) -> - DeliveryId = rabbit_amqp1_0_session:next_delivery_id(Session), - Session1 = rabbit_amqp1_0_session:record_outgoing( - DeliveryTag, SendSettled, DefaultOutcome, Session), - Txfr = #'v1_0.transfer'{handle = Handle, - delivery_tag = {binary, <>}, - delivery_id = {uint, DeliveryId}, - %% The only one in AMQP 1-0 - message_format = {uint, 0}, - settled = SendSettled, - resume = false, - more = false, - aborted = false, - %% TODO: actually batchable would be fine, - %% but in any case it's only a hint - batchable = false}, - Msg1_0 = rabbit_amqp1_0_message:annotated_message( - RKey, Deliver, Msg), - ?DEBUG("Outbound content:~n ~tp", - [[amqp10_framing:pprint(Section) || - Section <- amqp10_framing:decode_bin( - iolist_to_binary(Msg1_0))]]), - %% TODO Ugh - TLen = iolist_size(amqp10_framing:encode_bin(Txfr)), - Frames = case FrameMax of - unlimited -> - [[Txfr, Msg1_0]]; - _ -> - encode_frames(Txfr, Msg1_0, FrameMax - TLen, []) - end, - {ok, Frames, Session1}. - -encode_frames(_T, _Msg, MaxContentLen, _Transfers) when MaxContentLen =< 0 -> - protocol_error(?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL, - "Frame size is too small by ~tp bytes", [-MaxContentLen]); -encode_frames(T, Msg, MaxContentLen, Transfers) -> - case iolist_size(Msg) > MaxContentLen of - true -> - <> = - iolist_to_binary(Msg), - T1 = T#'v1_0.transfer'{more = true}, - encode_frames(T, Rest, MaxContentLen, - [[T1, Chunk] | Transfers]); - false -> - lists:reverse([[T, Msg] | Transfers]) - end. - -transferred(DeliveryTag, Channel, - Link = #outgoing_link{ delivery_count = Count, - send_settled = SendSettled }) -> - if SendSettled -> - rabbit_amqp1_0_channel:cast( - Channel, #'basic.ack'{ delivery_tag = DeliveryTag }); - true -> - ok - end, - Link#outgoing_link{delivery_count = serial_add(Count, 1)}. 
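The flow/3 clause above turns the receiver's link state into a basic.credit for the backing 0-9-1 channel using the rule from AMQP 1.0 section 2.6.7: the credit still usable by the sender is the remote delivery-count plus the remote link-credit, minus the local delivery-count. A standalone sketch of just that arithmetic (ignoring the serial-number wrap-around the real code handles; names are invented):

-module(link_credit_sketch).
-export([local_credit/3]).

%% RemoteDeliveryCount and RemoteLinkCredit come from the incoming flow frame;
%% LocalDeliveryCount is the sender's own transfer count on the link.
local_credit(RemoteDeliveryCount, RemoteLinkCredit, LocalDeliveryCount) ->
    RemoteDeliveryCount + RemoteLinkCredit - LocalDeliveryCount.

If the receiver last saw delivery-count 10 and granted 50 credits while the sender has already reached delivery-count 12, local_credit(10, 50, 12) returns 48, i.e. two of the granted credits are already spent.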
- -source_filters_to_consumer_args(#'v1_0.source'{filter = {map, KVList}}) -> - Key = {symbol, <<"rabbitmq:stream-offset-spec">>}, - case keyfind_unpack_described(Key, KVList) of - {_, {timestamp, Ts}} -> - [{<<"x-stream-offset">>, timestamp, Ts div 1000}]; %% 0.9.1 uses second based timestamps - {_, {utf8, Spec}} -> - [{<<"x-stream-offset">>, longstr, Spec}]; %% next, last, first and "10m" etc - {_, {_, Offset}} when is_integer(Offset) -> - [{<<"x-stream-offset">>, long, Offset}]; %% integer offset - _ -> - [] - end; -source_filters_to_consumer_args(_Source) -> - []. - -keyfind_unpack_described(Key, KvList) -> - %% filterset values _should_ be described values - %% they aren't always however for historical reasons so we need this bit of - %% code to return a plain value for the given filter key - case lists:keyfind(Key, 1, KvList) of - {Key, {described, Key, Value}} -> - {Key, Value}; - {Key, _} = Kv -> - Kv; - false -> - false - end. - diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_reader.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_reader.erl deleted file mode 100644 index e2f24c7e00ad..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_reader.erl +++ /dev/null @@ -1,812 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_reader). - --include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbit_common/include/rabbit_framing.hrl"). --include_lib("kernel/include/inet.hrl"). --include("rabbit_amqp1_0.hrl"). - --export([init/2, mainloop/2]). --export([info/2]). - -%% TODO which of these are needed? --export([shutdown/2]). --export([system_continue/3, system_terminate/4, system_code_change/4]). --export([conserve_resources/3]). - --import(rabbit_amqp1_0_util, [protocol_error/3]). - --define(HANDSHAKE_TIMEOUT, 10). --define(NORMAL_TIMEOUT, 3). --define(CLOSING_TIMEOUT, 30). --define(SILENT_CLOSE_DELAY, 3). - -%%-------------------------------------------------------------------------- - --record(v1, {parent, sock, connection, callback, recv_len, pending_recv, - connection_state, queue_collector, heartbeater, helper_sup, - channel_sup_sup_pid, buf, buf_len, throttle, proxy_socket, - tracked_channels}). - --record(v1_connection, {user, timeout_sec, frame_max, auth_mechanism, auth_state, - hostname}). - --record(throttle, {alarmed_by, last_blocked_by, last_blocked_at}). - --define(IS_RUNNING(State), - (State#v1.connection_state =:= running orelse - State#v1.connection_state =:= blocking orelse - State#v1.connection_state =:= blocked)). - -%%-------------------------------------------------------------------------- - -unpack_from_0_9_1({Parent, Sock,RecvLen, PendingRecv, - HelperSupPid, Buf, BufLen, ProxySocket}) -> - #v1{parent = Parent, - sock = Sock, - callback = handshake, - recv_len = RecvLen, - pending_recv = PendingRecv, - connection_state = pre_init, - queue_collector = undefined, - heartbeater = none, - helper_sup = HelperSupPid, - buf = Buf, - buf_len = BufLen, - throttle = #throttle{alarmed_by = [], - last_blocked_by = none, - last_blocked_at = never}, - connection = #v1_connection{user = none, - timeout_sec = ?HANDSHAKE_TIMEOUT, - frame_max = ?FRAME_MIN_SIZE, - auth_mechanism = none, - auth_state = none}, - proxy_socket = ProxySocket, - tracked_channels = maps:new()}. 
- -shutdown(Pid, Explanation) -> - gen_server:call(Pid, {shutdown, Explanation}, infinity). - -system_continue(Parent, Deb, State) -> - ?MODULE:mainloop(Deb, State#v1{parent = Parent}). - --spec system_terminate(term(), term(), term(), term()) -> no_return(). -system_terminate(Reason, _Parent, _Deb, _State) -> - exit(Reason). - -system_code_change(Misc, _Module, _OldVsn, _Extra) -> - {ok, Misc}. - --spec conserve_resources(pid(), - rabbit_alarm:resource_alarm_source(), - rabbit_alarm:resource_alert()) -> ok. -conserve_resources(Pid, Source, {_, Conserve, _}) -> - Pid ! {conserve_resources, Source, Conserve}, - ok. - -server_properties() -> - %% The atom doesn't match anything, it's just "not 0-9-1". - Raw = lists:keydelete( - <<"capabilities">>, 1, rabbit_reader:server_properties(amqp_1_0)), - {map, [{{symbol, K}, {utf8, V}} || {K, longstr, V} <- Raw]}. - -%%-------------------------------------------------------------------------- - -inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F). - -recvloop(Deb, State = #v1{pending_recv = true}) -> - mainloop(Deb, State); -recvloop(Deb, State = #v1{connection_state = blocked}) -> - mainloop(Deb, State); -recvloop(Deb, State = #v1{sock = Sock, recv_len = RecvLen, buf_len = BufLen}) - when BufLen < RecvLen -> - case rabbit_net:setopts(Sock, [{active, once}]) of - ok -> - mainloop(Deb, State#v1{pending_recv = true}); - {error, Reason} -> - throw({inet_error, Reason}) - end; -recvloop(Deb, State = #v1{recv_len = RecvLen, buf = Buf, buf_len = BufLen}) -> - {Data, Rest} = split_binary(case Buf of - [B] -> B; - _ -> list_to_binary(lists:reverse(Buf)) - end, RecvLen), - recvloop(Deb, handle_input(State#v1.callback, Data, - State#v1{buf = [Rest], - buf_len = BufLen - RecvLen})). - -mainloop(Deb, State = #v1{sock = Sock, buf = Buf, buf_len = BufLen}) -> - case rabbit_net:recv(Sock) of - {data, Data} -> - recvloop(Deb, State#v1{buf = [Data | Buf], - buf_len = BufLen + size(Data), - pending_recv = false}); - closed when State#v1.connection_state =:= closed -> - ok; - closed -> - throw(connection_closed_abruptly); - {error, Reason} -> - throw({inet_error, Reason}); - {other, {system, From, Request}} -> - sys:handle_system_msg(Request, From, State#v1.parent, - ?MODULE, Deb, State); - {other, Other} -> - case handle_other(Other, State) of - stop -> ok; - NewState -> recvloop(Deb, NewState) - end - end. - -handle_other({conserve_resources, Source, Conserve}, - State = #v1{throttle = Throttle = - #throttle{alarmed_by = CR}}) -> - CR1 = case Conserve of - true -> lists:usort([Source | CR]); - false -> CR -- [Source] - end, - Throttle1 = Throttle#throttle{alarmed_by = CR1}, - control_throttle(State#v1{throttle = Throttle1}); -handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) -> - _ = terminate(io_lib:format("broker forced connection closure " - "with reason '~w'", [Reason]), State), - %% this is what we are expected to do according to - %% http://www.erlang.org/doc/man/sys.html - %% - %% If we wanted to be *really* nice we should wait for a while for - %% clients to close the socket at their end, just as we do in the - %% ordinary error case. However, since this termination is - %% initiated by our parent it is probably more important to exit - %% quickly. 
- exit(Reason); -handle_other({'DOWN', _MRef, process, ChPid, Reason}, State) -> - handle_dependent_exit(ChPid, Reason, State); -handle_other(handshake_timeout, State) - when ?IS_RUNNING(State) orelse - State#v1.connection_state =:= closing orelse - State#v1.connection_state =:= closed -> - State; -handle_other(handshake_timeout, State) -> - throw({handshake_timeout, State#v1.callback}); -handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) -> - State; -handle_other(heartbeat_timeout, #v1{connection_state = S}) -> - throw({heartbeat_timeout, S}); -handle_other({'$gen_call', From, {shutdown, Explanation}}, State) -> - {ForceTermination, NewState} = terminate(Explanation, State), - gen_server:reply(From, ok), - case ForceTermination of - force -> stop; - normal -> NewState - end; -handle_other({'$gen_cast', force_event_refresh}, State) -> - %% Ignore, the broker sent us this as it thinks we are a 0-9-1 connection - State; -handle_other({bump_credit, Msg}, State) -> - credit_flow:handle_bump_msg(Msg), - control_throttle(State); -handle_other(terminate_connection, State) -> - State; -handle_other({info, InfoItems, Pid}, State) -> - Infos = lists:map( - fun(InfoItem) -> - {InfoItem, info_internal(InfoItem, State)} - end, - InfoItems), - Pid ! {info_reply, Infos}, - State; -handle_other(Other, _State) -> - %% internal error -> something worth dying for - exit({unexpected_message, Other}). - -switch_callback(State, Callback, Length) -> - State#v1{callback = Callback, recv_len = Length}. - -terminate(Reason, State) when ?IS_RUNNING(State) -> - {normal, handle_exception(State, 0, - error_frame(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, - "Connection forced: ~tp", [Reason]))}; -terminate(_Reason, State) -> - {force, State}. - -control_throttle(State = #v1{connection_state = CS, throttle = Throttle}) -> - IsThrottled = ((Throttle#throttle.alarmed_by =/= []) orelse - credit_flow:blocked()), - case {CS, IsThrottled} of - {running, true} -> State#v1{connection_state = blocking}; - {blocking, false} -> State#v1{connection_state = running}; - {blocked, false} -> ok = rabbit_heartbeat:resume_monitor( - State#v1.heartbeater), - State#v1{connection_state = running}; - {blocked, true} -> State#v1{throttle = update_last_blocked_by( - Throttle)}; - {_, _} -> State - end. - -update_last_blocked_by(Throttle = #throttle{alarmed_by = []}) -> - Throttle#throttle{last_blocked_by = flow}; -update_last_blocked_by(Throttle) -> - Throttle#throttle{last_blocked_by = resource}. - -%%-------------------------------------------------------------------------- -%% error handling / termination - -close_connection(State = #v1{connection = #v1_connection{ - timeout_sec = TimeoutSec}}) -> - erlang:send_after((if TimeoutSec > 0 andalso - TimeoutSec < ?CLOSING_TIMEOUT -> TimeoutSec; - true -> ?CLOSING_TIMEOUT - end) * 1000, self(), terminate_connection), - State#v1{connection_state = closed}. - -handle_dependent_exit(ChPid, Reason, State) -> - credit_flow:peer_down(ChPid), - - case {ChPid, termination_kind(Reason)} of - {_Channel, controlled} -> - maybe_close(control_throttle(State)); - {Channel, uncontrolled} -> - {RealReason, Trace} = Reason, - R = error_frame(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, "Session error: ~tp~n~tp", [RealReason, Trace]), - maybe_close(handle_exception(control_throttle(State), Channel, R)) - end. - -termination_kind(normal) -> controlled; -termination_kind(_) -> uncontrolled. 
- -maybe_close(State = #v1{connection_state = closing, - sock = Sock}) -> - NewState = close_connection(State), - ok = send_on_channel0(Sock, #'v1_0.close'{}), - % Perform an rpc call to each session process to allow it time to - % process it's internal message buffer before the supervision tree - % shuts everything down and in flight messages such as dispositions - % could be lost - _ = [ _ = rabbit_amqp1_0_session:get_info(SessionPid) - || {{channel, _}, {ch_fr_pid, SessionPid}} <- get()], - NewState; -maybe_close(State) -> - State. - -error_frame(Condition, Fmt, Args) -> - #'v1_0.error'{condition = Condition, - description = {utf8, list_to_binary( - rabbit_misc:format(Fmt, Args))}}. - -handle_exception(State = #v1{connection_state = closed}, Channel, - #'v1_0.error'{description = {utf8, Desc}}) -> - rabbit_log_connection:error("Error on AMQP 1.0 connection ~tp (~tp), channel ~tp:~n~tp", - [self(), closed, Channel, Desc]), - State; -handle_exception(State = #v1{connection_state = CS}, Channel, - ErrorFrame = #'v1_0.error'{description = {utf8, Desc}}) - when ?IS_RUNNING(State) orelse CS =:= closing -> - rabbit_log_connection:error("Error on AMQP 1.0 connection ~tp (~tp), channel ~tp:~n~tp", - [self(), CS, Channel, Desc]), - %% TODO: session errors shouldn't force the connection to close - State1 = close_connection(State), - ok = send_on_channel0(State#v1.sock, #'v1_0.close'{error = ErrorFrame}), - State1; -handle_exception(State, Channel, Error) -> - %% We don't trust the client at this point - force them to wait - %% for a bit so they can't DOS us with repeated failed logins etc. - timer:sleep(?SILENT_CLOSE_DELAY * 1000), - throw({handshake_error, State#v1.connection_state, Channel, Error}). - -%%-------------------------------------------------------------------------- - -%% Begin 1-0 - -%% ---------------------------------------- -%% AMQP 1.0 frame handlers - -is_connection_frame(#'v1_0.open'{}) -> true; -is_connection_frame(#'v1_0.close'{}) -> true; -is_connection_frame(_) -> false. - -%% TODO Handle depending on connection state -%% TODO It'd be nice to only decode up to the descriptor - -handle_1_0_frame(Mode, Channel, Payload, State) -> - try - handle_1_0_frame0(Mode, Channel, Payload, State) - catch - _:#'v1_0.error'{} = Reason -> - handle_exception(State, 0, Reason); - _:{error, {not_allowed, Username}} -> - %% section 2.8.15 in http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-complete-v1.0-os.pdf - handle_exception(State, 0, error_frame( - ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, - "Access for user '~ts' was refused: insufficient permissions", [Username])); - _:Reason:Trace -> - handle_exception(State, 0, error_frame( - ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, - "Reader error: ~tp~n~tp", - [Reason, Trace])) - end. - -%% Nothing specifies that connection methods have to be on a -%% particular channel. -handle_1_0_frame0(_Mode, Channel, Payload, - State = #v1{ connection_state = CS}) when - CS =:= closing; CS =:= closed -> - Sections = parse_1_0_frame(Payload, Channel), - case is_connection_frame(Sections) of - true -> handle_1_0_connection_frame(Sections, State); - false -> State - end; -handle_1_0_frame0(Mode, Channel, Payload, State) -> - Sections = parse_1_0_frame(Payload, Channel), - case {Mode, is_connection_frame(Sections)} of - {amqp, true} -> handle_1_0_connection_frame(Sections, State); - {amqp, false} -> handle_1_0_session_frame(Channel, Sections, State); - {sasl, false} -> handle_1_0_sasl_frame(Sections, State) - end. 
- -parse_1_0_frame(Payload, _Channel) -> - {PerfDesc, Rest} = amqp10_binary_parser:parse(Payload), - Perf = amqp10_framing:decode(PerfDesc), - ?DEBUG("Channel ~tp ->~n~tp~n~ts", - [_Channel, amqp10_framing:pprint(Perf), - case Rest of - <<>> -> <<>>; - _ -> rabbit_misc:format( - " followed by ~tp bytes of content", [size(Rest)]) - end]), - case Rest of - <<>> -> Perf; - _ -> {Perf, Rest} - end. - -handle_1_0_connection_frame(#'v1_0.open'{ max_frame_size = ClientFrameMax, - channel_max = ClientChannelMax, - idle_time_out = IdleTimeout, - hostname = Hostname }, - State = #v1{ - connection_state = starting, - connection = Connection, - throttle = Throttle, - helper_sup = HelperSupPid, - sock = Sock}) -> - ClientHeartbeatSec = case IdleTimeout of - undefined -> 0; - {uint, Interval} -> Interval div 1000 - end, - FrameMax = case ClientFrameMax of - undefined -> unlimited; - {_, FM} -> FM - end, - {ok, HeartbeatSec} = application:get_env(rabbit, heartbeat), - State1 = - if (FrameMax =/= unlimited) and (FrameMax < ?FRAME_1_0_MIN_SIZE) -> - protocol_error(?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL, - "frame_max=~w < ~w min size", - [FrameMax, ?FRAME_1_0_MIN_SIZE]); - true -> - {ok, Collector} = - rabbit_connection_helper_sup:start_queue_collector( - HelperSupPid, <<"AMQP 1.0">>), %% TODO describe the connection - SendFun = - fun() -> - Frame = - amqp10_binary_generator:build_heartbeat_frame(), - catch rabbit_net:send(Sock, Frame) - end, - - Parent = self(), - ReceiveFun = - fun() -> - Parent ! heartbeat_timeout - end, - %% [2.4.5] the value in idle-time-out SHOULD be half the peer's - %% actual timeout threshold - ReceiverHeartbeatSec = lists:min([HeartbeatSec * 2, 4294967]), - %% TODO: only start heartbeat receive timer at next next frame - Heartbeater = - rabbit_heartbeat:start(HelperSupPid, Sock, - ClientHeartbeatSec, SendFun, - ReceiverHeartbeatSec, ReceiveFun), - State#v1{connection_state = running, - connection = Connection#v1_connection{ - frame_max = FrameMax, - hostname = Hostname}, - heartbeater = Heartbeater, - queue_collector = Collector} - end, - HostnameVal = case Hostname of - undefined -> undefined; - null -> undefined; - {utf8, Val} -> Val - end, - rabbit_log:debug("AMQP 1.0 connection.open frame: hostname = ~ts, extracted vhost = ~ts, idle_timeout = ~tp" , - [HostnameVal, vhost(Hostname), HeartbeatSec * 1000]), - %% TODO enforce channel_max - ok = send_on_channel0( - Sock, - #'v1_0.open'{channel_max = ClientChannelMax, - max_frame_size = ClientFrameMax, - idle_time_out = {uint, HeartbeatSec * 1000}, - container_id = {utf8, rabbit_nodes:cluster_name()}, - properties = server_properties()}), - Conserve = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), - control_throttle( - State1#v1{throttle = Throttle#throttle{alarmed_by = Conserve}}); - -handle_1_0_connection_frame(_Frame, State) -> - maybe_close(State#v1{connection_state = closing}). 
- -handle_1_0_session_frame(Channel, Frame, State) -> - case maps:get(Channel, State#v1.tracked_channels, undefined) of - undefined -> - case ?IS_RUNNING(State) of - true -> - send_to_new_1_0_session(Channel, Frame, State); - false -> - throw({channel_frame_while_starting, - Channel, State#v1.connection_state, - Frame}) - end; - SessionPid -> - ok = rabbit_amqp1_0_session:process_frame(SessionPid, Frame), - case Frame of - #'v1_0.end'{} -> - untrack_channel(Channel, State); - #'v1_0.transfer'{} -> - case (State#v1.connection_state =:= blocking) of - true -> - ok = rabbit_heartbeat:pause_monitor( - State#v1.heartbeater), - State#v1{connection_state = blocked}; - false -> - State - end; - _ -> - State - end - end. - -%% TODO: write a proper ANONYMOUS plugin and unify with STOMP -handle_1_0_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, <<"ANONYMOUS">>}, - hostname = _Hostname}, - State = #v1{connection_state = starting, - sock = Sock}) -> - case application:get_env(rabbitmq_amqp1_0, default_user) of - {ok, none} -> - %% No need to do anything, we will blow up in start_connection - ok; - {ok, _} -> - %% We only need to send the frame, again start_connection - %% will set up the default user. - Outcome = #'v1_0.sasl_outcome'{code = {ubyte, 0}}, - ok = send_on_channel0(Sock, Outcome, rabbit_amqp1_0_sasl), - switch_callback(State#v1{connection_state = waiting_amqp0100}, - handshake, 8) - end; -handle_1_0_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, Mechanism}, - initial_response = {binary, Response}, - hostname = _Hostname}, - State0 = #v1{connection_state = starting, - connection = Connection, - sock = Sock}) -> - AuthMechanism = auth_mechanism_to_module(Mechanism, Sock), - State = State0#v1{connection = - Connection#v1_connection{ - auth_mechanism = {Mechanism, AuthMechanism}, - auth_state = AuthMechanism:init(Sock)}, - connection_state = securing}, - auth_phase_1_0(Response, State); -handle_1_0_sasl_frame(#'v1_0.sasl_response'{response = {binary, Response}}, - State = #v1{connection_state = securing}) -> - auth_phase_1_0(Response, State); -handle_1_0_sasl_frame(Frame, State) -> - throw({unexpected_1_0_sasl_frame, Frame, State}). - -%% We need to handle restarts... -handle_input(handshake, <<"AMQP", 0, 1, 0, 0>>, State) -> - start_1_0_connection(amqp, State); - -%% 3 stands for "SASL" (keeping this here for when we do TLS) -handle_input(handshake, <<"AMQP", 3, 1, 0, 0>>, State) -> - start_1_0_connection(sasl, State); - -handle_input({frame_header_1_0, Mode}, - Header = <>, - State) when DOff >= 2 -> - case {Mode, Type} of - {amqp, 0} -> ok; - {sasl, 1} -> ok; - _ -> throw({bad_1_0_header_type, Header, Mode}) - end, - case Size of - 8 -> % length inclusive - State; %% heartbeat - _ -> - switch_callback(State, {frame_payload_1_0, Mode, DOff, Channel}, Size - 8) - end; -handle_input({frame_header_1_0, _Mode}, Malformed, _State) -> - throw({bad_1_0_header, Malformed}); -handle_input({frame_payload_1_0, Mode, DOff, Channel}, - FrameBin, State) -> - SkipBits = (DOff * 32 - 64), % DOff = 4-byte words, we've read 8 already - <> = FrameBin, - Skip = Skip, %% hide warning when debug is off - handle_1_0_frame(Mode, Channel, FramePayload, - switch_callback(State, {frame_header_1_0, Mode}, 8)); - -handle_input(Callback, Data, _State) -> - throw({bad_input, Callback, Data}). - -init(Mode, PackedState) -> - %% By invoking recvloop here we become 1.0. - recvloop(sys:debug_options([]), - start_1_0_connection(Mode, unpack_from_0_9_1(PackedState))). 
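The handle_input clauses above walk the fixed 8-byte AMQP 1.0 frame header from section 2.3.1 of the spec: a 32-bit total size, an 8-bit data offset counted in 4-byte words (hence at least 2), an 8-bit frame type (0 for AMQP, 1 for SASL) and a 16-bit channel number, optionally followed by an extended header that is skipped. A self-contained sketch of that parse, with invented names:

-module(frame_header_sketch).
-export([parse/1]).

parse(<<Size:32, DOff:8, Type:8, Channel:16, Rest/binary>>) when DOff >= 2 ->
    %% DOff counts 4-byte words from the start of the frame to the body,
    %% so the extended header occupies DOff * 4 - 8 bytes.
    ExtHeaderBytes = DOff * 4 - 8,
    <<_ExtHeader:ExtHeaderBytes/binary, Body/binary>> = Rest,
    #{size => Size, doff => DOff, type => Type,
      channel => Channel, body => Body}.

A size of 8 with DOff = 2 is an empty frame, which the reader above treats as a heartbeat.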
- -start_1_0_connection(sasl, State = #v1{sock = Sock}) -> - send_1_0_handshake(Sock, <<"AMQP",3,1,0,0>>), - Ms = {array, symbol, - case application:get_env(rabbitmq_amqp1_0, default_user) of - {ok, none} -> []; - {ok, _} -> [{symbol, <<"ANONYMOUS">>}] - end ++ - [{symbol, list_to_binary(atom_to_list(M))} || M <- auth_mechanisms(Sock)]}, - Mechanisms = #'v1_0.sasl_mechanisms'{sasl_server_mechanisms = Ms}, - ok = send_on_channel0(Sock, Mechanisms, rabbit_amqp1_0_sasl), - start_1_0_connection0(sasl, State); - -start_1_0_connection(amqp, - State = #v1{sock = Sock, - connection = C = #v1_connection{user = User}}) -> - {ok, NoAuthUsername} = application:get_env(rabbitmq_amqp1_0, default_user), - case {User, NoAuthUsername} of - {none, none} -> - send_1_0_handshake(Sock, <<"AMQP",3,1,0,0>>), - throw(banned_unauthenticated_connection); - {none, Username} -> - case rabbit_access_control:check_user_login( - list_to_binary(Username), []) of - {ok, NoAuthUser} -> - State1 = State#v1{ - connection = C#v1_connection{user = NoAuthUser}}, - send_1_0_handshake(Sock, <<"AMQP",0,1,0,0>>), - start_1_0_connection0(amqp, State1); - _ -> - send_1_0_handshake(Sock, <<"AMQP",3,1,0,0>>), - throw(default_user_missing) - end; - _ -> - send_1_0_handshake(Sock, <<"AMQP",0,1,0,0>>), - start_1_0_connection0(amqp, State) - end. - -start_1_0_connection0(Mode, State = #v1{connection = Connection, - helper_sup = HelperSup}) -> - ChannelSupSupPid = - case Mode of - sasl -> - undefined; - amqp -> - StartMFA = {rabbit_amqp1_0_session_sup_sup, start_link, []}, - ChildSpec = #{id => channel_sup_sup, - start => StartMFA, - restart => transient, - significant => true, - shutdown => infinity, - type => supervisor, - modules => [rabbit_amqp1_0_session_sup_sup]}, - {ok, Pid} = supervisor:start_child(HelperSup, ChildSpec), - Pid - end, - switch_callback(State#v1{connection = Connection#v1_connection{ - timeout_sec = ?NORMAL_TIMEOUT}, - channel_sup_sup_pid = ChannelSupSupPid, - connection_state = starting}, - {frame_header_1_0, Mode}, 8). - -send_1_0_handshake(Sock, Handshake) -> - ok = inet_op(fun () -> rabbit_net:send(Sock, Handshake) end). - -send_on_channel0(Sock, Method) -> - send_on_channel0(Sock, Method, amqp10_framing). - -send_on_channel0(Sock, Method, Framing) -> - ok = rabbit_amqp1_0_writer:internal_send_command( - Sock, 0, Method, Framing). - -%% End 1-0 - -auth_mechanism_to_module(TypeBin, Sock) -> - case rabbit_registry:binary_to_type(TypeBin) of - {error, not_found} -> - protocol_error(?V_1_0_AMQP_ERROR_NOT_FOUND, - "unknown authentication mechanism '~ts'", [TypeBin]); - T -> - case {lists:member(T, auth_mechanisms(Sock)), - rabbit_registry:lookup_module(auth_mechanism, T)} of - {true, {ok, Module}} -> - Module; - _ -> - protocol_error(?V_1_0_AMQP_ERROR_NOT_FOUND, - "invalid authentication mechanism '~ts'", [T]) - end - end. - -auth_mechanisms(Sock) -> - {ok, Configured} = application:get_env(rabbit, auth_mechanisms), - [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism), - Module:should_offer(Sock), lists:member(Name, Configured)]. - -%% Begin 1-0 - -auth_phase_1_0(Response, - State = #v1{connection = Connection = - #v1_connection{auth_mechanism = {Name, AuthMechanism}, - auth_state = AuthState}, - sock = Sock}) -> - case AuthMechanism:handle_response(Response, AuthState) of - {refused, User, Msg, Args} -> - %% We don't trust the client at this point - force them to wait - %% for a bit before sending the sasl outcome frame - %% so they can't DOS us with repeated failed logins etc. 
- rabbit_core_metrics:auth_attempt_failed(<<>>, User, amqp10), - timer:sleep(?SILENT_CLOSE_DELAY * 1000), - Outcome = #'v1_0.sasl_outcome'{code = {ubyte, 1}}, - ok = send_on_channel0(Sock, Outcome, rabbit_amqp1_0_sasl), - protocol_error( - ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, "~ts login refused: ~ts", - [Name, io_lib:format(Msg, Args)]); - {protocol_error, Msg, Args} -> - rabbit_core_metrics:auth_attempt_failed(<<>>, <<>>, amqp10), - protocol_error(?V_1_0_AMQP_ERROR_DECODE_ERROR, Msg, Args); - {challenge, Challenge, AuthState1} -> - rabbit_core_metrics:auth_attempt_succeeded(<<>>, <<>>, amqp10), - Secure = #'v1_0.sasl_challenge'{challenge = {binary, Challenge}}, - ok = send_on_channel0(Sock, Secure, rabbit_amqp1_0_sasl), - State#v1{connection = Connection#v1_connection{auth_state = AuthState1}}; - {ok, User = #user{username = Username}} -> - case rabbit_access_control:check_user_loopback(Username, Sock) of - ok -> - rabbit_log_connection:info( - "AMQP 1.0 connection ~tp: user '~ts' authenticated", - [self(), Username]), - rabbit_core_metrics:auth_attempt_succeeded(<<>>, Username, amqp10), - ok; - not_allowed -> - rabbit_core_metrics:auth_attempt_failed(<<>>, Username, amqp10), - protocol_error( - ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, - "user '~ts' can only connect via localhost", - [Username]) - end, - Outcome = #'v1_0.sasl_outcome'{code = {ubyte, 0}}, - ok = send_on_channel0(Sock, Outcome, rabbit_amqp1_0_sasl), - switch_callback( - State#v1{connection_state = waiting_amqp0100, - connection = Connection#v1_connection{user = User}}, - handshake, 8) - end. - -track_channel(Channel, ChFrPid, State) -> - rabbit_log:debug("AMQP 1.0 opened channel = ~tp " , [{Channel, ChFrPid}]), - State#v1{tracked_channels = maps:put(Channel, ChFrPid, State#v1.tracked_channels)}. - -untrack_channel(Channel, State) -> - case maps:take(Channel, State#v1.tracked_channels) of - {Value, NewMap} -> - rabbit_log:debug("AMQP 1.0 closed channel = ~tp ", [{Channel, Value}]), - State#v1{tracked_channels = NewMap}; - error -> State - end. - -send_to_new_1_0_session(Channel, Frame, State) -> - #v1{sock = Sock, queue_collector = Collector, - channel_sup_sup_pid = ChanSupSup, - connection = #v1_connection{frame_max = FrameMax, - hostname = Hostname, - user = User}, - proxy_socket = ProxySocket} = State, - %% Note: the equivalent, start_channel is in channel_sup_sup - - case rabbit_amqp1_0_session_sup_sup:start_session( - %% NB subtract fixed frame header size - ChanSupSup, {amqp10_framing, Sock, Channel, - case FrameMax of - unlimited -> unlimited; - _ -> FrameMax - 8 - end, - self(), User, vhost(Hostname), Collector, ProxySocket}) of - {ok, _ChSupPid, ChFrPid} -> - erlang:monitor(process, ChFrPid), - ModifiedState = track_channel(Channel, ChFrPid, State), - rabbit_log_connection:info( - "AMQP 1.0 connection ~tp: " - "user '~ts' authenticated and granted access to vhost '~ts'", - [self(), User#user.username, vhost(Hostname)]), - ok = rabbit_amqp1_0_session:process_frame(ChFrPid, Frame), - ModifiedState; - {error, {not_allowed, _}} -> - rabbit_log:error("AMQP 1.0: user '~ts' is not allowed to access virtual host '~ts'", - [User#user.username, vhost(Hostname)]), - %% Let's skip the supervisor trace, this is an expected error - throw({error, {not_allowed, User#user.username}}); - {error, _} = E -> - throw(E) - end. - -vhost({utf8, <<"vhost:", VHost/binary>>}) -> - VHost; -vhost(_) -> - application:get_env(rabbitmq_amqp1_0, default_vhost, - application:get_env(rabbit, default_vhost, <<"/">>)). 
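vhost/1 above encodes the convention this plugin used for virtual host selection: an AMQP 1.0 client picks a vhost by opening the connection with a hostname of the form "vhost:<name>", and any other hostname falls back to the configured default. A small sketch of the same pattern match, with the default hard-coded rather than read from application config:

-module(vhost_sketch).
-export([vhost/1]).

vhost({utf8, <<"vhost:", VHost/binary>>}) -> VHost;   %% explicit selection
vhost(_Other)                             -> <<"/">>. %% fall back to a default

vhost_sketch:vhost({utf8, <<"vhost:/production">>}) returns <<"/production">>, while a plain DNS hostname returns the default.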
- -%% End 1-0 - -info(Pid, InfoItems) -> - case InfoItems -- ?INFO_ITEMS of - [] -> - Ref = erlang:monitor(process, Pid), - Pid ! {info, InfoItems, self()}, - receive - {info_reply, Items} -> - erlang:demonitor(Ref), - Items; - {'DOWN', _, process, Pid, _} -> - [] - end; - UnknownItems -> throw({bad_argument, UnknownItems}) - end. - -info_internal(pid, #v1{}) -> self(); -info_internal(connection, #v1{connection = Val}) -> - Val; -info_internal(node, #v1{}) -> node(); -info_internal(auth_mechanism, #v1{connection = #v1_connection{auth_mechanism = none}}) -> - none; -info_internal(auth_mechanism, #v1{connection = #v1_connection{auth_mechanism = {Name, _Mod}}}) -> - Name; -info_internal(host, #v1{connection = #v1_connection{hostname = {utf8, Val}}}) -> - Val; -info_internal(host, #v1{connection = #v1_connection{hostname = Val}}) -> - Val; -info_internal(frame_max, #v1{connection = #v1_connection{frame_max = Val}}) -> - Val; -info_internal(timeout, #v1{connection = #v1_connection{timeout_sec = Val}}) -> - Val; -info_internal(user, - #v1{connection = #v1_connection{user = #user{username = Val}}}) -> - Val; -info_internal(user, - #v1{connection = #v1_connection{user = none}}) -> - ''; -info_internal(state, #v1{connection_state = Val}) -> - Val; -info_internal(SockStat, S) when SockStat =:= recv_oct; - SockStat =:= recv_cnt; - SockStat =:= send_oct; - SockStat =:= send_cnt; - SockStat =:= send_pend -> - socket_info(fun (Sock) -> rabbit_net:getstat(Sock, [SockStat]) end, - fun ([{_, I}]) -> I end, S); -info_internal(ssl, #v1{sock = Sock}) -> rabbit_net:is_ssl(Sock); -info_internal(SSL, #v1{sock = Sock, proxy_socket = ProxySock}) - when SSL =:= ssl_protocol; - SSL =:= ssl_key_exchange; - SSL =:= ssl_cipher; - SSL =:= ssl_hash -> - rabbit_ssl:info(SSL, {Sock, ProxySock}); -info_internal(Cert, #v1{sock = Sock}) - when Cert =:= peer_cert_issuer; - Cert =:= peer_cert_subject; - Cert =:= peer_cert_validity -> - rabbit_ssl:cert_info(Cert, Sock). - -%% From rabbit_reader -socket_info(Get, Select, #v1{sock = Sock}) -> - case Get(Sock) of - {ok, T} -> Select(T); - {error, _} -> '' - end. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session.erl deleted file mode 100644 index 75a35f3caef0..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session.erl +++ /dev/null @@ -1,420 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_session). - --export([process_frame/2, - get_info/1]). - --export([init/1, begin_/2, maybe_init_publish_id/2, record_delivery/3, - incr_incoming_id/1, next_delivery_id/1, transfers_left/1, - record_transfers/2, bump_outgoing_window/1, - record_outgoing/4, settle/3, flow_fields/2, channel/1, - flow/2, ack/2, return/2, validate_attach/1]). - --import(rabbit_amqp1_0_util, [protocol_error/3, - serial_add/2, serial_diff/2, serial_compare/2]). - --include_lib("amqp_client/include/amqp_client.hrl"). --include("rabbit_amqp1_0.hrl"). - --define(MAX_SESSION_WINDOW_SIZE, 65535). --define(DEFAULT_MAX_HANDLE, 16#ffffffff). --define(CALL_TIMEOUT, 30000). 
% 30s - matches CLOSE_TIMEOUT - --record(session, {channel_num, %% we just use the incoming (AMQP 1.0) channel number - remote_incoming_window, % keep track of the window until we're told - remote_outgoing_window, - next_incoming_id, % just to keep a check - incoming_window_max, % ) - incoming_window, % ) so we know when to open the session window - next_outgoing_id = 0, % arbitrary count of outgoing transfers - outgoing_window, - outgoing_window_max, - next_publish_id, %% the 0-9-1-side counter for confirms - next_delivery_id = 0, - incoming_unsettled_map, - outgoing_unsettled_map }). - -%% We record delivery_id -> #outgoing_delivery{}, so that we can -%% respond to dispositions about messages we've sent. NB the -%% delivery-tag doubles as the id we use when acking the rabbit -%% delivery. --record(outgoing_delivery, {delivery_tag, expected_outcome}). - -%% We record confirm_id -> #incoming_delivery{} so we can relay -%% confirms from the broker back to the sending client. NB we have -%% only one possible outcome, so there's no need to record it here. --record(incoming_delivery, {delivery_id}). - -get_info(Pid) -> - gen_server2:call(Pid, info, ?CALL_TIMEOUT). - -process_frame(Pid, Frame) -> - credit_flow:send(Pid), - gen_server2:cast(Pid, {frame, Frame, self()}). - -init(Channel) -> - #session{channel_num = Channel, - next_publish_id = 0, - incoming_unsettled_map = gb_trees:empty(), - outgoing_unsettled_map = gb_trees:empty()}. - -%% Session window: -%% -%% Each session has two abstract[1] buffers, one to record the -%% unsettled state of incoming messages, one to record the unsettled -%% state of outgoing messages. In general we want to bound these -%% buffers; but if we bound them, and don't tell the other side, we -%% may end up deadlocking the other party. -%% -%% Hence the flow frame contains a session window, expressed as the -%% next-id and the window size for each of the buffers. The frame -%% refers to the window of the sender of the frame, of course. -%% -%% The numbers work this way: for the outgoing window, the next-id -%% counts the next transfer the session will send, and it will stop -%% sending at next-id + window. For the incoming window, the next-id -%% counts the next transfer id expected, and it will not accept -%% messages beyond next-id + window (in fact it will probably close -%% the session, since sending outside the window is a transgression of -%% the protocol). -%% -%% We may as well just pick a value for the incoming and outgoing -%% windows; choosing based on what the client says may just stop -%% things dead, if the value is zero for instance. -%% -%% [1] Abstract because there probably won't be a data structure with -%% a size directly related to transfers; settlement is done with -%% delivery-id, which may refer to one or more transfers. 
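Given the session-window rules spelled out above (each peer advertises a next-id plus a window per direction, and the other side must stop sending at next-id + window), a minimal check of whether one more transfer fits can be written as below. Serial-number wrap-around, which the real session handles with serial_add/serial_diff, is ignored here and all names are illustrative:

-module(session_window_sketch).
-export([can_send/3]).

%% NextOutgoingId: the id the sender would use for its next transfer.
%% RemoteNextIncomingId / RemoteIncomingWindow: the peer's advertised window.
can_send(NextOutgoingId, RemoteNextIncomingId, RemoteIncomingWindow) ->
    NextOutgoingId < RemoteNextIncomingId + RemoteIncomingWindow.

With a remote next-incoming-id of 100 and a window of 50, transfers 100 through 149 are allowed: can_send(149, 100, 50) is true and can_send(150, 100, 50) is false.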
-begin_(#'v1_0.begin'{next_outgoing_id = {uint, RemoteNextOut}, - incoming_window = {uint, RemoteInWindow}, - outgoing_window = {uint, RemoteOutWindow}, - handle_max = HandleMax0}, - Session = #session{next_outgoing_id = LocalNextOut, - channel_num = Channel}) -> - InWindow = ?MAX_SESSION_WINDOW_SIZE, - OutWindow = ?MAX_SESSION_WINDOW_SIZE, - HandleMax = case HandleMax0 of - {uint, Max} -> Max; - _ -> ?DEFAULT_MAX_HANDLE - end, - {ok, #'v1_0.begin'{remote_channel = {ushort, Channel}, - handle_max = {uint, HandleMax}, - next_outgoing_id = {uint, LocalNextOut}, - incoming_window = {uint, InWindow}, - outgoing_window = {uint, OutWindow}}, - Session#session{ - outgoing_window = OutWindow, - outgoing_window_max = OutWindow, - next_incoming_id = RemoteNextOut, - remote_incoming_window = RemoteInWindow, - remote_outgoing_window = RemoteOutWindow, - incoming_window = InWindow, - incoming_window_max = InWindow}, - OutWindow}. - -validate_attach(#'v1_0.attach'{target = #'v1_0.coordinator'{}}) -> - protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED, - "Transactions not supported", []); -validate_attach(#'v1_0.attach'{unsettled = Unsettled, - incomplete_unsettled = IncompleteSettled}) - when Unsettled =/= undefined andalso Unsettled =/= {map, []} orelse - IncompleteSettled =:= true -> - protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED, - "Link recovery not supported", []); -validate_attach( - #'v1_0.attach'{snd_settle_mode = SndSettleMode, - rcv_settle_mode = ?V_1_0_RECEIVER_SETTLE_MODE_SECOND}) - when SndSettleMode =/= ?V_1_0_SENDER_SETTLE_MODE_SETTLED -> - protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED, - "rcv-settle-mode second not supported", []); -validate_attach(#'v1_0.attach'{}) -> - ok. - -maybe_init_publish_id(false, Session) -> - Session; -maybe_init_publish_id(true, Session = #session{next_publish_id = Id}) -> - Session#session{next_publish_id = erlang:max(1, Id)}. - -record_delivery(DeliveryId, Settled, - Session = #session{next_publish_id = Id, - incoming_unsettled_map = Unsettled}) -> - Id1 = case Id of - 0 -> 0; - _ -> Id + 1 % this ought to be a serial number in the broker, but isn't - end, - Unsettled1 = case Settled of - true -> - Unsettled; - false -> - gb_trees:insert(Id, - #incoming_delivery{ - delivery_id = DeliveryId }, - Unsettled) - end, - Session#session{ - next_publish_id = Id1, - incoming_unsettled_map = Unsettled1}. - -incr_incoming_id(Session = #session{ next_incoming_id = NextIn, - incoming_window = InWindow, - incoming_window_max = InWindowMax, - remote_outgoing_window = RemoteOut }) -> - NewOutWindow = RemoteOut - 1, - InWindow1 = InWindow - 1, - NewNextIn = serial_add(NextIn, 1), - %% If we've reached halfway, open the window - {Flows, NewInWindow} = - if InWindow1 =< (InWindowMax div 2) -> - {[#'v1_0.flow'{}], InWindowMax}; - true -> - {[], InWindow1} - end, - {Flows, Session#session{ next_incoming_id = NewNextIn, - incoming_window = NewInWindow, - remote_outgoing_window = NewOutWindow}}. - -next_delivery_id(#session{next_delivery_id = Num}) -> Num. - -transfers_left(#session{remote_incoming_window = RemoteWindow, - outgoing_window = LocalWindow}) -> - {LocalWindow, RemoteWindow}. 
- -record_outgoing(DeliveryTag, SendSettled, DefaultOutcome, - Session = #session{next_delivery_id = DeliveryId, - outgoing_unsettled_map = Unsettled}) -> - Unsettled1 = case SendSettled of - true -> - Unsettled; - false -> - gb_trees:insert(DeliveryId, - #outgoing_delivery{ - delivery_tag = DeliveryTag, - expected_outcome = DefaultOutcome }, - Unsettled) - end, - Session#session{outgoing_unsettled_map = Unsettled1, - next_delivery_id = serial_add(DeliveryId, 1)}. - -record_transfers(NumTransfers, - Session = #session{ remote_incoming_window = RemoteInWindow, - outgoing_window = OutWindow, - next_outgoing_id = NextOutId }) -> - Session#session{ remote_incoming_window = RemoteInWindow - NumTransfers, - outgoing_window = OutWindow - NumTransfers, - next_outgoing_id = serial_add(NextOutId, NumTransfers) }. - -%% Make sure we have "room" in our outgoing window by bumping the -%% window if necessary. TODO this *could* be based on how much -%% notional "room" there is in outgoing_unsettled. -bump_outgoing_window(Session = #session{ outgoing_window_max = OutMax }) -> - {#'v1_0.flow'{}, Session#session{ outgoing_window = OutMax }}. - -%% We've been told that the fate of a delivery has been determined. -%% Generally if the other side has not settled it, we will do so. If -%% the other side /has/ settled it, we don't need to reply -- it's -%% already forgotten its state for the delivery anyway. -settle(Disp = #'v1_0.disposition'{first = First0, - last = Last0, - state = _Outcome, - settled = Settled}, - Session = #session{outgoing_unsettled_map = Unsettled}, - UpstreamAckFun) -> - {uint, First} = First0, - %% Last may be omitted, in which case it's the same as first - Last = case Last0 of - {uint, L} -> L; - undefined -> First - end, - %% The other party may be talking about something we've already - %% forgotten; this isn't a crime, we can just ignore it. - case gb_trees:is_empty(Unsettled) of - true -> - {none, Session}; - false -> - {LWM, _} = gb_trees:smallest(Unsettled), - {HWM, _} = gb_trees:largest(Unsettled), - if Last < LWM -> - {none, Session}; - %% TODO this should probably be an error, rather than ignored. - First > HWM -> - {none, Session}; - true -> - Unsettled1 = - lists:foldl( - fun (Delivery, Map) -> - case gb_trees:lookup(Delivery, Map) of - none -> - Map; - {value, Entry} -> - #outgoing_delivery{delivery_tag = DeliveryTag } = Entry, - ?DEBUG("Settling ~tp with ~tp", [Delivery, _Outcome]), - UpstreamAckFun(DeliveryTag), - gb_trees:delete(Delivery, Map) - end - end, - Unsettled, lists:seq(erlang:max(LWM, First), - erlang:min(HWM, Last))), - {case Settled of - true -> none; - false -> Disp#'v1_0.disposition'{ settled = true, - role = ?SEND_ROLE } - end, - Session#session{outgoing_unsettled_map = Unsettled1}} - end - end. - -flow_fields(Frames, Session) when is_list(Frames) -> - [flow_fields(F, Session) || F <- Frames]; - -flow_fields(Flow = #'v1_0.flow'{}, - #session{next_outgoing_id = NextOut, - next_incoming_id = NextIn, - outgoing_window = OutWindow, - incoming_window = InWindow}) -> - Flow#'v1_0.flow'{ - next_outgoing_id = {uint, NextOut}, - outgoing_window = {uint, OutWindow}, - next_incoming_id = {uint, NextIn}, - incoming_window = {uint, InWindow}}; - -flow_fields(Frame, _Session) -> - Frame. - -channel(#session{channel_num = Channel}) -> Channel. 
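settle/3 above resolves a disposition that names a [first, last] range of delivery-ids (last defaults to first when omitted) against the gb_tree of unsettled deliveries, acting only on ids it still tracks. The clamping can be sketched on plain integers as follows, with Smallest and Largest standing in for the smallest and largest tracked ids:

-module(disposition_range_sketch).
-export([ids_to_settle/4]).

ids_to_settle(First, undefined, Smallest, Largest) ->
    %% last omitted: the range covers just First
    ids_to_settle(First, First, Smallest, Largest);
ids_to_settle(First, Last, Smallest, Largest)
  when Last >= Smallest, First =< Largest ->
    lists:seq(max(Smallest, First), min(Largest, Last));
ids_to_settle(_First, _Last, _Smallest, _Largest) ->
    [].  %% the range refers only to deliveries we have already forgotten

ids_to_settle(3, 10, 5, 8) yields [5,6,7,8], matching the lists:seq(erlang:max(LWM, First), erlang:min(HWM, Last)) call above.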
- -%% We should already know the next outgoing transfer sequence number, -%% because it's one more than the last transfer we saw; and, we don't -%% need to know the next incoming transfer sequence number (although -%% we might use it to detect congestion -- e.g., if it's lagging far -%% behind our outgoing sequence number). We probably care about the -%% outgoing window, since we want to keep it open by sending back -%% settlements, but there's not much we can do to hurry things along. -%% -%% We do care about the incoming window, because we must not send -%% beyond it. This may cause us problems, even in normal operation, -%% since we want our unsettled transfers to be exactly those that are -%% held as unacked by the backing channel; however, the far side may -%% close the window while we still have messages pending transfer, and -%% indeed, an individual message may take more than one 'slot'. -%% -%% Note that this isn't a race so far as AMQP 1.0 is concerned; it's -%% only because AMQP 0-9-1 defines QoS in terms of the total number of -%% unacked messages, whereas 1.0 has an explicit window. -flow(#'v1_0.flow'{next_incoming_id = FlowNextIn0, - incoming_window = {uint, FlowInWindow}, - next_outgoing_id = {uint, FlowNextOut}, - outgoing_window = {uint, FlowOutWindow}}, - Session = #session{next_incoming_id = LocalNextIn, - next_outgoing_id = LocalNextOut}) -> - %% The far side may not have our begin{} with our next-transfer-id - FlowNextIn = case FlowNextIn0 of - {uint, Id} -> Id; - undefined -> LocalNextOut - end, - case serial_compare(FlowNextOut, LocalNextIn) of - equal -> - case serial_compare(FlowNextIn, LocalNextOut) of - greater -> - protocol_error(?V_1_0_SESSION_ERROR_WINDOW_VIOLATION, - "Remote incoming id (~tp) leads " - "local outgoing id (~tp)", - [FlowNextIn, LocalNextOut]); - equal -> - Session#session{ - remote_outgoing_window = FlowOutWindow, - remote_incoming_window = FlowInWindow}; - less -> - Session#session{ - remote_outgoing_window = FlowOutWindow, - remote_incoming_window = - serial_diff(serial_add(FlowNextIn, FlowInWindow), - LocalNextOut)} - end; - _ -> - case application:get_env(rabbitmq_amqp1_0, protocol_strict_mode) of - {ok, false} -> - Session#session{next_incoming_id = FlowNextOut}; - {ok, true} -> - protocol_error(?V_1_0_SESSION_ERROR_WINDOW_VIOLATION, - "Remote outgoing id (~tp) not equal to " - "local incoming id (~tp)", - [FlowNextOut, LocalNextIn]) - end - end. - -%% An acknowledgement from the queue, which we'll get if we are -%% using confirms. -ack(#'basic.ack'{delivery_tag = DTag, multiple = Multiple}, - Session = #session{incoming_unsettled_map = Unsettled}) -> - {DeliveryIds, Unsettled1} = - case Multiple of - true -> acknowledgement_range(DTag, Unsettled); - false -> case gb_trees:lookup(DTag, Unsettled) of - {value, #incoming_delivery{ delivery_id = Id }} -> - {[Id], gb_trees:delete(DTag, Unsettled)}; - none -> - {[], Unsettled} - end - end, - Disposition = case DeliveryIds of - [] -> []; - _ -> [acknowledgement( - DeliveryIds, - #'v1_0.disposition'{role = ?RECV_ROLE})] - end, - {Disposition, - Session#session{incoming_unsettled_map = Unsettled1}}. - -acknowledgement_range(DTag, Unsettled) -> - acknowledgement_range(DTag, Unsettled, []). 
- -acknowledgement_range(DTag, Unsettled, Acc) -> - case gb_trees:is_empty(Unsettled) of - true -> - {lists:reverse(Acc), Unsettled}; - false -> - {DTag1, #incoming_delivery{ delivery_id = Id}} = - gb_trees:smallest(Unsettled), - case DTag1 =< DTag of - true -> - {_K, _V, Unsettled1} = gb_trees:take_smallest(Unsettled), - acknowledgement_range(DTag, Unsettled1, - [Id|Acc]); - false -> - {lists:reverse(Acc), Unsettled} - end - end. - -acknowledgement(DeliveryIds, Disposition) -> - Disposition#'v1_0.disposition'{ first = {uint, hd(DeliveryIds)}, - last = {uint, lists:last(DeliveryIds)}, - settled = true, - state = #'v1_0.accepted'{} }. - -return(DTag, Session = #session{incoming_unsettled_map = Unsettled}) -> - {DeliveryId, - Unsettled1} = case gb_trees:lookup(DTag, Unsettled) of - {value, #incoming_delivery{ delivery_id = Id }} -> - {Id, gb_trees:delete(DTag, Unsettled)}; - none -> - {undefined, Unsettled} - end, - Disposition = case DeliveryId of - undefined -> undefined; - _ -> release(DeliveryId, - #'v1_0.disposition'{role = ?RECV_ROLE}) - end, - {Disposition, - Session#session{incoming_unsettled_map = Unsettled1}}. - -release(DeliveryId, Disposition) -> - Disposition#'v1_0.disposition'{ first = {uint, DeliveryId}, - last = {uint, DeliveryId}, - settled = true, - state = #'v1_0.released'{} }. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_process.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_process.erl deleted file mode 100644 index 0191ceaa6eca..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_process.erl +++ /dev/null @@ -1,467 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_session_process). - --behaviour(gen_server2). - --export([init/1, terminate/2, code_change/3, - handle_call/3, handle_cast/2, handle_info/2]). - --export([start_link/1]). --export([info/1]). - --record(state, {backing_connection, backing_channel, frame_max, - reader_pid, writer_pid, buffer, session}). - --record(pending, {delivery_tag, frames, link_handle }). - --include_lib("amqp_client/include/amqp_client.hrl"). --include("rabbit_amqp1_0.hrl"). - --import(rabbit_amqp1_0_util, [protocol_error/3]). --import(rabbit_amqp1_0_link_util, [ctag_to_handle/1]). - -start_link(Args) -> - gen_server2:start_link(?MODULE, Args, []). - -info(Pid) -> - gen_server2:call(Pid, info, infinity). -%% --------- - -init({Channel, ReaderPid, WriterPid, #user{username = Username}, VHost, - FrameMax, AdapterInfo, _Collector}) -> - process_flag(trap_exit, true), - case amqp_connection:start( - #amqp_params_direct{username = Username, - virtual_host = VHost, - adapter_info = AdapterInfo}) of - {ok, Conn} -> - case amqp_connection:open_channel(Conn) of - {ok, Ch} -> - monitor(process, Ch), - {ok, #state{backing_connection = Conn, - backing_channel = Ch, - reader_pid = ReaderPid, - writer_pid = WriterPid, - frame_max = FrameMax, - buffer = queue:new(), - session = rabbit_amqp1_0_session:init(Channel) - }}; - {error, Reason} -> - rabbit_log:warning("Closing session for connection ~tp:~n~tp", - [ReaderPid, Reason]), - {stop, Reason} - end; - {error, Reason} -> - rabbit_log:warning("Closing session for connection ~tp:~n~tp", - [ReaderPid, Reason]), - {stop, Reason} - end. 
- -terminate(_Reason, _State = #state{backing_connection = Conn}) -> - rabbit_misc:with_exit_handler(fun () -> ok end, - fun () -> amqp_connection:close(Conn) end). - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -handle_call(info, _From, #state{reader_pid = ReaderPid, - backing_connection = Conn} = State) -> - Info = [{reader, ReaderPid}, {connection, Conn}], - {reply, Info, State}; -handle_call(Msg, _From, State) -> - {reply, {error, not_understood, Msg}, State}. - -handle_info(#'basic.consume_ok'{}, State) -> - %% Handled above - {noreply, State}; - -handle_info(#'basic.cancel_ok'{}, State) -> - %% just ignore this for now, - %% At some point we should send the detach here but then we'd need to track - %% consumer tags -> link handle somewhere - {noreply, State}; -handle_info({#'basic.deliver'{ consumer_tag = ConsumerTag, - delivery_tag = DeliveryTag } = Deliver, Msg}, - State = #state{frame_max = FrameMax, - buffer = Buffer, - session = Session}) -> - Handle = ctag_to_handle(ConsumerTag), - case get({out, Handle}) of - undefined -> - %% TODO handle missing link -- why does the queue think it's there? - rabbit_log:warning("Delivery to non-existent consumer ~tp", - [ConsumerTag]), - {noreply, State}; - Link -> - {ok, Frames, Session1} = - rabbit_amqp1_0_outgoing_link:delivery( - Deliver, Msg, FrameMax, Handle, Session, Link), - Pending = #pending{ delivery_tag = DeliveryTag, - frames = Frames, - link_handle = Handle }, - Buffer1 = queue:in(Pending, Buffer), - {noreply, run_buffer( - state(Session1, State#state{ buffer = Buffer1 }))} - end; - -%% A message from the queue saying that there are no more messages -handle_info(#'basic.credit_drained'{consumer_tag = CTag} = CreditDrained, - State = #state{writer_pid = WriterPid, - session = Session}) -> - Handle = ctag_to_handle(CTag), - Link = get({out, Handle}), - {Flow0, Link1} = rabbit_amqp1_0_outgoing_link:credit_drained( - CreditDrained, Handle, Link), - Flow = rabbit_amqp1_0_session:flow_fields(Flow0, Session), - rabbit_amqp1_0_writer:send_command(WriterPid, Flow), - put({out, Handle}, Link1), - {noreply, State}; - -handle_info(#'basic.ack'{} = Ack, State = #state{writer_pid = WriterPid, - session = Session}) -> - {Reply, Session1} = rabbit_amqp1_0_session:ack(Ack, Session), - [rabbit_amqp1_0_writer:send_command(WriterPid, F) || - F <- rabbit_amqp1_0_session:flow_fields(Reply, Session)], - {noreply, state(Session1, State)}; - -handle_info({#'basic.return'{}, {DTag, _Msg}}, State = #state{writer_pid = WriterPid, - session = Session}) -> - {Reply, Session1} = rabbit_amqp1_0_session:return(DTag, Session), - case Reply of - undefined -> - ok; - _ -> - rabbit_amqp1_0_writer:send_command( - WriterPid, - rabbit_amqp1_0_session:flow_fields(Reply, Session) - ) - end, - {noreply, state(Session1, State)}; - -handle_info({#'basic.return'{}, _Msg}, State = #state{session = Session}) -> - rabbit_log:warning("AMQP 1.0 message return without publishing sequence"), - {noreply, state(Session, State)}; - -handle_info({bump_credit, Msg}, State) -> - credit_flow:handle_bump_msg(Msg), - {noreply, State}; - -%% TODO these pretty much copied wholesale from rabbit_channel -handle_info({'EXIT', WriterPid, Reason = {writer, send_failed, _Error}}, - State = #state{writer_pid = WriterPid}) -> - State#state.reader_pid ! 
- {channel_exit, rabbit_amqp1_0_session:channel(session(State)), Reason}, - {stop, normal, State}; -handle_info({'EXIT', _Pid, Reason}, State) -> - {stop, Reason, State}; -handle_info({'DOWN', _MRef, process, Ch, Reason}, - #state{reader_pid = ReaderPid, - writer_pid = Sock, - backing_channel = Ch} = State) -> - Error = - case Reason of - {shutdown, {server_initiated_close, Code, Msg}} -> - #'v1_0.error'{condition = rabbit_amqp1_0_channel:convert_code(Code), - description = {utf8, Msg}}; - _ -> - #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, - description = {utf8, - list_to_binary( - lists:flatten( - io_lib:format("~w", [Reason])))}} - end, - End = #'v1_0.end'{ error = Error }, - rabbit_log:warning("Closing session for connection ~tp:~n~tp", - [ReaderPid, Reason]), - ok = rabbit_amqp1_0_writer:send_command_sync(Sock, End), - {stop, normal, State}; -handle_info({'DOWN', _MRef, process, _QPid, _Reason}, State) -> - %% TODO do we care any more since we're using direct client? - {noreply, State}. % TODO rabbit_channel uses queue_blocked? - -handle_cast({frame, Frame, FlowPid}, - State = #state{ reader_pid = ReaderPid, - writer_pid = Sock }) -> - credit_flow:ack(FlowPid), - try handle_control(Frame, State) of - {reply, Replies, NewState} when is_list(Replies) -> - lists:foreach(fun (Reply) -> - rabbit_amqp1_0_writer:send_command(Sock, Reply) - end, Replies), - noreply(NewState); - {reply, Reply, NewState} -> - rabbit_amqp1_0_writer:send_command(Sock, Reply), - noreply(NewState); - {noreply, NewState} -> - noreply(NewState); - stop -> - {stop, normal, State} - catch exit:Reason = #'v1_0.error'{} -> - %% TODO shut down nicely like rabbit_channel - End = #'v1_0.end'{ error = Reason }, - rabbit_log:warning("Closing session for connection ~tp:~n~tp", - [ReaderPid, Reason]), - ok = rabbit_amqp1_0_writer:send_command_sync(Sock, End), - {stop, normal, State}; - exit:normal -> - {stop, normal, State}; - _:Reason:Stacktrace -> - {stop, {Reason, Stacktrace}, State} - end. - -%% TODO rabbit_channel returns {noreply, State, hibernate}, but that -%% appears to break things here (it stops the session responding to -%% frames). -noreply(State) -> - {noreply, State}. - -%% ------ - -handle_control(#'v1_0.begin'{} = Begin, - State = #state{backing_channel = Ch, - session = Session}) -> - {ok, Reply, Session1, Prefetch} = - rabbit_amqp1_0_session:begin_(Begin, Session), - %% Attempt to limit the number of "at risk" messages we can have. 
- rabbit_amqp1_0_channel:cast(Ch, #'basic.qos'{prefetch_count = Prefetch}), - reply(Reply, state(Session1, State)); - -handle_control(#'v1_0.attach'{handle = Handle, - role = ?SEND_ROLE} = Attach, - State = #state{backing_channel = BCh, - backing_connection = Conn}) -> - ok = rabbit_amqp1_0_session:validate_attach(Attach), - {ok, Reply, Link, Confirm} = - with_disposable_channel( - Conn, fun (DCh) -> - rabbit_amqp1_0_incoming_link:attach(Attach, BCh, DCh) - end), - put({in, Handle}, Link), - reply(Reply, state(rabbit_amqp1_0_session:maybe_init_publish_id( - Confirm, session(State)), State)); - -handle_control(#'v1_0.attach'{handle = Handle, - role = ?RECV_ROLE} = Attach, - State = #state{backing_channel = BCh, - backing_connection = Conn}) -> - ok = rabbit_amqp1_0_session:validate_attach(Attach), - {ok, Reply, Link} = - with_disposable_channel( - Conn, fun (DCh) -> - rabbit_amqp1_0_outgoing_link:attach(Attach, BCh, DCh) - end), - put({out, Handle}, Link), - reply(Reply, State); - -handle_control({Txfr = #'v1_0.transfer'{handle = Handle}, - MsgPart}, - State = #state{backing_channel = BCh, - session = Session}) -> - case get({in, Handle}) of - undefined -> - protocol_error(?V_1_0_AMQP_ERROR_ILLEGAL_STATE, - "Unknown link handle ~tp", [Handle]); - Link -> - {Flows, Session1} = rabbit_amqp1_0_session:incr_incoming_id(Session), - case rabbit_amqp1_0_incoming_link:transfer( - Txfr, MsgPart, Link, BCh) of - {message, Reply, Link1, DeliveryId, Settled} -> - put({in, Handle}, Link1), - Session2 = rabbit_amqp1_0_session:record_delivery( - DeliveryId, Settled, Session1), - reply(Reply ++ Flows, state(Session2, State)); - {ok, Link1} -> - put({in, Handle}, Link1), - reply(Flows, state(Session1, State)) - end - end; - -%% Disposition: multiple deliveries may be settled at a time. -%% TODO: should we send a flow after this, to indicate the state -%% of the session window? -handle_control(#'v1_0.disposition'{state = Outcome, - role = ?RECV_ROLE} = Disp, - State = #state{backing_channel = Ch}) -> - AckFun = - fun (DeliveryTag) -> - ok = rabbit_amqp1_0_channel:call( - Ch, case Outcome of - #'v1_0.accepted'{} -> - #'basic.ack'{delivery_tag = DeliveryTag, - multiple = false}; - #'v1_0.modified'{delivery_failed = true, - undeliverable_here = UndelHere} -> - %% NB: this is not quite correct. - %% `undeliverable_here' refers to the link - %% not the message in general but we cannot - %% filter messages from being assigned to - %% individual consumers - %% so will have to reject it without requeue - %% in this case. 
- Requeue = case UndelHere of - true -> - false; - _ -> - true - end, - #'basic.reject'{delivery_tag = DeliveryTag, - requeue = Requeue}; - #'v1_0.modified'{} -> - %% if delivery_failed is not true, treat we - %% can't increment its' delivery_count so - %% will have to reject without requeue - #'basic.reject'{delivery_tag = DeliveryTag, - requeue = false}; - #'v1_0.rejected'{} -> - #'basic.reject'{delivery_tag = DeliveryTag, - requeue = false}; - #'v1_0.released'{} -> - #'basic.reject'{delivery_tag = DeliveryTag, - requeue = true}; - _ -> - protocol_error( - ?V_1_0_AMQP_ERROR_INVALID_FIELD, - "Unrecognised state: ~tp~n" - "Disposition was: ~tp", [Outcome, Disp]) - end) - end, - case rabbit_amqp1_0_session:settle(Disp, session(State), AckFun) of - {none, Session1} -> {noreply, state(Session1, State)}; - {Reply, Session1} -> {reply, Reply, state(Session1, State)} - end; - -handle_control(#'v1_0.detach'{handle = Handle} = Detach, - #state{backing_channel = BCh} = State) -> - %% TODO keep the state around depending on the lifetime - %% TODO outgoing links? - case get({out, Handle}) of - undefined -> - ok; - Link -> - erase({out, Handle}), - ok = rabbit_amqp1_0_outgoing_link:detach(Detach, BCh, Link) - end, - erase({in, Handle}), - {reply, #'v1_0.detach'{handle = Handle}, State}; - -handle_control(#'v1_0.end'{}, _State = #state{ writer_pid = Sock }) -> - ok = rabbit_amqp1_0_writer:send_command(Sock, #'v1_0.end'{}), - stop; - -%% Flow control. These frames come with two pieces of information: -%% the session window, and optionally, credit for a particular link. -%% We'll deal with each of them separately. -handle_control(Flow = #'v1_0.flow'{}, - State = #state{backing_channel = BCh, - session = Session}) -> - State1 = state(rabbit_amqp1_0_session:flow(Flow, Session), State), - State2 = run_buffer(State1), - case Flow#'v1_0.flow'.handle of - undefined -> - {noreply, State2}; - Handle -> - case get({in, Handle}) of - undefined -> - case get({out, Handle}) of - undefined -> - rabbit_log:warning("Flow for unknown link handle ~tp", [Flow]), - protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD, - "Unattached handle: ~tp", [Handle]); - Out -> - {ok, Reply} = rabbit_amqp1_0_outgoing_link:flow( - Out, Flow, BCh), - reply(Reply, State2) - end; - _In -> - %% We're being told about available messages at - %% the sender. Yawn. - %% TODO at least check transfer-count? - {noreply, State2} - end - end; - -handle_control(Frame, _State) -> - protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, - "Unexpected frame ~tp", - [amqp10_framing:pprint(Frame)]). - -run_buffer(State = #state{ writer_pid = WriterPid, - session = Session, - backing_channel = BCh, - buffer = Buffer }) -> - {Session1, Buffer1} = - run_buffer1(WriterPid, BCh, Session, Buffer), - State#state{ buffer = Buffer1, session = Session1 }. 
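For orientation while reading the removal above: the #'v1_0.disposition'{} clause boils down to a small outcome-to-method mapping. The sketch below is illustrative only; outcome_to_amqp091/2 is a made-up helper name, the records come from rabbit_amqp1_0.hrl and rabbit_framing.hrl, and the fallback branch (which raised a protocol error for unrecognised outcomes) is omitted.

%% Illustrative condensation of the deleted disposition handling, not plugin code.
outcome_to_amqp091(#'v1_0.accepted'{}, DeliveryTag) ->
    #'basic.ack'{delivery_tag = DeliveryTag, multiple = false};
outcome_to_amqp091(#'v1_0.modified'{delivery_failed = true,
                                    undeliverable_here = UndelHere}, DeliveryTag) ->
    %% requeue unless the sender marked the message undeliverable on this link
    #'basic.reject'{delivery_tag = DeliveryTag, requeue = UndelHere =/= true};
outcome_to_amqp091(#'v1_0.modified'{}, DeliveryTag) ->
    %% delivery_failed unset: delivery_count cannot be incremented, so reject without requeue
    #'basic.reject'{delivery_tag = DeliveryTag, requeue = false};
outcome_to_amqp091(#'v1_0.rejected'{}, DeliveryTag) ->
    #'basic.reject'{delivery_tag = DeliveryTag, requeue = false};
outcome_to_amqp091(#'v1_0.released'{}, DeliveryTag) ->
    #'basic.reject'{delivery_tag = DeliveryTag, requeue = true}.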
- -run_buffer1(WriterPid, BCh, Session, Buffer) -> - case rabbit_amqp1_0_session:transfers_left(Session) of - {LocalSpace, RemoteSpace} when RemoteSpace > 0 andalso LocalSpace > 0 -> - Space = erlang:min(LocalSpace, RemoteSpace), - case queue:out(Buffer) of - {empty, Buffer} -> - {Session, Buffer}; - {{value, #pending{ delivery_tag = DeliveryTag, - frames = Frames, - link_handle = Handle } = Pending}, - BufferTail} -> - Link = get({out, Handle}), - case send_frames(WriterPid, Frames, Space) of - {all, SpaceLeft} -> - NewLink = - rabbit_amqp1_0_outgoing_link:transferred( - DeliveryTag, BCh, Link), - put({out, Handle}, NewLink), - Session1 = rabbit_amqp1_0_session:record_transfers( - Space - SpaceLeft, Session), - run_buffer1(WriterPid, BCh, Session1, BufferTail); - {some, Rest} -> - Session1 = rabbit_amqp1_0_session:record_transfers( - Space, Session), - Buffer1 = queue:in_r(Pending#pending{ frames = Rest }, - BufferTail), - run_buffer1(WriterPid, BCh, Session1, Buffer1) - end - end; - {_, RemoteSpace} when RemoteSpace > 0 -> - case rabbit_amqp1_0_session:bump_outgoing_window(Session) of - {Flow = #'v1_0.flow'{}, Session1} -> - rabbit_amqp1_0_writer:send_command( - WriterPid, - rabbit_amqp1_0_session:flow_fields(Flow, Session1)), - run_buffer1(WriterPid, BCh, Session1, Buffer) - end; - _ -> - {Session, Buffer} - end. - -send_frames(_WriterPid, [], Left) -> - {all, Left}; -send_frames(_WriterPid, Rest, 0) -> - {some, Rest}; -send_frames(WriterPid, [[T, C] | Rest], Left) -> - rabbit_amqp1_0_writer:send_command(WriterPid, T, C), - send_frames(WriterPid, Rest, Left - 1). - -%% ------ - -reply([], State) -> - {noreply, State}; -reply(Reply, State) -> - {reply, rabbit_amqp1_0_session:flow_fields(Reply, session(State)), State}. - -session(#state{session = Session}) -> Session. -state(Session, State) -> State#state{session = Session}. - -with_disposable_channel(Conn, Fun) -> - {ok, Ch} = amqp_connection:open_channel(Conn), - try - Fun(Ch) - after - catch amqp_channel:close(Ch) - end. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup.erl deleted file mode 100644 index c5fc1f035fcd..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup.erl +++ /dev/null @@ -1,104 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_session_sup). - --behaviour(supervisor). - --export([start_link/1]). - --export([init/1]). - --include_lib("amqp_client/include/amqp_client.hrl"). - -%%---------------------------------------------------------------------------- - --export_type([start_link_args/0]). - --type start_link_args() :: - {'amqp10_framing', rabbit_net:socket(), - rabbit_channel:channel_number(), non_neg_integer() | 'unlimited', pid(), - #user{}, rabbit_types:vhost(), pid(), - {'rabbit_proxy_socket', rabbit_net:socket(), term()} | 'undefined'}. - --spec start_link(start_link_args()) -> {'ok', pid(), pid()} | {'error', term()}. 
- -%%---------------------------------------------------------------------------- -start_link({amqp10_framing, Sock, Channel, FrameMax, ReaderPid, - User, VHost, Collector, ProxySocket}) -> - {ok, SupPid} = supervisor:start_link(?MODULE, []), - {ok, WriterPid} = - supervisor:start_child( - SupPid, - #{ - id => writer, - start => - {rabbit_amqp1_0_writer, start_link, [ - Sock, - Channel, - FrameMax, - amqp10_framing, - ReaderPid - ]}, - restart => transient, - significant => true, - shutdown => ?WORKER_WAIT, - type => worker, - modules => [rabbit_amqp1_0_writer] - } - ), - SocketForAdapterInfo = case ProxySocket of - undefined -> Sock; - _ -> ProxySocket - end, - case supervisor:start_child( - SupPid, - #{ - id => channel, - start => - {rabbit_amqp1_0_session_process, start_link, [ - {Channel, ReaderPid, WriterPid, User, VHost, FrameMax, - adapter_info(User, SocketForAdapterInfo), Collector} - ]}, - restart => transient, - significant => true, - shutdown => ?WORKER_WAIT, - type => worker, - modules => [rabbit_amqp1_0_session_process] - } - ) of - {ok, ChannelPid} -> - {ok, SupPid, ChannelPid}; - {error, Reason} -> - {error, Reason} - end. - -%%---------------------------------------------------------------------------- - -init([]) -> - SupFlags = #{strategy => one_for_all, - intensity => 0, - period => 1, - auto_shutdown => any_significant}, - {ok, {SupFlags, []}}. - - -%% For each AMQP 1.0 session opened, an internal direct AMQP 0-9-1 connection is opened too. -%% This direct connection will authenticate the user again. Again because at this point -%% the SASL handshake has already taken place and this user has already been authenticated. -%% However, we do not have the credentials the user presented. For that reason, the -%% #amqp_adapter_info.additional_info carries an extra property called authz_backends -%% which is initialized from the #user.authz_backends attribute. In other words, we -%% propagate the outcome from the first authentication attempt to the subsequent attempts. - -%% See rabbit_direct.erl to see how `authz_bakends` is propagated from -% amqp_adapter_info.additional_info to the rabbit_access_control module - -adapter_info(User, Sock) -> - AdapterInfo = amqp_connection:socket_adapter_info(Sock, {'AMQP', "1.0"}), - AdapterInfo#amqp_adapter_info{additional_info = - AdapterInfo#amqp_adapter_info.additional_info ++ [{authz_backends, User#user.authz_backends}]}. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup_sup.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup_sup.erl deleted file mode 100644 index 05cece5b4fef..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup_sup.erl +++ /dev/null @@ -1,38 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_session_sup_sup). - --behaviour(supervisor). - --export([start_link/0, start_session/2]). - --export([init/1]). - -%% It would be much nicer if rabbit_channel_sup_sup was parameterised -%% on the module. - -%%---------------------------------------------------------------------------- - --spec start_link() -> rabbit_types:ok_pid_or_error(). --spec start_session(pid(), rabbit_amqp1_0_session_sup:start_link_args()) -> - supervisor:startchild_ret(). 
- -%%---------------------------------------------------------------------------- - -start_link() -> - supervisor:start_link(?MODULE, []). - -start_session(Pid, Args) -> - supervisor:start_child(Pid, [Args]). - -%%---------------------------------------------------------------------------- - -init([]) -> - {ok, {{simple_one_for_one, 0, 1}, - [{session_sup, {rabbit_amqp1_0_session_sup, start_link, []}, - temporary, infinity, supervisor, [rabbit_amqp1_0_session_sup]}]}}. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_util.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_util.erl deleted file mode 100644 index 90a8230bd247..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_util.erl +++ /dev/null @@ -1,73 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_util). - --include("rabbit_amqp1_0.hrl"). - --export([protocol_error/3]). --export([serial_add/2, serial_compare/2, serial_diff/2]). - --export_type([serial_number/0]). --type serial_number() :: non_neg_integer(). --type serial_compare_result() :: 'equal' | 'less' | 'greater'. - --spec serial_add(serial_number(), non_neg_integer()) -> - serial_number(). --spec serial_compare(serial_number(), serial_number()) -> - serial_compare_result(). --spec serial_diff(serial_number(), serial_number()) -> - integer(). - --spec protocol_error(term(), io:format(), [term()]) -> no_return(). -protocol_error(Condition, Msg, Args) -> - exit(#'v1_0.error'{ - condition = Condition, - description = {utf8, list_to_binary( - lists:flatten(io_lib:format(Msg, Args)))} - }). - -%% Serial arithmetic for unsigned ints. -%% http://www.faqs.org/rfcs/rfc1982.html -%% SERIAL_BITS = 32 - -%% 2 ^ SERIAL_BITS --define(SERIAL_MAX, 16#100000000). -%% 2 ^ (SERIAL_BITS - 1) - 1 --define(SERIAL_MAX_ADDEND, 16#7fffffff). - -serial_add(S, N) when N =< ?SERIAL_MAX_ADDEND -> - (S + N) rem ?SERIAL_MAX; -serial_add(S, N) -> - exit({out_of_bound_serial_addition, S, N}). - -serial_compare(A, B) -> - if A =:= B -> - equal; - (A < B andalso B - A < ?SERIAL_MAX_ADDEND) orelse - (A > B andalso A - B > ?SERIAL_MAX_ADDEND) -> - less; - (A < B andalso B - A > ?SERIAL_MAX_ADDEND) orelse - (A > B andalso B - A < ?SERIAL_MAX_ADDEND) -> - greater; - true -> exit({indeterminate_serial_comparison, A, B}) - end. - --define(SERIAL_DIFF_BOUND, 16#80000000). - -serial_diff(A, B) -> - Diff = A - B, - if Diff > (?SERIAL_DIFF_BOUND) -> - %% B is actually greater than A - - (?SERIAL_MAX - Diff); - Diff < - (?SERIAL_DIFF_BOUND) -> - ?SERIAL_MAX + Diff; - Diff < ?SERIAL_DIFF_BOUND andalso Diff > -?SERIAL_DIFF_BOUND -> - Diff; - true -> - exit({indeterminate_serial_diff, A, B}) - end. diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_writer.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_writer.erl deleted file mode 100644 index df9b9d8f70a0..000000000000 --- a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_writer.erl +++ /dev/null @@ -1,280 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_amqp1_0_writer). --include_lib("rabbit_common/include/rabbit.hrl"). 
--include_lib("rabbit_common/include/rabbit_framing.hrl"). --include("rabbit_amqp1_0.hrl"). - --export([start/5, start_link/5, start/6, start_link/6]). --export([send_command/2, send_command/3, - send_command_sync/2, send_command_sync/3, - send_command_and_notify/4, send_command_and_notify/5]). --export([internal_send_command/4]). - -%% internal --export([mainloop/1, mainloop1/1]). - --record(wstate, {sock, channel, frame_max, protocol, reader, - stats_timer, pending}). - --define(HIBERNATE_AFTER, 6_000). --define(AMQP_SASL_FRAME_TYPE, 1). - -%%--------------------------------------------------------------------------- - --spec start - (rabbit_net:socket(), rabbit_channel:channel_number(), - non_neg_integer(), rabbit_types:protocol(), pid()) - -> rabbit_types:ok(pid()). --spec start_link - (rabbit_net:socket(), rabbit_channel:channel_number(), - non_neg_integer(), rabbit_types:protocol(), pid()) - -> rabbit_types:ok(pid()). --spec start - (rabbit_net:socket(), rabbit_channel:channel_number(), - non_neg_integer(), rabbit_types:protocol(), pid(), boolean()) - -> rabbit_types:ok(pid()). --spec start_link - (rabbit_net:socket(), rabbit_channel:channel_number(), - non_neg_integer(), rabbit_types:protocol(), pid(), boolean()) - -> rabbit_types:ok(pid()). --spec send_command - (pid(), rabbit_framing:amqp_method_record()) -> 'ok'. --spec send_command - (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) - -> 'ok'. --spec send_command_sync - (pid(), rabbit_framing:amqp_method_record()) -> 'ok'. --spec send_command_sync - (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) - -> 'ok'. --spec send_command_and_notify - (pid(), pid(), pid(), rabbit_framing:amqp_method_record()) - -> 'ok'. --spec send_command_and_notify - (pid(), pid(), pid(), rabbit_framing:amqp_method_record(), - rabbit_types:content()) - -> 'ok'. --spec internal_send_command - (rabbit_net:socket(), rabbit_channel:channel_number(), - rabbit_framing:amqp_method_record(), 'amqp10_framing' | 'rabbit_amqp1_0_sasl') - -> 'ok'. - -%%--------------------------------------------------------------------------- - -start(Sock, Channel, FrameMax, Protocol, ReaderPid) -> - start(Sock, Channel, FrameMax, Protocol, ReaderPid, false). - -start_link(Sock, Channel, FrameMax, Protocol, ReaderPid) -> - start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, false). - -start(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) -> - State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, - ReaderWantsStats), - {ok, proc_lib:spawn(?MODULE, mainloop, [State])}. - -start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) -> - State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, - ReaderWantsStats), - {ok, proc_lib:spawn_link(?MODULE, mainloop, [State])}. - -initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) -> - (case ReaderWantsStats of - true -> fun rabbit_event:init_stats_timer/2; - false -> fun rabbit_event:init_disabled_stats_timer/2 - end)(#wstate{sock = Sock, - channel = Channel, - frame_max = FrameMax, - protocol = Protocol, - reader = ReaderPid, - pending = []}, - #wstate.stats_timer). - -mainloop(State) -> - try - mainloop1(State) - catch - exit:Error -> #wstate{reader = ReaderPid, channel = Channel} = State, - ReaderPid ! {channel_exit, Channel, Error} - end, - done. 
- -mainloop1(State = #wstate{pending = []}) -> - receive - Message -> ?MODULE:mainloop1(handle_message(Message, State)) - after ?HIBERNATE_AFTER -> - erlang:hibernate(?MODULE, mainloop, [State]) - end; -mainloop1(State) -> - receive - Message -> ?MODULE:mainloop1(handle_message(Message, State)) - after 0 -> - ?MODULE:mainloop1(flush(State)) - end. - -handle_message({send_command, MethodRecord}, State) -> - internal_send_command_async(MethodRecord, State); -handle_message({send_command, MethodRecord, Content}, State) -> - internal_send_command_async(MethodRecord, Content, State); -handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) -> - State1 = flush(internal_send_command_async(MethodRecord, State)), - gen_server:reply(From, ok), - State1; -handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}}, - State) -> - State1 = flush(internal_send_command_async(MethodRecord, Content, State)), - gen_server:reply(From, ok), - State1; -handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) -> - State1 = internal_send_command_async(MethodRecord, State), - rabbit_amqqueue:notify_sent(QPid, ChPid), - State1; -handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content}, - State) -> - State1 = internal_send_command_async(MethodRecord, Content, State), - rabbit_amqqueue:notify_sent(QPid, ChPid), - State1; -handle_message({'DOWN', _MRef, process, QPid, _Reason}, State) -> - rabbit_amqqueue:notify_sent_queue_down(QPid), - State; -handle_message({inet_reply, _, ok}, State) -> - rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats); -handle_message({inet_reply, _, Status}, _State) -> - exit({writer, send_failed, Status}); -handle_message(emit_stats, State = #wstate{reader = ReaderPid}) -> - ReaderPid ! ensure_stats, - rabbit_event:reset_stats_timer(State, #wstate.stats_timer); -handle_message(Message, _State) -> - exit({writer, message_not_understood, Message}). - -%%--------------------------------------------------------------------------- - -send_command(W, MethodRecord) -> - W ! {send_command, MethodRecord}, - ok. - -send_command(W, MethodRecord, Content) -> - W ! {send_command, MethodRecord, Content}, - ok. - -send_command_sync(W, MethodRecord) -> - call(W, {send_command_sync, MethodRecord}). - -send_command_sync(W, MethodRecord, Content) -> - call(W, {send_command_sync, MethodRecord, Content}). - -send_command_and_notify(W, Q, ChPid, MethodRecord) -> - W ! {send_command_and_notify, Q, ChPid, MethodRecord}, - ok. - -send_command_and_notify(W, Q, ChPid, MethodRecord, Content) -> - W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content}, - ok. - -%%--------------------------------------------------------------------------- - -call(Pid, Msg) -> - {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity), - Res. - -%%--------------------------------------------------------------------------- - -%% Begin 1-0 - -assemble_frame(Channel, Performative, amqp10_framing) -> - ?DEBUG("Channel ~tp <-~n~tp", - [Channel, amqp10_framing:pprint(Performative)]), - PerfBin = amqp10_framing:encode_bin(Performative), - amqp10_binary_generator:build_frame(Channel, PerfBin); - -assemble_frame(Channel, Performative, rabbit_amqp1_0_sasl) -> - ?DEBUG("Channel ~tp <-~n~tp", - [Channel, amqp10_framing:pprint(Performative)]), - PerfBin = amqp10_framing:encode_bin(Performative), - amqp10_binary_generator:build_frame(Channel, - ?AMQP_SASL_FRAME_TYPE, PerfBin). 
- -%% Note: a transfer record can be followed by a number of other -%% records to make a complete frame but unlike 0-9-1 we may have many -%% content records. However, that's already been handled for us, we're -%% just sending a chunk, so from this perspective it's just a binary. - -assemble_frames(Channel, Performative, Content, _FrameMax, - amqp10_framing) -> - ?DEBUG("Channel ~tp <-~n~tp~n followed by ~tp bytes of content", - [Channel, amqp10_framing:pprint(Performative), - iolist_size(Content)]), - PerfBin = amqp10_framing:encode_bin(Performative), - amqp10_binary_generator:build_frame(Channel, [PerfBin, Content]). - -%% End 1-0 - -tcp_send(Sock, Data) -> - rabbit_misc:throw_on_error(inet_error, - fun () -> rabbit_net:send(Sock, Data) end). - -internal_send_command(Sock, Channel, MethodRecord, Protocol) -> - ok = tcp_send(Sock, assemble_frame(Channel, MethodRecord, Protocol)). - -internal_send_command_async(MethodRecord, - State = #wstate{channel = Channel, - protocol = Protocol, - pending = Pending}) -> - Frame = assemble_frame(Channel, MethodRecord, Protocol), - maybe_flush(State#wstate{pending = [Frame | Pending]}). - -internal_send_command_async(MethodRecord, Content, - State = #wstate{channel = Channel, - frame_max = FrameMax, - protocol = Protocol, - pending = Pending}) -> - Frames = assemble_frames(Channel, MethodRecord, Content, FrameMax, - Protocol), - maybe_flush(State#wstate{pending = [Frames | Pending]}). - -%% This magic number is the tcp-over-ethernet MSS (1460) minus the -%% minimum size of a AMQP basic.deliver method frame (24) plus basic -%% content header (22). The idea is that we want to flush just before -%% exceeding the MSS. --define(FLUSH_THRESHOLD, 1414). - -maybe_flush(State = #wstate{pending = Pending}) -> - case iolist_size(Pending) >= ?FLUSH_THRESHOLD of - true -> flush(State); - false -> State - end. - -flush(State = #wstate{pending = []}) -> - State; -flush(State = #wstate{sock = Sock, pending = Pending}) -> - ok = port_cmd(Sock, lists:reverse(Pending)), - State#wstate{pending = []}. - -%% gen_tcp:send/2 does a selective receive of {inet_reply, Sock, -%% Status} to obtain the result. That is bad when it is called from -%% the writer since it requires scanning of the writers possibly quite -%% large message queue. -%% -%% So instead we lift the code from prim_inet:send/2, which is what -%% gen_tcp:send/2 calls, do the first half here and then just process -%% the result code in handle_message/2 as and when it arrives. -%% -%% This means we may end up happily sending data down a closed/broken -%% socket, but that's ok since a) data in the buffers will be lost in -%% any case (so qualitatively we are no worse off than if we used -%% gen_tcp:send/2), and b) we do detect the changed socket status -%% eventually, i.e. when we get round to handling the result code. -%% -%% Also note that the port has bounded buffers and port_command blocks -%% when these are full. So the fact that we process the result -%% asynchronously does not impact flow control. -port_cmd(Sock, Data) -> - true = try rabbit_net:port_command(Sock, Data) - catch error:Error -> exit({writer, send_failed, Error}) - end, - ok. diff --git a/deps/rabbitmq_amqp1_0/src/rabbitmq_amqp1_0_noop.erl b/deps/rabbitmq_amqp1_0/src/rabbitmq_amqp1_0_noop.erl new file mode 100644 index 000000000000..3007ead09cb9 --- /dev/null +++ b/deps/rabbitmq_amqp1_0/src/rabbitmq_amqp1_0_noop.erl @@ -0,0 +1 @@ +-module(rabbitmq_amqp1_0_noop). 
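The serial-number helpers deleted above in rabbit_amqp1_0_util implement RFC 1982 arithmetic over 32-bit ids (SERIAL_MAX = 2^32, SERIAL_MAX_ADDEND = 2^31 - 1). A few worked values, assuming the module exactly as it read before this removal (illustration only, not part of the patch):

%% addition wraps modulo 2^32
1 = rabbit_amqp1_0_util:serial_add(16#FFFFFFFF, 2),
%% 16#FFFFFFFE compares as less than 1: 1 is the later serial despite being numerically smaller
less = rabbit_amqp1_0_util:serial_compare(16#FFFFFFFE, 1),
%% the distance across the wrap is 3
3 = rabbit_amqp1_0_util:serial_diff(1, 16#FFFFFFFE).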
diff --git a/deps/rabbitmq_amqp1_0/test/amqp10_client_SUITE.erl b/deps/rabbitmq_amqp1_0/test/amqp10_client_SUITE.erl deleted file mode 100644 index 72d5c5f2e8b7..000000000000 --- a/deps/rabbitmq_amqp1_0/test/amqp10_client_SUITE.erl +++ /dev/null @@ -1,562 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(amqp10_client_SUITE). - --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - --compile(nowarn_export_all). --compile(export_all). - -all() -> - [ - {group, tests}, - {group, metrics} - ]. - -groups() -> - [ - {tests, [], [ - reliable_send_receive_with_outcomes, - publishing_to_non_existing_queue_should_settle_with_released, - open_link_to_non_existing_destination_should_end_session, - roundtrip_classic_queue_with_drain, - roundtrip_quorum_queue_with_drain, - roundtrip_stream_queue_with_drain, - amqp_stream_amqpl, - message_headers_conversion - ]}, - {metrics, [], [ - auth_attempt_metrics - ]} - ]. - -%% ------------------------------------------------------------------- -%% Testsuite setup/teardown. -%% ------------------------------------------------------------------- - -init_per_suite(Config) -> - application:ensure_all_started(amqp10_client), - rabbit_ct_helpers:log_environment(), - Config. - -end_per_suite(Config) -> - Config. - -init_per_group(Group, Config) -> - Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), - Config1 = rabbit_ct_helpers:set_config( - Config, [ - {rmq_nodename_suffix, Suffix}, - {amqp10_client_library, Group} - ]), - rabbit_ct_helpers:run_setup_steps( - Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). - -end_per_group(_, Config) -> - rabbit_ct_helpers:run_teardown_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()). - -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase). - -end_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_finished(Config, Testcase). - -%%% TESTS -%%% - -reliable_send_receive_with_outcomes(Config) -> - Outcomes = [accepted, - modified, - {modified, true, false, #{<<"fruit">> => <<"banana">>}}, - {modified, false, true, #{}}, - rejected, - released], - [begin - reliable_send_receive(Config, Outcome) - end || Outcome <- Outcomes], - ok. 
- -reliable_send_receive(Config, Outcome) -> - Container = atom_to_binary(?FUNCTION_NAME, utf8), - OutcomeBin = case is_atom(Outcome) of - true -> - atom_to_binary(Outcome, utf8); - false -> - O1 = atom_to_binary(element(1, Outcome), utf8), - O2 = atom_to_binary(element(2, Outcome), utf8), - <> - end, - - ct:pal("~s testing ~s", [?FUNCTION_NAME, OutcomeBin]), - QName = <>, - %% declare a quorum queue - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), - amqp_channel:call(Ch, #'queue.declare'{queue = QName, - durable = true, - arguments = [{<<"x-queue-type">>, - longstr, <<"quorum">>}]}), - rabbit_ct_client_helpers:close_channel(Ch), - %% reliable send and consume - Host = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - Address = <<"/amq/queue/", QName/binary>>, - - OpnConf = #{address => Host, - port => Port, - container_id => Container, - sasl => {plain, <<"guest">>, <<"guest">>}}, - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, Session} = amqp10_client:begin_session(Connection), - SenderLinkName = <<"test-sender">>, - {ok, Sender} = amqp10_client:attach_sender_link(Session, - SenderLinkName, - Address), - ok = wait_for_credit(Sender), - DTag1 = <<"dtag-1">>, - %% create an unsettled message, - %% link will be in "mixed" mode by default - Msg1 = amqp10_msg:new(DTag1, <<"body-1">>, false), - ok = amqp10_client:send_msg(Sender, Msg1), - ok = wait_for_settlement(DTag1), - - ok = amqp10_client:detach_link(Sender), - ok = amqp10_client:close_connection(Connection), - flush("post sender close"), - - {ok, Connection2} = amqp10_client:open_connection(OpnConf), - {ok, Session2} = amqp10_client:begin_session(Connection2), - ReceiverLinkName = <<"test-receiver">>, - {ok, Receiver} = amqp10_client:attach_receiver_link(Session2, - ReceiverLinkName, - Address, - unsettled), - {ok, Msg} = amqp10_client:get_msg(Receiver), - - ct:pal("got ~p", [amqp10_msg:body(Msg)]), - - ok = amqp10_client:settle_msg(Receiver, Msg, Outcome), - - flush("post accept"), - - ok = amqp10_client:detach_link(Receiver), - ok = amqp10_client:close_connection(Connection2), - - ok. - -publishing_to_non_existing_queue_should_settle_with_released(Config) -> - Container = atom_to_binary(?FUNCTION_NAME, utf8), - Suffix = <<"foo">>, - %% does not exist - QName = <>, - Host = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - Address = <<"/exchange/amq.direct/", QName/binary>>, - - OpnConf = #{address => Host, - port => Port, - container_id => Container, - sasl => {plain, <<"guest">>, <<"guest">>}}, - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, Session} = amqp10_client:begin_session(Connection), - SenderLinkName = <<"test-sender">>, - {ok, Sender} = amqp10_client:attach_sender_link(Session, - SenderLinkName, - Address), - ok = wait_for_credit(Sender), - DTag1 = <<"dtag-1">>, - %% create an unsettled message, - %% link will be in "mixed" mode by default - Msg1 = amqp10_msg:new(DTag1, <<"body-1">>, false), - ok = amqp10_client:send_msg(Sender, Msg1), - ok = wait_for_settlement(DTag1, released), - - ok = amqp10_client:detach_link(Sender), - ok = amqp10_client:close_connection(Connection), - flush("post sender close"), - ok. 
- -open_link_to_non_existing_destination_should_end_session(Config) -> - Container = atom_to_list(?FUNCTION_NAME), - Name = Container ++ "foo", - Addresses = [ - "/exchange/" ++ Name ++ "/bar", - "/amq/queue/" ++ Name - ], - Host = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - OpnConf = #{address => Host, - port => Port, - container_id => list_to_binary(Container), - sasl => {plain, <<"guest">>, <<"guest">>}}, - - [begin - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, Session} = amqp10_client:begin_session(Connection), - SenderLinkName = <<"test-sender">>, - ct:pal("Address ~p", [Address]), - {ok, _} = amqp10_client:attach_sender_link(Session, - SenderLinkName, - list_to_binary(Address)), - - wait_for_session_end(Session), - ok = amqp10_client:close_connection(Connection), - flush("post sender close") - - end || Address <- Addresses], - ok. - -roundtrip_classic_queue_with_drain(Config) -> - QName = atom_to_binary(?FUNCTION_NAME, utf8), - roundtrip_queue_with_drain(Config, <<"classic">>, QName). - -roundtrip_quorum_queue_with_drain(Config) -> - QName = atom_to_binary(?FUNCTION_NAME, utf8), - roundtrip_queue_with_drain(Config, <<"quorum">>, QName). - -roundtrip_stream_queue_with_drain(Config) -> - QName = atom_to_binary(?FUNCTION_NAME, utf8), - roundtrip_queue_with_drain(Config, <<"stream">>, QName). - -roundtrip_queue_with_drain(Config, QueueType, QName) when is_binary(QueueType) -> - Host = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - Address = <<"/amq/queue/", QName/binary>>, - %% declare a queue - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), - Args = [{<<"x-queue-type">>, longstr, QueueType}], - amqp_channel:call(Ch, #'queue.declare'{queue = QName, - durable = true, - arguments = Args}), - % create a configuration map - OpnConf = #{address => Host, - port => Port, - container_id => atom_to_binary(?FUNCTION_NAME, utf8), - sasl => {plain, <<"guest">>, <<"guest">>}}, - - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, Session} = amqp10_client:begin_session(Connection), - SenderLinkName = <<"test-sender">>, - {ok, Sender} = amqp10_client:attach_sender_link(Session, - SenderLinkName, - Address), - - wait_for_credit(Sender), - - % create a new message using a delivery-tag, body and indicate - % it's settlement status (true meaning no disposition confirmation - % will be sent by the receiver). 
- OutMsg = amqp10_msg:new(<<"my-tag">>, <<"my-body">>, true), - ok = amqp10_client:send_msg(Sender, OutMsg), - - flush("pre-receive"), - % create a receiver link - - TerminusDurability = none, - Filter = case QueueType of - <<"stream">> -> - #{<<"rabbitmq:stream-offset-spec">> => <<"first">>}; - _ -> - #{} - end, - Properties = #{}, - {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"test-receiver">>, - Address, unsettled, - TerminusDurability, - Filter, Properties), - - % grant credit and drain - ok = amqp10_client:flow_link_credit(Receiver, 1, never, true), - - % wait for a delivery - receive - {amqp10_msg, Receiver, InMsg} -> - ok = amqp10_client:accept_msg(Receiver, InMsg), - wait_for_accepts(1), - ok - after 2000 -> - exit(delivery_timeout) - end, - OutMsg2 = amqp10_msg:new(<<"my-tag">>, <<"my-body2">>, true), - ok = amqp10_client:send_msg(Sender, OutMsg2), - - %% no delivery should be made at this point - receive - {amqp10_msg, _, _} -> - exit(unexpected_delivery) - after 500 -> - ok - end, - - flush("final"), - ok = amqp10_client:detach_link(Sender), - - ok = amqp10_client:close_connection(Connection), - ok. - -%% Send a message with a body containing a single AMQP 1.0 value section -%% to a stream and consume via AMQP 0.9.1. -amqp_stream_amqpl(Config) -> - Host = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), - ContainerId = QName = atom_to_binary(?FUNCTION_NAME), - - amqp_channel:call(Ch, #'queue.declare'{ - queue = QName, - durable = true, - arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]}), - - Address = <<"/amq/queue/", QName/binary>>, - OpnConf = #{address => Host, - port => Port, - container_id => ContainerId, - sasl => {plain, <<"guest">>, <<"guest">>}}, - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, Session} = amqp10_client:begin_session(Connection), - SenderLinkName = <<"test-sender">>, - {ok, Sender} = amqp10_client:attach_sender_link(Session, - SenderLinkName, - Address), - wait_for_credit(Sender), - OutMsg = amqp10_msg:new(<<"my-tag">>, {'v1_0.amqp_value', {binary, <<0, 255>>}}, true), - ok = amqp10_client:send_msg(Sender, OutMsg), - flush("final"), - ok = amqp10_client:detach_link(Sender), - ok = amqp10_client:close_connection(Connection), - - #'basic.qos_ok'{} = amqp_channel:call(Ch, #'basic.qos'{global = false, - prefetch_count = 1}), - CTag = <<"my-tag">>, - #'basic.consume_ok'{} = amqp_channel:subscribe( - Ch, - #'basic.consume'{ - queue = QName, - consumer_tag = CTag, - arguments = [{<<"x-stream-offset">>, longstr, <<"first">>}]}, - self()), - receive - {#'basic.deliver'{consumer_tag = CTag, - redelivered = false}, - #amqp_msg{props = #'P_basic'{type = <<"amqp-1.0">>}}} -> - ok - after 5000 -> - exit(basic_deliver_timeout) - end, - #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), - ok = rabbit_ct_client_helpers:close_channel(Ch). 
- -message_headers_conversion(Config) -> - Host = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - QName = atom_to_binary(?FUNCTION_NAME, utf8), - Address = <<"/amq/queue/", QName/binary>>, - %% declare a quorum queue - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), - amqp_channel:call(Ch, #'queue.declare'{queue = QName, - durable = true, - arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}), - - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,[rabbitmq_amqp1_0, convert_amqp091_headers_to_app_props, true]), - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,[rabbitmq_amqp1_0, convert_app_props_to_amqp091_headers, true]), - - OpnConf = #{address => Host, - port => Port, - container_id => atom_to_binary(?FUNCTION_NAME, utf8), - sasl => {plain, <<"guest">>, <<"guest">>}}, - - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, Session} = amqp10_client:begin_session(Connection), - - amqp10_to_amqp091_header_conversion(Session, Ch, QName, Address), - - amqp091_to_amqp10_header_conversion(Session, Ch, QName, Address), - delete_queue(Config, QName), - ok = amqp10_client:close_connection(Connection), - ok. - -amqp10_to_amqp091_header_conversion(Session,Ch, QName, Address) -> - {ok, Sender} = create_amqp10_sender(Session, Address), - - OutMsg = amqp10_msg:new(<<"my-tag">>, <<"my-body">>, true), - OutMsg2 = amqp10_msg:set_application_properties(#{ - "x-string" => "string-value", - "x-int" => 3, - "x-bool" => true - }, OutMsg), - ok = amqp10_client:send_msg(Sender, OutMsg2), - wait_for_accepts(1), - - {ok, Headers} = amqp091_get_msg_headers(Ch, QName), - - ?assertEqual({bool, true}, rabbit_misc:table_lookup(Headers, <<"x-bool">>)), - ?assertEqual({unsignedint, 3}, rabbit_misc:table_lookup(Headers, <<"x-int">>)), - ?assertEqual({longstr, <<"string-value">>}, rabbit_misc:table_lookup(Headers, <<"x-string">>)). - - -amqp091_to_amqp10_header_conversion(Session, Ch, QName, Address) -> - Amqp091Headers = [{<<"x-forwarding">>, array, - [{table, [{<<"uri">>, longstr, - <<"amqp://localhost/%2F/upstream">>}]}]}, - {<<"x-string">>, longstr, "my-string"}, - {<<"x-int">>, long, 92}, - {<<"x-bool">>, bool, true}], - - amqp_channel:cast(Ch, - #'basic.publish'{exchange = <<"">>, routing_key = QName}, - #amqp_msg{props = #'P_basic'{ - headers = Amqp091Headers}, - payload = <<"foobar">> } - ), - - {ok, [Msg]} = drain_queue(Session, Address, 1), - Amqp10Props = amqp10_msg:application_properties(Msg), - ?assertEqual(true, maps:get(<<"x-bool">>, Amqp10Props, undefined)), - ?assertEqual(92, maps:get(<<"x-int">>, Amqp10Props, undefined)), - ?assertEqual(<<"my-string">>, maps:get(<<"x-string">>, Amqp10Props, undefined)). 
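The two helpers above exercise the optional conversions enabled by convert_amqp091_headers_to_app_props and convert_app_props_to_amqp091_headers. Condensed from the assertions alone (illustrative pairs observed in the test, not an exhaustive type table):

%% AMQP 1.0 application-properties -> AMQP 0-9-1 headers
true             -> {bool, true}
3                -> {unsignedint, 3}
"string-value"   -> {longstr, <<"string-value">>}

%% AMQP 0-9-1 headers -> AMQP 1.0 application-properties
{bool, true}             -> true
{long, 92}               -> 92
{longstr, "my-string"}   -> <<"my-string">>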
- -auth_attempt_metrics(Config) -> - Host = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - % create a configuration map - OpnConf = #{address => Host, - port => Port, - container_id => atom_to_binary(?FUNCTION_NAME, utf8), - sasl => {plain, <<"guest">>, <<"guest">>}}, - open_and_close_connection(OpnConf), - [Attempt] = - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, get_auth_attempts, []), - ?assertEqual(false, proplists:is_defined(remote_address, Attempt)), - ?assertEqual(false, proplists:is_defined(username, Attempt)), - ?assertEqual(proplists:get_value(protocol, Attempt), <<"amqp10">>), - ?assertEqual(proplists:get_value(auth_attempts, Attempt), 1), - ?assertEqual(proplists:get_value(auth_attempts_failed, Attempt), 0), - ?assertEqual(proplists:get_value(auth_attempts_succeeded, Attempt), 1), - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, reset_auth_attempt_metrics, []), - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbit, track_auth_attempt_source, true]), - open_and_close_connection(OpnConf), - Attempts = - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, get_auth_attempts_by_source, []), - [Attempt1] = lists:filter(fun(Props) -> - proplists:is_defined(remote_address, Props) - end, Attempts), - ?assertEqual(proplists:get_value(remote_address, Attempt1), <<>>), - ?assertEqual(proplists:get_value(username, Attempt1), <<"guest">>), - ?assertEqual(proplists:get_value(protocol, Attempt), <<"amqp10">>), - ?assertEqual(proplists:get_value(auth_attempts, Attempt1), 1), - ?assertEqual(proplists:get_value(auth_attempts_failed, Attempt1), 0), - ?assertEqual(proplists:get_value(auth_attempts_succeeded, Attempt1), 1), - ok. - -%% internal -%% - -flush(Prefix) -> - receive - Msg -> - ct:pal("~ts flushed: ~w~n", [Prefix, Msg]), - flush(Prefix) - after 1 -> - ok - end. - -open_and_close_connection(OpnConf) -> - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, _} = amqp10_client:begin_session(Connection), - ok = amqp10_client:close_connection(Connection). - -% before we can send messages we have to wait for credit from the server -wait_for_credit(Sender) -> - receive - {amqp10_event, {link, Sender, credited}} -> - flush(?FUNCTION_NAME), - ok - after 5000 -> - flush("wait_for_credit timed out"), - ct:fail(credited_timeout) - end. - -wait_for_session_end(Session) -> - receive - {amqp10_event, {session, Session, {ended, _}}} -> - flush(?FUNCTION_NAME), - ok - after 5000 -> - flush("wait_for_session_end timed out"), - ct:fail(settled_timeout) - end. - -wait_for_settlement(Tag) -> - wait_for_settlement(Tag, accepted). - -wait_for_settlement(Tag, State) -> - receive - {amqp10_disposition, {State, Tag}} -> - flush(?FUNCTION_NAME), - ok - after 5000 -> - flush("wait_for_settlement timed out"), - ct:fail(settled_timeout) - end. - -wait_for_accepts(0) -> ok; -wait_for_accepts(N) -> - receive - {amqp10_disposition,{accepted,_}} -> - wait_for_accepts(N -1) - after 250 -> - ok - end. - -delete_queue(Config, QName) -> - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), - #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), - rabbit_ct_client_helpers:close_channel(Ch). - - -amqp091_get_msg_headers(Channel, QName) -> - {#'basic.get_ok'{}, #amqp_msg{props = #'P_basic'{ headers= Headers}}} - = amqp_channel:call(Channel, #'basic.get'{queue = QName, no_ack = true}), - {ok, Headers}. 
- -create_amqp10_sender(Session, Address) -> - SenderLinkName = <<"test-sender">>, - {ok, Sender} = amqp10_client:attach_sender_link(Session, - SenderLinkName, - Address), - wait_for_credit(Sender), - {ok, Sender}. - - drain_queue(Session, Address, N) -> - flush("Before drain_queue"), - {ok, Receiver} = amqp10_client:attach_receiver_link(Session, - <<"test-receiver">>, - Address, - settled, - configuration), - - ok = amqp10_client:flow_link_credit(Receiver, 1000, never, true), - Msgs = receive_message(Receiver, N, []), - flush("after drain"), - ok = amqp10_client:detach_link(Receiver), - {ok, Msgs}. - -receive_message(_Receiver, 0, Acc) -> lists:reverse(Acc); -receive_message(Receiver, N, Acc) -> - receive - {amqp10_msg, Receiver, Msg} -> - receive_message(Receiver, N-1, [Msg | Acc]) - after 5000 -> - exit(receive_timed_out) - end. diff --git a/deps/rabbitmq_amqp1_0/test/command_SUITE.erl b/deps/rabbitmq_amqp1_0/test/command_SUITE.erl deleted file mode 100644 index 2ef786f6c60c..000000000000 --- a/deps/rabbitmq_amqp1_0/test/command_SUITE.erl +++ /dev/null @@ -1,163 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. - - --module(command_SUITE). --compile([export_all]). - --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("rabbit_amqp1_0.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - --define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand'). - -all() -> - [ - {group, non_parallel_tests} - ]. - -groups() -> - [ - {non_parallel_tests, [], [ - merge_defaults, - validate, - when_no_connections, - when_one_connection - ]} - ]. - -init_per_suite(Config) -> - application:ensure_all_started(amqp10_client), - rabbit_ct_helpers:log_environment(), - Config. - -end_per_suite(Config) -> - Config. - -init_per_group(Group, Config) -> - Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), - Config1 = rabbit_ct_helpers:set_config( - Config, [ - {rmq_nodename_suffix, Suffix}, - {amqp10_client_library, Group} - ]), - rabbit_ct_helpers:run_setup_steps( - Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). - -end_per_group(_, Config) -> - rabbit_ct_helpers:run_teardown_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()). - -init_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase). - -end_per_testcase(Testcase, Config) -> - rabbit_ct_helpers:testcase_finished(Config, Testcase). - -merge_defaults(_Config) -> - {[<<"pid">>], #{verbose := false}} = - ?COMMAND:merge_defaults([], #{}), - - {[<<"other_key">>], #{verbose := true}} = - ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => true}), - - {[<<"other_key">>], #{verbose := false}} = - ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => false}). - -validate(_Config) -> - ok = ?COMMAND:validate([], #{}), - ok = ?COMMAND:validate([<<"recv_oct">>, <<"ssl">>], #{}), - ok = ?COMMAND:validate([atom_to_binary(K, utf8) || K <- ?INFO_ITEMS], #{}), - {validation_failure,{bad_info_key,[other]}} = - ?COMMAND:validate([<<"other">>], #{}). 
- -when_no_connections(_Config) -> - [A] = rabbit_ct_broker_helpers:get_node_configs(_Config, nodename), - Opts = #{node => A, timeout => 2000, verbose => true}, - [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)). - -when_one_connection(_Config) -> - [A] = rabbit_ct_broker_helpers:get_node_configs(_Config, nodename), - Opts = #{node => A, timeout => 2000, verbose => true}, - - [Connection,Sender] = open_amqp10_connection(_Config), - - [_] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)), - close_amqp10_connection(Connection, Sender). - -open_amqp10_connection(Config) -> - Host = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - QName = atom_to_binary(?FUNCTION_NAME, utf8), - Address = <<"/amq/queue/", QName/binary>>, - %% declare a quorum queue - Ch = rabbit_ct_client_helpers:open_channel(Config, 0), - amqp_channel:call(Ch, #'queue.declare'{queue = QName, - durable = true, - arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}), - - % create a configuration map - OpnConf = #{address => Host, - port => Port, - container_id => atom_to_binary(?FUNCTION_NAME, utf8), - sasl => {plain, <<"guest">>, <<"guest">>}}, - - % ct:pal("opening connectoin with ~tp", [OpnConf]), - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, Session} = amqp10_client:begin_session(Connection), - SenderLinkName = <<"test-sender">>, - {ok, Sender} = amqp10_client:attach_sender_link(Session, - SenderLinkName, - Address), - - % wait for credit to be received - receive - {amqp10_event, {link, Sender, credited}} -> ok - after 2000 -> - exit(credited_timeout) - end, - - OutMsg = amqp10_msg:new(<<"my-tag">>, <<"my-body">>, true), - ok = amqp10_client:send_msg(Sender, OutMsg), - - flush("pre-receive"), - {ok, Receiver} = amqp10_client:attach_receiver_link(Session, - <<"test-receiver">>, - Address), - - % grant credit and drain - ok = amqp10_client:flow_link_credit(Receiver, 1, never, true), - - % wait for a delivery - receive - {amqp10_msg, Receiver, _InMsg} -> ct:pal("Received amqp 1.0 message : ~w~n", [_InMsg]), ok - after 2000 -> - exit(delivery_timeout) - end, - - - - [Connection, Sender]. - -flush(Prefix) -> - receive - Msg -> - ct:pal("~ts flushed: ~w~n", [Prefix, Msg]), - flush(Prefix) - after 1 -> - ok - end. - -close_amqp10_connection(Connection, Sender) -> - flush("final"), - ct:pal("Closing connection ~w~n", [Connection]), - ok = amqp10_client:detach_link(Sender), - ok = amqp10_client:close_connection(Connection), - ok. diff --git a/deps/rabbitmq_amqp1_0/test/config_schema_SUITE_data/rabbitmq_amqp1_0.snippets b/deps/rabbitmq_amqp1_0/test/config_schema_SUITE_data/rabbitmq_amqp1_0.snippets deleted file mode 100644 index 41a19193cad1..000000000000 --- a/deps/rabbitmq_amqp1_0/test/config_schema_SUITE_data/rabbitmq_amqp1_0.snippets +++ /dev/null @@ -1,17 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. -%% - -[ - {rabbitmq_amqp1_0_config, - "amqp1_0.convert_amqp091_headers_to_app_props = true - amqp1_0.convert_app_props_to_amqp091_headers = true", - [{rabbitmq_amqp1_0,[ - {convert_amqp091_headers_to_app_props, true}, - {convert_app_props_to_amqp091_headers, true} - ]}], - [rabbitmq_amqp1_0]} -]. 
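command_SUITE above drives the Elixir command module directly; operators would normally reach the same code through rabbitmqctl. Going by the module name ListAmqp10ConnectionsCommand and the defaults asserted in merge_defaults, usage looked roughly like the following (the exact CLI verb is not shown in this diff, so treat it as an assumption):

    # lists the pid column by default; extra info items can be requested explicitly
    rabbitmqctl list_amqp10_connections
    rabbitmqctl list_amqp10_connections pid recv_oct ssl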
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/MavenWrapperDownloader.java b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/MavenWrapperDownloader.java deleted file mode 100755 index 2e394d5b347b..000000000000 --- a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/MavenWrapperDownloader.java +++ /dev/null @@ -1,110 +0,0 @@ -/* -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -*/ - -import java.net.*; -import java.io.*; -import java.nio.channels.*; -import java.util.Properties; - -public class MavenWrapperDownloader { - - /** - * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. - */ - private static final String DEFAULT_DOWNLOAD_URL = - "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"; - - /** - * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to - * use instead of the default one. - */ - private static final String MAVEN_WRAPPER_PROPERTIES_PATH = - ".mvn/wrapper/maven-wrapper.properties"; - - /** - * Path where the maven-wrapper.jar will be saved to. - */ - private static final String MAVEN_WRAPPER_JAR_PATH = - ".mvn/wrapper/maven-wrapper.jar"; - - /** - * Name of the property which should be used to override the default download url for the wrapper. - */ - private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; - - public static void main(String args[]) { - System.out.println("- Downloader started"); - File baseDirectory = new File(args[0]); - System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); - - // If the maven-wrapper.properties exists, read it and check if it contains a custom - // wrapperUrl parameter. - File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); - String url = DEFAULT_DOWNLOAD_URL; - if(mavenWrapperPropertyFile.exists()) { - FileInputStream mavenWrapperPropertyFileInputStream = null; - try { - mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); - Properties mavenWrapperProperties = new Properties(); - mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); - url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); - } catch (IOException e) { - System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); - } finally { - try { - if(mavenWrapperPropertyFileInputStream != null) { - mavenWrapperPropertyFileInputStream.close(); - } - } catch (IOException e) { - // Ignore ... 
- } - } - } - System.out.println("- Downloading from: : " + url); - - File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); - if(!outputFile.getParentFile().exists()) { - if(!outputFile.getParentFile().mkdirs()) { - System.out.println( - "- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath() + "'"); - } - } - System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); - try { - downloadFileFromURL(url, outputFile); - System.out.println("Done"); - System.exit(0); - } catch (Throwable e) { - System.out.println("- Error downloading"); - e.printStackTrace(); - System.exit(1); - } - } - - private static void downloadFileFromURL(String urlString, File destination) throws Exception { - URL website = new URL(urlString); - ReadableByteChannel rbc; - rbc = Channels.newChannel(website.openStream()); - FileOutputStream fos = new FileOutputStream(destination); - fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); - fos.close(); - rbc.close(); - } - -} diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.jar deleted file mode 100755 index 01e67997377a..000000000000 Binary files a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.jar and /dev/null differ diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties deleted file mode 100755 index 00d32aab1d44..000000000000 --- a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties +++ /dev/null @@ -1 +0,0 @@ -distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip \ No newline at end of file diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw deleted file mode 100755 index 8b9da3b8b600..000000000000 --- a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw +++ /dev/null @@ -1,286 +0,0 @@ -#!/bin/sh -# ---------------------------------------------------------------------------- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# ---------------------------------------------------------------------------- - -# ---------------------------------------------------------------------------- -# Maven2 Start Up Batch script -# -# Required ENV vars: -# ------------------ -# JAVA_HOME - location of a JDK home dir -# -# Optional ENV vars -# ----------------- -# M2_HOME - location of maven2's installed home dir -# MAVEN_OPTS - parameters passed to the Java VM when running Maven -# e.g. to debug Maven itself, use -# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -# MAVEN_SKIP_RC - flag to disable loading of mavenrc files -# ---------------------------------------------------------------------------- - -if [ -z "$MAVEN_SKIP_RC" ] ; then - - if [ -f /etc/mavenrc ] ; then - . /etc/mavenrc - fi - - if [ -f "$HOME/.mavenrc" ] ; then - . "$HOME/.mavenrc" - fi - -fi - -# OS specific support. $var _must_ be set to either true or false. -cygwin=false; -darwin=false; -mingw=false -case "`uname`" in - CYGWIN*) cygwin=true ;; - MINGW*) mingw=true;; - Darwin*) darwin=true - # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home - # See https://developer.apple.com/library/mac/qa/qa1170/_index.html - if [ -z "$JAVA_HOME" ]; then - if [ -x "/usr/libexec/java_home" ]; then - export JAVA_HOME="`/usr/libexec/java_home`" - else - export JAVA_HOME="/Library/Java/Home" - fi - fi - ;; -esac - -if [ -z "$JAVA_HOME" ] ; then - if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=`java-config --jre-home` - fi -fi - -if [ -z "$M2_HOME" ] ; then - ## resolve links - $0 may be a link to maven's home - PRG="$0" - - # need this for relative symlinks - while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG="`dirname "$PRG"`/$link" - fi - done - - saveddir=`pwd` - - M2_HOME=`dirname "$PRG"`/.. - - # make it fully qualified - M2_HOME=`cd "$M2_HOME" && pwd` - - cd "$saveddir" - # echo Using m2 at $M2_HOME -fi - -# For Cygwin, ensure paths are in UNIX format before anything is touched -if $cygwin ; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --unix "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --unix "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --unix "$CLASSPATH"` -fi - -# For Mingw, ensure paths are in UNIX format before anything is touched -if $mingw ; then - [ -n "$M2_HOME" ] && - M2_HOME="`(cd "$M2_HOME"; pwd)`" - [ -n "$JAVA_HOME" ] && - JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" - # TODO classpath? -fi - -if [ -z "$JAVA_HOME" ]; then - javaExecutable="`which javac`" - if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then - # readlink(1) is not available as standard on Solaris 10. - readLink=`which readlink` - if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then - if $darwin ; then - javaHome="`dirname \"$javaExecutable\"`" - javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" - else - javaExecutable="`readlink -f \"$javaExecutable\"`" - fi - javaHome="`dirname \"$javaExecutable\"`" - javaHome=`expr "$javaHome" : '\(.*\)/bin'` - JAVA_HOME="$javaHome" - export JAVA_HOME - fi - fi -fi - -if [ -z "$JAVACMD" ] ; then - if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - else - JAVACMD="`which java`" - fi -fi - -if [ ! 
-x "$JAVACMD" ] ; then - echo "Error: JAVA_HOME is not defined correctly." >&2 - echo " We cannot execute $JAVACMD" >&2 - exit 1 -fi - -if [ -z "$JAVA_HOME" ] ; then - echo "Warning: JAVA_HOME environment variable is not set." -fi - -CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher - -# traverses directory structure from process work directory to filesystem root -# first directory with .mvn subdirectory is considered project base directory -find_maven_basedir() { - - if [ -z "$1" ] - then - echo "Path not specified to find_maven_basedir" - return 1 - fi - - basedir="$1" - wdir="$1" - while [ "$wdir" != '/' ] ; do - if [ -d "$wdir"/.mvn ] ; then - basedir=$wdir - break - fi - # workaround for JBEAP-8937 (on Solaris 10/Sparc) - if [ -d "${wdir}" ]; then - wdir=`cd "$wdir/.."; pwd` - fi - # end of workaround - done - echo "${basedir}" -} - -# concatenates all lines of a file -concat_lines() { - if [ -f "$1" ]; then - echo "$(tr -s '\n' ' ' < "$1")" - fi -} - -BASE_DIR=`find_maven_basedir "$(pwd)"` -if [ -z "$BASE_DIR" ]; then - exit 1; -fi - -########################################################################################## -# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -# This allows using the maven wrapper in projects that prohibit checking in binary data. -########################################################################################## -if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found .mvn/wrapper/maven-wrapper.jar" - fi -else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." - fi - jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" - while IFS="=" read key value; do - case "$key" in (wrapperUrl) jarUrl="$value"; break ;; - esac - done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" - if [ "$MVNW_VERBOSE" = true ]; then - echo "Downloading from: $jarUrl" - fi - wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" - - if command -v wget > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found wget ... using wget" - fi - wget "$jarUrl" -O "$wrapperJarPath" - elif command -v curl > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found curl ... using curl" - fi - curl -o "$wrapperJarPath" "$jarUrl" - else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Falling back to using Java to download" - fi - javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" - if [ -e "$javaClass" ]; then - if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Compiling MavenWrapperDownloader.java ..." - fi - # Compiling the Java class - ("$JAVA_HOME/bin/javac" "$javaClass") - fi - if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - # Running the downloader - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Running MavenWrapperDownloader.java ..." 
- fi - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") - fi - fi - fi -fi -########################################################################################## -# End of extension -########################################################################################## - -export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} -if [ "$MVNW_VERBOSE" = true ]; then - echo $MAVEN_PROJECTBASEDIR -fi -MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" - -# For Cygwin, switch paths to Windows format before running java -if $cygwin; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --path --windows "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --windows "$CLASSPATH"` - [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` -fi - -WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -exec "$JAVACMD" \ - $MAVEN_OPTS \ - -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ - ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw.cmd b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw.cmd deleted file mode 100755 index a5284c79395d..000000000000 --- a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw.cmd +++ /dev/null @@ -1,161 +0,0 @@ -@REM ---------------------------------------------------------------------------- -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM https://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM ---------------------------------------------------------------------------- - -@REM ---------------------------------------------------------------------------- -@REM Maven2 Start Up Batch script -@REM -@REM Required ENV vars: -@REM JAVA_HOME - location of a JDK home dir -@REM -@REM Optional ENV vars -@REM M2_HOME - location of maven2's installed home dir -@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending -@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven -@REM e.g. 
to debug Maven itself, use -@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files -@REM ---------------------------------------------------------------------------- - -@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' -@echo off -@REM set title of command window -title %0 -@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' -@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% - -@REM set %HOME% to equivalent of $HOME -if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") - -@REM Execute a user defined script before this one -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre -@REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" -if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" -:skipRcPre - -@setlocal - -set ERROR_CODE=0 - -@REM To isolate internal variables from possible post scripts, we use another setlocal -@setlocal - -@REM ==== START VALIDATION ==== -if not "%JAVA_HOME%" == "" goto OkJHome - -echo. -echo Error: JAVA_HOME not found in your environment. >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -:OkJHome -if exist "%JAVA_HOME%\bin\java.exe" goto init - -echo. -echo Error: JAVA_HOME is set to an invalid directory. >&2 -echo JAVA_HOME = "%JAVA_HOME%" >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -@REM ==== END VALIDATION ==== - -:init - -@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". -@REM Fallback to current working directory if not found. - -set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% -IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir - -set EXEC_DIR=%CD% -set WDIR=%EXEC_DIR% -:findBaseDir -IF EXIST "%WDIR%"\.mvn goto baseDirFound -cd .. -IF "%WDIR%"=="%CD%" goto baseDirNotFound -set WDIR=%CD% -goto findBaseDir - -:baseDirFound -set MAVEN_PROJECTBASEDIR=%WDIR% -cd "%EXEC_DIR%" -goto endDetectBaseDir - -:baseDirNotFound -set MAVEN_PROJECTBASEDIR=%EXEC_DIR% -cd "%EXEC_DIR%" - -:endDetectBaseDir - -IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig - -@setlocal EnableExtensions EnableDelayedExpansion -for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a -@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% - -:endReadAdditionalConfig - -SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" -set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" -set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" -FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO ( - IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B -) - -@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -@REM This allows using the maven wrapper in projects that prohibit checking in binary data. -if exist %WRAPPER_JAR% ( - echo Found %WRAPPER_JAR% -) else ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... 
- echo Downloading from: %DOWNLOAD_URL% - powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')" - echo Finished downloading %WRAPPER_JAR% -) -@REM End of extension - -%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* -if ERRORLEVEL 1 goto error -goto end - -:error -set ERROR_CODE=1 - -:end -@endlocal & set ERROR_CODE=%ERROR_CODE% - -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost -@REM check for post script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" -if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" -:skipRcPost - -@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' -if "%MAVEN_BATCH_PAUSE%" == "on" pause - -if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% - -exit /B %ERROR_CODE% diff --git a/deps/rabbitmq_amqp1_0/test/unit_SUITE.erl b/deps/rabbitmq_amqp1_0/test/unit_SUITE.erl deleted file mode 100644 index f8af7e675497..000000000000 --- a/deps/rabbitmq_amqp1_0/test/unit_SUITE.erl +++ /dev/null @@ -1,39 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(unit_SUITE). - --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). - --include("rabbit_amqp1_0.hrl"). - --import(rabbit_amqp1_0_util, [serial_add/2, serial_diff/2, serial_compare/2]). - --compile(export_all). - -all() -> - [ - serial_arithmetic - ]. - --include_lib("eunit/include/eunit.hrl"). - -serial_arithmetic(_Config) -> - ?assertEqual(1, serial_add(0, 1)), - ?assertEqual(16#7fffffff, serial_add(0, 16#7fffffff)), - ?assertEqual(0, serial_add(16#ffffffff, 1)), - %% Cannot add more than 2 ^ 31 - 1 - ?assertExit({out_of_bound_serial_addition, _, _}, - serial_add(200, 16#80000000)), - ?assertEqual(1, serial_diff(1, 0)), - ?assertEqual(2, serial_diff(1, 16#ffffffff)), - ?assertEqual(-2, serial_diff(16#ffffffff, 1)), - ?assertExit({indeterminate_serial_diff, _, _}, - serial_diff(0, 16#80000000)), - ?assertExit({indeterminate_serial_diff, _, _}, - serial_diff(16#ffffffff, 16#7fffffff)). 
diff --git a/deps/rabbitmq_amqp_client/BUILD.bazel b/deps/rabbitmq_amqp_client/BUILD.bazel new file mode 100644 index 000000000000..796bd653e1f3 --- /dev/null +++ b/deps/rabbitmq_amqp_client/BUILD.bazel @@ -0,0 +1,91 @@ +load("@rules_erlang//:eunit2.bzl", "eunit") +load("@rules_erlang//:xref2.bzl", "xref") +load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") +load( + "//:rabbitmq.bzl", + "RABBITMQ_DIALYZER_OPTS", + "assert_suites", + "broker_for_integration_suites", + "rabbitmq_app", + "rabbitmq_integration_suite", + "rabbitmq_suite", +) +load( + ":app.bzl", + "all_beam_files", + "all_srcs", + "all_test_beam_files", + "test_suite_beam_files", +) + +APP_NAME = "rabbitmq_amqp_client" + +APP_DESCRIPTION = "AMQP 1.0 client for RabbitMQ" + +all_beam_files(name = "all_beam_files") + +all_test_beam_files(name = "all_test_beam_files") + +all_srcs(name = "all_srcs") + +test_suite_beam_files(name = "test_suite_beam_files") + +rabbitmq_app( + name = "erlang_app", + srcs = [":all_srcs"], + hdrs = [":public_hdrs"], + app_description = APP_DESCRIPTION, + app_name = APP_NAME, + beam_files = [":beam_files"], + license_files = [":license_files"], + priv = [":priv"], + deps = [ + "//deps/amqp10_client:erlang_app", + "//deps/amqp10_common:erlang_app", + ], +) + +xref( + name = "xref", + target = ":erlang_app", +) + +plt( + name = "deps_plt", + for_target = ":erlang_app", + plt = "//:base_plt", +) + +dialyze( + name = "dialyze", + dialyzer_opts = RABBITMQ_DIALYZER_OPTS, + plt = ":deps_plt", + target = ":erlang_app", +) + +broker_for_integration_suites( +) + +TEST_DEPS = [ + "//deps/amqp10_client:erlang_app", +] + +rabbitmq_integration_suite( + name = "management_SUITE", + size = "medium", + shard_count = 2, + deps = TEST_DEPS, +) + +assert_suites() + +alias( + name = "rabbitmq_amqp_client", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) + +eunit( + name = "eunit", + target = ":test_erlang_app", +) diff --git a/deps/rabbitmq_amqp_client/LICENSE b/deps/rabbitmq_amqp_client/LICENSE new file mode 100644 index 000000000000..1699234a3e89 --- /dev/null +++ b/deps/rabbitmq_amqp_client/LICENSE @@ -0,0 +1,4 @@ +This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ. + +If you have any questions regarding licensing, please contact us at +rabbitmq-core@groups.vmware.com. diff --git a/deps/rabbitmq_amqp_client/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_amqp_client/LICENSE-MPL-RabbitMQ new file mode 100644 index 000000000000..14e2f777f6c3 --- /dev/null +++ b/deps/rabbitmq_amqp_client/LICENSE-MPL-RabbitMQ @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/deps/rabbitmq_amqp_client/Makefile b/deps/rabbitmq_amqp_client/Makefile new file mode 100644 index 000000000000..0a50069065e3 --- /dev/null +++ b/deps/rabbitmq_amqp_client/Makefile @@ -0,0 +1,21 @@ +PROJECT = rabbitmq_amqp_client +PROJECT_DESCRIPTION = AMQP 1.0 client for RabbitMQ + +DEPS = amqp10_client +TEST_DEPS = rabbitmq_ct_helpers + +BUILD_DEPS = rabbit_common +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +TEST_DEPS = rabbit rabbitmq_ct_helpers + +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ + rabbit_common/mk/rabbitmq-hexpm.mk \ + rabbit_common/mk/rabbitmq-dist.mk \ + rabbit_common/mk/rabbitmq-run.mk \ + rabbit_common/mk/rabbitmq-test.mk \ + rabbit_common/mk/rabbitmq-tools.mk + +.DEFAULT_GOAL = all + +include rabbitmq-components.mk +include erlang.mk diff --git a/deps/rabbitmq_amqp_client/README.md b/deps/rabbitmq_amqp_client/README.md new file mode 100644 index 000000000000..b19ab34c9412 --- /dev/null +++ b/deps/rabbitmq_amqp_client/README.md @@ -0,0 +1,29 @@ +# Erlang RabbitMQ AMQP 1.0 Client + +The [Erlang AMQP 1.0 client](../amqp10_client/) is a client that can communicate with any AMQP 1.0 broker. +In contrast, this project (Erlang **RabbitMQ** AMQP 1.0 Client) can only communicate with RabbitMQ. +This project wraps (i.e. depends on) the Erlang AMQP 1.0 client providing additionally the following RabbitMQ management operations: +* declare queue +* get queue +* delete queue +* purge queue +* bind queue to exchange +* unbind queue from exchange +* declare exchange +* delete exchange +* bind exchange to exchange +* unbind exchange from exchange + +Except for `get queue`, these management operations are defined in the [AMQP 0.9.1 protocol](https://www.rabbitmq.com/amqp-0-9-1-reference.html). 
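To make the operations listed above concrete, here is a minimal usage sketch of the client added in this change. The connection parameters, container id, and names are illustrative assumptions only; the `rabbitmq_amqp_client` calls mirror the function signatures of the new module that appears later in this diff, and the `amqp10_client` calls are the usual connection/session setup of the underlying Erlang AMQP 1.0 client.

```erlang
%% Minimal sketch; connection details and names are assumptions, not part of this change.
-module(mgmt_usage_example).
-export([run/0]).

run() ->
    OpnConf = #{address => "localhost",
                port => 5672,
                container_id => <<"mgmt-usage-example">>,
                sasl => {plain, <<"guest">>, <<"guest">>}},
    {ok, Connection} = amqp10_client:open_connection(OpnConf),
    {ok, Session} = amqp10_client:begin_session_sync(Connection),
    %% Attach the paired management links used for all management requests.
    {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>),
    %% Declare a quorum queue and bind it to amq.direct.
    {ok, _QInfo} = rabbitmq_amqp_client:declare_queue(
                     LinkPair, <<"q1">>,
                     #{durable => true,
                       arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}),
    ok = rabbitmq_amqp_client:bind_queue(LinkPair, <<"q1">>, <<"amq.direct">>, <<"key 1">>, #{}),
    %% Clean up: delete the queue and detach the link pair.
    {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, <<"q1">>),
    ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair),
    ok = amqp10_client:end_session(Session),
    ok = amqp10_client:close_connection(Connection).
```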
+To support these AMQP 0.9.1 / RabbitMQ specific operations over AMQP 1.0, this project implements a subset of the following (most recent) AMQP 1.0 extension specifications: +* [AMQP Request-Response Messaging with Link Pairing Version 1.0 - Committee Specification 01](https://docs.oasis-open.org/amqp/linkpair/v1.0/cs01/linkpair-v1.0-cs01.html) (February 2021) +* [HTTP Semantics and Content over AMQP Version 1.0 - Working Draft 06](https://groups.oasis-open.org/higherlogic/ws/public/document?document_id=65571) (July 2019) +* [AMQP Management Version 1.0 - Working Draft 16](https://groups.oasis-open.org/higherlogic/ws/public/document?document_id=65575) (July 2019) + +This project might support more (non AMQP 0.9.1) RabbitMQ operations via AMQP 1.0 in the future. + +Topologies (exchanges, bindings, queues) in RabbitMQ can be created via +* [Management HTTP API](https://www.rabbitmq.com/docs/management#http-api) +* [Definition Import](https://www.rabbitmq.com/docs/definitions#import) +* AMQP 0.9.1 clients +* RabbitMQ AMQP 1.0 clients, such as this project diff --git a/deps/rabbitmq_amqp_client/app.bzl b/deps/rabbitmq_amqp_client/app.bzl new file mode 100644 index 000000000000..d80a6dafe4f5 --- /dev/null +++ b/deps/rabbitmq_amqp_client/app.bzl @@ -0,0 +1,73 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlang_bytecode") +load("@rules_erlang//:filegroup.bzl", "filegroup") + +def all_beam_files(name = "all_beam_files"): + filegroup( + name = "beam_files", + srcs = [":other_beam"], + ) + erlang_bytecode( + name = "other_beam", + srcs = ["src/rabbitmq_amqp_address.erl", "src/rabbitmq_amqp_client.erl"], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_amqp_client", + dest = "ebin", + erlc_opts = "//:erlc_opts", + deps = ["//deps/amqp10_common:erlang_app"], + ) + +def all_srcs(name = "all_srcs"): + filegroup( + name = "srcs", + srcs = ["src/rabbitmq_amqp_address.erl", "src/rabbitmq_amqp_client.erl"], + ) + filegroup(name = "private_hdrs") + filegroup( + name = "public_hdrs", + srcs = ["include/rabbitmq_amqp_client.hrl"], + ) + filegroup(name = "priv") + filegroup( + name = "license_files", + srcs = [ + "LICENSE", + "LICENSE-MPL-RabbitMQ", + ], + ) + filegroup( + name = "public_and_private_hdrs", + srcs = [":private_hdrs", ":public_hdrs"], + ) + filegroup( + name = "all_srcs", + srcs = [":public_and_private_hdrs", ":srcs"], + ) + +def all_test_beam_files(name = "all_test_beam_files"): + erlang_bytecode( + name = "test_other_beam", + testonly = True, + srcs = ["src/rabbitmq_amqp_address.erl", "src/rabbitmq_amqp_client.erl"], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_amqp_client", + dest = "test", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp10_common:erlang_app"], + ) + filegroup( + name = "test_beam_files", + testonly = True, + srcs = [":test_other_beam"], + ) + +def test_suite_beam_files(name = "test_suite_beam_files"): + erlang_bytecode( + name = "management_SUITE_beam_files", + testonly = True, + srcs = ["test/management_SUITE.erl"], + outs = ["test/management_SUITE.beam"], + hdrs = ["include/rabbitmq_amqp_client.hrl"], + app_name = "rabbitmq_amqp_client", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app"], + ) diff --git a/deps/rabbitmq_amqp_client/erlang.mk b/deps/rabbitmq_amqp_client/erlang.mk new file mode 120000 index 000000000000..59af4a527a9d --- /dev/null +++ b/deps/rabbitmq_amqp_client/erlang.mk @@ -0,0 +1 @@ +../../erlang.mk \ No newline at end of file diff --git 
a/deps/rabbitmq_amqp_client/include/rabbitmq_amqp_client.hrl b/deps/rabbitmq_amqp_client/include/rabbitmq_amqp_client.hrl new file mode 100644 index 000000000000..58ba4dab1d8a --- /dev/null +++ b/deps/rabbitmq_amqp_client/include/rabbitmq_amqp_client.hrl @@ -0,0 +1,4 @@ +-record(link_pair, {session :: pid(), + outgoing_link :: amqp10_client:link_ref(), + incoming_link :: amqp10_client:link_ref()}). +-type link_pair() :: #link_pair{}. diff --git a/deps/rabbitmq_amqp_client/rabbitmq-components.mk b/deps/rabbitmq_amqp_client/rabbitmq-components.mk new file mode 120000 index 000000000000..43c0d3567154 --- /dev/null +++ b/deps/rabbitmq_amqp_client/rabbitmq-components.mk @@ -0,0 +1 @@ +../../rabbitmq-components.mk \ No newline at end of file diff --git a/deps/rabbitmq_amqp_client/src/rabbitmq_amqp_address.erl b/deps/rabbitmq_amqp_client/src/rabbitmq_amqp_address.erl new file mode 100644 index 000000000000..c4eeda80d641 --- /dev/null +++ b/deps/rabbitmq_amqp_client/src/rabbitmq_amqp_address.erl @@ -0,0 +1,30 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +-module(rabbitmq_amqp_address). + +-export[exchange/1, + exchange/2, + queue/1]. + +-spec exchange(unicode:unicode_binary()) -> + unicode:unicode_binary(). +exchange(ExchangeName) -> + ExchangeNameQuoted = uri_string:quote(ExchangeName), + <<"/exchanges/", ExchangeNameQuoted/binary>>. + +-spec exchange(unicode:unicode_binary(), unicode:unicode_binary()) -> + unicode:unicode_binary(). +exchange(ExchangeName, RoutingKey) -> + ExchangeNameQuoted = uri_string:quote(ExchangeName), + RoutingKeyQuoted = uri_string:quote(RoutingKey), + <<"/exchanges/", ExchangeNameQuoted/binary, "/", RoutingKeyQuoted/binary>>. + +-spec queue(unicode:unicode_binary()) -> + unicode:unicode_binary(). +queue(QueueName) -> + QueueNameQuoted = uri_string:quote(QueueName), + <<"/queues/", QueueNameQuoted/binary>>. diff --git a/deps/rabbitmq_amqp_client/src/rabbitmq_amqp_client.erl b/deps/rabbitmq_amqp_client/src/rabbitmq_amqp_client.erl new file mode 100644 index 000000000000..fc5da6c7b4e4 --- /dev/null +++ b/deps/rabbitmq_amqp_client/src/rabbitmq_amqp_client.erl @@ -0,0 +1,478 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +-module(rabbitmq_amqp_client). + +-feature(maybe_expr, enable). + +-include("rabbitmq_amqp_client.hrl"). +-include_lib("amqp10_common/include/amqp10_framing.hrl"). + +-export[ + %% link pair operations + attach_management_link_pair_sync/2, + detach_management_link_pair_sync/1, + + %% queue operations + get_queue/2, + declare_queue/3, + bind_queue/5, + unbind_queue/5, + purge_queue/2, + delete_queue/2, + + %% exchange operations + declare_exchange/3, + bind_exchange/5, + unbind_exchange/5, + delete_exchange/2 + ]. + +-define(TIMEOUT, 20_000). +-define(MANAGEMENT_NODE_ADDRESS, <<"/management">>). + +-type arguments() :: #{binary() => {atom(), term()}}. 
+ +-type queue_info() :: #{name := binary(), + vhost := binary(), + durable := boolean(), + exclusive := boolean(), + auto_delete := boolean(), + arguments := arguments(), + type := binary(), + message_count := non_neg_integer(), + consumer_count := non_neg_integer(), + replicas => [binary()], + leader => binary()}. + +-type queue_properties() :: #{name := binary(), + durable => boolean(), + exclusive => boolean(), + auto_delete => boolean(), + arguments => arguments()}. + +-type exchange_properties() :: #{name := binary(), + type => binary(), + durable => boolean(), + auto_delete => boolean(), + internal => boolean(), + arguments => arguments()}. + +-type amqp10_prim() :: amqp10_binary_generator:amqp10_prim(). + +-spec attach_management_link_pair_sync(pid(), binary()) -> + {ok, link_pair()} | {error, term()}. +attach_management_link_pair_sync(Session, Name) -> + Terminus = #{address => ?MANAGEMENT_NODE_ADDRESS, + durable => none}, + OutgoingAttachArgs = #{name => Name, + role => {sender, Terminus}, + snd_settle_mode => settled, + rcv_settle_mode => first, + properties => #{<<"paired">> => true}}, + IncomingAttachArgs = OutgoingAttachArgs#{role := {receiver, Terminus, self()}, + filter => #{}}, + maybe + {ok, OutgoingRef} ?= attach(Session, OutgoingAttachArgs), + {ok, IncomingRef} ?= attach(Session, IncomingAttachArgs), + ok ?= await_attached(OutgoingRef), + ok ?= await_attached(IncomingRef), + {ok, #link_pair{session = Session, + outgoing_link = OutgoingRef, + incoming_link = IncomingRef}} + end. + +-spec attach(pid(), amqp10_client:attach_args()) -> + {ok, amqp10_client:link_ref()} | {error, term()}. +attach(Session, AttachArgs) -> + try amqp10_client:attach_link(Session, AttachArgs) + catch exit:Reason -> + {error, Reason} + end. + +-spec await_attached(amqp10_client:link_ref()) -> + ok | {error, term()}. +await_attached(Ref) -> + receive + {amqp10_event, {link, Ref, attached}} -> + ok; + {amqp10_event, {link, Ref, {detached, Err}}} -> + {error, Err} + after ?TIMEOUT -> + {error, timeout} + end. + +-spec detach_management_link_pair_sync(link_pair()) -> + ok | {error, term()}. +detach_management_link_pair_sync( + #link_pair{outgoing_link = OutgoingLink, + incoming_link = IncomingLink}) -> + maybe + ok ?= detach(OutgoingLink), + ok ?= detach(IncomingLink), + ok ?= await_detached(OutgoingLink), + await_detached(IncomingLink) + end. + +-spec detach(amqp10_client:link_ref()) -> + ok | {error, term()}. +detach(Ref) -> + try amqp10_client:detach_link(Ref) + catch exit:Reason -> + {error, Reason} + end. + +-spec await_detached(amqp10_client:link_ref()) -> + ok | {error, term()}. +await_detached(Ref) -> + receive + {amqp10_event, {link, Ref, {detached, normal}}} -> + ok; + {amqp10_event, {link, Ref, {detached, Err}}} -> + {error, Err} + after ?TIMEOUT -> + {error, timeout} + end. + +-spec get_queue(link_pair(), binary()) -> + {ok, queue_info()} | {error, term()}. +get_queue(LinkPair, QueueName) -> + QNameQuoted = uri_string:quote(QueueName), + Props = #{subject => <<"GET">>, + to => <<"/queues/", QNameQuoted/binary>>}, + case request(LinkPair, Props, null) of + {ok, Resp} -> + case is_success(Resp) of + true -> get_queue_info(Resp); + false -> {error, Resp} + end; + Err -> + Err + end. + +-spec declare_queue(link_pair(), binary(), queue_properties()) -> + {ok, queue_info()} | {error, term()}. 
+declare_queue(LinkPair, QueueName, QueueProperties) -> + Body0 = maps:fold( + fun(durable, V, L) when is_boolean(V) -> + [{{utf8, <<"durable">>}, {boolean, V}} | L]; + (exclusive, V, L) when is_boolean(V) -> + [{{utf8, <<"exclusive">>}, {boolean, V}} | L]; + (auto_delete, V, L) when is_boolean(V) -> + [{{utf8, <<"auto_delete">>}, {boolean, V}} | L]; + (arguments, V, L) -> + Args = encode_arguments(V), + [{{utf8, <<"arguments">>}, Args} | L] + end, [], QueueProperties), + Body = {map, Body0}, + QNameQuoted = uri_string:quote(QueueName), + Props = #{subject => <<"PUT">>, + to => <<"/queues/", QNameQuoted/binary>>}, + + case request(LinkPair, Props, Body) of + {ok, Resp} -> + case is_success(Resp) of + true -> get_queue_info(Resp); + false -> {error, Resp} + end; + Err -> + Err + end. + +-spec bind_queue(link_pair(), binary(), binary(), binary(), #{binary() => amqp10_prim()}) -> + ok | {error, term()}. +bind_queue(LinkPair, QueueName, ExchangeName, BindingKey, BindingArguments) -> + bind(<<"destination_queue">>, LinkPair, QueueName, ExchangeName, BindingKey, BindingArguments). + +-spec bind_exchange(link_pair(), binary(), binary(), binary(), #{binary() => amqp10_prim()}) -> + ok | {error, term()}. +bind_exchange(LinkPair, Destination, Source, BindingKey, BindingArguments) -> + bind(<<"destination_exchange">>, LinkPair, Destination, Source, BindingKey, BindingArguments). + +-spec bind(binary(), link_pair(), binary(), binary(), binary(), #{binary() => amqp10_prim()}) -> + ok | {error, term()}. +bind(DestinationKind, LinkPair, Destination, Source, BindingKey, BindingArguments) -> + Args = encode_arguments(BindingArguments), + Body = {map, [ + {{utf8, <<"source">>}, {utf8, Source}}, + {{utf8, DestinationKind}, {utf8, Destination}}, + {{utf8, <<"binding_key">>}, {utf8, BindingKey}}, + {{utf8, <<"arguments">>}, Args} + ]}, + Props = #{subject => <<"POST">>, + to => <<"/bindings">>}, + + case request(LinkPair, Props, Body) of + {ok, Resp} -> + case is_success(Resp) of + true -> ok; + false -> {error, Resp} + end; + Err -> + Err + end. + +-spec unbind_queue(link_pair(), binary(), binary(), binary(), #{binary() => amqp10_prim()}) -> + ok | {error, term()}. +unbind_queue(LinkPair, QueueName, ExchangeName, BindingKey, BindingArguments) -> + unbind($q, LinkPair, QueueName, ExchangeName, BindingKey, BindingArguments). + +-spec unbind_exchange(link_pair(), binary(), binary(), binary(), #{binary() => amqp10_prim()}) -> + ok | {error, term()}. +unbind_exchange(LinkPair, DestinationExchange, SourceExchange, BindingKey, BindingArguments) -> + unbind($e, LinkPair, DestinationExchange, SourceExchange, BindingKey, BindingArguments). + +-spec unbind(byte(), link_pair(), binary(), binary(), binary(), #{binary() => amqp10_prim()}) -> + ok | {error, term()}. 
+unbind(DestinationChar, LinkPair, Destination, Source, BindingKey, BindingArguments) + when map_size(BindingArguments) =:= 0 -> + SrcQ = uri_string:quote(Source), + DstQ = uri_string:quote(Destination), + KeyQ = uri_string:quote(BindingKey), + Uri = <<"/bindings/src=", SrcQ/binary, + ";dst", DestinationChar, $=, DstQ/binary, + ";key=", KeyQ/binary, + ";args=">>, + delete_binding(LinkPair, Uri); +unbind(DestinationChar, LinkPair, Destination, Source, BindingKey, BindingArguments) -> + Path = <<"/bindings">>, + Query = uri_string:compose_query( + [{<<"src">>, Source}, + {<<"dst", DestinationChar>>, Destination}, + {<<"key">>, BindingKey}]), + Uri0 = uri_string:recompose(#{path => Path, + query => Query}), + Props = #{subject => <<"GET">>, + to => Uri0}, + + case request(LinkPair, Props, null) of + {ok, Resp} -> + case is_success(Resp) of + true -> + #'v1_0.amqp_value'{content = {list, Bindings}} = amqp10_msg:body(Resp), + case search_binding_uri(BindingArguments, Bindings) of + {ok, Uri} -> + delete_binding(LinkPair, Uri); + not_found -> + ok + end; + false -> + {error, Resp} + end; + Err -> + Err + end. + +search_binding_uri(_, []) -> + not_found; +search_binding_uri(BindingArguments, [{map, Binding} | Bindings]) -> + case maps:from_list(Binding) of + #{{utf8, <<"arguments">>} := {map, Args0}, + {utf8, <<"location">>} := {utf8, Uri}} -> + Args = lists:map(fun({{utf8, Key}, TypeVal}) -> + {Key, TypeVal} + end, Args0), + case maps:from_list(Args) =:= BindingArguments of + true -> + {ok, Uri}; + false -> + search_binding_uri(BindingArguments, Bindings) + end; + _ -> + search_binding_uri(BindingArguments, Bindings) + end. + +-spec delete_binding(link_pair(), binary()) -> + ok | {error, term()}. +delete_binding(LinkPair, BindingUri) -> + Props = #{subject => <<"DELETE">>, + to => BindingUri}, + case request(LinkPair, Props, null) of + {ok, Resp} -> + case is_success(Resp) of + true -> ok; + false -> {error, Resp} + end; + Err -> + Err + end. + +-spec delete_queue(link_pair(), binary()) -> + {ok, map()} | {error, term()}. +delete_queue(LinkPair, QueueName) -> + purge_or_delete_queue(LinkPair, QueueName, <<>>). + +-spec purge_queue(link_pair(), binary()) -> + {ok, map()} | {error, term()}. +purge_queue(LinkPair, QueueName) -> + purge_or_delete_queue(LinkPair, QueueName, <<"/messages">>). + +-spec purge_or_delete_queue(link_pair(), binary(), binary()) -> + {ok, map()} | {error, term()}. +purge_or_delete_queue(LinkPair, QueueName, PathSuffix) -> + QNameQuoted = uri_string:quote(QueueName), + HttpRequestTarget = <<"/queues/", QNameQuoted/binary, PathSuffix/binary>>, + Props = #{subject => <<"DELETE">>, + to => HttpRequestTarget}, + case request(LinkPair, Props, null) of + {ok, Resp} -> + case is_success(Resp) of + true -> + #'v1_0.amqp_value'{content = {map, KVList}} = amqp10_msg:body(Resp), + #{{utf8, <<"message_count">>} := {ulong, Count}} = maps:from_list(KVList), + {ok, #{message_count => Count}}; + false -> + {error, Resp} + end; + Err -> + Err + end. + +-spec declare_exchange(link_pair(), binary(), exchange_properties()) -> + ok | {error, term()}. 
+declare_exchange(LinkPair, ExchangeName, ExchangeProperties) -> + Body0 = maps:fold( + fun(type, V, L) when is_binary(V) -> + [{{utf8, <<"type">>}, {utf8, V}} | L]; + (durable, V, L) when is_boolean(V) -> + [{{utf8, <<"durable">>}, {boolean, V}} | L]; + (auto_delete, V, L) when is_boolean(V) -> + [{{utf8, <<"auto_delete">>}, {boolean, V}} | L]; + (internal, V, L) when is_boolean(V) -> + [{{utf8, <<"internal">>}, {boolean, V}} | L]; + (arguments, V, L) -> + Args = encode_arguments(V), + [{{utf8, <<"arguments">>}, Args} | L] + end, [], ExchangeProperties), + Body = {map, Body0}, + + XNameQuoted = uri_string:quote(ExchangeName), + Props = #{subject => <<"PUT">>, + to => <<"/exchanges/", XNameQuoted/binary>>}, + + case request(LinkPair, Props, Body) of + {ok, Resp} -> + case is_success(Resp) of + true -> ok; + false -> {error, Resp} + end; + Err -> + Err + end. + +-spec delete_exchange(link_pair(), binary()) -> + ok | {error, term()}. +delete_exchange(LinkPair, ExchangeName) -> + XNameQuoted = uri_string:quote(ExchangeName), + Props = #{subject => <<"DELETE">>, + to => <<"/exchanges/", XNameQuoted/binary>>}, + case request(LinkPair, Props, null) of + {ok, Resp} -> + case is_success(Resp) of + true -> ok; + false -> {error, Resp} + end; + Err -> + Err + end. + +-spec request(link_pair(), amqp10_msg:amqp10_properties(), amqp10_prim()) -> + {ok, Response :: amqp10_msg:amqp10_msg()} | {error, term()}. +request(#link_pair{session = Session, + outgoing_link = OutgoingLink, + incoming_link = IncomingLink}, Properties, Body) -> + MessageId = message_id(), + Properties1 = Properties#{message_id => {binary, MessageId}, + reply_to => <<"$me">>}, + Request = amqp10_msg:new(<<>>, #'v1_0.amqp_value'{content = Body}, true), + Request1 = amqp10_msg:set_properties(Properties1, Request), + ok = amqp10_client:flow_link_credit(IncomingLink, 1, never), + case amqp10_client:send_msg(OutgoingLink, Request1) of + ok -> + receive {amqp10_msg, IncomingLink, Response} -> + #{correlation_id := MessageId} = amqp10_msg:properties(Response), + {ok, Response}; + {amqp10_event, {session, Session, {ended, Reason}}} -> + {error, {session_ended, Reason}} + after ?TIMEOUT -> + {error, timeout} + end; + Err -> + Err + end. + +-spec get_queue_info(amqp10_msg:amqp10_msg()) -> + {ok, queue_info()}. +get_queue_info(Response) -> + #'v1_0.amqp_value'{content = {map, KVList}} = amqp10_msg:body(Response), + RespMap = maps:from_list(KVList), + + RequiredQInfo = [<<"name">>, + <<"vhost">>, + <<"durable">>, + <<"exclusive">>, + <<"auto_delete">>, + <<"type">>, + <<"message_count">>, + <<"consumer_count">>], + Map0 = lists:foldl(fun(Key, M) -> + {ok, TypeVal} = maps:find({utf8, Key}, RespMap), + M#{binary_to_atom(Key) => amqp10_client_types:unpack(TypeVal)} + end, #{}, RequiredQInfo), + + {ok, {map, ArgsKVList}} = maps:find({utf8, <<"arguments">>}, RespMap), + ArgsMap = lists:foldl(fun({{utf8, K}, TypeVal}, M) -> + M#{K => TypeVal} + end, #{}, ArgsKVList), + Map1 = Map0#{arguments => ArgsMap}, + + Map2 = case maps:find({utf8, <<"replicas">>}, RespMap) of + {ok, {array, utf8, Arr}} -> + Replicas = lists:map(fun({utf8, Replica}) -> + Replica + end, Arr), + Map1#{replicas => Replicas}; + error -> + Map1 + end, + + Map = case maps:find({utf8, <<"leader">>}, RespMap) of + {ok, {utf8, Leader}} -> + Map2#{leader => Leader}; + error -> + Map2 + end, + {ok, Map}. + +-spec encode_arguments(arguments()) -> + {map, list(tuple())}. 
+encode_arguments(Arguments) ->
+    KVList = maps:fold(
+               fun(Key, TaggedVal, L)
+                     when is_binary(Key) ->
+                       [{{utf8, Key}, TaggedVal} | L]
+               end, [], Arguments),
+    {map, KVList}.
+
+%% "The message producer is usually responsible for setting the message-id in
+%% such a way that it is assured to be globally unique." [3.2.4]
+-spec message_id() -> binary().
+message_id() ->
+    rand:bytes(8).
+
+%% All successful 2xx and redirection 3xx status codes are interpreted as success.
+%% We don't hard code any specific status code for now as the returned status
+%% codes from RabbitMQ are subject to change.
+-spec is_success(amqp10_msg:amqp10_msg()) -> boolean().
+is_success(Response) ->
+    case amqp10_msg:properties(Response) of
+        #{subject := <<C, _/binary>>}
+          when C =:= $2 orelse
+               C =:= $3 ->
+            true;
+        _ ->
+            false
+    end.
diff --git a/deps/rabbitmq_amqp_client/test/management_SUITE.erl b/deps/rabbitmq_amqp_client/test/management_SUITE.erl
new file mode 100644
index 000000000000..0e49a0d786e8
--- /dev/null
+++ b/deps/rabbitmq_amqp_client/test/management_SUITE.erl
@@ -0,0 +1,1103 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
+%%
+
+-module(management_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include_lib("rabbitmq_amqp_client.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile([export_all,
+          nowarn_export_all]).
+
+-import(rabbit_ct_helpers,
+        [eventually/1,
+         eventually/3
+        ]).
+
+-import(rabbit_ct_broker_helpers,
+        [rpc/4,
+         rpc/5,
+         get_node_config/3
+        ]).
+
+-define(DEFAULT_EXCHANGE, <<>>).
+
+suite() ->
+    [{timetrap, {minutes, 3}}].
+
+
+all() ->
+    [{group, cluster_size_1},
+     {group, cluster_size_3}
+    ].
+
+groups() ->
+    [
+     {cluster_size_1, [shuffle],
+      [all_management_operations,
+       queue_binding_args,
+       queue_defaults,
+       queue_properties,
+       exchange_defaults,
+       bad_uri,
+       bad_queue_property,
+       bad_exchange_property,
+       bad_exchange_type,
+       get_queue_not_found,
+       declare_queue_default_queue_type,
+       declare_queue_empty_name,
+       declare_queue_line_feed,
+       declare_queue_amq_prefix,
+       declare_queue_inequivalent_fields,
+       declare_queue_inequivalent_exclusive,
+       declare_queue_invalid_field,
+       declare_queue_invalid_arg,
+       declare_default_exchange,
+       declare_exchange_amq_prefix,
+       declare_exchange_line_feed,
+       declare_exchange_inequivalent_fields,
+       delete_default_exchange,
+       delete_exchange_amq_prefix,
+       delete_exchange_carriage_return,
+       bind_source_default_exchange,
+       bind_destination_default_exchange,
+       bind_source_line_feed,
+       bind_destination_line_feed,
+       bind_missing_queue,
+       unbind_bad_binding_path_segment,
+       exclusive_queue,
+       purge_stream,
+       pipeline,
+       multiple_link_pairs,
+       link_attach_order,
+       drain,
+       session_flow_control
+      ]},
+     {cluster_size_3, [shuffle],
+      [classic_queue_stopped,
+       queue_topology
+      ]}
+    ].
+
+init_per_suite(Config) ->
+    {ok, _} = application:ensure_all_started(amqp10_client),
+    rabbit_ct_helpers:log_environment(),
+    Config.
+
+end_per_suite(Config) ->
+    Config.
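As a quick illustration of the 2xx/3xx convention described in the comment just before this test suite: the status codes below are hypothetical examples, classified with the same first-byte rule that `is_success/1` applies to the response's `subject` property.

```erlang
%% Erlang shell illustration; the subjects are hypothetical status codes.
1> IsSuccess = fun(<<C, _/binary>>) -> C =:= $2 orelse C =:= $3 end.
2> [{S, IsSuccess(S)} || S <- [<<"200">>, <<"204">>, <<"303">>, <<"400">>, <<"404">>]].
[{<<"200">>,true},{<<"204">>,true},{<<"303">>,true},{<<"400">>,false},{<<"404">>,false}]
```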
+ +init_per_group(Group, Config) -> + Nodes = case Group of + cluster_size_1 -> 1; + cluster_size_3 -> 3 + end, + Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), + Config1 = rabbit_ct_helpers:set_config( + Config, [{rmq_nodes_count, Nodes}, + {rmq_nodename_suffix, Suffix}]), + rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps()). + +end_per_group(_, Config) -> + rabbit_ct_helpers:run_teardown_steps( + Config, + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + %% Assert that every testcase cleaned up. + eventually(?_assertEqual([], rpc(Config, rabbit_amqqueue, list, []))), + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +all_management_operations(Config) -> + NodeName = get_node_config(Config, 0, nodename), + Node = atom_to_binary(NodeName), + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + + QName = <<"my 🐇"/utf8>>, + QProps = #{durable => true, + exclusive => false, + auto_delete => false, + arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}, + {ok, QInfo} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + ?assertEqual( + #{name => QName, + vhost => <<"/">>, + durable => true, + exclusive => false, + auto_delete => false, + arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}, + type => <<"quorum">>, + message_count => 0, + consumer_count => 0, + leader => Node, + replicas => [Node]}, + QInfo), + + %% This operation should be idempotent. + %% Also, exactly the same queue infos should be returned. + ?assertEqual({ok, QInfo}, + rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps)), + + %% get_queue/2 should also return the exact the same queue infos. + ?assertEqual({ok, QInfo}, + rabbitmq_amqp_client:get_queue(LinkPair, QName)), + + [Q] = rpc(Config, rabbit_amqqueue, list, []), + ?assert(rpc(Config, amqqueue, is_durable, [Q])), + ?assertNot(rpc(Config, amqqueue, is_exclusive, [Q])), + ?assertNot(rpc(Config, amqqueue, is_auto_delete, [Q])), + ?assertEqual(rabbit_quorum_queue, rpc(Config, amqqueue, get_type, [Q])), + + TargetAddr1 = <<"/amq/queue/", QName/binary>>, + {ok, Sender1} = amqp10_client:attach_sender_link(Session, <<"sender 1">>, TargetAddr1), + ok = wait_for_credit(Sender1), + flush(credited), + DTag1 = <<"tag 1">>, + Msg1 = amqp10_msg:new(DTag1, <<"m1">>, false), + ok = amqp10_client:send_msg(Sender1, Msg1), + ok = wait_for_accepted(DTag1), + + RoutingKey1 = BindingKey1 = <<"🗝️ 1"/utf8>>, + SourceExchange = <<"amq.direct">>, + ?assertEqual(ok, rabbitmq_amqp_client:bind_queue(LinkPair, QName, SourceExchange, BindingKey1, #{})), + %% This operation should be idempotent. 
+ ?assertEqual(ok, rabbitmq_amqp_client:bind_queue(LinkPair, QName, SourceExchange, BindingKey1, #{})), + TargetAddr2 = <<"/exchange/", SourceExchange/binary, "/", RoutingKey1/binary>>, + + {ok, Sender2} = amqp10_client:attach_sender_link(Session, <<"sender 2">>, TargetAddr2), + ok = wait_for_credit(Sender2), + flush(credited), + DTag2 = <<"tag 2">>, + Msg2 = amqp10_msg:new(DTag2, <<"m2">>, false), + ok = amqp10_client:send_msg(Sender2, Msg2), + ok = wait_for_accepted(DTag2), + + ?assertEqual(ok, rabbitmq_amqp_client:unbind_queue(LinkPair, QName, SourceExchange, BindingKey1, #{})), + ?assertEqual(ok, rabbitmq_amqp_client:unbind_queue(LinkPair, QName, SourceExchange, BindingKey1, #{})), + DTag3 = <<"tag 3">>, + ok = amqp10_client:send_msg(Sender2, amqp10_msg:new(DTag3, <<"not routed">>, false)), + ok = wait_for_settlement(DTag3, released), + + XName = <<"my fanout exchange 🥳"/utf8>>, + XProps = #{type => <<"fanout">>, + durable => false, + auto_delete => true, + internal => false, + arguments => #{<<"x-📥"/utf8>> => {utf8, <<"📮"/utf8>>}}}, + ?assertEqual(ok, rabbitmq_amqp_client:declare_exchange(LinkPair, XName, XProps)), + ?assertEqual(ok, rabbitmq_amqp_client:declare_exchange(LinkPair, XName, XProps)), + + {ok, Exchange} = rpc(Config, rabbit_exchange, lookup, [rabbit_misc:r(<<"/">>, exchange, XName)]), + ?assertMatch(#exchange{type = fanout, + durable = false, + auto_delete = true, + internal = false, + arguments = [{<<"x-📥"/utf8>>, longstr, <<"📮"/utf8>>}]}, + Exchange), + + TargetAddr3 = <<"/exchange/", XName/binary>>, + SourceExchange = <<"amq.direct">>, + ?assertEqual(ok, rabbitmq_amqp_client:bind_queue(LinkPair, QName, XName, <<"ignored">>, #{})), + ?assertEqual(ok, rabbitmq_amqp_client:bind_queue(LinkPair, QName, XName, <<"ignored">>, #{})), + + {ok, Sender3} = amqp10_client:attach_sender_link(Session, <<"sender 3">>, TargetAddr3), + ok = wait_for_credit(Sender3), + flush(credited), + DTag4 = <<"tag 4">>, + Msg3 = amqp10_msg:new(DTag4, <<"m3">>, false), + ok = amqp10_client:send_msg(Sender3, Msg3), + ok = wait_for_accepted(DTag4), + + RoutingKey2 = BindingKey2 = <<"key 2">>, + BindingArgs = #{<<" 😬 "/utf8>> => {utf8, <<" 😬 "/utf8>>}}, + ?assertEqual(ok, rabbitmq_amqp_client:bind_exchange(LinkPair, XName, SourceExchange, BindingKey2, BindingArgs)), + ?assertEqual(ok, rabbitmq_amqp_client:bind_exchange(LinkPair, XName, SourceExchange, BindingKey2, BindingArgs)), + TargetAddr4 = <<"/exchange/", SourceExchange/binary, "/", RoutingKey2/binary>>, + + {ok, Sender4} = amqp10_client:attach_sender_link(Session, <<"sender 4">>, TargetAddr4), + ok = wait_for_credit(Sender4), + flush(credited), + DTag5 = <<"tag 5">>, + Msg4 = amqp10_msg:new(DTag5, <<"m4">>, false), + ok = amqp10_client:send_msg(Sender4, Msg4), + ok = wait_for_accepted(DTag5), + + ?assertEqual(ok, rabbitmq_amqp_client:unbind_exchange(LinkPair, XName, SourceExchange, BindingKey2, BindingArgs)), + ?assertEqual(ok, rabbitmq_amqp_client:unbind_exchange(LinkPair, XName, SourceExchange, BindingKey2, BindingArgs)), + DTag6 = <<"tag 6">>, + ok = amqp10_client:send_msg(Sender4, amqp10_msg:new(DTag6, <<"not routed">>, false)), + ok = wait_for_settlement(DTag6, released), + + ?assertEqual(ok, rabbitmq_amqp_client:delete_exchange(LinkPair, XName)), + ?assertEqual(ok, rabbitmq_amqp_client:delete_exchange(LinkPair, XName)), + %% When we publish the next message, we expect: + %% 1. 
that the message is released because the exchange doesn't exist anymore, and + DTag7 = <<"tag 7">>, + ok = amqp10_client:send_msg(Sender3, amqp10_msg:new(DTag7, <<"not routed">>, false)), + ok = wait_for_settlement(DTag7, released), + %% 2. that the server closes the link, i.e. sends us a DETACH frame. + receive {amqp10_event, + {link, Sender3, + {detached, #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_NOT_FOUND}}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + ?assertEqual({ok, #{message_count => 4}}, + rabbitmq_amqp_client:purge_queue(LinkPair, QName)), + + ?assertEqual({ok, #{message_count => 0}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ?assertEqual({ok, #{message_count => 0}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + + ok = cleanup(Init). + +queue_defaults(Config) -> + Init = {_, LinkPair} = init(Config), + QName = atom_to_binary(?FUNCTION_NAME), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + [Q] = rpc(Config, rabbit_amqqueue, list, []), + ?assert(rpc(Config, amqqueue, is_durable, [Q])), + ?assertNot(rpc(Config, amqqueue, is_exclusive, [Q])), + ?assertNot(rpc(Config, amqqueue, is_auto_delete, [Q])), + ?assertEqual([], rpc(Config, amqqueue, get_arguments, [Q])), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). + +queue_properties(Config) -> + Init = {_, LinkPair} = init(Config), + QName = atom_to_binary(?FUNCTION_NAME), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{durable => false, + exclusive => true, + auto_delete => true}), + [Q] = rpc(Config, rabbit_amqqueue, list, []), + ?assertNot(rpc(Config, amqqueue, is_durable, [Q])), + ?assert(rpc(Config, amqqueue, is_exclusive, [Q])), + ?assert(rpc(Config, amqqueue, is_auto_delete, [Q])), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). + +exchange_defaults(Config) -> + Init = {_, LinkPair} = init(Config), + XName = atom_to_binary(?FUNCTION_NAME), + ok = rabbitmq_amqp_client:declare_exchange(LinkPair, XName, #{}), + {ok, Exchange} = rpc(Config, rabbit_exchange, lookup, [rabbit_misc:r(<<"/">>, exchange, XName)]), + ?assertMatch(#exchange{type = direct, + durable = true, + auto_delete = false, + internal = false, + arguments = []}, + Exchange), + + ok = rabbitmq_amqp_client:delete_exchange(LinkPair, XName), + ok = cleanup(Init). 
+ +queue_binding_args(Config) -> + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + QName = <<"my queue ~!@#$%^&*()_+🙈`-=[]\;',./"/utf8>>, + Q = #{durable => false, + exclusive => true, + auto_delete => false, + arguments => #{<<"x-queue-type">> => {utf8, <<"classic">>}}}, + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, Q), + + Exchange = <<"amq.headers">>, + BindingKey = <<>>, + BindingArgs = #{<<"key 1">> => {utf8, <<"👏"/utf8>>}, + <<"key 2">> => {uint, 3}, + <<"key 3">> => true, + <<"x-match">> => {utf8, <<"all">>}}, + ?assertEqual(ok, rabbitmq_amqp_client:bind_queue(LinkPair, QName, Exchange, BindingKey, BindingArgs)), + + TargetAddr = <<"/exchange/amq.headers">>, + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, TargetAddr), + ok = wait_for_credit(Sender), + flush(credited), + DTag1 = <<"tag 1">>, + Msg1 = amqp10_msg:new(DTag1, <<"m1">>, false), + AppProps = #{<<"key 1">> => <<"👏"/utf8>>, + <<"key 2">> => 3, + <<"key 3">> => true}, + ok = amqp10_client:send_msg(Sender, amqp10_msg:set_application_properties(AppProps, Msg1)), + ok = wait_for_accepted(DTag1), + + DTag2 = <<"tag 2">>, + Msg2 = amqp10_msg:new(DTag2, <<"m2">>, false), + ok = amqp10_client:send_msg(Sender, + amqp10_msg:set_application_properties( + maps:remove(<<"key 2">>, AppProps), + Msg2)), + ok = wait_for_settlement(DTag2, released), + + ?assertEqual(ok, rabbitmq_amqp_client:unbind_queue(LinkPair, QName, Exchange, BindingKey, BindingArgs)), + + DTag3 = <<"tag 3">>, + Msg3 = amqp10_msg:new(DTag3, <<"m3">>, false), + ok = amqp10_client:send_msg(Sender, amqp10_msg:set_application_properties(AppProps, Msg3)), + ok = wait_for_settlement(DTag3, released), + + ?assertEqual({ok, #{message_count => 1}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + + ok = amqp10_client:detach_link(Sender), + ok = cleanup(Init). + +bad_uri(Config) -> + Init = {_, #link_pair{outgoing_link = OutgoingLink, + incoming_link = IncomingLink}} = init(Config), + BadUri = <<"👎"/utf8>>, + Correlation = <<1, 2, 3>>, + Properties = #{subject => <<"GET">>, + to => BadUri, + message_id => {binary, Correlation}, + reply_to => <<"$me">>}, + Body = null, + Request0 = amqp10_msg:new(<<>>, #'v1_0.amqp_value'{content = Body}, true), + Request = amqp10_msg:set_properties(Properties, Request0), + ok = amqp10_client:flow_link_credit(IncomingLink, 1, never), + ok = amqp10_client:send_msg(OutgoingLink, Request), + + receive {amqp10_msg, IncomingLink, Response} -> + ?assertEqual( + #{subject => <<"400">>, + correlation_id => Correlation}, + amqp10_msg:properties(Response)), + ?assertEqual( + #'v1_0.amqp_value'{content = {utf8, <<"failed to normalize URI '👎': invalid_uri \"👎\""/utf8>>}}, + amqp10_msg:body(Response)) + after 5000 -> ct:fail({missing_message, ?LINE}) + end, + ok = cleanup(Init). + +bad_queue_property(Config) -> + bad_property(<<"queue">>, Config). + +bad_exchange_property(Config) -> + bad_property(<<"exchange">>, Config). 
+ +bad_property(Kind, Config) -> + Init = {_, #link_pair{outgoing_link = OutgoingLink, + incoming_link = IncomingLink}} = init(Config), + Correlation = <<1>>, + Properties = #{subject => <<"PUT">>, + to => <<$/, Kind/binary, "s/my-object">>, + message_id => {binary, Correlation}, + reply_to => <<"$me">>}, + Body = {map, [{{utf8, <<"unknown">>}, {utf8, <<"bla">>}}]}, + Request0 = amqp10_msg:new(<<>>, #'v1_0.amqp_value'{content = Body}, true), + Request = amqp10_msg:set_properties(Properties, Request0), + ok = amqp10_client:flow_link_credit(IncomingLink, 1, never), + ok = amqp10_client:send_msg(OutgoingLink, Request), + + receive {amqp10_msg, IncomingLink, Response} -> + ?assertEqual( + #{subject => <<"400">>, + correlation_id => Correlation}, + amqp10_msg:properties(Response)), + ?assertEqual( + #'v1_0.amqp_value'{ + content = {utf8, <<"bad ", Kind/binary, " property {{utf8,<<\"unknown\">>},{utf8,<<\"bla\">>}}">>}}, + amqp10_msg:body(Response)) + after 5000 -> ct:fail({missing_message, ?LINE}) + end, + ok = cleanup(Init). + +bad_exchange_type(Config) -> + Init = {_, LinkPair} = init(Config), + UnknownXType = <<"🤷"/utf8>>, + {error, Resp} = rabbitmq_amqp_client:declare_exchange(LinkPair, <<"e1">>, #{type => UnknownXType}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{content = {utf8, <<"unknown exchange type '", UnknownXType/binary, "'">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +get_queue_not_found(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"🤷"/utf8>>, + {error, Resp} = rabbitmq_amqp_client:get_queue(LinkPair, QName), + ?assertMatch(#{subject := <<"404">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{content = {utf8, <<"queue '", QName/binary, "' in vhost '/' not found">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +declare_queue_default_queue_type(Config) -> + Node = get_node_config(Config, 0, nodename), + Vhost = QName = atom_to_binary(?FUNCTION_NAME), + ok = erpc:call(Node, rabbit_vhost, add, + [Vhost, + #{default_queue_type => <<"quorum">>}, + <<"acting-user">>]), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, Vhost), + OpnConf = connection_config(Config, 0, Vhost), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), + + ?assertMatch({ok, #{type := <<"quorum">>}}, + rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{})), + + {ok, #{}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection), + ok = rabbit_ct_broker_helpers:delete_vhost(Config, Vhost). + +declare_queue_empty_name(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"">>, + {error, Resp} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{content = {utf8, <<"declare queue with empty name not allowed">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). 
+ +declare_queue_line_feed(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"🤠\n😱"/utf8>>, + {error, Resp} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"Bad name '", QName/binary, + "': line feed and carriage return characters not allowed">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +declare_queue_amq_prefix(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"amq.🎇"/utf8>>, + {error, Resp} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ?assertMatch(#{subject := <<"403">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"queue '", QName/binary, "' in vhost '/' " + "starts with reserved prefix 'amq.'">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +declare_queue_inequivalent_fields(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"👌"/utf8>>, + {ok, #{auto_delete := false}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{auto_delete => false}), + {error, Resp} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{auto_delete => true}), + ?assertMatch(#{subject := <<"409">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"inequivalent arg 'auto_delete' for queue '", QName/binary, + "' in vhost '/': received 'true' but current is 'false'">>}}, + amqp10_msg:body(Resp)), + {ok, #{}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). + +declare_queue_inequivalent_exclusive(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"👌"/utf8>>, + {ok, #{exclusive := true}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{exclusive => true}), + {error, Resp} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{exclusive => false}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual( + #'v1_0.amqp_value'{ + content = {utf8, + <<"cannot obtain exclusive access to locked queue '", QName/binary, "' in vhost '/'. ", + "It could be originally declared on another connection or the exclusive property ", + "value does not match that of the original declaration.">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +declare_queue_invalid_field(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"👌"/utf8>>, + QProps = #{auto_delete => true, + arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}, + {error, Resp} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual( + #'v1_0.amqp_value'{ + content = {utf8, <<"invalid property 'auto-delete' for queue '", QName/binary, "' in vhost '/'">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +declare_queue_invalid_arg(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"👌"/utf8>>, + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}, + <<"x-dead-letter-exchange">> => {utf8, <<"dlx is invalid for stream">>}}}, + {error, Resp} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + ?assertMatch(#{subject := <<"409">>}, amqp10_msg:properties(Resp)), + ?assertEqual( + #'v1_0.amqp_value'{ + content = {utf8, <<"invalid arg 'x-dead-letter-exchange' for queue '", QName/binary, + "' in vhost '/' of queue type rabbit_stream_queue">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). 
+ +declare_default_exchange(Config) -> + Init = {_, LinkPair} = init(Config), + {error, Resp} = rabbitmq_amqp_client:declare_exchange(LinkPair, ?DEFAULT_EXCHANGE, #{}), + ?assertMatch(#{subject := <<"403">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{content = {utf8, <<"operation not permitted on the default exchange">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +declare_exchange_amq_prefix(Config) -> + Init = {_, LinkPair} = init(Config), + XName = <<"amq.🎇"/utf8>>, + {error, Resp} = rabbitmq_amqp_client:declare_exchange(LinkPair, XName, #{}), + ?assertMatch(#{subject := <<"403">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"exchange '", XName/binary, "' in vhost '/' " + "starts with reserved prefix 'amq.'">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +declare_exchange_line_feed(Config) -> + Init = {_, LinkPair} = init(Config), + XName = <<"🤠\n😱"/utf8>>, + {error, Resp} = rabbitmq_amqp_client:declare_exchange(LinkPair, XName, #{}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"Bad name '", XName/binary, + "': line feed and carriage return characters not allowed">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +declare_exchange_inequivalent_fields(Config) -> + Init = {_, LinkPair} = init(Config), + XName = <<"👌"/utf8>>, + ok = rabbitmq_amqp_client:declare_exchange(LinkPair, XName, #{type => <<"direct">>}), + {error, Resp} = rabbitmq_amqp_client:declare_exchange(LinkPair, XName, #{type => <<"fanout">>}), + ?assertMatch(#{subject := <<"409">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"inequivalent arg 'type' for exchange '", XName/binary, + "' in vhost '/': received 'fanout' but current is 'direct'">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +classic_queue_stopped(Config) -> + Init2 = {_, LinkPair2} = init(Config, 2), + QName = <<"👌"/utf8>>, + {ok, #{durable := true, + type := <<"classic">>}} = rabbitmq_amqp_client:declare_queue(LinkPair2, QName, #{}), + ok = cleanup(Init2), + ok = rabbit_ct_broker_helpers:stop_node(Config, 2), + %% Classic queue is now stopped. + + Init0 = {_, LinkPair0} = init(Config), + {error, Resp0} = rabbitmq_amqp_client:declare_queue(LinkPair0, QName, #{}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp0)), + ExpectedResponseBody = #'v1_0.amqp_value'{ + content = {utf8, <<"queue '", QName/binary, + "' in vhost '/' process is stopped by supervisor">>}}, + ?assertEqual(ExpectedResponseBody, + amqp10_msg:body(Resp0)), + + {error, Resp1} = rabbitmq_amqp_client:get_queue(LinkPair0, QName), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp1)), + ?assertEqual(ExpectedResponseBody, + amqp10_msg:body(Resp1)), + + ok = rabbit_ct_broker_helpers:start_node(Config, 2), + {ok, #{}} = rabbitmq_amqp_client:delete_queue(LinkPair0, QName), + ok = cleanup(Init0). + +delete_default_exchange(Config) -> + Init = {_, LinkPair} = init(Config), + {error, Resp} = rabbitmq_amqp_client:delete_exchange(LinkPair, ?DEFAULT_EXCHANGE), + ?assertMatch(#{subject := <<"403">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{content = {utf8, <<"operation not permitted on the default exchange">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). 
+ +delete_exchange_amq_prefix(Config) -> + Init = {_, LinkPair} = init(Config), + XName = <<"amq.fanout">>, + {error, Resp} = rabbitmq_amqp_client:delete_exchange(LinkPair, XName), + ?assertMatch(#{subject := <<"403">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"exchange '", XName/binary, "' in vhost '/' " + "starts with reserved prefix 'amq.'">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +delete_exchange_carriage_return(Config) -> + Init = {_, LinkPair} = init(Config), + XName = <<"x\rx">>, + {error, Resp} = rabbitmq_amqp_client:delete_exchange(LinkPair, XName), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"Bad name '", XName/binary, + "': line feed and carriage return characters not allowed">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +bind_source_default_exchange(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"👀"/utf8>>, + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + + {error, Resp} = rabbitmq_amqp_client:bind_queue( + LinkPair, QName, ?DEFAULT_EXCHANGE, <<"my binding key">>, #{}), + ?assertMatch(#{subject := <<"403">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{content = {utf8, <<"operation not permitted on the default exchange">>}}, + amqp10_msg:body(Resp)), + + {ok, #{}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). + +bind_destination_default_exchange(Config) -> + Init = {_, LinkPair} = init(Config), + {error, Resp} = rabbitmq_amqp_client:bind_exchange( + LinkPair, ?DEFAULT_EXCHANGE, <<"amq.fanout">>, <<"my binding key">>, #{}), + ?assertMatch(#{subject := <<"403">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{content = {utf8, <<"operation not permitted on the default exchange">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +bind_source_line_feed(Config) -> + Init = {_, LinkPair} = init(Config), + XName = <<"🤠\n😱"/utf8>>, + {error, Resp} = rabbitmq_amqp_client:bind_exchange( + LinkPair, <<"amq.fanout">>, XName, <<"my binding key">>, #{}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"Bad name '", XName/binary, + "': line feed and carriage return characters not allowed">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +bind_destination_line_feed(Config) -> + Init = {_, LinkPair} = init(Config), + XName = <<"🤠\n😱"/utf8>>, + {error, Resp} = rabbitmq_amqp_client:bind_exchange( + LinkPair, XName, <<"amq.fanout">>, <<"my binding key">>, #{}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{ + content = {utf8, <<"Bad name '", XName/binary, + "': line feed and carriage return characters not allowed">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). + +bind_missing_queue(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"👀"/utf8>>, + {error, Resp} = rabbitmq_amqp_client:bind_queue( + LinkPair, QName, <<"amq.direct">>, <<"my binding key">>, #{}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + ?assertEqual(#'v1_0.amqp_value'{content = {utf8, <<"no queue '", QName/binary, "' in vhost '/'">>}}, + amqp10_msg:body(Resp)), + ok = cleanup(Init). 
+ +unbind_bad_binding_path_segment(Config) -> + Init = {_, #link_pair{outgoing_link = OutgoingLink, + incoming_link = IncomingLink}} = init(Config), + Correlation = <<1>>, + BadBindingPathSegment = <<"src=e1;dstq=q1;invalidkey=k1;args=">>, + Properties = #{subject => <<"DELETE">>, + to => <<"/bindings/", BadBindingPathSegment/binary>>, + message_id => {binary, Correlation}, + reply_to => <<"$me">>}, + Request0 = amqp10_msg:new(<<>>, #'v1_0.amqp_value'{content = null}, true), + Request = amqp10_msg:set_properties(Properties, Request0), + ok = amqp10_client:flow_link_credit(IncomingLink, 1, never), + ok = amqp10_client:send_msg(OutgoingLink, Request), + receive {amqp10_msg, IncomingLink, Response} -> + ?assertEqual( + #{subject => <<"400">>, + correlation_id => Correlation}, + amqp10_msg:properties(Response)), + ?assertEqual( + #'v1_0.amqp_value'{ + content = {utf8, <<"bad binding path segment '", + BadBindingPathSegment/binary, "'">>}}, + amqp10_msg:body(Response)) + after 5000 -> ct:fail({missing_message, ?LINE}) + end, + ok = cleanup(Init). + +exclusive_queue(Config) -> + Init1 = {_, LinkPair1} = init(Config), + BindingKey = <<"🗝️"/utf8>>, + XName = <<"amq.direct">>, + QName = <<"🙌"/utf8>>, + QProps = #{exclusive => true}, + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair1, QName, QProps), + ok = rabbitmq_amqp_client:bind_queue(LinkPair1, QName, XName, BindingKey, #{}), + + {Conn2, LinkPair2} = init(Config), + {error, Resp1} = rabbitmq_amqp_client:bind_queue(LinkPair2, QName, XName, BindingKey, #{}), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp1)), + Body = #'v1_0.amqp_value'{content = {utf8, Reason}} = amqp10_msg:body(Resp1), + ?assertMatch(<<"cannot obtain exclusive access to locked queue '", + QName:(byte_size(QName))/binary, "' in vhost '/'.", _/binary >>, + Reason), + ok = amqp10_client:close_connection(Conn2), + + {Conn3, LinkPair3} = init(Config), + {error, Resp2} = rabbitmq_amqp_client:delete_queue(LinkPair3, QName), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp2)), + %% We expect the same error message as previously. + ?assertEqual(Body, amqp10_msg:body(Resp2)), + ok = amqp10_client:close_connection(Conn3), + + {Conn4, LinkPair4} = init(Config), + {error, Resp3} = rabbitmq_amqp_client:purge_queue(LinkPair4, QName), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp3)), + %% We expect the same error message as previously. + ?assertEqual(Body, amqp10_msg:body(Resp3)), + ok = amqp10_client:close_connection(Conn4), + + ok = rabbitmq_amqp_client:unbind_queue(LinkPair1, QName, XName, BindingKey, #{}), + {ok, #{}} = rabbitmq_amqp_client:delete_queue(LinkPair1, QName), + ok = cleanup(Init1). + +purge_stream(Config) -> + Init = {_, LinkPair} = init(Config), + QName = <<"🚀"/utf8>>, + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}, + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + {error, Resp} = rabbitmq_amqp_client:purge_queue(LinkPair, QName), + ?assertMatch(#{subject := <<"400">>}, amqp10_msg:properties(Resp)), + #'v1_0.amqp_value'{content = {utf8, Reason}} = amqp10_msg:body(Resp), + ?assertEqual(<<"purge not supported by queue '", QName/binary, "' in vhost '/'">>, + Reason), + + {ok, #{}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = cleanup(Init). 
+ +queue_topology(Config) -> + NodeNames = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Nodes = [N0, N1, N2] = lists:map(fun erlang:atom_to_binary/1, NodeNames), + Init0 = {_, LinkPair0} = init(Config, 0), + + CQName = <<"my classic queue">>, + QQName = <<"my quorum queue">>, + SQName = <<"my stream queue">>, + + CQProps = #{arguments => #{<<"x-queue-type">> => {utf8, <<"classic">>}}}, + QQProps = #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}, + SQProps = #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}, + + {ok, CQInfo0} = rabbitmq_amqp_client:declare_queue(LinkPair0, CQName, CQProps), + {ok, QQInfo0} = rabbitmq_amqp_client:declare_queue(LinkPair0, QQName, QQProps), + {ok, SQInfo0} = rabbitmq_amqp_client:declare_queue(LinkPair0, SQName, SQProps), + + %% The default queue leader strategy is client-local. + ?assertEqual({ok, N0}, maps:find(leader, CQInfo0)), + ?assertEqual({ok, N0}, maps:find(leader, QQInfo0)), + ?assertEqual({ok, N0}, maps:find(leader, SQInfo0)), + + ?assertEqual({ok, [N0]}, maps:find(replicas, CQInfo0)), + {ok, QQReplicas0} = maps:find(replicas, QQInfo0), + ?assertEqual(Nodes, lists:usort(QQReplicas0)), + {ok, SQReplicas0} = maps:find(replicas, SQInfo0), + ?assertEqual(Nodes, lists:usort(SQReplicas0)), + + ok = cleanup(Init0), + ok = rabbit_ct_broker_helpers:stop_node(Config, 0), + + Init2 = {_, LinkPair2} = init(Config, 2), + {ok, QQInfo2} = rabbitmq_amqp_client:get_queue(LinkPair2, QQName), + {ok, SQInfo2} = rabbitmq_amqp_client:get_queue(LinkPair2, SQName), + + case maps:get(leader, QQInfo2) of + N1 -> ok; + N2 -> ok; + Other0 -> ct:fail({?LINE, Other0}) + end, + case maps:get(leader, SQInfo2) of + N1 -> ok; + N2 -> ok; + Other1 -> ct:fail({?LINE, Other1}) + end, + + %% Replicas should include both online and offline replicas. + {ok, QQReplicas2} = maps:find(replicas, QQInfo2), + ?assertEqual(Nodes, lists:usort(QQReplicas2)), + {ok, SQReplicas2} = maps:find(replicas, SQInfo2), + ?assertEqual(Nodes, lists:usort(SQReplicas2)), + + ok = rabbit_ct_broker_helpers:start_node(Config, 0), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair2, CQName), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair2, QQName), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair2, SQName), + ok = cleanup(Init2). + +%% Even though RabbitMQ processes management requests synchronously (one at a time), +%% the client should be able to send multiple requests at once before receiving a response. +pipeline(Config) -> + Init = {_, LinkPair} = init(Config), + flush(attached), + + %% We should be able to send 8 management requests at once + %% because RabbitMQ grants us 8 link credits initially. + Num = 8, + pipeline0(Num, LinkPair, <<"PUT">>, {map, []}), + eventually(?_assertEqual(Num, rpc(Config, rabbit_amqqueue, count, [])), 200, 20), + flush(queues_created), + + pipeline0(Num, LinkPair, <<"DELETE">>, null), + eventually(?_assertEqual(0, rpc(Config, rabbit_amqqueue, count, [])), 200, 20), + flush(queues_deleted), + + ok = cleanup(Init). 
+ +pipeline0(Num, + #link_pair{outgoing_link = OutgoingLink, + incoming_link = IncomingLink}, + HttpMethod, + Body) -> + ok = amqp10_client:flow_link_credit(IncomingLink, Num, never), + [begin + Request0 = amqp10_msg:new(<<>>, #'v1_0.amqp_value'{content = Body}, true), + Bin = integer_to_binary(N), + Props = #{subject => HttpMethod, + to => <<"/queues/q-", Bin/binary>>, + message_id => {binary, Bin}, + reply_to => <<"$me">>}, + Request = amqp10_msg:set_properties(Props, Request0), + ok = amqp10_client:send_msg(OutgoingLink, Request) + end || N <- lists:seq(1, Num)]. + +%% RabbitMQ allows attaching multiple link pairs. +multiple_link_pairs(Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair1} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"link pair 1">>), + {ok, LinkPair2} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"link pair 2">>), + + [SessionPid] = rpc(Config, rabbit_amqp_session, list_local, []), + #{management_link_pairs := Pairs0, + incoming_management_links := Incoming0, + outgoing_management_links := Outgoing0} = gen_server_state(SessionPid), + ?assertEqual(2, maps:size(Pairs0)), + ?assertEqual(2, maps:size(Incoming0)), + ?assertEqual(2, maps:size(Outgoing0)), + + QName = <<"q">>, + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair1, QName, #{}), + {ok, #{}} = rabbitmq_amqp_client:delete_queue(LinkPair2, QName), + + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair1), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair2), + + %% Assert that the server cleaned up its state. + #{management_link_pairs := Pairs, + incoming_management_links := Incoming, + outgoing_management_links := Outgoing} = gen_server_state(SessionPid), + ?assertEqual(0, maps:size(Pairs)), + ?assertEqual(0, maps:size(Incoming)), + ?assertEqual(0, maps:size(Outgoing)), + + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +%% Attaching (and detaching) either the sender or the receiver link first should both work. +link_attach_order(Config) -> + PairName1 = <<"link pair 1">>, + PairName2 = <<"link pair 2">>, + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + + Terminus = #{address => <<"/management">>, + durable => none}, + OutgoingAttachArgs1 = #{name => PairName1, + role => {sender, Terminus}, + snd_settle_mode => settled, + rcv_settle_mode => first, + properties => #{<<"paired">> => true}}, + IncomingAttachArgs1 = OutgoingAttachArgs1#{role := {receiver, Terminus, self()}, + filter => #{}}, + OutgoingAttachArgs2 = OutgoingAttachArgs1#{name := PairName2}, + IncomingAttachArgs2 = IncomingAttachArgs1#{name := PairName2}, + + %% Attach sender before receiver. + {ok, OutgoingRef1} = amqp10_client:attach_link(Session, OutgoingAttachArgs1), + {ok, IncomingRef1} = amqp10_client:attach_link(Session, IncomingAttachArgs1), + %% Attach receiver before sender. 
+ {ok, IncomingRef2} = amqp10_client:attach_link(Session, IncomingAttachArgs2), + {ok, OutgoingRef2} = amqp10_client:attach_link(Session, OutgoingAttachArgs2), + + Refs = [OutgoingRef1, + OutgoingRef2, + IncomingRef1, + IncomingRef2], + + [ok = wait_for_event(Ref, attached) || Ref <- Refs], + flush(attached), + + LinkPair1 = #link_pair{session = Session, + outgoing_link = OutgoingRef1, + incoming_link = IncomingRef1}, + LinkPair2 = #link_pair{session = Session, + outgoing_link = OutgoingRef2, + incoming_link = IncomingRef2}, + + QName = <<"test queue">>, + {ok, #{}} = rabbitmq_amqp_client:declare_queue(LinkPair1, QName, #{}), + {ok, #{}} = rabbitmq_amqp_client:delete_queue(LinkPair2, QName), + + %% Detach sender before receiver. + ok = amqp10_client:detach_link(OutgoingRef1), + ok = amqp10_client:detach_link(IncomingRef1), + %% Detach receiver before sender. + ok = amqp10_client:detach_link(IncomingRef2), + ok = amqp10_client:detach_link(OutgoingRef2), + + [ok = wait_for_event(Ref, {detached, normal}) || Ref <- Refs], + flush(detached), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +drain(Config) -> + {Conn, #link_pair{session = Session, + outgoing_link = OutgoingLink, + incoming_link = IncomingLink}} = init(Config), + + ok = amqp10_client:flow_link_credit(IncomingLink, 2, never), + ok = amqp10_client:flow_link_credit(IncomingLink, 3, never, _Drain = true), + %% After draining, link credit on our incoming link should be 0. + + Request0 = amqp10_msg:new(<<>>, #'v1_0.amqp_value'{content = null}, true), + Props = #{subject => <<"DELETE">>, + to => <<"/queues/q1">>, + message_id => {binary, <<1>>}, + reply_to => <<"$me">>}, + Request = amqp10_msg:set_properties(Props, Request0), + ok = amqp10_client:send_msg(OutgoingLink, Request), + receive + {amqp10_event, + {session, Session, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + description = {utf8, <<"insufficient credit (0) for management link from RabbitMQ to client">>}}}}} -> ok + after 5000 -> flush(missing_ended), + ct:fail({missing_event, ?LINE}) + end, + ok = amqp10_client:close_connection(Conn). + +%% Test that RabbitMQ respects session flow control. +session_flow_control(Config) -> + Init = {_, #link_pair{session = Session, + outgoing_link = OutgoingLink, + incoming_link = IncomingLink}} = init(Config), + flush(attached), + + ok = amqp10_client:flow_link_credit(IncomingLink, 1, never), + %% Close our incoming window. + gen_statem:cast(Session, {flow_session, #'v1_0.flow'{incoming_window = {uint, 0}}}), + + Request0 = amqp10_msg:new(<<>>, #'v1_0.amqp_value'{content = null}, true), + MessageId = <<1>>, + Props = #{subject => <<"DELETE">>, + to => <<"/queues/q1">>, + message_id => {binary, MessageId}, + reply_to => <<"$me">>}, + Request = amqp10_msg:set_properties(Props, Request0), + ok = amqp10_client:send_msg(OutgoingLink, Request), + + receive Unexpected -> ct:fail({unexpected, Unexpected}) + after 100 -> ok + end, + + %% Open our incoming window + gen_statem:cast(Session, {flow_session, #'v1_0.flow'{incoming_window = {uint, 5}}}), + + receive {amqp10_msg, IncomingLink, Response} -> + ?assertMatch(#{correlation_id := MessageId, + subject := <<"200">>}, + amqp10_msg:properties(Response)) + after 5000 -> flush(missing_msg), + ct:fail({missing_msg, ?LINE}) + end, + ok = cleanup(Init). + +init(Config) -> + init(Config, 0). 
+ +init(Config, Node) -> + OpnConf = connection_config(Config, Node), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), + {Connection, LinkPair}. + +cleanup({Connection, LinkPair = #link_pair{session = Session}}) -> + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +connection_config(Config) -> + connection_config(Config, 0). + +connection_config(Config, Node) -> + connection_config(Config, Node, <<"/">>). + +connection_config(Config, Node, Vhost) -> + Host = ?config(rmq_hostname, Config), + Port = get_node_config(Config, Node, tcp_port_amqp), + #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, <<"guest">>, <<"guest">>}, + hostname => <<"vhost:", Vhost/binary>>}. + +wait_for_credit(Sender) -> + receive + {amqp10_event, {link, Sender, credited}} -> + ok + after 5000 -> + flush(?FUNCTION_NAME), + ct:fail(?FUNCTION_NAME) + end. + +flush(Prefix) -> + receive + Msg -> + ct:pal("~p flushed: ~p~n", [Prefix, Msg]), + flush(Prefix) + after 1 -> + ok + end. + +wait_for_accepted(Tag) -> + wait_for_settlement(Tag, accepted). + +wait_for_settlement(Tag, State) -> + receive + {amqp10_disposition, {State, Tag}} -> + ok + after 5000 -> + Reason = {?FUNCTION_NAME, Tag}, + flush(Reason), + ct:fail(Reason) + end. + +wait_for_event(Ref, Event) -> + receive {amqp10_event, {link, Ref, Event}} -> ok + after 5000 -> ct:fail({missing_event, Ref, Event}) + end. + +%% Return the formatted state of a gen_server via sys:get_status/1. +%% (sys:get_state/1 is unformatted) +gen_server_state(Pid) -> + {status, _, _, L0} = sys:get_status(Pid, 20_000), + L1 = lists:last(L0), + {data, L2} = lists:last(L1), + proplists:get_value("State", L2). diff --git a/deps/rabbitmq_auth_backend_cache/.gitignore b/deps/rabbitmq_auth_backend_cache/.gitignore index 78a981fec8fc..0595211a7ee4 100644 --- a/deps/rabbitmq_auth_backend_cache/.gitignore +++ b/deps/rabbitmq_auth_backend_cache/.gitignore @@ -1,19 +1 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - test/config_schema_SUITE_data/schema/ - -/rabbitmq_auth_backend_cache.d diff --git a/deps/rabbitmq_auth_backend_cache/README.md b/deps/rabbitmq_auth_backend_cache/README.md index 4043f3682974..30decb5af7f8 100644 --- a/deps/rabbitmq_auth_backend_cache/README.md +++ b/deps/rabbitmq_auth_backend_cache/README.md @@ -220,6 +220,6 @@ The default values are `rabbit_auth_cache_ets` and `[]`, respectively. ## License and Copyright -(c) 2016-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Released under the Mozilla Public License 2.0, same as RabbitMQ. diff --git a/deps/rabbitmq_auth_backend_cache/include/rabbit_auth_backend_cache.hrl b/deps/rabbitmq_auth_backend_cache/include/rabbit_auth_backend_cache.hrl index 568b677a4c0b..1ee8a6ad6c9d 100644 --- a/deps/rabbitmq_auth_backend_cache/include/rabbit_auth_backend_cache.hrl +++ b/deps/rabbitmq_auth_backend_cache/include/rabbit_auth_backend_cache.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Same as default channel operation timeout. diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache.erl index 4af72f52c849..a8a3dbe3d838 100644 --- a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache.erl +++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_cache). @@ -13,7 +13,7 @@ -export([user_login_authentication/2, user_login_authorization/2, check_vhost_access/3, check_resource_access/4, check_topic_access/4, - state_can_expire/0]). + expiry_timestamp/1]). %% API @@ -60,7 +60,7 @@ check_topic_access(#auth_user{} = AuthUser, (_) -> unknown end). -state_can_expire() -> false. +expiry_timestamp(_) -> never. %% %% Implementation diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache_app.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache_app.erl index 3aa68c39fbb1..f95a1fee7d78 100644 --- a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache_app.erl +++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_cache_app). @@ -28,7 +28,7 @@ init([]) -> {ok, AuthCacheArgs} = application:get_env(rabbitmq_auth_backend_cache, cache_module_args), % Load module to be able to check exported function. _ = code:load_file(AuthCache), - ChildSpecs = case erlang:function_exported(AuthCache, start_link, + ChildSpecs = case erlang:function_exported(AuthCache, start_link, length(AuthCacheArgs)) of true -> [{auth_cache, {AuthCache, start_link, AuthCacheArgs}, permanent, 5000, worker, [AuthCache]}]; diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache.erl index 00ee6f819996..04488763ea85 100644 --- a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache.erl +++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_cache). 
diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_dict.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_dict.erl index a5cc42a4eafc..63aad3d2616e 100644 --- a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_dict.erl +++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_dict.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_cache_dict). diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets.erl index 5d2fe3bec2f9..abcb4fc0bd15 100644 --- a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets.erl +++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_cache_ets). diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented.erl index 91505bbf4730..a32b820fe867 100644 --- a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented.erl +++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_cache_ets_segmented). diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented_stateless.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented_stateless.erl index 09589bff035d..8f2f03d6bbe0 100644 --- a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented_stateless.erl +++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented_stateless.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_cache_ets_segmented_stateless). diff --git a/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE.erl b/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE.erl index 9cfe800a4602..a40ad668cca0 100644 --- a/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_backend_cache_SUITE.erl b/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_backend_cache_SUITE.erl index 8db7e7c70d58..a4f075e075b1 100644 --- a/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_backend_cache_SUITE.erl +++ b/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_backend_cache_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_cache_SUITE). diff --git a/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_cache_SUITE.erl b/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_cache_SUITE.erl index 501029f2518b..e332425629a4 100644 --- a/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_cache_SUITE.erl +++ b/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_cache_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_cache_SUITE). diff --git a/deps/rabbitmq_auth_backend_http/.gitignore b/deps/rabbitmq_auth_backend_http/.gitignore index cc013357c839..0595211a7ee4 100644 --- a/deps/rabbitmq_auth_backend_http/.gitignore +++ b/deps/rabbitmq_auth_backend_http/.gitignore @@ -1,22 +1 @@ -.sw? -.*.sw? 
-*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - test/config_schema_SUITE_data/schema/ - -/rabbitmq_auth_backend_http.d - -.idea - diff --git a/deps/rabbitmq_auth_backend_http/BUILD.bazel b/deps/rabbitmq_auth_backend_http/BUILD.bazel index f7fb0a4ede8f..f7ed1ea1c7b4 100644 --- a/deps/rabbitmq_auth_backend_http/BUILD.bazel +++ b/deps/rabbitmq_auth_backend_http/BUILD.bazel @@ -43,8 +43,10 @@ all_srcs(name = "all_srcs") test_suite_beam_files(name = "test_suite_beam_files") +# gazelle:erlang_app_extra_app crypto # gazelle:erlang_app_extra_app inets - +# gazelle:erlang_app_extra_app ssl +# gazelle:erlang_app_extra_app public_key # gazelle:erlang_app_dep rabbit rabbitmq_app( @@ -57,7 +59,12 @@ rabbitmq_app( app_module = APP_MODULE, app_name = APP_NAME, beam_files = [":beam_files"], - extra_apps = ["inets"], + extra_apps = [ + "crypto", + "inets", + "ssl", + "public_key", + ], license_files = [":license_files"], priv = [":priv"], deps = [ @@ -94,14 +101,13 @@ eunit( broker_for_integration_suites() -rabbitmq_suite( +rabbitmq_integration_suite( name = "auth_SUITE", size = "small", additional_beam = [ "test/auth_http_mock.beam", ], deps = [ - "//deps/rabbit_common:erlang_app", "@cowboy//:erlang_app", ], ) diff --git a/deps/rabbitmq_auth_backend_http/Makefile b/deps/rabbitmq_auth_backend_http/Makefile index 692dfeefb51c..67709e9afb1d 100644 --- a/deps/rabbitmq_auth_backend_http/Makefile +++ b/deps/rabbitmq_auth_backend_http/Makefile @@ -18,7 +18,7 @@ define PROJECT_APP_EXTRA_KEYS {broker_version_requirements, []} endef -LOCAL_DEPS = inets +LOCAL_DEPS = ssl inets crypto public_key DEPS = rabbit_common rabbit amqp_client TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers cowboy diff --git a/deps/rabbitmq_auth_backend_http/README.md b/deps/rabbitmq_auth_backend_http/README.md index 8409a1efb777..050e3837d2ec 100644 --- a/deps/rabbitmq_auth_backend_http/README.md +++ b/deps/rabbitmq_auth_backend_http/README.md @@ -84,6 +84,9 @@ against the URIs listed in the configuration file. It will add query string * `username`: the name of the user * `password`: the password provided (may be missing if e.g. rabbitmq-auth-mechanism-ssl is used) +Note: This request may carry additional HTTP request parameters beyond the ones listed above. +For instance, if the user connected to RabbitMQ over the MQTT protocol, the `client_id` and `vhost` request parameters are expected to be included as well. + ### vhost_path * `username`: the name of the user @@ -100,6 +103,9 @@ Note that you cannot create arbitrary virtual hosts using this plugin; you can o * `name`: the name of the resource * `permission`:the access level to the resource (`configure`, `write`, `read`): see [the Access Control guide](http://www.rabbitmq.com/access-control.html) for their meaning +Note: This request may carry additional HTTP request parameters beyond the ones listed above. +For instance, if the user connected to RabbitMQ over the MQTT protocol, the `client_id` request parameter is expected to be included as well.
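As a rough sketch of the kind of query string described in the notes above (taking the user check as an example; the endpoint URL itself comes from the plugin's configuration and every parameter value here is invented for illustration), the parameters could be assembled in Erlang along these lines:

    %% Hypothetical values; parameter names as listed for user_path above.
    %% uri_string:compose_query/1 form-encodes the pairs, e.g. "/" becomes "%2F".
    uri_string:compose_query([{"username",  "guest"},
                              {"password",  "guest"},
                              {"client_id", "my-mqtt-client"},
                              {"vhost",     "/"}]).
    %% => "username=guest&password=guest&client_id=my-mqtt-client&vhost=%2F"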
+ ### topic_path * `username`: the name of the user diff --git a/deps/rabbitmq_auth_backend_http/examples/README.md b/deps/rabbitmq_auth_backend_http/examples/README.md index 24189f6ec36e..1500e2214018 100644 --- a/deps/rabbitmq_auth_backend_http/examples/README.md +++ b/deps/rabbitmq_auth_backend_http/examples/README.md @@ -9,7 +9,7 @@ different platforms and frameworks: * Java and Spring Boot * Kotlin and Spring Boot * C# and ASP.NET Web API - * C# and ASP.NET Core 7 + * C# and ASP.NET Core 8.0 * PHP ## Python Example @@ -124,15 +124,15 @@ Port number may vary but will likely be `62190`. When the example is hosted on IIS, port 80 will be used by default. -## ASP.NET Core 7 Example +## ASP.NET Core 8.0 Example `rabbitmq_auth_backend_webapi_dotnetcore` is a modification of the `rabbitmq_auth_backend_webapi_dotnet` example -designed for ASP.NET Core 7. It's very similar to the original version but it also adds some static typing +designed for ASP.NET Core 8.0. It's very similar to the original version but it also adds some static typing for requests and responses. ### Running the Example -Open the solution file, `RabbitMqAuthBackendHttp.sln` in Visual Studio 2022 version 17.4 or later. +Open the solution file, `RabbitMqAuthBackendHttp.sln` in Visual Studio 2022 version 17.8 or later. As with other examples, RabbitMQ [authentication and authorization backends](http://www.rabbitmq.com/access-control.html) must be configured to use this plugin and the endpoints provided by this example app. @@ -151,8 +151,8 @@ Have a look at `AuthController`. This example was developed using - * .NET SDK 7.0 - * Visual Studio 2022 version 17.4 or Visual Studio Code + * .NET SDK 8.0 + * Visual Studio 2022 version 17.8 or Visual Studio Code * Windows 10 It is possible to build and run service from Visual Studio using IIS or from Visual Studio or Visual Studio Code using cross-platform server Kestrel. diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/MavenWrapperDownloader.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/MavenWrapperDownloader.java deleted file mode 100644 index c32394f140a7..000000000000 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/MavenWrapperDownloader.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2007-present the original author or authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -import java.net.*; -import java.io.*; -import java.nio.channels.*; -import java.util.Properties; - -public class MavenWrapperDownloader { - - private static final String WRAPPER_VERSION = "0.5.5"; - /** - * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. 
- */ - private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" - + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; - - /** - * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to - * use instead of the default one. - */ - private static final String MAVEN_WRAPPER_PROPERTIES_PATH = - ".mvn/wrapper/maven-wrapper.properties"; - - /** - * Path where the maven-wrapper.jar will be saved to. - */ - private static final String MAVEN_WRAPPER_JAR_PATH = - ".mvn/wrapper/maven-wrapper.jar"; - - /** - * Name of the property which should be used to override the default download url for the wrapper. - */ - private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; - - public static void main(String args[]) { - System.out.println("- Downloader started"); - File baseDirectory = new File(args[0]); - System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); - - // If the maven-wrapper.properties exists, read it and check if it contains a custom - // wrapperUrl parameter. - File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); - String url = DEFAULT_DOWNLOAD_URL; - if(mavenWrapperPropertyFile.exists()) { - FileInputStream mavenWrapperPropertyFileInputStream = null; - try { - mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); - Properties mavenWrapperProperties = new Properties(); - mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); - url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); - } catch (IOException e) { - System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); - } finally { - try { - if(mavenWrapperPropertyFileInputStream != null) { - mavenWrapperPropertyFileInputStream.close(); - } - } catch (IOException e) { - // Ignore ... 
- } - } - } - System.out.println("- Downloading from: " + url); - - File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); - if(!outputFile.getParentFile().exists()) { - if(!outputFile.getParentFile().mkdirs()) { - System.out.println( - "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); - } - } - System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); - try { - downloadFileFromURL(url, outputFile); - System.out.println("Done"); - System.exit(0); - } catch (Throwable e) { - System.out.println("- Error downloading"); - e.printStackTrace(); - System.exit(1); - } - } - - private static void downloadFileFromURL(String urlString, File destination) throws Exception { - if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { - String username = System.getenv("MVNW_USERNAME"); - char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); - Authenticator.setDefault(new Authenticator() { - @Override - protected PasswordAuthentication getPasswordAuthentication() { - return new PasswordAuthentication(username, password); - } - }); - } - URL website = new URL(urlString); - ReadableByteChannel rbc; - rbc = Channels.newChannel(website.openStream()); - FileOutputStream fos = new FileOutputStream(destination); - fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); - fos.close(); - rbc.close(); - } - -} diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.jar deleted file mode 100644 index 0d5e649888a4..000000000000 Binary files a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.jar and /dev/null differ diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.properties index fa87ad7ddfdb..f95f1ee80715 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.properties +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.properties @@ -1,2 +1,19 @@ -distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.1/apache-maven-3.6.1-bin.zip -wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+wrapperVersion=3.3.2 +distributionType=only-script +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.8/apache-maven-3.9.8-bin.zip diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw index d2f0ea38081d..19529ddf8c6e 100755 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw @@ -19,292 +19,241 @@ # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- -# Maven2 Start Up Batch script -# -# Required ENV vars: -# ------------------ -# JAVA_HOME - location of a JDK home dir +# Apache Maven Wrapper startup batch script, version 3.3.2 # # Optional ENV vars # ----------------- -# M2_HOME - location of maven2's installed home dir -# MAVEN_OPTS - parameters passed to the Java VM when running Maven -# e.g. to debug Maven itself, use -# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# JAVA_HOME - location of a JDK home dir, required when download maven via java source +# MVNW_REPOURL - repo url base for downloading maven distribution +# MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +# MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output # ---------------------------------------------------------------------------- -if [ -z "$MAVEN_SKIP_RC" ] ; then - - if [ -f /etc/mavenrc ] ; then - . /etc/mavenrc - fi +set -euf +[ "${MVNW_VERBOSE-}" != debug ] || set -x - if [ -f "$HOME/.mavenrc" ] ; then - . "$HOME/.mavenrc" - fi +# OS specific support. +native_path() { printf %s\\n "$1"; } +case "$(uname)" in +CYGWIN* | MINGW*) + [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")" + native_path() { cygpath --path --windows "$1"; } + ;; +esac -fi +# set JAVACMD and JAVACCMD +set_java_home() { + # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched + if [ -n "${JAVA_HOME-}" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACCMD="$JAVA_HOME/jre/sh/javac" + else + JAVACMD="$JAVA_HOME/bin/java" + JAVACCMD="$JAVA_HOME/bin/javac" -# OS specific support. $var _must_ be set to either true or false. -cygwin=false; -darwin=false; -mingw=false -case "`uname`" in - CYGWIN*) cygwin=true ;; - MINGW*) mingw=true;; - Darwin*) darwin=true - # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home - # See https://developer.apple.com/library/mac/qa/qa1170/_index.html - if [ -z "$JAVA_HOME" ]; then - if [ -x "/usr/libexec/java_home" ]; then - export JAVA_HOME="`/usr/libexec/java_home`" - else - export JAVA_HOME="/Library/Java/Home" + if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then + echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." >&2 + echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." 
>&2 + return 1 fi fi - ;; -esac - -if [ -z "$JAVA_HOME" ] ; then - if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=`java-config --jre-home` + else + JAVACMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v java + )" || : + JAVACCMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v javac + )" || : + + if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then + echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2 + return 1 + fi fi -fi - -if [ -z "$M2_HOME" ] ; then - ## resolve links - $0 may be a link to maven's home - PRG="$0" +} - # need this for relative symlinks - while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG="`dirname "$PRG"`/$link" - fi +# hash string like Java String::hashCode +hash_string() { + str="${1:-}" h=0 + while [ -n "$str" ]; do + char="${str%"${str#?}"}" + h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296)) + str="${str#?}" done + printf %x\\n $h +} - saveddir=`pwd` +verbose() { :; } +[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; } - M2_HOME=`dirname "$PRG"`/.. +die() { + printf %s\\n "$1" >&2 + exit 1 +} - # make it fully qualified - M2_HOME=`cd "$M2_HOME" && pwd` +trim() { + # MWRAPPER-139: + # Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds. + # Needed for removing poorly interpreted newline sequences when running in more + # exotic environments such as mingw bash on Windows. + printf "%s" "${1}" | tr -d '[:space:]' +} - cd "$saveddir" - # echo Using m2 at $M2_HOME -fi +# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties +while IFS="=" read -r key value; do + case "${key-}" in + distributionUrl) distributionUrl=$(trim "${value-}") ;; + distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;; + esac +done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties" +[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties" + +case "${distributionUrl##*/}" in +maven-mvnd-*bin.*) + MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ + case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in + *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;; + :Darwin*x86_64) distributionPlatform=darwin-amd64 ;; + :Darwin*arm64) distributionPlatform=darwin-aarch64 ;; + :Linux*x86_64*) distributionPlatform=linux-amd64 ;; + *) + echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2 + distributionPlatform=linux-amd64 + ;; + esac + distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip" + ;; +maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;; +*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;; +esac -# For Cygwin, ensure paths are in UNIX format before anything is touched -if $cygwin ; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --unix "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --unix "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --unix "$CLASSPATH"` -fi +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}" +distributionUrlName="${distributionUrl##*/}" 
+distributionUrlNameMain="${distributionUrlName%.*}" +distributionUrlNameMain="${distributionUrlNameMain%-bin}" +MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}" +MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")" + +exec_maven() { + unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || : + exec "$MAVEN_HOME/bin/$MVN_CMD" "$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD" +} -# For Mingw, ensure paths are in UNIX format before anything is touched -if $mingw ; then - [ -n "$M2_HOME" ] && - M2_HOME="`(cd "$M2_HOME"; pwd)`" - [ -n "$JAVA_HOME" ] && - JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +if [ -d "$MAVEN_HOME" ]; then + verbose "found existing MAVEN_HOME at $MAVEN_HOME" + exec_maven "$@" fi -if [ -z "$JAVA_HOME" ]; then - javaExecutable="`which javac`" - if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then - # readlink(1) is not available as standard on Solaris 10. - readLink=`which readlink` - if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then - if $darwin ; then - javaHome="`dirname \"$javaExecutable\"`" - javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" - else - javaExecutable="`readlink -f \"$javaExecutable\"`" - fi - javaHome="`dirname \"$javaExecutable\"`" - javaHome=`expr "$javaHome" : '\(.*\)/bin'` - JAVA_HOME="$javaHome" - export JAVA_HOME - fi - fi -fi +case "${distributionUrl-}" in +*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;; +*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;; +esac -if [ -z "$JAVACMD" ] ; then - if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - else - JAVACMD="`which java`" - fi +# prepare tmp dir +if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then + clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; } + trap clean HUP INT TERM EXIT +else + die "cannot create temp dir" fi -if [ ! -x "$JAVACMD" ] ; then - echo "Error: JAVA_HOME is not defined correctly." >&2 - echo " We cannot execute $JAVACMD" >&2 - exit 1 -fi +mkdir -p -- "${MAVEN_HOME%/*}" -if [ -z "$JAVA_HOME" ] ; then - echo "Warning: JAVA_HOME environment variable is not set." +# Download and Install Apache Maven +verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +verbose "Downloading from: $distributionUrl" +verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +# select .zip or .tar.gz +if ! 
command -v unzip >/dev/null; then + distributionUrl="${distributionUrl%.zip}.tar.gz" + distributionUrlName="${distributionUrl##*/}" fi -CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher +# verbose opt +__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR='' +[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v -# traverses directory structure from process work directory to filesystem root -# first directory with .mvn subdirectory is considered project base directory -find_maven_basedir() { +# normalize http auth +case "${MVNW_PASSWORD:+has-password}" in +'') MVNW_USERNAME='' MVNW_PASSWORD='' ;; +has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;; +esac - if [ -z "$1" ] - then - echo "Path not specified to find_maven_basedir" - return 1 - fi +if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then + verbose "Found wget ... using wget" + wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl" +elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then + verbose "Found curl ... using curl" + curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl" +elif set_java_home; then + verbose "Falling back to use Java to download" + javaSource="$TMP_DOWNLOAD_DIR/Downloader.java" + targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName" + cat >"$javaSource" <<-END + public class Downloader extends java.net.Authenticator + { + protected java.net.PasswordAuthentication getPasswordAuthentication() + { + return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() ); + } + public static void main( String[] args ) throws Exception + { + setDefault( new Downloader() ); + java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() ); + } + } + END + # For Cygwin/MinGW, switch paths to Windows format before running javac and java + verbose " - Compiling Downloader.java ..." + "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java" + verbose " - Running Downloader.java ..." + "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")" +fi - basedir="$1" - wdir="$1" - while [ "$wdir" != '/' ] ; do - if [ -d "$wdir"/.mvn ] ; then - basedir=$wdir - break +# If specified, validate the SHA-256 sum of the Maven distribution zip file +if [ -n "${distributionSha256Sum-}" ]; then + distributionSha256Result=false + if [ "$MVN_CMD" = mvnd.sh ]; then + echo "Checksum validation is not supported for maven-mvnd." >&2 + echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." 
>&2 + exit 1 + elif command -v sha256sum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then + distributionSha256Result=true fi - # workaround for JBEAP-8937 (on Solaris 10/Sparc) - if [ -d "${wdir}" ]; then - wdir=`cd "$wdir/.."; pwd` + elif command -v shasum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then + distributionSha256Result=true fi - # end of workaround - done - echo "${basedir}" -} - -# concatenates all lines of a file -concat_lines() { - if [ -f "$1" ]; then - echo "$(tr -s '\n' ' ' < "$1")" + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2 + echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $distributionSha256Result = false ]; then + echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2 + echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2 + exit 1 fi -} - -BASE_DIR=`find_maven_basedir "$(pwd)"` -if [ -z "$BASE_DIR" ]; then - exit 1; fi -########################################################################################## -# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -# This allows using the maven wrapper in projects that prohibit checking in binary data. -########################################################################################## -if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found .mvn/wrapper/maven-wrapper.jar" - fi +# unzip and move +if command -v unzip >/dev/null; then + unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip" else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." - fi - if [ -n "$MVNW_REPOURL" ]; then - jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" - else - jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" - fi - while IFS="=" read key value; do - case "$key" in (wrapperUrl) jarUrl="$value"; break ;; - esac - done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" - if [ "$MVNW_VERBOSE" = true ]; then - echo "Downloading from: $jarUrl" - fi - wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" - if $cygwin; then - wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` - fi - - if command -v wget > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found wget ... using wget" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - wget "$jarUrl" -O "$wrapperJarPath" - else - wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" - fi - elif command -v curl > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found curl ... 
using curl" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - curl -o "$wrapperJarPath" "$jarUrl" -f - else - curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f - fi - - else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Falling back to using Java to download" - fi - javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" - # For Cygwin, switch paths to Windows format before running javac - if $cygwin; then - javaClass=`cygpath --path --windows "$javaClass"` - fi - if [ -e "$javaClass" ]; then - if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Compiling MavenWrapperDownloader.java ..." - fi - # Compiling the Java class - ("$JAVA_HOME/bin/javac" "$javaClass") - fi - if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - # Running the downloader - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Running MavenWrapperDownloader.java ..." - fi - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") - fi - fi - fi -fi -########################################################################################## -# End of extension -########################################################################################## - -export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} -if [ "$MVNW_VERBOSE" = true ]; then - echo $MAVEN_PROJECTBASEDIR + tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar" fi -MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" - -# For Cygwin, switch paths to Windows format before running java -if $cygwin; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --path --windows "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --windows "$CLASSPATH"` - [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` -fi - -# Provide a "standardized" way to retrieve the CLI args that will -# work with both Windows and non-Windows executions. -MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" -export MAVEN_CMD_LINE_ARGS - -WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain +printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url" +mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME" -exec "$JAVACMD" \ - $MAVEN_OPTS \ - -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ - ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" +clean || : +exec_maven "$@" diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw.cmd b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw.cmd index b26ab24f039e..b150b91ed500 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw.cmd +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw.cmd @@ -1,182 +1,149 @@ -@REM ---------------------------------------------------------------------------- -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. 
The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM ---------------------------------------------------------------------------- - -@REM ---------------------------------------------------------------------------- -@REM Maven2 Start Up Batch script -@REM -@REM Required ENV vars: -@REM JAVA_HOME - location of a JDK home dir -@REM -@REM Optional ENV vars -@REM M2_HOME - location of maven2's installed home dir -@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending -@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven -@REM e.g. to debug Maven itself, use -@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files -@REM ---------------------------------------------------------------------------- - -@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' -@echo off -@REM set title of command window -title %0 -@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' -@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% - -@REM set %HOME% to equivalent of $HOME -if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") - -@REM Execute a user defined script before this one -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre -@REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" -if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" -:skipRcPre - -@setlocal - -set ERROR_CODE=0 - -@REM To isolate internal variables from possible post scripts, we use another setlocal -@setlocal - -@REM ==== START VALIDATION ==== -if not "%JAVA_HOME%" == "" goto OkJHome - -echo. -echo Error: JAVA_HOME not found in your environment. >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -:OkJHome -if exist "%JAVA_HOME%\bin\java.exe" goto init - -echo. -echo Error: JAVA_HOME is set to an invalid directory. >&2 -echo JAVA_HOME = "%JAVA_HOME%" >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -@REM ==== END VALIDATION ==== - -:init - -@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". -@REM Fallback to current working directory if not found. - -set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% -IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir - -set EXEC_DIR=%CD% -set WDIR=%EXEC_DIR% -:findBaseDir -IF EXIST "%WDIR%"\.mvn goto baseDirFound -cd .. 
-IF "%WDIR%"=="%CD%" goto baseDirNotFound -set WDIR=%CD% -goto findBaseDir - -:baseDirFound -set MAVEN_PROJECTBASEDIR=%WDIR% -cd "%EXEC_DIR%" -goto endDetectBaseDir - -:baseDirNotFound -set MAVEN_PROJECTBASEDIR=%EXEC_DIR% -cd "%EXEC_DIR%" - -:endDetectBaseDir - -IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig - -@setlocal EnableExtensions EnableDelayedExpansion -for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a -@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% - -:endReadAdditionalConfig - -SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" -set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" -set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" - -FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( - IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B -) - -@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -@REM This allows using the maven wrapper in projects that prohibit checking in binary data. -if exist %WRAPPER_JAR% ( - if "%MVNW_VERBOSE%" == "true" ( - echo Found %WRAPPER_JAR% - ) -) else ( - if not "%MVNW_REPOURL%" == "" ( - SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar" - ) - if "%MVNW_VERBOSE%" == "true" ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... - echo Downloading from: %DOWNLOAD_URL% - ) - - powershell -Command "&{"^ - "$webclient = new-object System.Net.WebClient;"^ - "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ - "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ - "}"^ - "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ - "}" - if "%MVNW_VERBOSE%" == "true" ( - echo Finished downloading %WRAPPER_JAR% - ) -) -@REM End of extension - -@REM Provide a "standardized" way to retrieve the CLI args that will -@REM work with both Windows and non-Windows executions. -set MAVEN_CMD_LINE_ARGS=%* - -%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* -if ERRORLEVEL 1 goto error -goto end - -:error -set ERROR_CODE=1 - -:end -@endlocal & set ERROR_CODE=%ERROR_CODE% - -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost -@REM check for post script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" -if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" -:skipRcPost - -@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' -if "%MAVEN_BATCH_PAUSE%" == "on" pause - -if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% - -exit /B %ERROR_CODE% +<# : batch portion +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. 
The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Optional ENV vars +@REM MVNW_REPOURL - repo url base for downloading maven distribution +@REM MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +@REM MVNW_VERBOSE - true: enable verbose log; others: silence the output +@REM ---------------------------------------------------------------------------- + +@IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0) +@SET __MVNW_CMD__= +@SET __MVNW_ERROR__= +@SET __MVNW_PSMODULEP_SAVE=%PSModulePath% +@SET PSModulePath= +@FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @( + IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B) +) +@SET PSModulePath=%__MVNW_PSMODULEP_SAVE% +@SET __MVNW_PSMODULEP_SAVE= +@SET __MVNW_ARG0_NAME__= +@SET MVNW_USERNAME= +@SET MVNW_PASSWORD= +@IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*) +@echo Cannot start maven from wrapper >&2 && exit /b 1 +@GOTO :EOF +: end batch / begin powershell #> + +$ErrorActionPreference = "Stop" +if ($env:MVNW_VERBOSE -eq "true") { + $VerbosePreference = "Continue" +} + +# calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties +$distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl +if (!$distributionUrl) { + Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties" +} + +switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) { + "maven-mvnd-*" { + $USE_MVND = $true + $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip" + $MVN_CMD = "mvnd.cmd" + break + } + default { + $USE_MVND = $false + $MVN_CMD = $script -replace '^mvnw','mvn' + break + } +} + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +if ($env:MVNW_REPOURL) { + $MVNW_REPO_PATTERN = if ($USE_MVND) { "/org/apache/maven/" } else { "/maven/mvnd/" } + $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')" +} +$distributionUrlName = $distributionUrl -replace '^.*/','' +$distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$','' +$MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain" +if ($env:MAVEN_USER_HOME) { + $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain" +} +$MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | 
ForEach-Object {$_.ToString("x2")}) -join '' +$MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME" + +if (Test-Path -Path "$MAVEN_HOME" -PathType Container) { + Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME" + Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" + exit $? +} + +if (! $distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) { + Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl" +} + +# prepare tmp dir +$TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile +$TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir" +$TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null +trap { + if ($TMP_DOWNLOAD_DIR.Exists) { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } + } +} + +New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null + +# Download and Install Apache Maven +Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +Write-Verbose "Downloading from: $distributionUrl" +Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +$webclient = New-Object System.Net.WebClient +if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) { + $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD) +} +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 +$webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +$distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum +if ($distributionSha256Sum) { + if ($USE_MVND) { + Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." + } + Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash + if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) { + Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property." + } +} + +# unzip and move +Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null +Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null +try { + Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null +} catch { + if (! (Test-Path -Path "$MAVEN_HOME" -PathType Container)) { + Write-Error "fail to move MAVEN_HOME" + } +} finally { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } +} + +Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml index 6a86f28c14ab..fe4d21cdea9d 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml @@ -16,26 +16,26 @@ rabbitmq-core@groups.vmware.com Team RabbitMQ - VMware, Inc. 
or its affiliates. + Broadcom Inc. and/or its subsidiaries. https://rabbitmq.com - VMware, Inc. or its affiliates. + Broadcom Inc. and/or its subsidiaries. https://www.rabbitmq.com org.springframework.boot spring-boot-starter-parent - 3.1.3 + 3.3.2 17 17 - 5.10.0 + 5.11.0 com.rabbitmq.examples diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/AuthBackendHttpController.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/AuthBackendHttpController.java index 1949623e6346..1c8d14f79d1a 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/AuthBackendHttpController.java +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/AuthBackendHttpController.java @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ package com.rabbitmq.examples; diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/BaseCheck.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/BaseCheck.java index 5cf56350783d..6c3610734a27 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/BaseCheck.java +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/BaseCheck.java @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ package com.rabbitmq.examples; diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/RabbitMqAuthBackendHttp.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/RabbitMqAuthBackendHttp.java index 50d5c57e8aa9..071fa603f63c 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/RabbitMqAuthBackendHttp.java +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/RabbitMqAuthBackendHttp.java @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
*/ package com.rabbitmq.examples; diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/ResourceCheck.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/ResourceCheck.java index 3df3ef3ed2cc..7f05e87d60c0 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/ResourceCheck.java +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/ResourceCheck.java @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ package com.rabbitmq.examples; diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/TopicCheck.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/TopicCheck.java index 9b378fe52b29..ae87d56d2edb 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/TopicCheck.java +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/TopicCheck.java @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ package com.rabbitmq.examples; diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/User.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/User.java index 47017a0714aa..2a5810dc6607 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/User.java +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/User.java @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ package com.rabbitmq.examples; diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/VirtualHostCheck.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/VirtualHostCheck.java index 8cb11032f470..d741e9e03263 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/VirtualHostCheck.java +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/VirtualHostCheck.java @@ -3,7 +3,7 @@ * License, v. 2.0. 
If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ package com.rabbitmq.examples; diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/java/com/rabbitmq/examples/AuthBackendHttpControllerTest.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/java/com/rabbitmq/examples/AuthBackendHttpControllerTest.java index 180cd72faa65..5b3cb2daf2ba 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/java/com/rabbitmq/examples/AuthBackendHttpControllerTest.java +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/java/com/rabbitmq/examples/AuthBackendHttpControllerTest.java @@ -2,7 +2,7 @@ * This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of * the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/. * - *
      Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ package com.rabbitmq.examples; diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.jar deleted file mode 100644 index 01e67997377a..000000000000 Binary files a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.jar and /dev/null differ diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.properties index 71793467167c..f95f1ee80715 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.properties +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.properties @@ -1 +1,19 @@ -distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +wrapperVersion=3.3.2 +distributionType=only-script +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.8/apache-maven-3.9.8-bin.zip diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw index 8b9da3b8b600..19529ddf8c6e 100755 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw @@ -8,7 +8,7 @@ # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an @@ -19,268 +19,241 @@ # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- -# Maven2 Start Up Batch script -# -# Required ENV vars: -# ------------------ -# JAVA_HOME - location of a JDK home dir +# Apache Maven Wrapper startup batch script, version 3.3.2 # # Optional ENV vars # ----------------- -# M2_HOME - location of maven2's installed home dir -# MAVEN_OPTS - parameters passed to the Java VM when running Maven -# e.g. to debug Maven itself, use -# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# JAVA_HOME - location of a JDK home dir, required when download maven via java source +# MVNW_REPOURL - repo url base for downloading maven distribution +# MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +# MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output # ---------------------------------------------------------------------------- -if [ -z "$MAVEN_SKIP_RC" ] ; then - - if [ -f /etc/mavenrc ] ; then - . /etc/mavenrc - fi +set -euf +[ "${MVNW_VERBOSE-}" != debug ] || set -x - if [ -f "$HOME/.mavenrc" ] ; then - . "$HOME/.mavenrc" - fi +# OS specific support. +native_path() { printf %s\\n "$1"; } +case "$(uname)" in +CYGWIN* | MINGW*) + [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")" + native_path() { cygpath --path --windows "$1"; } + ;; +esac -fi +# set JAVACMD and JAVACCMD +set_java_home() { + # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched + if [ -n "${JAVA_HOME-}" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACCMD="$JAVA_HOME/jre/sh/javac" + else + JAVACMD="$JAVA_HOME/bin/java" + JAVACCMD="$JAVA_HOME/bin/javac" -# OS specific support. $var _must_ be set to either true or false. -cygwin=false; -darwin=false; -mingw=false -case "`uname`" in - CYGWIN*) cygwin=true ;; - MINGW*) mingw=true;; - Darwin*) darwin=true - # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home - # See https://developer.apple.com/library/mac/qa/qa1170/_index.html - if [ -z "$JAVA_HOME" ]; then - if [ -x "/usr/libexec/java_home" ]; then - export JAVA_HOME="`/usr/libexec/java_home`" - else - export JAVA_HOME="/Library/Java/Home" + if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then + echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." >&2 + echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." >&2 + return 1 fi fi - ;; -esac + else + JAVACMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v java + )" || : + JAVACCMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v javac + )" || : -if [ -z "$JAVA_HOME" ] ; then - if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=`java-config --jre-home` + if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then + echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." 
>&2 + return 1 + fi fi -fi - -if [ -z "$M2_HOME" ] ; then - ## resolve links - $0 may be a link to maven's home - PRG="$0" +} - # need this for relative symlinks - while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG="`dirname "$PRG"`/$link" - fi +# hash string like Java String::hashCode +hash_string() { + str="${1:-}" h=0 + while [ -n "$str" ]; do + char="${str%"${str#?}"}" + h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296)) + str="${str#?}" done + printf %x\\n $h +} - saveddir=`pwd` +verbose() { :; } +[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; } - M2_HOME=`dirname "$PRG"`/.. +die() { + printf %s\\n "$1" >&2 + exit 1 +} - # make it fully qualified - M2_HOME=`cd "$M2_HOME" && pwd` +trim() { + # MWRAPPER-139: + # Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds. + # Needed for removing poorly interpreted newline sequences when running in more + # exotic environments such as mingw bash on Windows. + printf "%s" "${1}" | tr -d '[:space:]' +} - cd "$saveddir" - # echo Using m2 at $M2_HOME -fi +# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties +while IFS="=" read -r key value; do + case "${key-}" in + distributionUrl) distributionUrl=$(trim "${value-}") ;; + distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;; + esac +done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties" +[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties" -# For Cygwin, ensure paths are in UNIX format before anything is touched -if $cygwin ; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --unix "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --unix "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --unix "$CLASSPATH"` -fi +case "${distributionUrl##*/}" in +maven-mvnd-*bin.*) + MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ + case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in + *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;; + :Darwin*x86_64) distributionPlatform=darwin-amd64 ;; + :Darwin*arm64) distributionPlatform=darwin-aarch64 ;; + :Linux*x86_64*) distributionPlatform=linux-amd64 ;; + *) + echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2 + distributionPlatform=linux-amd64 + ;; + esac + distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip" + ;; +maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;; +*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;; +esac -# For Mingw, ensure paths are in UNIX format before anything is touched -if $mingw ; then - [ -n "$M2_HOME" ] && - M2_HOME="`(cd "$M2_HOME"; pwd)`" - [ -n "$JAVA_HOME" ] && - JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" - # TODO classpath? 
-fi +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}" +distributionUrlName="${distributionUrl##*/}" +distributionUrlNameMain="${distributionUrlName%.*}" +distributionUrlNameMain="${distributionUrlNameMain%-bin}" +MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}" +MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")" -if [ -z "$JAVA_HOME" ]; then - javaExecutable="`which javac`" - if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then - # readlink(1) is not available as standard on Solaris 10. - readLink=`which readlink` - if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then - if $darwin ; then - javaHome="`dirname \"$javaExecutable\"`" - javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" - else - javaExecutable="`readlink -f \"$javaExecutable\"`" - fi - javaHome="`dirname \"$javaExecutable\"`" - javaHome=`expr "$javaHome" : '\(.*\)/bin'` - JAVA_HOME="$javaHome" - export JAVA_HOME - fi - fi -fi +exec_maven() { + unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || : + exec "$MAVEN_HOME/bin/$MVN_CMD" "$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD" +} -if [ -z "$JAVACMD" ] ; then - if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - else - JAVACMD="`which java`" - fi +if [ -d "$MAVEN_HOME" ]; then + verbose "found existing MAVEN_HOME at $MAVEN_HOME" + exec_maven "$@" fi -if [ ! -x "$JAVACMD" ] ; then - echo "Error: JAVA_HOME is not defined correctly." >&2 - echo " We cannot execute $JAVACMD" >&2 - exit 1 -fi +case "${distributionUrl-}" in +*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;; +*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;; +esac -if [ -z "$JAVA_HOME" ] ; then - echo "Warning: JAVA_HOME environment variable is not set." +# prepare tmp dir +if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then + clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; } + trap clean HUP INT TERM EXIT +else + die "cannot create temp dir" fi -CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher +mkdir -p -- "${MAVEN_HOME%/*}" -# traverses directory structure from process work directory to filesystem root -# first directory with .mvn subdirectory is considered project base directory -find_maven_basedir() { +# Download and Install Apache Maven +verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +verbose "Downloading from: $distributionUrl" +verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" - if [ -z "$1" ] - then - echo "Path not specified to find_maven_basedir" - return 1 - fi +# select .zip or .tar.gz +if ! 
command -v unzip >/dev/null; then + distributionUrl="${distributionUrl%.zip}.tar.gz" + distributionUrlName="${distributionUrl##*/}" +fi - basedir="$1" - wdir="$1" - while [ "$wdir" != '/' ] ; do - if [ -d "$wdir"/.mvn ] ; then - basedir=$wdir - break - fi - # workaround for JBEAP-8937 (on Solaris 10/Sparc) - if [ -d "${wdir}" ]; then - wdir=`cd "$wdir/.."; pwd` - fi - # end of workaround - done - echo "${basedir}" -} +# verbose opt +__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR='' +[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v -# concatenates all lines of a file -concat_lines() { - if [ -f "$1" ]; then - echo "$(tr -s '\n' ' ' < "$1")" - fi -} +# normalize http auth +case "${MVNW_PASSWORD:+has-password}" in +'') MVNW_USERNAME='' MVNW_PASSWORD='' ;; +has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;; +esac -BASE_DIR=`find_maven_basedir "$(pwd)"` -if [ -z "$BASE_DIR" ]; then - exit 1; +if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then + verbose "Found wget ... using wget" + wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl" +elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then + verbose "Found curl ... using curl" + curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl" +elif set_java_home; then + verbose "Falling back to use Java to download" + javaSource="$TMP_DOWNLOAD_DIR/Downloader.java" + targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName" + cat >"$javaSource" <<-END + public class Downloader extends java.net.Authenticator + { + protected java.net.PasswordAuthentication getPasswordAuthentication() + { + return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() ); + } + public static void main( String[] args ) throws Exception + { + setDefault( new Downloader() ); + java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() ); + } + } + END + # For Cygwin/MinGW, switch paths to Windows format before running javac and java + verbose " - Compiling Downloader.java ..." + "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java" + verbose " - Running Downloader.java ..." + "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")" fi -########################################################################################## -# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -# This allows using the maven wrapper in projects that prohibit checking in binary data. -########################################################################################## -if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found .mvn/wrapper/maven-wrapper.jar" +# If specified, validate the SHA-256 sum of the Maven distribution zip file +if [ -n "${distributionSha256Sum-}" ]; then + distributionSha256Result=false + if [ "$MVN_CMD" = mvnd.sh ]; then + echo "Checksum validation is not supported for maven-mvnd." 
>&2 + echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + elif command -v sha256sum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then + distributionSha256Result=true fi -else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." - fi - jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" - while IFS="=" read key value; do - case "$key" in (wrapperUrl) jarUrl="$value"; break ;; - esac - done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" - if [ "$MVNW_VERBOSE" = true ]; then - echo "Downloading from: $jarUrl" - fi - wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" - - if command -v wget > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found wget ... using wget" - fi - wget "$jarUrl" -O "$wrapperJarPath" - elif command -v curl > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found curl ... using curl" - fi - curl -o "$wrapperJarPath" "$jarUrl" - else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Falling back to using Java to download" - fi - javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" - if [ -e "$javaClass" ]; then - if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Compiling MavenWrapperDownloader.java ..." - fi - # Compiling the Java class - ("$JAVA_HOME/bin/javac" "$javaClass") - fi - if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - # Running the downloader - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Running MavenWrapperDownloader.java ..." - fi - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") - fi - fi + elif command -v shasum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then + distributionSha256Result=true fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2 + echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $distributionSha256Result = false ]; then + echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2 + echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." 
>&2 + exit 1 + fi fi -########################################################################################## -# End of extension -########################################################################################## - -export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} -if [ "$MVNW_VERBOSE" = true ]; then - echo $MAVEN_PROJECTBASEDIR -fi -MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" -# For Cygwin, switch paths to Windows format before running java -if $cygwin; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --path --windows "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --windows "$CLASSPATH"` - [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +# unzip and move +if command -v unzip >/dev/null; then + unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip" +else + tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar" fi +printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url" +mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME" -WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -exec "$JAVACMD" \ - $MAVEN_OPTS \ - -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ - ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" +clean || : +exec_maven "$@" diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw.cmd b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw.cmd index fef5a8f7f988..b150b91ed500 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw.cmd +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw.cmd @@ -1,161 +1,149 @@ -@REM ---------------------------------------------------------------------------- -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM https://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. 
-@REM ---------------------------------------------------------------------------- - -@REM ---------------------------------------------------------------------------- -@REM Maven2 Start Up Batch script -@REM -@REM Required ENV vars: -@REM JAVA_HOME - location of a JDK home dir -@REM -@REM Optional ENV vars -@REM M2_HOME - location of maven2's installed home dir -@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending -@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven -@REM e.g. to debug Maven itself, use -@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files -@REM ---------------------------------------------------------------------------- - -@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' -@echo off -@REM set title of command window -title %0 -@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' -@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% - -@REM set %HOME% to equivalent of $HOME -if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") - -@REM Execute a user defined script before this one -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre -@REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" -if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" -:skipRcPre - -@setlocal - -set ERROR_CODE=0 - -@REM To isolate internal variables from possible post scripts, we use another setlocal -@setlocal - -@REM ==== START VALIDATION ==== -if not "%JAVA_HOME%" == "" goto OkJHome - -echo. -echo Error: JAVA_HOME not found in your environment. >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -:OkJHome -if exist "%JAVA_HOME%\bin\java.exe" goto init - -echo. -echo Error: JAVA_HOME is set to an invalid directory. >&2 -echo JAVA_HOME = "%JAVA_HOME%" >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -@REM ==== END VALIDATION ==== - -:init - -@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". -@REM Fallback to current working directory if not found. - -set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% -IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir - -set EXEC_DIR=%CD% -set WDIR=%EXEC_DIR% -:findBaseDir -IF EXIST "%WDIR%"\.mvn goto baseDirFound -cd .. -IF "%WDIR%"=="%CD%" goto baseDirNotFound -set WDIR=%CD% -goto findBaseDir - -:baseDirFound -set MAVEN_PROJECTBASEDIR=%WDIR% -cd "%EXEC_DIR%" -goto endDetectBaseDir - -:baseDirNotFound -set MAVEN_PROJECTBASEDIR=%EXEC_DIR% -cd "%EXEC_DIR%" - -:endDetectBaseDir - -IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig - -@setlocal EnableExtensions EnableDelayedExpansion -for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a -@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% - -:endReadAdditionalConfig - -SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" -set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" -set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" -FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO ( - IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B -) - -@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -@REM This allows using the maven wrapper in projects that prohibit checking in binary data. -if exist %WRAPPER_JAR% ( - echo Found %WRAPPER_JAR% -) else ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... - echo Downloading from: %DOWNLOAD_URL% - powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')" - echo Finished downloading %WRAPPER_JAR% -) -@REM End of extension - -%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* -if ERRORLEVEL 1 goto error -goto end - -:error -set ERROR_CODE=1 - -:end -@endlocal & set ERROR_CODE=%ERROR_CODE% - -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost -@REM check for post script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" -if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" -:skipRcPost - -@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' -if "%MAVEN_BATCH_PAUSE%" == "on" pause - -if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% - -exit /B %ERROR_CODE% +<# : batch portion +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. 
+@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Optional ENV vars +@REM MVNW_REPOURL - repo url base for downloading maven distribution +@REM MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +@REM MVNW_VERBOSE - true: enable verbose log; others: silence the output +@REM ---------------------------------------------------------------------------- + +@IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0) +@SET __MVNW_CMD__= +@SET __MVNW_ERROR__= +@SET __MVNW_PSMODULEP_SAVE=%PSModulePath% +@SET PSModulePath= +@FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @( + IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B) +) +@SET PSModulePath=%__MVNW_PSMODULEP_SAVE% +@SET __MVNW_PSMODULEP_SAVE= +@SET __MVNW_ARG0_NAME__= +@SET MVNW_USERNAME= +@SET MVNW_PASSWORD= +@IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*) +@echo Cannot start maven from wrapper >&2 && exit /b 1 +@GOTO :EOF +: end batch / begin powershell #> + +$ErrorActionPreference = "Stop" +if ($env:MVNW_VERBOSE -eq "true") { + $VerbosePreference = "Continue" +} + +# calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties +$distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl +if (!$distributionUrl) { + Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties" +} + +switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) { + "maven-mvnd-*" { + $USE_MVND = $true + $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip" + $MVN_CMD = "mvnd.cmd" + break + } + default { + $USE_MVND = $false + $MVN_CMD = $script -replace '^mvnw','mvn' + break + } +} + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +if ($env:MVNW_REPOURL) { + $MVNW_REPO_PATTERN = if ($USE_MVND) { "/org/apache/maven/" } else { "/maven/mvnd/" } + $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')" +} +$distributionUrlName = $distributionUrl -replace '^.*/','' +$distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$','' +$MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain" +if ($env:MAVEN_USER_HOME) { + $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain" +} +$MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | ForEach-Object {$_.ToString("x2")}) -join '' +$MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME" + +if (Test-Path -Path "$MAVEN_HOME" -PathType Container) { + Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME" + Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" + exit $? +} + +if (! 
$distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) { + Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl" +} + +# prepare tmp dir +$TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile +$TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir" +$TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null +trap { + if ($TMP_DOWNLOAD_DIR.Exists) { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } + } +} + +New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null + +# Download and Install Apache Maven +Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +Write-Verbose "Downloading from: $distributionUrl" +Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +$webclient = New-Object System.Net.WebClient +if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) { + $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD) +} +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 +$webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +$distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum +if ($distributionSha256Sum) { + if ($USE_MVND) { + Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." + } + Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash + if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) { + Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property." + } +} + +# unzip and move +Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null +Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null +try { + Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null +} catch { + if (! 
(Test-Path -Path "$MAVEN_HOME" -PathType Container)) { + Write-Error "fail to move MAVEN_HOME" + } +} finally { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } +} + +Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml index f9809542bdcd..c009697ebd91 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml @@ -14,7 +14,7 @@ org.springframework.boot spring-boot-starter-parent - 3.1.3 + 3.3.2 @@ -23,7 +23,7 @@ UTF-8 17 17 - 1.9.10 + 2.0.10 5.10.0 diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/AuthController.kt b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/AuthController.kt index 5281fa06a076..50ca36f49e9d 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/AuthController.kt +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/AuthController.kt @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ package com.rabbitmq.examples; diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/RabbitmqAuthBackendApplication.kt b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/RabbitmqAuthBackendApplication.kt index fd0a8d8d656c..dedc6bdbec2b 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/RabbitmqAuthBackendApplication.kt +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/RabbitmqAuthBackendApplication.kt @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
*/ package com.rabbitmq.examples diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/test/kotlin/com/rabbitmq/examples/AuthApiTest.kt b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/test/kotlin/com/rabbitmq/examples/AuthApiTest.kt index be899d334ca0..5dffbed7c00d 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/test/kotlin/com/rabbitmq/examples/AuthApiTest.kt +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/test/kotlin/com/rabbitmq/examples/AuthApiTest.kt @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. * - * Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved. + * (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ package com.rabbitmq.examples diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.csproj b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.csproj index 8f16e20c5afd..1efacc6e441f 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.csproj +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.csproj @@ -1,12 +1,10 @@ - - net7.0 + net8.0 enable + enable - - + - - + \ No newline at end of file diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/ResourceAuthRequest.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/ResourceAuthRequest.cs index 1b9bc9658c9d..d5e3b9244267 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/ResourceAuthRequest.cs +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/ResourceAuthRequest.cs @@ -2,13 +2,13 @@ { public class ResourceAuthRequest { - public string UserName { get; set; } + public required string UserName { get; set; } - public string Vhost { get; set; } + public required string Vhost { get; set; } public Resource Resource { get; set; } - public string Name { get; set; } + public required string Name { get; set; } public ResourcePermission Permission { get; set; } } diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/TopicAuthRequest.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/TopicAuthRequest.cs index fbc9440a674e..f3789232b8e2 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/TopicAuthRequest.cs +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/TopicAuthRequest.cs @@ -4,18 +4,18 @@ namespace RabbitMqAuthBackendHttp.Requests { public class TopicAuthRequest { - public string UserName { get; set; } + public required string UserName { get; set; } - public string Vhost { get; set; } + public required string Vhost { get; set; } - public string Name { get; set; } + public required string Name { get; set; } public Resource Resource { get; set; } public TopicPermission Permission { get; set; } [ModelBinder(Name = "routing_key")] - public string RoutingKey { get; set; } 
+ public required string RoutingKey { get; set; } } public enum TopicPermission diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/UserAuthRequest.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/UserAuthRequest.cs index e9b7282cee6d..577fc111fefa 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/UserAuthRequest.cs +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/UserAuthRequest.cs @@ -2,8 +2,8 @@ { public class UserAuthRequest { - public string UserName { get; set; } + public required string UserName { get; set; } - public string Password { get; set; } + public required string Password { get; set; } } } diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/VhostAuthRequest.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/VhostAuthRequest.cs index 34f591da1bd6..ff90d12fbb20 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/VhostAuthRequest.cs +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/VhostAuthRequest.cs @@ -2,10 +2,10 @@ { public class VhostAuthRequest { - public string UserName { get; set; } + public required string UserName { get; set; } - public string Vhost { get; set; } + public required string Vhost { get; set; } - public string Ip { get; set; } + public required string Ip { get; set; } } } diff --git a/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema b/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema index ceffd0d7731f..b50013fb1651 100644 --- a/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema +++ b/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema @@ -1,8 +1,5 @@ - -%% ========================================================================== %% ---------------------------------------------------------------------------- %% RabbitMQ HTTP Authorization -%% %% ---------------------------------------------------------------------------- {mapping, "auth_http.http_method", "rabbitmq_auth_backend_http.http_method", @@ -25,3 +22,131 @@ {mapping, "auth_http.connection_timeout", "rabbitmq_auth_backend_http.connection_timeout", [{datatype, integer}]}. + +%% TLS options + +{mapping, "auth_http.ssl_options", "rabbitmq_auth_backend_http.ssl_options", [ + {datatype, {enum, [none]}} +]}. + +{translation, "rabbitmq_auth_backend_http.ssl_options", +fun(Conf) -> + case cuttlefish:conf_get("auth_http.ssl_options", Conf, undefined) of + none -> []; + _ -> cuttlefish:invalid("Invalid auth_http.ssl_options") + end +end}. + +{mapping, "auth_http.ssl_options.verify", "rabbitmq_auth_backend_http.ssl_options.verify", [ + {datatype, {enum, [verify_peer, verify_none]}}]}. + +{mapping, "auth_http.ssl_options.fail_if_no_peer_cert", "rabbitmq_auth_backend_http.ssl_options.fail_if_no_peer_cert", [ + {datatype, {enum, [true, false]}}]}. + +{mapping, "auth_http.ssl_options.cacertfile", "rabbitmq_auth_backend_http.ssl_options.cacertfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "auth_http.ssl_options.certfile", "rabbitmq_auth_backend_http.ssl_options.certfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. 
+ +{mapping, "auth_http.ssl_options.cacerts.$name", "rabbitmq_auth_backend_http.ssl_options.cacerts", + [{datatype, string}]}. + +{translation, "rabbitmq_auth_backend_http.ssl_options.cacerts", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("auth_http.ssl_options.cacerts", Conf), + [ list_to_binary(V) || {_, V} <- Settings ] +end}. + +{mapping, "auth_http.ssl_options.cert", "rabbitmq_auth_backend_http.ssl_options.cert", + [{datatype, string}]}. + +{translation, "rabbitmq_auth_backend_http.ssl_options.cert", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("auth_http.ssl_options.cert", Conf)) +end}. + +{mapping, "auth_http.ssl_options.client_renegotiation", "rabbitmq_auth_backend_http.ssl_options.client_renegotiation", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "auth_http.ssl_options.crl_check", "rabbitmq_auth_backend_http.ssl_options.crl_check", + [{datatype, [{enum, [true, false, peer, best_effort]}]}]}. + +{mapping, "auth_http.ssl_options.depth", "rabbitmq_auth_backend_http.ssl_options.depth", + [{datatype, integer}, {validators, ["byte"]}]}. + +{mapping, "auth_http.ssl_options.dh", "rabbitmq_auth_backend_http.ssl_options.dh", + [{datatype, string}]}. + +{translation, "rabbitmq_auth_backend_http.ssl_options.dh", +fun(Conf) -> + list_to_binary(cuttlefish:conf_get("auth_http.ssl_options.dh", Conf)) +end}. + +{mapping, "auth_http.ssl_options.dhfile", "rabbitmq_auth_backend_http.ssl_options.dhfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "auth_http.ssl_options.honor_cipher_order", "rabbitmq_auth_backend_http.ssl_options.honor_cipher_order", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "auth_http.ssl_options.honor_ecc_order", "rabbitmq_auth_backend_http.ssl_options.honor_ecc_order", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "auth_http.ssl_options.key.RSAPrivateKey", "rabbitmq_auth_backend_http.ssl_options.key", + [{datatype, string}]}. + +{mapping, "auth_http.ssl_options.key.DSAPrivateKey", "rabbitmq_auth_backend_http.ssl_options.key", + [{datatype, string}]}. + +{mapping, "auth_http.ssl_options.key.PrivateKeyInfo", "rabbitmq_auth_backend_http.ssl_options.key", + [{datatype, string}]}. + +{translation, "rabbitmq_auth_backend_http.ssl_options.key", +fun(Conf) -> + case cuttlefish_variable:filter_by_prefix("auth_http.ssl_options.key", Conf) of + [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)}; + _ -> undefined + end +end}. + +{mapping, "auth_http.ssl_options.keyfile", "rabbitmq_auth_backend_http.ssl_options.keyfile", + [{datatype, string}, {validators, ["file_accessible"]}]}. + +{mapping, "auth_http.ssl_options.log_alert", "rabbitmq_auth_backend_http.ssl_options.log_alert", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "auth_http.ssl_options.password", "rabbitmq_auth_backend_http.ssl_options.password", + [{datatype, [tagged_binary, binary]}]}. + +{mapping, "auth_http.ssl_options.psk_identity", "rabbitmq_auth_backend_http.ssl_options.psk_identity", + [{datatype, string}]}. + +{mapping, "auth_http.ssl_options.reuse_sessions", "rabbitmq_auth_backend_http.ssl_options.reuse_sessions", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "auth_http.ssl_options.secure_renegotiate", "rabbitmq_auth_backend_http.ssl_options.secure_renegotiate", + [{datatype, {enum, [true, false]}}]}. + +{mapping, "auth_http.ssl_options.versions.$version", "rabbitmq_auth_backend_http.ssl_options.versions", + [{datatype, atom}]}. 
+ +{translation, "rabbitmq_auth_backend_http.ssl_options.versions", +fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("auth_http.ssl_options.versions", Conf), + [ V || {_, V} <- Settings ] +end}. + +{mapping, "auth_http.ssl_options.sni", "rabbitmq_auth_backend_http.ssl_options.server_name_indication", + [{datatype, [{enum, [none]}, string]}]}. + +{translation, "rabbitmq_auth_backend_http.ssl_options.server_name_indication", +fun(Conf) -> + case cuttlefish:conf_get("auth_http.ssl_options.sni", Conf, undefined) of + undefined -> cuttlefish:unset(); + none -> cuttlefish:unset(); + Hostname -> Hostname + end +end}. + +{mapping, "auth_http.ssl_options.hostname_verification", "rabbitmq_auth_backend_http.ssl_hostname_verification", [ + {datatype, {enum, [wildcard, none]}}]}. diff --git a/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http.erl b/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http.erl index 4e479c63dae5..c61aceeb8983 100644 --- a/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http.erl +++ b/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_http). @@ -15,7 +15,7 @@ -export([description/0, p/1, q/1, join_tags/1]). -export([user_login_authentication/2, user_login_authorization/2, check_vhost_access/3, check_resource_access/4, check_topic_access/4, - state_can_expire/0]). + expiry_timestamp/1]). %% If keepalive connection is closed, retry N times before failing. -define(RETRY_ON_KEEPALIVE_CLOSED, 3). @@ -33,8 +33,7 @@ description() -> %%-------------------------------------------------------------------- user_login_authentication(Username, AuthProps) -> - - case http_req(p(user_path), q([{username, Username}|extractPassword(AuthProps)])) of + case http_req(p(user_path), q([{username, Username}] ++ extract_other_credentials(AuthProps))) of {error, _} = E -> E; "deny" -> {refused, "Denied by the backing HTTP service", []}; "allow" ++ Rest -> Tags = [rabbit_data_coercion:to_atom(T) || @@ -42,25 +41,48 @@ user_login_authentication(Username, AuthProps) -> {ok, #auth_user{username = Username, tags = Tags, - impl = fun() -> proplists:get_value(password, AuthProps, none) end}}; + impl = fun() -> proplists:delete(username, AuthProps) end}}; Other -> {error, {bad_response, Other}} end. -%% Credentials (i.e. password) maybe directly in the password attribute in AuthProps -%% or as a Function with the attribute rabbit_auth_backend_http if the user was already authenticated with http backend -%% or as a Function with the attribute rabbit_auth_backend_cache if the user was already authenticated via cache backend -extractPassword(AuthProps) -> - case proplists:get_value(password, AuthProps, none) of - none -> - case proplists:get_value(rabbit_auth_backend_http, AuthProps, none) of - none -> case proplists:get_value(rabbit_auth_backend_cache, AuthProps, none) of - none -> []; - PasswordFun -> [{password, PasswordFun()}] - end; - PasswordFun -> [{password, PasswordFun()}] - end; - Password -> [{password, Password}] - end. 
+%% When a protocol plugin uses an internal AMQP 0-9-1 client to interact with RabbitMQ core, +%% what happens is that the plugin authenticates the entire authentication context (e.g. all of: password, client_id, vhost, etc) +%% and the internal AMQP 0-9-1 client also performs further authentication. +%% +%% In the latter case, the complete set of credentials is persisted behind a function call +%% that returns an AuthProps. +%% If the user was first authenticated by rabbit_auth_backend_http, there will be one property called +%% `rabbit_auth_backend_http` whose value is a function that returns a proplist with all the credentials used +%% on the first successful login. +%% +%% When rabbit_auth_backend_cache is involved, +%% the property `rabbit_auth_backend_cache` is a function which returns a proplist with all the credentials used +%% on the first successful login. +resolve_using_persisted_credentials(AuthProps) -> + case proplists:get_value(rabbit_auth_backend_http, AuthProps, undefined) of + undefined -> + case proplists:get_value(rabbit_auth_backend_cache, AuthProps, undefined) of + undefined -> AuthProps; + CacheAuthPropsFun -> AuthProps ++ CacheAuthPropsFun() + end; + HttpAuthPropsFun -> AuthProps ++ HttpAuthPropsFun() + end. + + +%% Some protocols may add additional credentials into the AuthProps that should be propagated to +%% the external authentication backends. +%% This function excludes any attribute that starts with rabbit_auth_backend_ +is_internal_property(rabbit_auth_backend_http) -> true; +is_internal_property(rabbit_auth_backend_cache) -> true; +is_internal_property(_Other) -> false. + +extract_other_credentials(AuthProps) -> + PublicAuthProps = [{K,V} || {K,V} <-AuthProps, not is_internal_property(K)], + case PublicAuthProps of + [] -> resolve_using_persisted_credentials(AuthProps); + _ -> PublicAuthProps + end. + user_login_authorization(Username, AuthProps) -> case user_login_authentication(Username, AuthProps) of @@ -107,7 +129,7 @@ check_topic_access(#auth_user{username = Username, tags = Tags}, {permission, Permission}, {tags, join_tags(Tags)}] ++ OptionsParameters). -state_can_expire() -> false. +expiry_timestamp(_) -> never. %%-------------------------------------------------------------------- @@ -141,7 +163,6 @@ http_req(Path, Query, Retry) -> Other -> Other end. - do_http_req(Path0, Query) -> URI = uri_parser:parse(Path0, [{port, 80}]), {host, Host} = lists:keyfind(host, 1, URI), @@ -168,19 +189,8 @@ do_http_req(Path0, Query) -> _ -> RequestTimeout end, rabbit_log:debug("auth_backend_http: request timeout: ~tp, connection timeout: ~tp", [RequestTimeout, ConnectionTimeout]), - HttpOpts = case application:get_env(rabbitmq_auth_backend_http, ssl_options) of - {ok, Opts} when is_list(Opts) -> - [ - {ssl, Opts}, - {timeout, RequestTimeout}, - {connect_timeout, ConnectionTimeout}]; - _ -> - [ - {timeout, RequestTimeout}, - {connect_timeout, ConnectionTimeout} - ] - end, - + HttpOpts = [{timeout, RequestTimeout}, + {connect_timeout, ConnectionTimeout}] ++ ssl_options(), case httpc:request(Method, Request, HttpOpts, []) of {ok, {{_HTTP, Code, _}, _Headers, Body}} -> rabbit_log:debug("auth_backend_http: response code is ~tp, body: ~tp", [Code, Body]), @@ -192,6 +202,22 @@ do_http_req(Path0, Query) -> E end. 
+ssl_options() -> + case application:get_env(rabbitmq_auth_backend_http, ssl_options) of + {ok, Opts0} when is_list(Opts0) -> + Opts1 = [{ssl, rabbit_networking:fix_ssl_options(Opts0)}], + case application:get_env(rabbitmq_auth_backend_http, ssl_hostname_verification) of + {ok, wildcard} -> + rabbit_log:debug("Enabling wildcard-aware hostname verification for HTTP client connections"), + %% Needed for HTTPS connections that connect to servers that use wildcard certificates. + %% See https://erlang.org/doc/man/public_key.html#pkix_verify_hostname_match_fun-1. + [{customize_hostname_check, [{match_fun, public_key:pkix_verify_hostname_match_fun(https)}]} | Opts1]; + _ -> + Opts1 + end; + _ -> [] + end. + p(PathName) -> {ok, Path} = application:get_env(rabbitmq_auth_backend_http, PathName), Path. diff --git a/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http_app.erl b/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http_app.erl index 22d3f3ae243b..f7a66b0a4595 100644 --- a/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http_app.erl +++ b/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_http_app). diff --git a/deps/rabbitmq_auth_backend_http/test/auth_SUITE.erl b/deps/rabbitmq_auth_backend_http/test/auth_SUITE.erl index bf9a3e746a4b..30f559a1e7ab 100644 --- a/deps/rabbitmq_auth_backend_http/test/auth_SUITE.erl +++ b/deps/rabbitmq_auth_backend_http/test/auth_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(auth_SUITE). @@ -14,50 +14,160 @@ -define(AUTH_PORT, 8000). -define(USER_PATH, "/auth/user"). --define(BACKEND_CONFIG, - [{http_method, get}, - {user_path, "http://localhost:" ++ integer_to_list(?AUTH_PORT) ++ ?USER_PATH}, - {vhost_path, "http://localhost:" ++ integer_to_list(?AUTH_PORT) ++ "/auth/vhost"}, - {resource_path, "http://localhost:" ++ integer_to_list(?AUTH_PORT) ++ "/auth/resource"}, - {topic_path, "http://localhost:" ++ integer_to_list(?AUTH_PORT) ++ "/auth/topic"}]). --define(ALLOWED_USER, #{username => <<"Ala">>, +-define(ALLOWED_USER, #{username => <<"Ala1">>, password => <<"Kocur">>, + expected_credentials => [username, password], tags => [policymaker, monitoring]}). --define(DENIED_USER, #{username => <<"Alice">>, password => <<"Cat">>}). +-define(ALLOWED_USER_WITH_EXTRA_CREDENTIALS, #{username => <<"Ala2">>, + password => <<"Kocur">>, + client_id => <<"some_id">>, + expected_credentials => [username, password, client_id], + tags => [policymaker, monitoring]}). +-define(DENIED_USER, #{username => <<"Alice">>, + password => <<"Cat">> + }). -all() -> [grants_access_to_user, denies_access_to_user]. +all() -> + [ + {group, over_https}, + {group, over_http} + ]. + +groups() -> + [ + {over_http, [], shared()}, + {over_https, [], shared()} + ]. 
+ +shared() -> + [ + grants_access_to_user, + denies_access_to_user, + grants_access_to_user_passing_additional_required_authprops, + grants_access_to_user_skipping_internal_authprops, + grants_access_to_user_with_credentials_in_rabbit_auth_backend_http, + grants_access_to_user_with_credentials_in_rabbit_auth_backend_cache + ]. init_per_suite(Config) -> - configure_http_auth_backend(), - #{username := Username, password := Password, tags := Tags} = ?ALLOWED_USER, - start_http_auth_server(?AUTH_PORT, ?USER_PATH, #{Username => {Password, Tags}}), - [{allowed_user, ?ALLOWED_USER}, {denied_user, ?DENIED_USER} | Config]. + rabbit_ct_helpers:run_setup_steps(Config) ++ + [{allowed_user, ?ALLOWED_USER}, + {allowed_user_with_extra_credentials, ?ALLOWED_USER_WITH_EXTRA_CREDENTIALS}, + {denied_user, ?DENIED_USER}]. + +init_per_group(over_http, Config) -> + configure_http_auth_backend("http", Config), + {User1, Tuple1} = extractUserTuple(?ALLOWED_USER), + {User2, Tuple2} = extractUserTuple(?ALLOWED_USER_WITH_EXTRA_CREDENTIALS), + start_http_auth_server(?AUTH_PORT, ?USER_PATH, #{User1 => Tuple1, User2 => Tuple2}), + Config; + +init_per_group(over_https, Config) -> + configure_http_auth_backend("https", Config), + {User1, Tuple1} = extractUserTuple(?ALLOWED_USER), + {User2, Tuple2} = extractUserTuple(?ALLOWED_USER_WITH_EXTRA_CREDENTIALS), + CertsDir = ?config(rmq_certsdir, Config), + start_https_auth_server(?AUTH_PORT, CertsDir, ?USER_PATH, #{User1 => Tuple1, User2 => Tuple2}), + Config. + +extractUserTuple(User) -> + #{username := Username, password := Password, tags := Tags, expected_credentials := ExpectedCredentials} = User, + {Username, {Password, Tags, ExpectedCredentials}}. -end_per_suite(_Config) -> +end_per_suite(Config) -> + Config. + +end_per_group(over_http, Config) -> + undo_configure_http_auth_backend("http", Config), + stop_http_auth_server(); +end_per_group(over_https, Config) -> + undo_configure_http_auth_backend("https", Config), stop_http_auth_server(). grants_access_to_user(Config) -> #{username := U, password := P, tags := T} = ?config(allowed_user, Config), - {ok, User} = rabbit_auth_backend_http:user_login_authentication(U, [{password, P}]), - ?assertMatch({U, T, P}, + AuthProps = [{password, P}], + {ok, User} = rabbit_auth_backend_http:user_login_authentication(U, AuthProps), + ?assertMatch({U, T, AuthProps}, {User#auth_user.username, User#auth_user.tags, (User#auth_user.impl)()}). denies_access_to_user(Config) -> #{username := U, password := P} = ?config(denied_user, Config), ?assertMatch({refused, "Denied by the backing HTTP service", []}, - rabbit_auth_backend_http:user_login_authentication(U, [{password, P}])). + rabbit_auth_backend_http:user_login_authentication(U, [{password, P}])). + +grants_access_to_user_passing_additional_required_authprops(Config) -> + #{username := U, password := P, tags := T, client_id := ClientId} = ?config(allowed_user_with_extra_credentials, Config), + AuthProps = [{password, P}, {client_id, ClientId}], + {ok, User} = rabbit_auth_backend_http:user_login_authentication(U, AuthProps), + ?assertMatch({U, T, AuthProps}, + {User#auth_user.username, User#auth_user.tags, (User#auth_user.impl)()}). 
+ +grants_access_to_user_skipping_internal_authprops(Config) -> + #{username := U, password := P, tags := T, client_id := ClientId} = ?config(allowed_user_with_extra_credentials, Config), + AuthProps = [{password, P}, {client_id, ClientId}, {rabbit_any_internal_property, <<"some value">>}], + {ok, User} = rabbit_auth_backend_http:user_login_authentication(U, AuthProps), + ?assertMatch({U, T, AuthProps}, + {User#auth_user.username, User#auth_user.tags, (User#auth_user.impl)()}). + +grants_access_to_user_with_credentials_in_rabbit_auth_backend_http(Config) -> + #{username := U, password := P, tags := T, client_id := ClientId} = ?config(allowed_user_with_extra_credentials, Config), + AuthProps = [{rabbit_auth_backend_http, fun() -> [{password, P}, {client_id, ClientId}] end}], + {ok, User} = rabbit_auth_backend_http:user_login_authentication(U, AuthProps), + ?assertMatch({U, T, AuthProps}, + {User#auth_user.username, User#auth_user.tags, (User#auth_user.impl)()}). + +grants_access_to_user_with_credentials_in_rabbit_auth_backend_cache(Config) -> + #{username := U, password := P, tags := T, client_id := ClientId} = ?config(allowed_user_with_extra_credentials, Config), + AuthProps = [{rabbit_auth_backend_cache, fun() -> [{password, P}, {client_id, ClientId}] end}], + {ok, User} = rabbit_auth_backend_http:user_login_authentication(U, AuthProps), + ?assertMatch({U, T, AuthProps}, + {User#auth_user.username, User#auth_user.tags, (User#auth_user.impl)()}). %%% HELPERS -configure_http_auth_backend() -> - {ok, _} = application:ensure_all_started(inets), - [application:set_env(rabbitmq_auth_backend_http, K, V) || {K, V} <- ?BACKEND_CONFIG]. +configure_http_auth_backend(Scheme, Config) -> + [application:set_env(rabbitmq_auth_backend_http, K, V) || {K, V} <- generate_backend_config(Scheme, Config)]. +undo_configure_http_auth_backend(Scheme, Config) -> + [application:unset_env(rabbitmq_auth_backend_http, K) || {K, _V} <- generate_backend_config(Scheme, Config)]. start_http_auth_server(Port, Path, Users) -> - application:ensure_all_started(cowboy), + {ok, _} = application:ensure_all_started(inets), + {ok, _} = application:ensure_all_started(cowboy), Dispatch = cowboy_router:compile([{'_', [{Path, auth_http_mock, Users}]}]), {ok, _} = cowboy:start_clear( - mock_http_auth_listener, [{port, Port}], #{env => #{dispatch => Dispatch}}). + mock_http_auth_listener, [{port, Port}], #{env => #{dispatch => Dispatch}}). + +start_https_auth_server(Port, CertsDir, Path, Users) -> + {ok, _} = application:ensure_all_started(inets), + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(cowboy), + Dispatch = cowboy_router:compile([{'_', [{Path, auth_http_mock, Users}]}]), + {ok, _} = cowboy:start_tls(mock_http_auth_listener, + [{port, Port}, + {certfile, filename:join([CertsDir, "server", "cert.pem"])}, + {keyfile, filename:join([CertsDir, "server", "key.pem"])}], + #{env => #{dispatch => Dispatch}}). stop_http_auth_server() -> cowboy:stop_listener(mock_http_auth_listener). 
+ +generate_backend_config(Scheme, Config) -> + Config0 = [{http_method, get}, + {user_path, Scheme ++ "://localhost:" ++ integer_to_list(?AUTH_PORT) ++ ?USER_PATH}, + {vhost_path, Scheme ++ "://localhost:" ++ integer_to_list(?AUTH_PORT) ++ "/auth/vhost"}, + {resource_path, Scheme ++ "://localhost:" ++ integer_to_list(?AUTH_PORT) ++ "/auth/resource"}, + {topic_path, Scheme ++ "://localhost:" ++ integer_to_list(?AUTH_PORT) ++ "/auth/topic"}], + Config1 = case Scheme of + "https" -> + CertsDir = ?config(rmq_certsdir, Config), + [{ssl_options, [ + {cacertfile, filename:join([CertsDir, "testca", "cacert.pem"])}, + {certfile, filename:join([CertsDir, "server", "cert.pem"])}, + {keyfile, filename:join([CertsDir, "server", "key.pem"])}, + {verify, verify_peer}, + {fail_if_no_peer_cert, false}] + }]; + "http" -> [] + end, + Config0 ++ Config1. diff --git a/deps/rabbitmq_auth_backend_http/test/auth_http_mock.erl b/deps/rabbitmq_auth_backend_http/test/auth_http_mock.erl index ed7304b12db2..b0112896e384 100644 --- a/deps/rabbitmq_auth_backend_http/test/auth_http_mock.erl +++ b/deps/rabbitmq_auth_backend_http/test/auth_http_mock.erl @@ -1,26 +1,31 @@ -module(auth_http_mock). -export([init/2]). +-include_lib("eunit/include/eunit.hrl"). %%% CALLBACKS init(Req = #{method := <<"GET">>}, Users) -> QsVals = cowboy_req:parse_qs(Req), - Reply = authenticate(proplists:get_value(<<"username">>, QsVals), - proplists:get_value(<<"password">>, QsVals), - Users), + Reply = authenticate(QsVals, Users), Req2 = cowboy_req:reply(200, #{<<"content-type">> => <<"text/plain">>}, Reply, Req), {ok, Req2, Users}. %%% HELPERS -authenticate(Username, Password, Users) -> +authenticate(QsVals, Users) -> + Username = proplists:get_value(<<"username">>, QsVals), + Password = proplists:get_value(<<"password">>, QsVals), case maps:get(Username, Users, undefined) of - {MatchingPassword, Tags} when Password =:= MatchingPassword -> - StringTags = lists:map(fun(T) -> io_lib:format("~ts", [T]) end, Tags), - <<"allow ", (list_to_binary(string:join(StringTags, " ")))/binary>>; - {_OtherPassword, _} -> + {MatchingPassword, Tags, ExpectedCredentials} when Password =:= MatchingPassword -> + case lists:all(fun(C) -> proplists:is_defined(list_to_binary(rabbit_data_coercion:to_list(C)),QsVals) end, ExpectedCredentials) of + true -> StringTags = lists:map(fun(T) -> io_lib:format("~ts", [T]) end, Tags), + <<"allow ", (list_to_binary(string:join(StringTags, " ")))/binary>>; + false -> ct:log("Missing required attributes. Expected ~p, Found: ~p", [ExpectedCredentials, QsVals]), + <<"deny">> + end; + {_OtherPassword, _, _} -> <<"deny">>; undefined -> <<"deny">> - end. \ No newline at end of file + end. diff --git a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE.erl b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE.erl index 080e62d3400d..055285f23c6a 100644 --- a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). 
diff --git a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/certs/invalid_cacert.pem b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/certs/invalid_cacert.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/certs/invalid_cacert.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/certs/invalid_cert.pem b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/certs/invalid_cert.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/certs/invalid_cert.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/certs/invalid_key.pem b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/certs/invalid_key.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/certs/invalid_key.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets index 748515344c08..7d94d78bbc16 100644 --- a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets +++ b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets @@ -10,9 +10,8 @@ {user_path,"http://some-server/auth/user"}, {vhost_path,"http://some-server/auth/vhost"}, {resource_path,"http://some-server/auth/resource"}]}], - [rabbitmq_auth_backend_http]} - -, {default_http_method, + [rabbitmq_auth_backend_http]}, + {default_http_method, "auth_backends.1 = http auth_http.user_path = http://some-server/auth/user auth_http.vhost_path = http://some-server/auth/vhost @@ -22,9 +21,8 @@ [{user_path,"http://some-server/auth/user"}, {vhost_path,"http://some-server/auth/vhost"}, {resource_path,"http://some-server/auth/resource"}]}], - [rabbitmq_auth_backend_http]} - -, {timeouts, + [rabbitmq_auth_backend_http]}, + {timeouts, "auth_backends.1 = http auth_http.user_path = http://some-server/auth/user auth_http.vhost_path = http://some-server/auth/vhost @@ -33,11 +31,175 @@ auth_http.connection_timeout = 30000", [{rabbit,[{auth_backends,[rabbit_auth_backend_http]}]}, {rabbitmq_auth_backend_http, - [ - {connection_timeout,30000}, + [{connection_timeout,30000}, {request_timeout,30000}, {user_path,"http://some-server/auth/user"}, {vhost_path,"http://some-server/auth/vhost"}, {resource_path,"http://some-server/auth/resource"}]}], - [rabbitmq_auth_backend_http]} - ]. 
+ [rabbitmq_auth_backend_http]}, + {ssl_options, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.verify = verify_peer + auth_http.ssl_options.fail_if_no_peer_cert = true", + [{rabbitmq_auth_backend_http, [ + {ssl_options, + [{cacertfile, "test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile, "test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile, "test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {verify, verify_peer}, + {fail_if_no_peer_cert, true}]} + ]}], + [rabbitmq_auth_backend_http]}, + {ssl_options_verify_peer, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.verify = verify_peer + auth_http.ssl_options.fail_if_no_peer_cert = false", + [{rabbitmq_auth_backend_http, + [ + {ssl_options, + [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {verify,verify_peer}, + {fail_if_no_peer_cert,false}]}]}], + []}, + {ssl_options_password, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.password = t0p$3kRe7", + [{rabbitmq_auth_backend_http, + [ + {ssl_options, + [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {password,<<"t0p$3kRe7">>}]}]}], + []}, + {ssl_options_tls_versions, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.versions.tls1_2 = tlsv1.2 + auth_http.ssl_options.versions.tls1_1 = tlsv1.1", + [], + [{rabbitmq_auth_backend_http, + [{ssl_options, + [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {versions,['tlsv1.2','tlsv1.1']}]} + ]}], + []}, + {ssl_options_depth, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.depth = 2 + auth_http.ssl_options.verify = verify_peer + auth_http.ssl_options.fail_if_no_peer_cert = false", + [{rabbitmq_auth_backend_http, + [{ssl_options, + [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {depth,2}, + 
{verify,verify_peer}, + {fail_if_no_peer_cert,false}]}]}], + []}, + {ssl_options_honor_cipher_order, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.depth = 2 + auth_http.ssl_options.verify = verify_peer + auth_http.ssl_options.fail_if_no_peer_cert = false + auth_http.ssl_options.honor_cipher_order = true", + [{rabbitmq_auth_backend_http, + [{ssl_options, + [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {depth,2}, + {verify,verify_peer}, + {fail_if_no_peer_cert, false}, + {honor_cipher_order, true}]}]}], + []}, + {ssl_options_honor_ecc_order, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.depth = 2 + auth_http.ssl_options.verify = verify_peer + auth_http.ssl_options.fail_if_no_peer_cert = false + auth_http.ssl_options.honor_ecc_order = true", + [{rabbitmq_auth_backend_http, + [{ssl_options, + [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {depth,2}, + {verify,verify_peer}, + {fail_if_no_peer_cert, false}, + {honor_ecc_order, true}]} + ]}], + []}, + {ssl_options_sni_disabled, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.versions.tls1_2 = tlsv1.2 + auth_http.ssl_options.versions.tls1_1 = tlsv1.1 + auth_http.ssl_options.sni = none", + [], + [{rabbitmq_auth_backend_http, + [{ssl_options, + [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {versions,['tlsv1.2','tlsv1.1']}] + }] + }], + []}, + {ssl_options_sni_hostname, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.versions.tls1_2 = tlsv1.2 + auth_http.ssl_options.versions.tls1_1 = tlsv1.1 + auth_http.ssl_options.sni = hostname.dev", + [], + [{rabbitmq_auth_backend_http, + [{ssl_options, + [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {versions,['tlsv1.2','tlsv1.1']}, + {server_name_indication, "hostname.dev"} + ]} + ]}], + []}, + {ssl_options_hostname_verification_wildcard, + "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem + auth_http.ssl_options.certfile = 
test/config_schema_SUITE_data/certs/invalid_cert.pem + auth_http.ssl_options.keyfile = test/config_schema_SUITE_data/certs/invalid_key.pem + auth_http.ssl_options.versions.tls1_2 = tlsv1.2 + auth_http.ssl_options.versions.tls1_1 = tlsv1.1 + auth_http.ssl_options.hostname_verification = wildcard", + [], + [{rabbitmq_auth_backend_http, + [ + {ssl_hostname_verification, wildcard}, + {ssl_options, + [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, + {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, + {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, + {versions,['tlsv1.2','tlsv1.1']} + ]} + ]}], + []} +]. diff --git a/deps/rabbitmq_auth_backend_http/test/unit_SUITE.erl b/deps/rabbitmq_auth_backend_http/test/unit_SUITE.erl index 733a0a72ec71..b4f68d2bfc02 100644 --- a/deps/rabbitmq_auth_backend_http/test/unit_SUITE.erl +++ b/deps/rabbitmq_auth_backend_http/test/unit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_SUITE). diff --git a/deps/rabbitmq_auth_backend_ldap/.gitignore b/deps/rabbitmq_auth_backend_ldap/.gitignore index c5df8bf79dd1..b539bb1e81bd 100644 --- a/deps/rabbitmq_auth_backend_ldap/.gitignore +++ b/deps/rabbitmq_auth_backend_ldap/.gitignore @@ -1,20 +1,3 @@ -.sw? -.*.sw? -*.beam .vagrant -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock test/config_schema_SUITE_data/schema/ - -rabbitmq_auth_backend_ldap.d diff --git a/deps/rabbitmq_auth_backend_ldap/README.md b/deps/rabbitmq_auth_backend_ldap/README.md index 91c6c6d1fbee..671a4d79a1bc 100644 --- a/deps/rabbitmq_auth_backend_ldap/README.md +++ b/deps/rabbitmq_auth_backend_ldap/README.md @@ -32,6 +32,6 @@ will build the plugin and put build artifacts under the `./plugins` directory. ## Copyright and License -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Released under the MPL, the same license as RabbitMQ. diff --git a/deps/rabbitmq_auth_backend_ldap/example/global.ldif b/deps/rabbitmq_auth_backend_ldap/example/global.ldif index 373d9d9951b4..46f24ea67a49 100644 --- a/deps/rabbitmq_auth_backend_ldap/example/global.ldif +++ b/deps/rabbitmq_auth_backend_ldap/example/global.ldif @@ -2,13 +2,17 @@ dn: cn=module,cn=config objectclass: olcModuleList cn: module -olcModuleLoad: back_bdb.la +olcModuleLoad: back_mdb.la # Create directory database -dn: olcDatabase=bdb,cn=config +dn: olcBackend=mdb,cn=config +objectClass: olcBackendConfig +olcBackend: mdb + +dn: olcDatabase=mdb,cn=config objectClass: olcDatabaseConfig -objectClass: olcBdbConfig -olcDatabase: bdb +objectClass: olcMdbConfig +olcDatabase: mdb # Domain name (e.g. 
rabbitmq.com) olcSuffix: dc=rabbitmq,dc=com # Location on system where database is stored diff --git a/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif b/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif index 6301e937a4e5..3129540a975f 100644 --- a/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif +++ b/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif @@ -4,7 +4,7 @@ objectClass: olcModuleList olcModuleLoad: memberof olcModulePath: /usr/lib/ldap -dn: olcOverlay={0}memberof,olcDatabase={1}bdb,cn=config +dn: olcOverlay={0}memberof,olcDatabase={1}mdb,cn=config objectClass: olcConfig objectClass: olcMemberOf objectClass: olcOverlayConfig diff --git a/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif b/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif index 0955a1a5fc8f..fb6db88be947 100644 --- a/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif +++ b/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif @@ -1,4 +1,4 @@ -dn: olcOverlay={1}refint,olcDatabase={1}bdb,cn=config +dn: olcOverlay={1}refint,olcDatabase={1}mdb,cn=config objectClass: olcConfig objectClass: olcOverlayConfig objectClass: olcRefintConfig diff --git a/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema b/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema index 669e27912552..daf58bb49440 100644 --- a/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema +++ b/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema @@ -120,7 +120,7 @@ end}. [{datatype, [string]}]}. {mapping, "auth_ldap.dn_lookup_bind.password", "rabbitmq_auth_backend_ldap.dn_lookup_bind", - [{datatype, [string]}]}. + [{datatype, [tagged_binary, binary]}]}. %% - as_user (to bind as the authenticated user - requires a password) %% - anon (to bind anonymously) @@ -161,7 +161,7 @@ end}. [{datatype, string}]}. {mapping, "auth_ldap.other_bind.password", "rabbitmq_auth_backend_ldap.other_bind", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {translation, "rabbitmq_auth_backend_ldap.other_bind", fun(Conf) -> diff --git a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl index 5729ff6946b2..f84a19a683ea 100644 --- a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl +++ b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_ldap). @@ -17,7 +17,8 @@ -export([user_login_authentication/2, user_login_authorization/2, check_vhost_access/3, check_resource_access/4, check_topic_access/4, - state_can_expire/0, format_multi_attr/1, format_multi_attr/2]). + expiry_timestamp/1, + format_multi_attr/1, format_multi_attr/2]). -export([get_connections/0]). @@ -84,6 +85,7 @@ user_login_authentication(Username, _AuthProps) -> %% Credentials (i.e. 
password) maybe directly in the password attribute in AuthProps %% or as a Function with the attribute rabbit_auth_backend_ldap if the user was already authenticated with http backend %% or as a Function with the attribute rabbit_auth_backend_cache if the user was already authenticated via cache backend +-spec extractPassword(list()) -> rabbit_types:option(binary()). extractPassword(AuthProps) -> case proplists:get_value(password, AuthProps, none) of none -> @@ -167,7 +169,7 @@ check_topic_access(User = #auth_user{username = Username, log_result(R0), log_result(R1)]), R1. -state_can_expire() -> false. +expiry_timestamp(_) -> never. %%-------------------------------------------------------------------- diff --git a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_app.erl b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_app.erl index fd375af02768..c381f01abcf8 100644 --- a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_app.erl +++ b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_ldap_app). diff --git a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_util.erl b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_util.erl index fc238af0f8c2..23a92543a486 100644 --- a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_util.erl +++ b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_ldap_util). diff --git a/deps/rabbitmq_auth_backend_ldap/src/rabbit_log_ldap.erl b/deps/rabbitmq_auth_backend_ldap/src/rabbit_log_ldap.erl index 632989ce8e0a..1caa39af46aa 100644 --- a/deps/rabbitmq_auth_backend_ldap/src/rabbit_log_ldap.erl +++ b/deps/rabbitmq_auth_backend_ldap/src/rabbit_log_ldap.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc Compatibility module for the old Lager-based logging API. diff --git a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE.erl b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE.erl index e4f34abb4758..d3343f673773 100644 --- a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets index c07e8aa37844..daa7e955cc0a 100644 --- a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets +++ b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets @@ -119,7 +119,7 @@ {db_lookup_bind, "auth_ldap.dn_lookup_bind.user_dn = username auth_ldap.dn_lookup_bind.password = password", - [{rabbitmq_auth_backend_ldap,[{dn_lookup_bind,{"username","password"}}]}], + [{rabbitmq_auth_backend_ldap,[{dn_lookup_bind,{"username",<<"password">>}}]}], [rabbitmq_auth_backend_ldap]}, {db_lookup_bind_anon, @@ -147,7 +147,7 @@ {other_bind_pass, "auth_ldap.other_bind.user_dn = username auth_ldap.other_bind.password = password", - [{rabbitmq_auth_backend_ldap,[{other_bind,{"username","password"}}]}], + [{rabbitmq_auth_backend_ldap,[{other_bind,{"username",<<"password">>}}]}], [rabbitmq_auth_backend_ldap]}, {ssl_options, diff --git a/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl b/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl index 0b5e12365218..7a63cb78b45d 100644 --- a/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl +++ b/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl @@ -2,12 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ldap_seed). --include_lib("eldap/include/eldap.hrl"). +-include_lib("stdlib/include/assert.hrl"). -export([seed/1,delete/1]). @@ -32,17 +32,22 @@ rabbitmq_com() -> delete(Logon) -> H = connect(Logon), - eldap:delete(H, "ou=test,dc=rabbitmq,dc=com"), - eldap:delete(H, "ou=test,ou=vhosts,dc=rabbitmq,dc=com"), - eldap:delete(H, "ou=vhosts,dc=rabbitmq,dc=com"), - [ eldap:delete(H, P) || {P, _} <- groups() ], - [ eldap:delete(H, P) || {P, _} <- people() ], - eldap:delete(H, "ou=groups,dc=rabbitmq,dc=com"), - eldap:delete(H, "ou=people,dc=rabbitmq,dc=com"), - eldap:delete(H, "dc=rabbitmq,dc=com"), - eldap:close(H), + assert_benign(eldap:delete(H, "ou=test,dc=rabbitmq,dc=com")), + assert_benign(eldap:delete(H, "ou=test,ou=vhosts,dc=rabbitmq,dc=com")), + assert_benign(eldap:delete(H, "ou=vhosts,dc=rabbitmq,dc=com")), + [ assert_benign(eldap:delete(H, P)) || {P, _} <- groups() ], + [ assert_benign(eldap:delete(H, P)) || {P, _} <- people() ], + assert_benign(eldap:delete(H, "ou=groups,dc=rabbitmq,dc=com")), + assert_benign(eldap:delete(H, "ou=people,dc=rabbitmq,dc=com")), + assert_benign(eldap:delete(H, "dc=rabbitmq,dc=com")), + ok = eldap:close(H), ok. +assert_benign({error,noSuchObject}) -> + ok; +assert_benign(Other) -> + ?assertEqual(ok, Other). 
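The assert_benign/1 helper above makes repeated teardown of the LDAP seed data idempotent: a delete that fails only because the entry is already gone counts as success. A minimal sketch of the same pattern, with an invented DN and an eldap handle H (illustrative only, not part of the suite):

    %% Illustrative sketch of idempotent LDAP cleanup in the spirit of assert_benign/1.
    delete_if_present(H, DN) ->
        case eldap:delete(H, DN) of
            ok                    -> ok;
            {error, noSuchObject} -> ok;  % entry already absent: benign on re-runs
            Other                 -> error({ldap_delete_failed, DN, Other})
        end.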
+ people() -> [ bob(), dominic(), @@ -152,10 +157,7 @@ peter() -> "organizationalPerson", "person"]}, {"loginShell", ["/bin/bash"]}, - {"userPassword", ["password"]}, - {"memberOf", ["cn=wheel,ou=groups,dc=rabbitmq,dc=com", - "cn=staff,ou=groups,dc=rabbitmq,dc=com", - "cn=people,ou=groups,dc=rabbitmq,dc=com"]}]}. + {"userPassword", ["password"]}]}. carol() -> {"uid=carol,ou=people,dc=rabbitmq,dc=com", @@ -189,7 +191,11 @@ add(H, {A, B}) -> ok = eldap:add(H, A, B). connect({Host, Port}) -> - {ok, H} = eldap:open([Host], [{port, Port}]), + LogOpts = [], + %% This can be swapped with the line above to add verbose logging of the + %% LDAP operations used for seeding. + %% LogOpts = [{log, fun(_Level, FormatString, FormatArgs) -> ct:pal(FormatString, FormatArgs) end}], + {ok, H} = eldap:open([Host], [{port, Port} | LogOpts]), ok = eldap:simple_bind(H, "cn=admin,dc=rabbitmq,dc=com", "admin"), H. diff --git a/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl b/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl index 1935edef28fd..4c335e7dcf2b 100644 --- a/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl +++ b/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(system_SUITE). @@ -142,9 +142,7 @@ init_per_group(Group, Config) -> base_conf_ldap(LdapPort, idle_timeout(Group), pool_size(Group))), - Logon = {"localhost", LdapPort}, - rabbit_ldap_seed:delete(Logon), - rabbit_ldap_seed:seed(Logon), + rabbit_ldap_seed:seed({"localhost", LdapPort}), Config4 = rabbit_ct_helpers:set_config(Config3, {ldap_port, LdapPort}), rabbit_ct_helpers:run_steps(Config4, diff --git a/deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl b/deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl index a17ccdb16aed..bfa583c5ee51 100644 --- a/deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl +++ b/deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_SUITE). diff --git a/deps/rabbitmq_auth_backend_oauth2/.gitignore b/deps/rabbitmq_auth_backend_oauth2/.gitignore deleted file mode 100644 index 895b779b8d1c..000000000000 --- a/deps/rabbitmq_auth_backend_oauth2/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? 
-*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -/rabbitmq_auth_backend_oauth2.d diff --git a/deps/rabbitmq_auth_backend_oauth2/BUILD.bazel b/deps/rabbitmq_auth_backend_oauth2/BUILD.bazel index 7b40b13517c6..f6596bdf44a5 100644 --- a/deps/rabbitmq_auth_backend_oauth2/BUILD.bazel +++ b/deps/rabbitmq_auth_backend_oauth2/BUILD.bazel @@ -46,6 +46,7 @@ rabbitmq_app( license_files = [":license_files"], priv = [":priv"], deps = [ + "//deps/oauth2_client:erlang_app", "//deps/rabbit:erlang_app", "@base64url//:erlang_app", "@cowlib//:erlang_app", @@ -65,7 +66,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) @@ -82,15 +83,17 @@ eunit( compiled_suites = [ ":test_jwks_http_app_beam", ":test_jwks_http_handler_beam", + ":test_openid_http_handler_beam", ":test_jwks_http_sup_beam", ":test_rabbit_auth_backend_oauth2_test_util_beam", + ":test_oauth2_http_mock_beam", ], target = ":test_erlang_app", ) broker_for_integration_suites( extra_plugins = [ - "//deps/rabbitmq_mqtt:erlang_app", + "//deps/rabbitmq_web_mqtt:erlang_app", ], ) @@ -99,16 +102,32 @@ rabbitmq_integration_suite( size = "small", ) +rabbitmq_integration_suite( + name = "add_signing_key_command_SUITE", + size = "small", +) + rabbitmq_integration_suite( name = "config_schema_SUITE", ) +rabbitmq_integration_suite( + name = "rabbit_oauth2_config_SUITE", + additional_beam = [ + "test/oauth2_http_mock.beam", + ], + runtime_deps = [ + "@cowboy//:erlang_app", + ], +) + rabbitmq_integration_suite( name = "jwks_SUITE", additional_beam = [ "test/rabbit_auth_backend_oauth2_test_util.beam", "test/jwks_http_app.beam", "test/jwks_http_handler.beam", + "test/openid_http_handler.beam", "test/jwks_http_sup.beam", ], deps = [ @@ -131,6 +150,8 @@ rabbitmq_integration_suite( "test/rabbit_auth_backend_oauth2_test_util.beam", ], runtime_deps = [ + "//deps/oauth2_client:erlang_app", + "//deps/rabbitmq_amqp_client:erlang_app", "@emqtt//:erlang_app", ], ) diff --git a/deps/rabbitmq_auth_backend_oauth2/Makefile b/deps/rabbitmq_auth_backend_oauth2/Makefile index 5bc2cc1a1681..1066e7be8271 100644 --- a/deps/rabbitmq_auth_backend_oauth2/Makefile +++ b/deps/rabbitmq_auth_backend_oauth2/Makefile @@ -7,16 +7,17 @@ export BUILD_WITHOUT_QUIC LOCAL_DEPS = inets public_key BUILD_DEPS = rabbit_common -DEPS = rabbit cowlib jose base64url -TEST_DEPS = cowboy rabbitmq_web_dispatch rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client rabbitmq_mqtt emqtt +DEPS = rabbit cowlib jose base64url oauth2_client +TEST_DEPS = cowboy rabbitmq_web_dispatch rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client rabbitmq_web_mqtt emqtt rabbitmq_amqp_client + +PLT_APPS += rabbitmqctl DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk -dep_jose = git https://github.com/michaelklishin/erlang-jose mk-thoas-support dep_base64url = hex 1.0.1 -dep_emqtt = git https://github.com/rabbitmq/emqtt.git master +dep_emqtt = git https://github.com/emqx/emqtt.git 1.11.0 include ../../rabbitmq-components.mk include ../../erlang.mk diff --git a/deps/rabbitmq_auth_backend_oauth2/README.md b/deps/rabbitmq_auth_backend_oauth2/README.md index 7ed7f753e34d..1d72c5af3e0b 100644 --- a/deps/rabbitmq_auth_backend_oauth2/README.md +++ b/deps/rabbitmq_auth_backend_oauth2/README.md 
@@ -550,6 +550,6 @@ field will be ignored as long as the token provides a client ID. ## License and Copyright -(c) 2016-2023 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Released under the Mozilla Public License 2.0, same as RabbitMQ. diff --git a/deps/rabbitmq_auth_backend_oauth2/app.bzl b/deps/rabbitmq_auth_backend_oauth2/app.bzl index fab38c07326a..ccf72932cfaa 100644 --- a/deps/rabbitmq_auth_backend_oauth2/app.bzl +++ b/deps/rabbitmq_auth_backend_oauth2/app.bzl @@ -9,9 +9,11 @@ def all_beam_files(name = "all_beam_files"): erlang_bytecode( name = "other_beam", srcs = [ + "src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl", "src/rabbit_auth_backend_oauth2.erl", "src/rabbit_auth_backend_oauth2_app.erl", + "src/rabbit_oauth2_config.erl", "src/rabbit_oauth2_scope.erl", "src/uaa_jwks.erl", "src/uaa_jwt.erl", @@ -24,6 +26,7 @@ def all_beam_files(name = "all_beam_files"): dest = "ebin", erlc_opts = "//:erlc_opts", deps = [ + "//deps/oauth2_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_cli:erlang_app", "@jose//:erlang_app", @@ -40,9 +43,11 @@ def all_test_beam_files(name = "all_test_beam_files"): name = "test_other_beam", testonly = True, srcs = [ + "src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl", "src/rabbit_auth_backend_oauth2.erl", "src/rabbit_auth_backend_oauth2_app.erl", + "src/rabbit_oauth2_config.erl", "src/rabbit_oauth2_scope.erl", "src/uaa_jwks.erl", "src/uaa_jwt.erl", @@ -55,6 +60,7 @@ def all_test_beam_files(name = "all_test_beam_files"): dest = "test", erlc_opts = "//:test_erlc_opts", deps = [ + "//deps/oauth2_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_cli:erlang_app", "@jose//:erlang_app", @@ -82,9 +88,11 @@ def all_srcs(name = "all_srcs"): filegroup( name = "srcs", srcs = [ + "src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl", "src/rabbit_auth_backend_oauth2.erl", "src/rabbit_auth_backend_oauth2_app.erl", + "src/rabbit_oauth2_config.erl", "src/rabbit_oauth2_scope.erl", "src/uaa_jwks.erl", "src/uaa_jwt.erl", @@ -105,6 +113,14 @@ def all_srcs(name = "all_srcs"): ) def test_suite_beam_files(name = "test_suite_beam_files"): + erlang_bytecode( + name = "test_oauth2_http_mock_beam", + testonly = True, + srcs = ["test/oauth2_http_mock.erl"], + outs = ["test/oauth2_http_mock.beam"], + app_name = "rabbitmq_auth_backend_oauth2", + erlc_opts = "//:test_erlc_opts", + ) erlang_bytecode( name = "add_uaa_key_command_SUITE_beam_files", testonly = True, @@ -112,7 +128,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/add_uaa_key_command_SUITE.beam"], app_name = "rabbitmq_auth_backend_oauth2", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app"], ) erlang_bytecode( name = "config_schema_SUITE_beam_files", @@ -166,6 +181,15 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["@cowboy//:erlang_app"], ) + erlang_bytecode( + name = "test_openid_http_handler_beam", + testonly = True, + srcs = ["test/openid_http_handler.erl"], + outs = ["test/openid_http_handler.beam"], + app_name = "rabbitmq_auth_backend_oauth2", + erlc_opts = "//:test_erlc_opts", + deps = ["@cowboy//:erlang_app"], + ) erlang_bytecode( name = 
"test_jwks_http_sup_beam", testonly = True, @@ -199,3 +223,20 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbitmq_auth_backend_oauth2", erlc_opts = "//:test_erlc_opts", ) + erlang_bytecode( + name = "rabbit_oauth2_config_SUITE_beam_files", + testonly = True, + srcs = ["test/rabbit_oauth2_config_SUITE.erl"], + outs = ["test/rabbit_oauth2_config_SUITE.beam"], + app_name = "rabbitmq_auth_backend_oauth2", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/oauth2_client:erlang_app"], + ) + erlang_bytecode( + name = "add_signing_key_command_SUITE_beam_files", + testonly = True, + srcs = ["test/add_signing_key_command_SUITE.erl"], + outs = ["test/add_signing_key_command_SUITE.beam"], + app_name = "rabbitmq_auth_backend_oauth2", + erlc_opts = "//:test_erlc_opts", + ) diff --git a/deps/rabbitmq_auth_backend_oauth2/priv/schema/rabbitmq_auth_backend_oauth2.schema b/deps/rabbitmq_auth_backend_oauth2/priv/schema/rabbitmq_auth_backend_oauth2.schema index 8ee313ba5ece..c53c5d162b80 100644 --- a/deps/rabbitmq_auth_backend_oauth2/priv/schema/rabbitmq_auth_backend_oauth2.schema +++ b/deps/rabbitmq_auth_backend_oauth2/priv/schema/rabbitmq_auth_backend_oauth2.schema @@ -73,6 +73,7 @@ list_to_binary(cuttlefish:conf_get("auth_oauth2.additional_scopes_key", Conf)) end}. + %% Configure the plugin to skip validation of the aud field %% %% {verify_aud, true}, @@ -98,9 +99,11 @@ "rabbitmq_auth_backend_oauth2.preferred_username_claims", fun(Conf) -> Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.preferred_username_claims", Conf), - [list_to_binary(V) || {_, V} <- Settings] + [list_to_binary(V) || {_, V} <- lists:reverse(Settings)] end}. + + %% ID of the default signing key %% %% {default_key, <<"key-1">>}, @@ -145,16 +148,42 @@ maps:from_list(SigningKeys) end}. +{mapping, + "auth_oauth2.issuer", + "rabbitmq_auth_backend_oauth2.issuer", + [{datatype, string}, {validators, ["uri", "https_uri"]}]}. + +{mapping, + "auth_oauth2.token_endpoint", + "rabbitmq_auth_backend_oauth2.token_endpoint", + [{datatype, string}, {validators, ["uri", "https_uri"]}]}. + {mapping, "auth_oauth2.jwks_url", "rabbitmq_auth_backend_oauth2.key_config.jwks_url", [{datatype, string}, {validators, ["uri", "https_uri"]}]}. +{mapping, + "auth_oauth2.end_session_endpoint", + "rabbitmq_auth_backend_oauth2.end_session_endpoint", + [{datatype, string}, {validators, ["uri", "https_uri"]}]}. + +{mapping, + "auth_oauth2.authorization_endpoint", + "rabbitmq_auth_backend_oauth2.authorization_endpoint", + [{datatype, string}, {validators, ["uri", "https_uri"]}]}. + {mapping, "auth_oauth2.https.peer_verification", "rabbitmq_auth_backend_oauth2.key_config.peer_verification", [{datatype, {enum, [verify_peer, verify_none]}}]}. +% Alias configuration variable. `auth_oauth2.https.peer_verification` will be soon deprecated +{mapping, + "auth_oauth2.https.verify", + "rabbitmq_auth_backend_oauth2.key_config.verify", + [{datatype, {enum, [verify_peer, verify_none]}}]}. + {mapping, "auth_oauth2.https.cacertfile", "rabbitmq_auth_backend_oauth2.key_config.cacertfile", @@ -193,3 +222,159 @@ Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.algorithms", Conf), [list_to_binary(V) || {_, V} <- Settings] end}. + + +%% This setting is only required when there are +1 auth_oauth2.oauth_providers +%% If this setting is omitted, its default to the first oauth_provider + +{mapping, + "auth_oauth2.default_oauth_provider", + "rabbitmq_auth_backend_oauth2.default_oauth_provider", + [{datatype, string}]}. 
+ +{mapping, + "auth_oauth2.oauth_providers.$name.issuer", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, string}, {validators, ["uri", "https_uri"]}] +}. + +{mapping, + "auth_oauth2.oauth_providers.$name.token_endpoint", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, string}, {validators, ["uri", "https_uri"]}] +}. + +{mapping, + "auth_oauth2.oauth_providers.$name.jwks_uri", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, string}, {validators, ["uri", "https_uri"]}] +}. + +{mapping, + "auth_oauth2.oauth_providers.$name.end_session_endpoint", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, string}, {validators, ["uri", "https_uri"]}]}. + +{mapping, + "auth_oauth2.oauth_providers.$name.authorization_endpoint", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, string}, {validators, ["uri", "https_uri"]}]}. + +{mapping, + "auth_oauth2.oauth_providers.$name.https.verify", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, {enum, [verify_peer, verify_none]}}]}. + +{mapping, + "auth_oauth2.oauth_providers.$name.https.cacertfile", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, file}, {validators, ["file_accessible"]}]}. + +{mapping, + "auth_oauth2.oauth_providers.$name.https.depth", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, integer}]}. + +{mapping, + "auth_oauth2.oauth_providers.$name.https.hostname_verification", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, {enum, [wildcard, none]}}]}. + +{mapping, + "auth_oauth2.oauth_providers.$name.https.crl_check", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, {enum, [true, false, peer, best_effort]}}]}. + +{translation, "rabbitmq_auth_backend_oauth2.oauth_providers", + fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.oauth_providers", Conf), + AuthBackends = [{Name, {list_to_atom(Key), list_to_binary(V)}} || {["auth_oauth2","oauth_providers", Name, Key], V} <- Settings ], + Https = [{Name, {https, {list_to_atom(Key), V}}} || {["auth_oauth2","oauth_providers", Name, "https", Key], V} <- Settings ], + + %% Aggregate all options for one provider + KeyFun = fun({Name, _}) -> list_to_binary(Name) end, + ValueFun = fun({_, V}) -> V end, + ProviderNameToListOfSettings = maps:groups_from_list(KeyFun, ValueFun, AuthBackends), + ProviderNameToListOfHttpsSettings = maps:groups_from_list(KeyFun, fun({_, {https, V}}) -> V end, Https), + ProviderNameToListWithHttps = maps:map(fun(K1,L1) -> [{https, L1}] end, ProviderNameToListOfHttpsSettings), + NewGroup = maps:merge_with(fun(K, V1, V2) -> V1 ++ V2 end, ProviderNameToListOfSettings, ProviderNameToListWithHttps), + + ListOrSingleFun = fun(K, List) -> + case K of + ssl_options -> proplists:get_all_values(K, List); + _ -> + case proplists:lookup_all(K, List) of + [One] -> proplists:get_value(K, List); + [One|_] = V -> V + end + end + end, + GroupKeyConfigFun = fun(K, List) -> + ListKeys = proplists:get_keys(List), + [{K, ListOrSingleFun(K, List)} || K <- ListKeys] + end, + maps:map(GroupKeyConfigFun, NewGroup) + + end}. + +{mapping, + "auth_oauth2.resource_servers.$name.id", + "rabbitmq_auth_backend_oauth2.resource_servers", + [{datatype, string}] +}. + +{mapping, + "auth_oauth2.resource_servers.$name.scope_prefix", + "rabbitmq_auth_backend_oauth2.resource_servers", + [{datatype, string}] +}. 
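The oauth_providers translation above groups all auth_oauth2.oauth_providers.$name.* keys into a map keyed by provider name, nesting the https.* keys under an https entry. A rough sketch of input and expected output with invented values (proplist ordering is not guaranteed by the translation):

    %% rabbitmq.conf (invented values):
    %%   auth_oauth2.oauth_providers.keycloak.issuer       = https://keycloak.example.local/realms/test
    %%   auth_oauth2.oauth_providers.keycloak.https.verify = verify_peer
    %%
    %% roughly the resulting rabbitmq_auth_backend_oauth2.oauth_providers value:
    #{<<"keycloak">> =>
          [{issuer, <<"https://keycloak.example.local/realms/test">>},
           {https, [{verify, verify_peer}]}]}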
+ +{mapping, + "auth_oauth2.resource_servers.$name.additional_scopes_key", + "rabbitmq_auth_backend_oauth2.resource_servers", + [{datatype, string}] +}. + +{mapping, + "auth_oauth2.resource_servers.$name.resource_server_type", + "rabbitmq_auth_backend_oauth2.resource_servers", + [{datatype, string}] +}. + +{mapping, + "auth_oauth2.resource_servers.$name.oauth_provider_id", + "rabbitmq_auth_backend_oauth2.resource_servers", + [{datatype, string}] +}. + +{translation, "rabbitmq_auth_backend_oauth2.resource_servers", + fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.resource_servers", Conf), + AuthBackends = [{Name, {list_to_atom(Key), list_to_binary(V)}} || {["auth_oauth2","resource_servers", Name, Key], V} <- Settings], + KeyFun = fun({Name,_}) -> list_to_binary(Name) end, + ValueFun = fun({_,V}) -> V end, + NewGroup = maps:groups_from_list(KeyFun, ValueFun, AuthBackends), + ListOrSingleFun = fun(K, List) -> + case K of + key_config -> proplists:get_all_values(K, List); + _ -> + case proplists:lookup_all(K, List) of + [One] -> proplists:get_value(K, List); + [One|_] = V -> V + end + end + end, + GroupKeyConfigFun = fun(K, List) -> + ListKeys = proplists:get_keys(List), + [ {K,ListOrSingleFun(K,List)} || K <- ListKeys ] + end, + NewGroupTwo = maps:map(GroupKeyConfigFun, NewGroup), + IndexByIdOrElseNameFun = fun(K, V, NewMap) -> + case proplists:get_value(id, V) of + undefined -> maps:put(K, V, NewMap); + ID -> maps:put(ID, V, NewMap) + end + end, + maps:fold(IndexByIdOrElseNameFun,#{}, NewGroupTwo) + + end}. diff --git a/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand.erl b/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand.erl new file mode 100644 index 000000000000..1114d93b07b9 --- /dev/null +++ b/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand.erl @@ -0,0 +1,137 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module('Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand'). + +-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour'). + +-export([ + usage/0, + validate/2, + merge_defaults/2, + banner/2, + run/2, + switches/0, + aliases/0, + output/2, + description/0, + formatter/0 + ]). + + +usage() -> + <<"add_signing_key [--json=] [--pem=] [--pem-file=]">>. + +description() -> <<"Add signing key required to validate JWT's digital signatures">>. + +switches() -> + [{json, string}, + {pem, string}, + {pem_file, string}]. + +aliases() -> []. 
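Based on the usage string above, the new command is normally invoked as `rabbitmqctl add_signing_key <name> --json=... | --pem=... | --pem-file=...`. A sketch of exercising the module directly from an Erlang shell on the node, with an invented node name, key name and file path:

    %% Shell sketch; all values are examples, not defaults.
    Cmd  = 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand',
    Opts = #{node => 'rabbit@localhost',
             pem_file => <<"/etc/rabbitmq/signing_key.pem">>},
    ok = Cmd:validate([<<"token-key">>], Opts),
    ok = Cmd:run([<<"token-key">>], Opts).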
+ +validate([], _Options) -> {validation_failure, not_enough_args}; +validate([_,_|_], _Options) -> {validation_failure, too_many_args}; +validate([_], Options) -> + Json = maps:get(json, Options, undefined), + Pem = maps:get(pem, Options, undefined), + PemFile = maps:get(pem_file, Options, undefined), + case {is_binary(Json), is_binary(Pem), is_binary(PemFile)} of + {false, false, false} -> + {validation_failure, + {bad_argument, <<"No key specified">>}}; + {true, false, false} -> + validate_json(Json); + {false, true, false} -> + validate_pem(Pem); + {false, false, true} -> + validate_pem_file(PemFile); + {_, _, _} -> + {validation_failure, + {bad_argument, <<"There can be only one key type">>}} + end. + +validate_json(Json) -> + case rabbit_json:try_decode(Json) of + {ok, _} -> + case uaa_jwt:verify_signing_key(json, Json) of + ok -> ok; + {error, {fields_missing_for_kty, Kty}} -> + {validation_failure, + {bad_argument, + <<"Key fields are missing fot kty \"", Kty/binary, "\"">>}}; + {error, unknown_kty} -> + {validation_failure, + {bad_argument, <<"\"kty\" field is invalid">>}}; + {error, no_kty} -> + {validation_failure, + {bad_argument, <<"Json key should contain \"kty\" field">>}}; + {error, Err} -> + {validation_failure, {bad_argument, Err}} + end; + {error, _} -> + {validation_failure, {bad_argument, <<"Invalid JSON">>}} + end. + +validate_pem(Pem) -> + case uaa_jwt:verify_signing_key(pem, Pem) of + ok -> ok; + {error, invalid_pem_string} -> + {validation_failure, <<"Unable to read a key from the PEM string">>}; + {error, Err} -> + {validation_failure, Err} + end. + +validate_pem_file(PemFile) -> + case uaa_jwt:verify_signing_key(pem_file, PemFile) of + ok -> ok; + {error, enoent} -> + {validation_failure, {bad_argument, <<"PEM file not found">>}}; + {error, invalid_pem_file} -> + {validation_failure, <<"Unable to read a key from the PEM file">>}; + {error, Err} -> + {validation_failure, Err} + end. + +merge_defaults(Args, #{pem_file := FileName} = Options) -> + AbsFileName = filename:absname(FileName), + {Args, Options#{pem_file := AbsFileName}}; +merge_defaults(Args, Options) -> {Args, Options}. + +banner([Name], #{json := Json}) -> + <<"Adding OAuth signing key \"", + Name/binary, + "\" in JSON format: \"", + Json/binary, "\"">>; +banner([Name], #{pem := Pem}) -> + <<"Adding OAuth signing key \"", + Name/binary, + "\" public key: \"", + Pem/binary, "\"">>; +banner([Name], #{pem_file := PemFile}) -> + <<"Adding OAuth signing key \"", + Name/binary, + "\" filename: \"", + PemFile/binary, "\"">>. + +run([Name], #{node := Node} = Options) -> + {Type, Value} = case Options of + #{json := Json} -> {json, Json}; + #{pem := Pem} -> {pem, Pem}; + #{pem_file := PemFile} -> {pem_file, PemFile} + end, + case rabbit_misc:rpc_call(Node, + uaa_jwt, add_signing_key, + [Name, Type, Value]) of + {ok, _Keys} -> ok; + {error, Err} -> {error, Err} + end. + +output(E, _Opts) -> + 'Elixir.RabbitMQ.CLI.DefaultOutput':output(E). + +formatter() -> 'Elixir.RabbitMQ.CLI.Formatters.Erlang'. diff --git a/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl b/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl index a0446a6b9f9a..f0ecc5cf04bb 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl @@ -2,12 +2,14 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module('Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand'). -behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour'). +-define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand'). + -export([ usage/0, validate/2, @@ -17,6 +19,7 @@ switches/0, aliases/0, output/2, + description/0, formatter/0 ]). @@ -24,118 +27,20 @@ usage() -> <<"add_uaa_key [--json=] [--pem=] [--pem-file=]">>. -switches() -> - [{json, string}, - {pem, string}, - {pem_file, string}]. - -aliases() -> []. - -validate([], _Options) -> {validation_failure, not_enough_args}; -validate([_,_|_], _Options) -> {validation_failure, too_many_args}; -validate([_], Options) -> - Json = maps:get(json, Options, undefined), - Pem = maps:get(pem, Options, undefined), - PemFile = maps:get(pem_file, Options, undefined), - case {is_binary(Json), is_binary(Pem), is_binary(PemFile)} of - {false, false, false} -> - {validation_failure, - {bad_argument, <<"No key specified">>}}; - {true, false, false} -> - validate_json(Json); - {false, true, false} -> - validate_pem(Pem); - {false, false, true} -> - validate_pem_file(PemFile); - {_, _, _} -> - {validation_failure, - {bad_argument, <<"There can be only one key type">>}} - end. - -validate_json(Json) -> - case rabbit_json:try_decode(Json) of - {ok, _} -> - case uaa_jwt:verify_signing_key(json, Json) of - ok -> ok; - {error, {fields_missing_for_kty, Kty}} -> - {validation_failure, - {bad_argument, - <<"Key fields are missing fot kty \"", Kty/binary, "\"">>}}; - {error, unknown_kty} -> - {validation_failure, - {bad_argument, <<"\"kty\" field is invalid">>}}; - {error, no_kty} -> - {validation_failure, - {bad_argument, <<"Json key should contain \"kty\" field">>}}; - {error, Err} -> - {validation_failure, {bad_argument, Err}} - end; - {error, _} -> - {validation_failure, {bad_argument, <<"Invalid JSON">>}} - end. - -validate_pem(Pem) -> - case uaa_jwt:verify_signing_key(pem, Pem) of - ok -> ok; - {error, invalid_pem_string} -> - {validation_failure, <<"Unable to read a key from the PEM string">>}; - {error, Err} -> - {validation_failure, Err} - end. +description() -> <<"DEPRECATED. Use instead add_signing_key">>. -validate_pem_file(PemFile) -> - case uaa_jwt:verify_signing_key(pem_file, PemFile) of - ok -> ok; - {error, enoent} -> - {validation_failure, {bad_argument, <<"PEM file not found">>}}; - {error, invalid_pem_file} -> - {validation_failure, <<"Unable to read a key from the PEM file">>}; - {error, Err} -> - {validation_failure, Err} - end. - -merge_defaults(Args, #{pem_file := FileName} = Options) -> - AbsFileName = filename:absname(FileName), - {Args, Options#{pem_file := AbsFileName}}; -merge_defaults(Args, Options) -> {Args, Options}. - -banner([Name], #{json := Json}) -> - <<"Adding UAA signing key \"", - Name/binary, - "\" in JSON format: \"", - Json/binary, "\"">>; -banner([Name], #{pem := Pem}) -> - <<"Adding UAA signing key \"", - Name/binary, - "\" public key: \"", - Pem/binary, "\"">>; -banner([Name], #{pem_file := PemFile}) -> - <<"Adding UAA signing key \"", - Name/binary, - "\" filename: \"", - PemFile/binary, "\"">>. 
- -run([Name], #{node := Node} = Options) -> - {Type, Value} = case Options of - #{json := Json} -> {json, Json}; - #{pem := Pem} -> {pem, Pem}; - #{pem_file := PemFile} -> {pem_file, PemFile} - end, - case rabbit_misc:rpc_call(Node, - uaa_jwt, add_signing_key, - [Name, Type, Value]) of - {ok, _Keys} -> ok; - {error, Err} -> {error, Err} - end. - -output(E, _Opts) -> - 'Elixir.RabbitMQ.CLI.DefaultOutput':output(E). - -formatter() -> 'Elixir.RabbitMQ.CLI.Formatters.Erlang'. +switches() -> ?COMMAND:switches(). +aliases() -> []. +validate(Args, Options) -> ?COMMAND:validate(Args, Options). +merge_defaults(Args, Options) -> ?COMMAND:merge_defaults(Args, Options). +banner(Names, Args) -> ?COMMAND:banner(Names, Args). +run(Names, Options) -> ?COMMAND:run(Names, Options). +output(E, Opts) -> ?COMMAND:output(E, Opts). +formatter() -> ?COMMAND:formatter(). diff --git a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2.erl b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2.erl index 8c839e92570c..a43212655b87 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2.erl @@ -2,8 +2,9 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% + -module(rabbit_auth_backend_oauth2). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -14,10 +15,11 @@ -export([description/0]). -export([user_login_authentication/2, user_login_authorization/2, check_vhost_access/3, check_resource_access/4, - check_topic_access/4, check_token/1, state_can_expire/0, update_state/2]). + check_topic_access/4, check_token/1, update_state/2, + expiry_timestamp/1]). % for testing --export([post_process_payload/1, get_expanded_scopes/2]). +-export([post_process_payload/2, get_expanded_scopes/2]). -import(rabbit_data_coercion, [to_map/1]). @@ -29,22 +31,13 @@ %% App environment %% --type app_env() :: [{atom(), any()}]. --define(APP, rabbitmq_auth_backend_oauth2). -define(RESOURCE_SERVER_ID, resource_server_id). --define(SCOPE_PREFIX, scope_prefix). %% a term defined for Rich Authorization Request tokens to identify a RabbitMQ permission --define(RESOURCE_SERVER_TYPE, resource_server_type). %% verify server_server_id aud field is on the aud field --define(VERIFY_AUD, verify_aud). %% a term used by the IdentityServer community --define(COMPLEX_CLAIM_APP_ENV_KEY, extra_scopes_source). %% scope aliases map "role names" to a set of scopes --define(SCOPE_MAPPINGS_APP_ENV_KEY, scope_aliases). -%% list of JWT claims (such as <<"sub">>) used to determine the username --define(PREFERRED_USERNAME_CLAIMS, preferred_username_claims). --define(DEFAULT_PREFERRED_USERNAME_CLAIMS, [<<"sub">>, <<"client_id">>]). 
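The application-environment keys whose -define's are dropped just above are read through the new rabbit_oauth2_config module elsewhere in this diff. A short sketch of the accessors as they are used further down in this file (ResourceServerId being whichever resource server the token targets):

    Claims   = rabbit_oauth2_config:get_preferred_username_claims(),
    Aliases  = rabbit_oauth2_config:get_scope_aliases(ResourceServerId),
    ScopeKey = rabbit_oauth2_config:get_additional_scopes_key(ResourceServerId),
    %% get_additional_scopes_key/1 returns {ok, Key} or {error, not_found}.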
+ %% %% Key JWT fields @@ -81,7 +74,8 @@ check_vhost_access(#auth_user{impl = DecodedTokenFun}, VHost, _AuthzData) -> with_decoded_token(DecodedTokenFun(), fun(_Token) -> - Scopes = get_scopes(DecodedTokenFun()), + DecodedToken = DecodedTokenFun(), + Scopes = get_scopes(DecodedToken), ScopeString = rabbit_oauth2_scope:concat_scopes(Scopes, ","), rabbit_log:debug("Matching virtual host '~ts' against the following scopes: ~ts", [VHost, ScopeString]), rabbit_oauth2_scope:vhost_access(VHost, Scopes) @@ -103,8 +97,6 @@ check_topic_access(#auth_user{impl = DecodedTokenFun}, rabbit_oauth2_scope:topic_access(Resource, Permission, Context, Scopes) end). -state_can_expire() -> true. - update_state(AuthUser, NewToken) -> case check_token(NewToken) of %% avoid logging the token @@ -120,11 +112,20 @@ update_state(AuthUser, NewToken) -> impl = fun() -> DecodedToken end}} end. +expiry_timestamp(#auth_user{impl = DecodedTokenFun}) -> + case DecodedTokenFun() of + #{<<"exp">> := Exp} when is_integer(Exp) -> + Exp; + _ -> + never + end. + %%-------------------------------------------------------------------- authenticate(_, AuthProps0) -> AuthProps = to_map(AuthProps0), Token = token_from_context(AuthProps), + case check_token(Token) of %% avoid logging the token {error, _} = E -> E; @@ -134,9 +135,7 @@ authenticate(_, AuthProps0) -> {refused, "Authentication using an OAuth 2/JWT token failed: ~tp", [Err]}; {ok, DecodedToken} -> Func = fun(Token0) -> - Username = username_from( - application:get_env(?APP, ?PREFERRED_USERNAME_CLAIMS, []), - Token0), + Username = username_from(rabbit_oauth2_config:get_preferred_username_claims(), Token0), Tags = tags_from(Token0), {ok, #auth_user{username = Username, @@ -179,29 +178,28 @@ check_token(DecodedToken) when is_map(DecodedToken) -> {ok, DecodedToken}; check_token(Token) -> - Settings = application:get_all_env(?APP), case uaa_jwt:decode_and_verify(Token) of - {error, Reason} -> {refused, {error, Reason}}; - {true, Payload} -> - validate_payload(post_process_payload(Payload, Settings)); - {false, _} -> {refused, signature_invalid} + {error, Reason} -> + {refused, {error, Reason}}; + {true, TargetResourceServerId, Payload} -> + Payload0 = post_process_payload(TargetResourceServerId, Payload), + validate_payload(TargetResourceServerId, Payload0); + {false, _, _} -> {refused, signature_invalid} end. -post_process_payload(Payload) when is_map(Payload) -> - post_process_payload(Payload, []). 
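state_can_expire/0 is replaced above by expiry_timestamp/1, which reads the integer "exp" claim from the decoded token held in the #auth_user{} impl fun. A sketch of the expected behaviour, with invented token contents (assumes the #auth_user{} record from rabbit_common is in scope):

    Token = #{<<"exp">>   => 1735689600,
              <<"scope">> => [<<"rabbitmq.read:*/*">>]},
    User  = #auth_user{username = <<"alice">>, tags = [], impl = fun() -> Token end},
    1735689600 = rabbit_auth_backend_oauth2:expiry_timestamp(User),
    %% and `never` when the token carries no integer "exp" claim:
    never = rabbit_auth_backend_oauth2:expiry_timestamp(
              User#auth_user{impl = fun() -> maps:remove(<<"exp">>, Token) end}).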
- -post_process_payload(Payload, AppEnv) when is_map(Payload) -> +post_process_payload(ResourceServerId, Payload) when is_map(Payload) -> Payload0 = maps:map(fun(K, V) -> - case K of - ?AUD_JWT_FIELD when is_binary(V) -> binary:split(V, <<" ">>, [global, trim_all]); - ?SCOPE_JWT_FIELD when is_binary(V) -> binary:split(V, <<" ">>, [global, trim_all]); - _ -> V - end - end, - Payload + case K of + ?AUD_JWT_FIELD when is_binary(V) -> binary:split(V, <<" ">>, [global, trim_all]); + ?SCOPE_JWT_FIELD when is_binary(V) -> binary:split(V, <<" ">>, [global, trim_all]); + _ -> V + end + end, + Payload ), - Payload1 = case does_include_complex_claim_field(Payload0) of - true -> post_process_payload_with_complex_claim(Payload0); + + Payload1 = case does_include_complex_claim_field(ResourceServerId, Payload0) of + true -> post_process_payload_with_complex_claim(ResourceServerId, Payload0); false -> Payload0 end, @@ -210,65 +208,56 @@ post_process_payload(Payload, AppEnv) when is_map(Payload) -> false -> Payload1 end, - Payload3 = case has_configured_scope_aliases(AppEnv) of - true -> post_process_payload_with_scope_aliases(Payload2, AppEnv); + Payload3 = case rabbit_oauth2_config:has_scope_aliases(ResourceServerId) of + true -> post_process_payload_with_scope_aliases(ResourceServerId, Payload2); false -> Payload2 end, Payload4 = case maps:is_key(<<"authorization_details">>, Payload3) of - true -> post_process_payload_in_rich_auth_request_format(Payload3); + true -> post_process_payload_in_rich_auth_request_format(ResourceServerId, Payload3); false -> Payload3 end, Payload4. --spec has_configured_scope_aliases(AppEnv :: app_env()) -> boolean(). -has_configured_scope_aliases(AppEnv) -> - Map = maps:from_list(AppEnv), - maps:is_key(?SCOPE_MAPPINGS_APP_ENV_KEY, Map). - --spec post_process_payload_with_scope_aliases(Payload :: map(), AppEnv :: app_env()) -> map(). +-spec post_process_payload_with_scope_aliases(ResourceServerId :: binary(), Payload :: map()) -> map(). %% This is for those hopeless environments where the token structure is so out of %% messaging team's control that even the extra scopes field is no longer an option. %% %% This assumes that scopes can be random values that do not follow the RabbitMQ %% convention, or any other convention, in any way. They are just random client role IDs. %% See rabbitmq/rabbitmq-server#4588 for details. -post_process_payload_with_scope_aliases(Payload, AppEnv) -> +post_process_payload_with_scope_aliases(ResourceServerId, Payload) -> %% try JWT scope field value for alias - Payload1 = post_process_payload_with_scope_alias_in_scope_field(Payload, AppEnv), + Payload1 = post_process_payload_with_scope_alias_in_scope_field(ResourceServerId, Payload), %% try the configurable 'extra_scopes_source' field value for alias - Payload2 = post_process_payload_with_scope_alias_in_extra_scopes_source(Payload1, AppEnv), - Payload2. + post_process_payload_with_scope_alias_in_extra_scopes_source(ResourceServerId, Payload1). + --spec post_process_payload_with_scope_alias_in_scope_field(Payload :: map(), - AppEnv :: app_env()) -> map(). +-spec post_process_payload_with_scope_alias_in_scope_field(ResourceServerId :: binary(), Payload :: map()) -> map(). 
%% First attempt: use the value in the 'scope' field for alias -post_process_payload_with_scope_alias_in_scope_field(Payload, AppEnv) -> - ScopeMappings = proplists:get_value(?SCOPE_MAPPINGS_APP_ENV_KEY, AppEnv, #{}), +post_process_payload_with_scope_alias_in_scope_field(ResourceServerId, Payload) -> + ScopeMappings = rabbit_oauth2_config:get_scope_aliases(ResourceServerId), post_process_payload_with_scope_alias_field_named(Payload, ?SCOPE_JWT_FIELD, ScopeMappings). --spec post_process_payload_with_scope_alias_in_extra_scopes_source(Payload :: map(), - AppEnv :: app_env()) -> map(). +-spec post_process_payload_with_scope_alias_in_extra_scopes_source(ResourceServerId :: binary(), Payload :: map()) -> map(). %% Second attempt: use the value in the configurable 'extra scopes source' field for alias -post_process_payload_with_scope_alias_in_extra_scopes_source(Payload, AppEnv) -> - ExtraScopesField = proplists:get_value(?COMPLEX_CLAIM_APP_ENV_KEY, AppEnv, undefined), +post_process_payload_with_scope_alias_in_extra_scopes_source(ResourceServerId, Payload) -> + ExtraScopesField = rabbit_oauth2_config:get_additional_scopes_key(ResourceServerId), case ExtraScopesField of %% nothing to inject - undefined -> Payload; - _ -> - ScopeMappings = proplists:get_value(?SCOPE_MAPPINGS_APP_ENV_KEY, AppEnv, #{}), - post_process_payload_with_scope_alias_field_named(Payload, ExtraScopesField, ScopeMappings) + {error, not_found} -> Payload; + {ok, ExtraScopes} -> + ScopeMappings = rabbit_oauth2_config:get_scope_aliases(ResourceServerId), + post_process_payload_with_scope_alias_field_named(Payload, ExtraScopes, ScopeMappings) end. -spec post_process_payload_with_scope_alias_field_named(Payload :: map(), Field :: binary(), ScopeAliasMapping :: map()) -> map(). -post_process_payload_with_scope_alias_field_named(Payload, undefined, _ScopeAliasMapping) -> - Payload; post_process_payload_with_scope_alias_field_named(Payload, FieldName, ScopeAliasMapping) -> Scopes0 = maps:get(FieldName, Payload, []), Scopes = rabbit_data_coercion:to_list_of_binaries(Scopes0), @@ -291,39 +280,44 @@ post_process_payload_with_scope_alias_field_named(Payload, FieldName, ScopeAlias maps:put(?SCOPE_JWT_FIELD, ExpandedScopes, Payload). --spec does_include_complex_claim_field(Payload :: map()) -> boolean(). -does_include_complex_claim_field(Payload) when is_map(Payload) -> - maps:is_key(application:get_env(?APP, ?COMPLEX_CLAIM_APP_ENV_KEY, undefined), Payload). - --spec post_process_payload_with_complex_claim(Payload :: map()) -> map(). -post_process_payload_with_complex_claim(Payload) -> - ComplexClaim = maps:get(application:get_env(?APP, ?COMPLEX_CLAIM_APP_ENV_KEY, undefined), Payload), - ResourceServerId = rabbit_data_coercion:to_binary(application:get_env(?APP, ?RESOURCE_SERVER_ID, <<>>)), - - AdditionalScopes = - case ComplexClaim of - L when is_list(L) -> L; - M when is_map(M) -> - case maps:get(ResourceServerId, M, undefined) of - undefined -> []; - Ks when is_list(Ks) -> - [erlang:iolist_to_binary([ResourceServerId, <<".">>, K]) || K <- Ks]; - ClaimBin when is_binary(ClaimBin) -> - UnprefixedClaims = binary:split(ClaimBin, <<" ">>, [global, trim_all]), - [erlang:iolist_to_binary([ResourceServerId, <<".">>, K]) || K <- UnprefixedClaims]; - _ -> [] - end; - Bin when is_binary(Bin) -> - binary:split(Bin, <<" ">>, [global, trim_all]); - _ -> [] - end, +-spec does_include_complex_claim_field(ResourceServerId :: binary(), Payload :: map()) -> boolean(). 
+does_include_complex_claim_field(ResourceServerId, Payload) when is_map(Payload) -> + case rabbit_oauth2_config:get_additional_scopes_key(ResourceServerId) of + {ok, ScopeKey} -> maps:is_key(ScopeKey, Payload); + {error, not_found} -> false + end. - case AdditionalScopes of - [] -> Payload; - _ -> - ExistingScopes = maps:get(?SCOPE_JWT_FIELD, Payload, []), - maps:put(?SCOPE_JWT_FIELD, AdditionalScopes ++ ExistingScopes, Payload) - end. +-spec post_process_payload_with_complex_claim(ResourceServerId :: binary(), Payload :: map()) -> map(). +post_process_payload_with_complex_claim(ResourceServerId, Payload) -> + case rabbit_oauth2_config:get_additional_scopes_key(ResourceServerId) of + {ok, ScopesKey} -> + ComplexClaim = maps:get(ScopesKey, Payload), + AdditionalScopes = + case ComplexClaim of + L when is_list(L) -> L; + M when is_map(M) -> + case maps:get(ResourceServerId, M, undefined) of + undefined -> []; + Ks when is_list(Ks) -> + [erlang:iolist_to_binary([ResourceServerId, <<".">>, K]) || K <- Ks]; + ClaimBin when is_binary(ClaimBin) -> + UnprefixedClaims = binary:split(ClaimBin, <<" ">>, [global, trim_all]), + [erlang:iolist_to_binary([ResourceServerId, <<".">>, K]) || K <- UnprefixedClaims]; + _ -> [] + end; + Bin when is_binary(Bin) -> + binary:split(Bin, <<" ">>, [global, trim_all]); + _ -> [] + end, + + case AdditionalScopes of + [] -> Payload; + _ -> + ExistingScopes = maps:get(?SCOPE_JWT_FIELD, Payload, []), + maps:put(?SCOPE_JWT_FIELD, AdditionalScopes ++ ExistingScopes, Payload) + end; + {error, not_found} -> Payload + end. -spec post_process_payload_in_keycloak_format(Payload :: map()) -> map(). %% keycloak token format: https://github.com/rabbitmq/rabbitmq-auth-backend-oauth2/issues/36 @@ -367,156 +361,155 @@ extract_scopes_from_keycloak_permissions(Acc, [_ | T]) -> put_location_attribute(Attribute, Map) -> - put_attribute(binary:split(Attribute, <<":">>, [global, trim_all]), Map). + put_attribute(binary:split(Attribute, <<":">>, [global, trim_all]), Map). put_attribute([Key, Value | _], Map) -> - case lists:member(Key, ?LOCATION_ATTRIBUTES) of - true -> maps:put(Key, Value, Map); - false -> Map - end; + case lists:member(Key, ?LOCATION_ATTRIBUTES) of + true -> maps:put(Key, Value, Map); + false -> Map + end; put_attribute([_|_], Map) -> Map. % convert [ <<"cluster:A">>, <<"vhost:B" >>, <<"A">>, <<"unknown:C">> ] to #{ <<"cluster">> : <<"A">>, <<"vhost">> : <<"B">> } % filtering out non-key-value-pairs and keys which are not part of LOCATION_ATTRIBUTES convert_attribute_list_to_attribute_map(L) -> - convert_attribute_list_to_attribute_map(L, #{}). + convert_attribute_list_to_attribute_map(L, #{}). convert_attribute_list_to_attribute_map([H|L],Map) when is_binary(H) -> - convert_attribute_list_to_attribute_map(L, put_location_attribute(H,Map)); + convert_attribute_list_to_attribute_map(L, put_location_attribute(H,Map)); convert_attribute_list_to_attribute_map([], Map) -> Map. build_permission_resource_path(Map) -> - Vhost = maps:get(?VHOST_LOCATION_ATTRIBUTE, Map, <<"*">>), - Resource = maps:get(?QUEUE_LOCATION_ATTRIBUTE, Map, - maps:get(?EXCHANGE_LOCATION_ATTRIBUTE, Map, <<"*">>)), - RoutingKey = maps:get(?ROUTING_KEY_LOCATION_ATTRIBUTE, Map, <<"*">>), + Vhost = maps:get(?VHOST_LOCATION_ATTRIBUTE, Map, <<"*">>), + Resource = maps:get(?QUEUE_LOCATION_ATTRIBUTE, Map, + maps:get(?EXCHANGE_LOCATION_ATTRIBUTE, Map, <<"*">>)), + RoutingKey = maps:get(?ROUTING_KEY_LOCATION_ATTRIBUTE, Map, <<"*">>), - <>. + <>. 
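build_permission_resource_path/1 above resolves each location to a vhost/resource/routing-key path, substituting "*" for any attribute the location does not carry. A minimal sketch of that construction with invented attribute values:

    Vhost      = <<"finance">>,
    Resource   = <<"orders">>,  % queue or exchange name
    RoutingKey = <<"*">>,       % "*" when the location does not specify one
    <<Vhost/binary, "/", Resource/binary, "/", RoutingKey/binary>>.
    %% => <<"finance/orders/*">>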
map_locations_to_permission_resource_paths(ResourceServerId, L) -> - Locations = case L of - undefined -> []; - LocationsAsList when is_list(LocationsAsList) -> - lists:map(fun(Location) -> convert_attribute_list_to_attribute_map( - binary:split(Location,<<"/">>,[global,trim_all])) end, LocationsAsList); - LocationsAsBinary when is_binary(LocationsAsBinary) -> - [convert_attribute_list_to_attribute_map( - binary:split(LocationsAsBinary,<<"/">>,[global,trim_all]))] - end, + Locations = case L of + undefined -> []; + LocationsAsList when is_list(LocationsAsList) -> + lists:map(fun(Location) -> convert_attribute_list_to_attribute_map( + binary:split(Location,<<"/">>,[global,trim_all])) end, LocationsAsList); + LocationsAsBinary when is_binary(LocationsAsBinary) -> + [convert_attribute_list_to_attribute_map( + binary:split(LocationsAsBinary,<<"/">>,[global,trim_all]))] + end, - FilteredLocations = lists:filtermap(fun(L2) -> - case cluster_matches_resource_server_id(L2, ResourceServerId) and - legal_queue_and_exchange_values(L2) of - true -> { true, build_permission_resource_path(L2) }; - false -> false - end end, Locations), + FilteredLocations = lists:filtermap(fun(L2) -> + case cluster_matches_resource_server_id(L2, ResourceServerId) and + legal_queue_and_exchange_values(L2) of + true -> { true, build_permission_resource_path(L2) }; + false -> false + end end, Locations), - FilteredLocations. + FilteredLocations. cluster_matches_resource_server_id(#{?CLUSTER_LOCATION_ATTRIBUTE := Cluster}, - ResourceServerId) -> - wildcard:match(ResourceServerId, Cluster); + ResourceServerId) -> + wildcard:match(ResourceServerId, Cluster); cluster_matches_resource_server_id(_,_) -> - false. + false. legal_queue_and_exchange_values(#{?QUEUE_LOCATION_ATTRIBUTE := Queue, - ?EXCHANGE_LOCATION_ATTRIBUTE := Exchange}) -> - case Queue of - <<>> -> case Exchange of - <<>> -> true; - _ -> false - end; - _ -> case Exchange of - Queue -> true; - _ -> false - end - end; + ?EXCHANGE_LOCATION_ATTRIBUTE := Exchange}) -> + case Queue of + <<>> -> + case Exchange of + <<>> -> true; + _ -> false + end; + _ -> + case Exchange of + Queue -> true; + _ -> false + end + end; legal_queue_and_exchange_values(_) -> true. map_rich_auth_permissions_to_scopes(ResourceServerId, Permissions) -> - map_rich_auth_permissions_to_scopes(ResourceServerId, Permissions, []). + map_rich_auth_permissions_to_scopes(ResourceServerId, Permissions, []). map_rich_auth_permissions_to_scopes(_, [], Acc) -> Acc; map_rich_auth_permissions_to_scopes(ResourceServerId, [ #{?ACTIONS_FIELD := Actions, ?LOCATIONS_FIELD := Locations } | T ], Acc) -> - ResourcePaths = map_locations_to_permission_resource_paths(ResourceServerId, Locations), - case ResourcePaths of - [] -> map_rich_auth_permissions_to_scopes(ResourceServerId, T, Acc); - _ -> Scopes = case Actions of - undefined -> []; - ActionsAsList when is_list(ActionsAsList) -> - build_scopes(ResourceServerId, skip_unknown_actions(ActionsAsList), ResourcePaths); - ActionsAsBinary when is_binary(ActionsAsBinary) -> - build_scopes(ResourceServerId, skip_unknown_actions([ActionsAsBinary]), ResourcePaths) - end, - map_rich_auth_permissions_to_scopes(ResourceServerId, T, Acc ++ Scopes) - end. 
+ ResourcePaths = map_locations_to_permission_resource_paths(ResourceServerId, Locations), + case ResourcePaths of + [] -> map_rich_auth_permissions_to_scopes(ResourceServerId, T, Acc); + _ -> + Scopes = case Actions of + undefined -> []; + ActionsAsList when is_list(ActionsAsList) -> + build_scopes(ResourceServerId, + skip_unknown_actions(ActionsAsList), ResourcePaths); + ActionsAsBinary when is_binary(ActionsAsBinary) -> + build_scopes(ResourceServerId, + skip_unknown_actions([ActionsAsBinary]), ResourcePaths) + end, + map_rich_auth_permissions_to_scopes(ResourceServerId, T, Acc ++ Scopes) + end. skip_unknown_actions(Actions) -> - lists:filter(fun(A) -> lists:member(A, ?ALLOWED_ACTION_VALUES) end, Actions). + lists:filter(fun(A) -> lists:member(A, ?ALLOWED_ACTION_VALUES) end, Actions). produce_list_of_user_tag_or_action_on_resources(ResourceServerId, ActionOrUserTag, Locations) -> - case lists:member(ActionOrUserTag, ?ALLOWED_TAG_VALUES) of - true -> [<< ResourceServerId/binary, ".tag:", ActionOrUserTag/binary >>]; - _ -> build_scopes_for_action(ResourceServerId, ActionOrUserTag, Locations, []) - end. + case lists:member(ActionOrUserTag, ?ALLOWED_TAG_VALUES) of + true -> [<< ResourceServerId/binary, ".tag:", ActionOrUserTag/binary >>]; + _ -> build_scopes_for_action(ResourceServerId, ActionOrUserTag, Locations, []) + end. build_scopes_for_action(ResourceServerId, Action, [Location|Locations], Acc) -> - Scope = << ResourceServerId/binary, ".", Action/binary, ":", Location/binary >>, - build_scopes_for_action(ResourceServerId, Action, Locations, [ Scope | Acc ] ); + Scope = << ResourceServerId/binary, ".", Action/binary, ":", Location/binary >>, + build_scopes_for_action(ResourceServerId, Action, Locations, [ Scope | Acc ] ); build_scopes_for_action(_, _, [], Acc) -> Acc. -build_scopes(ResourceServerId, Actions, Locations) -> lists:flatmap( - fun(Action) -> - produce_list_of_user_tag_or_action_on_resources(ResourceServerId, Action, Locations) end, Actions). +build_scopes(ResourceServerId, Actions, Locations) -> + lists:flatmap(fun(Action) -> + produce_list_of_user_tag_or_action_on_resources(ResourceServerId, + Action, Locations) end, Actions). is_recognized_permission(#{?ACTIONS_FIELD := _, ?LOCATIONS_FIELD:= _ , ?TYPE_FIELD := Type }, ResourceServerType) -> - case ResourceServerType of - <<>> -> false; - V when V == Type -> true; - _ -> false - end; + case ResourceServerType of + <<>> -> false; + V when V == Type -> true; + _ -> false + end; is_recognized_permission(_, _) -> false. --spec post_process_payload_in_rich_auth_request_format(Payload :: map()) -> map(). +-spec post_process_payload_in_rich_auth_request_format(ResourceServerId :: binary(), Payload :: map()) -> map(). 
%% https://oauth.net/2/rich-authorization-requests/ -post_process_payload_in_rich_auth_request_format(#{<<"authorization_details">> := Permissions} = Payload) -> - ResourceServerId = rabbit_data_coercion:to_binary( - application:get_env(?APP, ?RESOURCE_SERVER_ID, <<>>)), - ResourceServerType = rabbit_data_coercion:to_binary( - application:get_env(?APP, ?RESOURCE_SERVER_TYPE, <<>>)), +post_process_payload_in_rich_auth_request_format(ResourceServerId, #{<<"authorization_details">> := Permissions} = Payload) -> + ResourceServerType = rabbit_oauth2_config:get_resource_server_type(ResourceServerId), - FilteredPermissionsByType = lists:filter(fun(P) -> + FilteredPermissionsByType = lists:filter(fun(P) -> is_recognized_permission(P, ResourceServerType) end, Permissions), - AdditionalScopes = map_rich_auth_permissions_to_scopes(ResourceServerId, FilteredPermissionsByType), - - ExistingScopes = maps:get(?SCOPE_JWT_FIELD, Payload, []), - maps:put(?SCOPE_JWT_FIELD, AdditionalScopes ++ ExistingScopes, Payload). - + AdditionalScopes = map_rich_auth_permissions_to_scopes(ResourceServerId, FilteredPermissionsByType), + ExistingScopes = maps:get(?SCOPE_JWT_FIELD, Payload, []), + maps:put(?SCOPE_JWT_FIELD, AdditionalScopes ++ ExistingScopes, Payload). -validate_payload(DecodedToken) -> - ResourceServerEnv = application:get_env(?APP, ?RESOURCE_SERVER_ID, <<>>), - ResourceServerId = rabbit_data_coercion:to_binary(ResourceServerEnv), - ScopePrefix = application:get_env(?APP, ?SCOPE_PREFIX, <<ResourceServerId/binary, ".">>), - validate_payload(DecodedToken, ResourceServerId, ScopePrefix). +validate_payload(ResourceServerId, DecodedToken) -> + ScopePrefix = rabbit_oauth2_config:get_scope_prefix(ResourceServerId), + validate_payload(ResourceServerId, DecodedToken, ScopePrefix). -validate_payload(#{?SCOPE_JWT_FIELD := Scope, ?AUD_JWT_FIELD := Aud} = DecodedToken, ResourceServerId, ScopePrefix) -> +validate_payload(ResourceServerId, #{?SCOPE_JWT_FIELD := Scope, ?AUD_JWT_FIELD := Aud} = DecodedToken, ScopePrefix) -> case check_aud(Aud, ResourceServerId) of ok -> {ok, DecodedToken#{?SCOPE_JWT_FIELD => filter_scopes(Scope, ScopePrefix)}}; {error, Err} -> {refused, {invalid_aud, Err}} end; -validate_payload(#{?AUD_JWT_FIELD := Aud} = DecodedToken, ResourceServerId, _ScopePrefix) -> +validate_payload(ResourceServerId, #{?AUD_JWT_FIELD := Aud} = DecodedToken, _ScopePrefix) -> case check_aud(Aud, ResourceServerId) of ok -> {ok, DecodedToken}; {error, Err} -> {refused, {invalid_aud, Err}} end; -validate_payload(#{?SCOPE_JWT_FIELD := Scope} = DecodedToken, _ResourceServerId, ScopePrefix) -> - case application:get_env(?APP, ?VERIFY_AUD, true) of - true -> {error, {badarg, {aud_field_is_missing}}}; - false -> {ok, DecodedToken#{?SCOPE_JWT_FIELD => filter_scopes(Scope, ScopePrefix)}} - end. +validate_payload(ResourceServerId, #{?SCOPE_JWT_FIELD := Scope} = DecodedToken, ScopePrefix) -> + case rabbit_oauth2_config:is_verify_aud(ResourceServerId) of + true -> {error, {badarg, {aud_field_is_missing}}}; + false -> {ok, DecodedToken#{?SCOPE_JWT_FIELD => filter_scopes(Scope, ScopePrefix)}} + end. filter_scopes(Scopes, <<"">>) -> Scopes; filter_scopes(Scopes, ScopePrefix) -> @@ -524,7 +517,7 @@ filter_scopes(Scopes, ScopePrefix) -> check_aud(_, <<>>) -> ok; check_aud(Aud, ResourceServerId) -> - case application:get_env(?APP, ?VERIFY_AUD, true) of + case rabbit_oauth2_config:is_verify_aud(ResourceServerId) of true -> case Aud of List when is_list(List) -> @@ -544,44 +537,44 @@ get_scopes(#{}) -> []. -spec get_expanded_scopes(map(), #resource{}) -> [binary()].
get_expanded_scopes(Token, #resource{virtual_host = VHost}) -> - Context = #{ token => Token , vhost => VHost}, - case maps:get(?SCOPE_JWT_FIELD, Token, []) of - [] -> []; - Scopes -> lists:map(fun(Scope) -> list_to_binary(parse_scope(Scope, Context)) end, Scopes) - end. + Context = #{ token => Token , vhost => VHost}, + case maps:get(?SCOPE_JWT_FIELD, Token, []) of + [] -> []; + Scopes -> lists:map(fun(Scope) -> list_to_binary(parse_scope(Scope, Context)) end, Scopes) + end. parse_scope(Scope, Context) -> - { Acc0, _} = lists:foldl(fun(Elem, { Acc, Stage }) -> parse_scope_part(Elem, Acc, Stage, Context) end, - { [], undefined }, re:split(Scope,"([\{.*\}])",[{return,list},trim])), - Acc0. + { Acc0, _} = lists:foldl(fun(Elem, { Acc, Stage }) -> parse_scope_part(Elem, Acc, Stage, Context) end, + { [], undefined }, re:split(Scope,"([\{.*\}])",[{return,list},trim])), + Acc0. parse_scope_part(Elem, Acc, Stage, Context) -> - case Stage of - error -> {Acc, error}; - undefined -> - case Elem of - "{" -> { Acc, fun capture_var_name/3}; - Value -> { Acc ++ Value, Stage} - end; - _ -> Stage(Elem, Acc, Context) - end. + case Stage of + error -> {Acc, error}; + undefined -> + case Elem of + "{" -> { Acc, fun capture_var_name/3}; + Value -> { Acc ++ Value, Stage} + end; + _ -> Stage(Elem, Acc, Context) + end. capture_var_name(Elem, Acc, #{ token := Token, vhost := Vhost}) -> - { Acc ++ resolve_scope_var(Elem, Token, Vhost), fun expect_closing_var/3}. + { Acc ++ resolve_scope_var(Elem, Token, Vhost), fun expect_closing_var/3}. expect_closing_var("}" , Acc, _Context) -> { Acc , undefined }; expect_closing_var(_ , _Acc, _Context) -> {"", error}. resolve_scope_var(Elem, Token, Vhost) -> - case Elem of - "vhost" -> binary_to_list(Vhost); - _ -> - ElemAsBinary = list_to_binary(Elem), - binary_to_list(case maps:get(ElemAsBinary, Token, ElemAsBinary) of + case Elem of + "vhost" -> binary_to_list(Vhost); + _ -> + ElemAsBinary = list_to_binary(Elem), + binary_to_list(case maps:get(ElemAsBinary, Token, ElemAsBinary) of Value when is_binary(Value) -> Value; _ -> ElemAsBinary end) - end. + end. %% A token may be present in the password credential or in the rabbit_auth_backend_oauth2 %% credential. The former is the most common scenario for the first time authentication. @@ -620,8 +613,7 @@ token_from_context(AuthProps) -> -spec username_from(list(), map()) -> binary() | undefined. username_from(PreferredUsernameClaims, DecodedToken) -> - UsernameClaims = append_or_return_default(PreferredUsernameClaims, ?DEFAULT_PREFERRED_USERNAME_CLAIMS), - ResolvedUsernameClaims = lists:filtermap(fun(Claim) -> find_claim_in_token(Claim, DecodedToken) end, UsernameClaims), + ResolvedUsernameClaims = lists:filtermap(fun(Claim) -> find_claim_in_token(Claim, DecodedToken) end, PreferredUsernameClaims), Username = case ResolvedUsernameClaims of [ ] -> <<"unknown">>; [ _One ] -> _One; @@ -631,19 +623,12 @@ username_from(PreferredUsernameClaims, DecodedToken) -> [lists:flatten(io_lib:format("~p",[ResolvedUsernameClaims])), Username]), Username. -append_or_return_default(ListOrBinary, Default) -> - case ListOrBinary of - VarList when is_list(VarList) -> VarList ++ Default; - VarBinary when is_binary(VarBinary) -> [VarBinary] ++ Default; - _ -> Default - end. - find_claim_in_token(Claim, Token) -> - case maps:get(Claim, Token, undefined) of - undefined -> false; - ClaimValue when is_binary(ClaimValue) -> {true, ClaimValue}; - _ -> false - end. 
+ case maps:get(Claim, Token, undefined) of + undefined -> false; + ClaimValue when is_binary(ClaimValue) -> {true, ClaimValue}; + _ -> false + end. -define(TAG_SCOPE_PREFIX, <<"tag:">>). diff --git a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2_app.erl b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2_app.erl index 1c1dfa6c9459..405941b9ba9f 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2_app.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_backend_oauth2_app). @@ -23,4 +23,3 @@ stop(_State) -> init([]) -> {ok, {{one_for_one,3,10},[]}}. - diff --git a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_config.erl b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_config.erl new file mode 100644 index 000000000000..1a02dccde057 --- /dev/null +++ b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_config.erl @@ -0,0 +1,371 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_oauth2_config). + +-include_lib("oauth2_client/include/oauth2_client.hrl"). + +-define(APP, rabbitmq_auth_backend_oauth2). +-define(DEFAULT_PREFERRED_USERNAME_CLAIMS, [<<"sub">>, <<"client_id">>]). + +-define(TOP_RESOURCE_SERVER_ID, application:get_env(?APP, resource_server_id)). +%% scope aliases map "role names" to a set of scopes + + +-export([ + add_signing_key/2, add_signing_key/3, replace_signing_keys/1, + replace_signing_keys/2, + get_signing_keys/0, get_signing_keys/1, get_signing_key/2, + get_key_config/0, get_key_config/1, get_default_resource_server_id/0, + get_oauth_provider_for_resource_server_id/2, + get_allowed_resource_server_ids/0, find_audience_in_resource_server_ids/1, + is_verify_aud/0, is_verify_aud/1, + get_additional_scopes_key/0, get_additional_scopes_key/1, + get_default_preferred_username_claims/0, get_preferred_username_claims/0, + get_preferred_username_claims/1, + get_scope_prefix/0, get_scope_prefix/1, + get_resource_server_type/0, get_resource_server_type/1, + has_scope_aliases/1, get_scope_aliases/1 + ]). + +-spec get_default_preferred_username_claims() -> list(). +get_default_preferred_username_claims() -> ?DEFAULT_PREFERRED_USERNAME_CLAIMS. + +-spec get_preferred_username_claims() -> list(). +get_preferred_username_claims() -> + case application:get_env(?APP, preferred_username_claims) of + {ok, Value} -> + append_or_return_default(Value, ?DEFAULT_PREFERRED_USERNAME_CLAIMS); + _ -> ?DEFAULT_PREFERRED_USERNAME_CLAIMS + end. +-spec get_preferred_username_claims(binary()) -> list(). +get_preferred_username_claims(ResourceServerId) -> + get_preferred_username_claims(get_default_resource_server_id(), + ResourceServerId). 
+get_preferred_username_claims(TopResourceServerId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> + get_preferred_username_claims(); +get_preferred_username_claims(TopResourceServerId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + ResourceServer = maps:get(ResourceServerId, application:get_env(?APP, + resource_servers, #{})), + case proplists:get_value(preferred_username_claims, ResourceServer) of + undefined -> get_preferred_username_claims(); + Value -> append_or_return_default(Value, ?DEFAULT_PREFERRED_USERNAME_CLAIMS) + end. + +-type key_type() :: json | pem | map. +-spec add_signing_key(binary(), {key_type(), binary()} ) -> {ok, map()} | {error, term()}. +add_signing_key(KeyId, Key) -> + LockId = lock(), + try do_add_signing_key(KeyId, Key) of + V -> V + after + unlock(LockId) + end. + +-spec add_signing_key(binary(), binary(), {key_type(), binary()}) -> {ok, map()} | {error, term()}. +add_signing_key(ResourceServerId, KeyId, Key) -> + LockId = lock(), + try do_add_signing_key(ResourceServerId, KeyId, Key) of + V -> V + after + unlock(LockId) + end. + +do_add_signing_key(KeyId, Key) -> + do_replace_signing_keys(maps:put(KeyId, Key, get_signing_keys())). + +do_add_signing_key(ResourceServerId, KeyId, Key) -> + do_replace_signing_keys(ResourceServerId, + maps:put(KeyId, Key, get_signing_keys(ResourceServerId))). + +replace_signing_keys(SigningKeys) -> + LockId = lock(), + try do_replace_signing_keys(SigningKeys) of + V -> V + after + unlock(LockId) + end. + +replace_signing_keys(ResourceServerId, SigningKeys) -> + LockId = lock(), + try do_replace_signing_keys(ResourceServerId, SigningKeys) of + V -> V + after + unlock(LockId) + end. + +do_replace_signing_keys(SigningKeys) -> + KeyConfig = application:get_env(?APP, key_config, []), + KeyConfig1 = proplists:delete(signing_keys, KeyConfig), + KeyConfig2 = [{signing_keys, SigningKeys} | KeyConfig1], + application:set_env(?APP, key_config, KeyConfig2), + rabbit_log:debug("Replacing signing keys ~p", [ KeyConfig2]), + SigningKeys. + +do_replace_signing_keys(ResourceServerId, SigningKeys) -> + do_replace_signing_keys(get_default_resource_server_id(), + ResourceServerId, SigningKeys). +do_replace_signing_keys(TopResourceServerId, ResourceServerId, SigningKeys) + when ResourceServerId =:= TopResourceServerId -> + do_replace_signing_keys(SigningKeys); +do_replace_signing_keys(TopResourceServerId, ResourceServerId, SigningKeys) + when ResourceServerId =/= TopResourceServerId -> + ResourceServers = application:get_env(?APP, resource_servers, #{}), + ResourceServer = maps:get(ResourceServerId, ResourceServers, []), + KeyConfig0 = proplists:get_value(key_config, ResourceServer, []), + KeyConfig1 = proplists:delete(signing_keys, KeyConfig0), + KeyConfig2 = [{signing_keys, SigningKeys} | KeyConfig1], + + ResourceServer1 = proplists:delete(key_config, ResourceServer), + ResourceServer2 = [{key_config, KeyConfig2} | ResourceServer1], + + ResourceServers1 = maps:put(ResourceServerId, ResourceServer2, ResourceServers), + application:set_env(?APP, resource_servers, ResourceServers1), + rabbit_log:debug("Replacing signing keys for ~p -> ~p", [ResourceServerId, ResourceServers1]), + SigningKeys. + +-spec get_signing_keys() -> map(). +get_signing_keys() -> proplists:get_value(signing_keys, get_key_config(), #{}). + +-spec get_signing_keys(binary()) -> map(). +get_signing_keys(ResourceServerId) -> + get_signing_keys(get_default_resource_server_id(), ResourceServerId). 
+ +get_signing_keys(TopResourceServerId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> + get_signing_keys(); +get_signing_keys(TopResourceServerId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + proplists:get_value(signing_keys, get_key_config(ResourceServerId), #{}). + +-spec get_oauth_provider_for_resource_server_id(binary(), list()) -> + {ok, oauth_provider()} | {error, any()}. + +get_oauth_provider_for_resource_server_id(ResourceServerId, RequiredAttributeList) -> + get_oauth_provider_for_resource_server_id(get_default_resource_server_id(), + ResourceServerId, RequiredAttributeList). +get_oauth_provider_for_resource_server_id(TopResourceServerId, + ResourceServerId, RequiredAttributeList) when ResourceServerId =:= TopResourceServerId -> + case application:get_env(?APP, default_oauth_provider) of + undefined -> + oauth2_client:get_oauth_provider(RequiredAttributeList); + {ok, DefaultOauthProviderId} -> + oauth2_client:get_oauth_provider(DefaultOauthProviderId, RequiredAttributeList) + end; + +get_oauth_provider_for_resource_server_id(TopResourceServerId, ResourceServerId, + RequiredAttributeList) when ResourceServerId =/= TopResourceServerId -> + case proplists:get_value(oauth_provider_id, get_resource_server_props(ResourceServerId)) of + undefined -> + case application:get_env(?APP, default_oauth_provider) of + undefined -> + oauth2_client:get_oauth_provider(RequiredAttributeList); + {ok, DefaultOauthProviderId} -> + oauth2_client:get_oauth_provider(DefaultOauthProviderId, + RequiredAttributeList) + end; + OauthProviderId -> + oauth2_client:get_oauth_provider(OauthProviderId, RequiredAttributeList) + end. + +-spec get_key_config() -> list(). +get_key_config() -> application:get_env(?APP, key_config, []). + +-spec get_key_config(binary()) -> list(). +get_key_config(ResourceServerId) -> + get_key_config(get_default_resource_server_id(), ResourceServerId). +get_key_config(TopResourceServerId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> + get_key_config(); +get_key_config(TopResourceServerId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + proplists:get_value(key_config, get_resource_server_props(ResourceServerId), + get_key_config()). + +get_resource_server_props(ResourceServerId) -> + ResourceServers = application:get_env(?APP, resource_servers, #{}), + maps:get(ResourceServerId, ResourceServers, []). + +get_signing_key(KeyId, ResourceServerId) -> + get_signing_key(get_default_resource_server_id(), KeyId, ResourceServerId). + +get_signing_key(TopResourceServerId, KeyId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> + maps:get(KeyId, get_signing_keys(), undefined); +get_signing_key(TopResourceServerId, KeyId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + maps:get(KeyId, get_signing_keys(ResourceServerId), undefined). + + +append_or_return_default(ListOrBinary, Default) -> + case ListOrBinary of + VarList when is_list(VarList) -> VarList ++ Default; + VarBinary when is_binary(VarBinary) -> [VarBinary] ++ Default; + _ -> Default + end. + +-spec get_default_resource_server_id() -> binary() | {error, term()}. +get_default_resource_server_id() -> + case ?TOP_RESOURCE_SERVER_ID of + undefined -> {error, missing_token_audience_and_or_config_resource_server_id }; + {ok, ResourceServerId} -> ResourceServerId + end. + +-spec get_allowed_resource_server_ids() -> list(). 
+get_allowed_resource_server_ids() -> + ResourceServers = application:get_env(?APP, resource_servers, #{}), + rabbit_log:debug("ResourceServers: ~p", [ResourceServers]), + ResourceServerIds = maps:fold(fun(K, V, List) -> List ++ + [proplists:get_value(id, V, K)] end, [], ResourceServers), + rabbit_log:debug("ResourceServersIds: ~p", [ResourceServerIds]), + ResourceServerIds ++ case get_default_resource_server_id() of + {error, _} -> []; + ResourceServerId -> [ ResourceServerId ] + end. + +-spec find_audience_in_resource_server_ids(binary() | list()) -> + {ok, binary()} | {error, term()}. +find_audience_in_resource_server_ids(Audience) when is_binary(Audience) -> + find_audience_in_resource_server_ids(binary:split(Audience, <<" ">>, [global, trim_all])); +find_audience_in_resource_server_ids(AudList) when is_list(AudList) -> + AllowedAudList = get_allowed_resource_server_ids(), + case intersection(AudList, AllowedAudList) of + [One] -> {ok, One}; + [_One|_Tail] -> {error, only_one_resource_server_as_audience_found_many}; + [] -> {error, no_matching_aud_found} + end. + + +-spec is_verify_aud() -> boolean(). +is_verify_aud() -> application:get_env(?APP, verify_aud, true). + +-spec is_verify_aud(binary()) -> boolean(). +is_verify_aud(ResourceServerId) -> + is_verify_aud(get_default_resource_server_id(), ResourceServerId). +is_verify_aud(TopResourceServerId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> is_verify_aud(); +is_verify_aud(TopResourceServerId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + proplists:get_value(verify_aud, maps:get(ResourceServerId, + application:get_env(?APP, resource_servers, #{}), []), is_verify_aud()). + +-spec get_additional_scopes_key() -> {ok, binary()} | {error, not_found}. +get_additional_scopes_key() -> + case application:get_env(?APP, extra_scopes_source, undefined) of + undefined -> {error, not_found}; + ScopeKey -> {ok, ScopeKey} + end. + +-spec get_additional_scopes_key(binary()) -> {ok, binary()} | {error, not_found}. +get_additional_scopes_key(ResourceServerId) -> + get_additional_scopes_key(get_default_resource_server_id(), ResourceServerId). +get_additional_scopes_key(TopResourceServerId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> get_additional_scopes_key(); +get_additional_scopes_key(TopResourceServerId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + ResourceServer = maps:get(ResourceServerId, + application:get_env(?APP, resource_servers, #{}), []), + case proplists:get_value(extra_scopes_source, ResourceServer) of + undefined -> get_additional_scopes_key(); + <<>> -> get_additional_scopes_key(); + ScopeKey -> {ok, ScopeKey} + end. + +-spec get_scope_prefix() -> binary(). +get_scope_prefix() -> + DefaultScopePrefix = erlang:iolist_to_binary([ + get_default_resource_server_id(), <<".">>]), + application:get_env(?APP, scope_prefix, DefaultScopePrefix). + +-spec get_scope_prefix(binary()) -> binary(). +get_scope_prefix(ResourceServerId) -> + get_scope_prefix(get_default_resource_server_id(), ResourceServerId). 
+get_scope_prefix(TopResourceServerId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> get_scope_prefix(); +get_scope_prefix(TopResourceServerId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + ResourceServer = maps:get(ResourceServerId, + application:get_env(?APP, resource_servers, #{}), []), + case proplists:get_value(scope_prefix, ResourceServer) of + undefined -> + case application:get_env(?APP, scope_prefix) of + undefined -> <<ResourceServerId/binary, ".">>; + {ok, Prefix} -> Prefix + end; + Prefix -> Prefix + end. + +-spec get_resource_server_type() -> binary(). +get_resource_server_type() -> application:get_env(?APP, resource_server_type, <<>>). + +-spec get_resource_server_type(binary()) -> binary(). +get_resource_server_type(ResourceServerId) -> + get_resource_server_type(get_default_resource_server_id(), ResourceServerId). +get_resource_server_type(TopResourceServerId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> get_resource_server_type(); +get_resource_server_type(TopResourceServerId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + ResourceServer = maps:get(ResourceServerId, + application:get_env(?APP, resource_servers, #{}), []), + proplists:get_value(resource_server_type, ResourceServer, + get_resource_server_type()). + +-spec has_scope_aliases(binary()) -> boolean(). +has_scope_aliases(ResourceServerId) -> + has_scope_aliases(get_default_resource_server_id(), ResourceServerId). +has_scope_aliases(TopResourceServerId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> + case application:get_env(?APP, scope_aliases) of + undefined -> false; + _ -> true + end; +has_scope_aliases(TopResourceServerId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + ResourceServerProps = maps:get(ResourceServerId, + application:get_env(?APP, resource_servers, #{}),[]), + case proplists:is_defined(scope_aliases, ResourceServerProps) of + true -> true; + false -> has_scope_aliases(TopResourceServerId) + end. + +-spec get_scope_aliases(binary()) -> map(). +get_scope_aliases(ResourceServerId) -> + get_scope_aliases(get_default_resource_server_id(), ResourceServerId). +get_scope_aliases(TopResourceServerId, ResourceServerId) + when ResourceServerId =:= TopResourceServerId -> + application:get_env(?APP, scope_aliases, #{}); +get_scope_aliases(TopResourceServerId, ResourceServerId) + when ResourceServerId =/= TopResourceServerId -> + ResourceServerProps = maps:get(ResourceServerId, + application:get_env(?APP, resource_servers, #{}),[]), + proplists:get_value(scope_aliases, ResourceServerProps, + get_scope_aliases(TopResourceServerId)). + + +intersection(List1, List2) -> + [I || I <- List1, lists:member(I, List2)]. + +lock() -> + Nodes = rabbit_nodes:list_running(), + Retries = rabbit_nodes:lock_retries(), + LockId = case global:set_lock({oauth2_config_lock, + rabbitmq_auth_backend_oauth2}, Nodes, Retries) of + true -> rabbitmq_auth_backend_oauth2; + false -> undefined + end, + LockId. + +unlock(LockId) -> + Nodes = rabbit_nodes:list_running(), + case LockId of + undefined -> ok; + Value -> + global:del_lock({oauth2_config_lock, Value}, Nodes) + end, + ok. diff --git a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_scope.erl b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_scope.erl index 387c400b6f74..d81c7ded0c8f 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_scope.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_scope.erl @@ -2,7 +2,7 @@ %% License, v. 2.0.
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_oauth2_scope). diff --git a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwks.erl b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwks.erl index 6975d7974197..edd81902da15 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwks.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwks.erl @@ -1,32 +1,7 @@ -module(uaa_jwks). --export([get/1, ssl_options/0]). +-export([get/2]). --spec get(string() | binary()) -> {ok, term()} | {error, term()}. -get(JwksUrl) -> - httpc:request(get, {JwksUrl, []}, [{ssl, ssl_options()}, {timeout, 60000}], []). - --spec ssl_options() -> list(). -ssl_options() -> - UaaEnv = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), - PeerVerification = proplists:get_value(peer_verification, UaaEnv, verify_none), - Depth = proplists:get_value(depth, UaaEnv, 10), - FailIfNoPeerCert = proplists:get_value(fail_if_no_peer_cert, UaaEnv, false), - CrlCheck = proplists:get_value(crl_check, UaaEnv, false), - SslOpts0 = [{verify, PeerVerification}, - {depth, Depth}, - {fail_if_no_peer_cert, FailIfNoPeerCert}, - {crl_check, CrlCheck}, - {crl_cache, {ssl_crl_cache, {internal, [{http, 10000}]}}} | cacertfile(UaaEnv)], - - case proplists:get_value(hostname_verification, UaaEnv, none) of - wildcard -> - [{customize_hostname_check, [{match_fun, public_key:pkix_verify_hostname_match_fun(https)}]} | SslOpts0]; - none -> - SslOpts0 - end. - -cacertfile(UaaEnv) -> - case proplists:get_value(cacertfile, UaaEnv) of - undefined -> []; - CaCertFile -> [{cacertfile, CaCertFile}] - end. +-spec get(string() | binary(), term()) -> {ok, term()} | {error, term()}. +get(JwksUrl, SslOptions) -> + Options = [{timeout, 60000}] ++ [{ssl, SslOptions}], + httpc:request(get, {JwksUrl, []}, Options, []). diff --git a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl index 0ec9ee5a3a79..d78b7b4c9c1c 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl @@ -2,109 +2,103 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(uaa_jwt). -export([add_signing_key/3, - remove_signing_key/1, decode_and_verify/1, - get_jwk/1, - verify_signing_key/2, - signing_keys/0]). + get_jwk/2, + verify_signing_key/2]). -export([client_id/1, sub/1, client_id/2, sub/2]). -include_lib("jose/include/jose_jwk.hrl"). +-include_lib("oauth2_client/include/oauth2_client.hrl"). -define(APP, rabbitmq_auth_backend_oauth2). -type key_type() :: json | pem | map. -spec add_signing_key(binary(), key_type(), binary() | map()) -> {ok, map()} | {error, term()}. 
- add_signing_key(KeyId, Type, Value) -> case verify_signing_key(Type, Value) of ok -> - SigningKeys0 = signing_keys(), - SigningKeys1 = maps:put(KeyId, {Type, Value}, SigningKeys0), - ok = update_uaa_jwt_signing_keys(SigningKeys1), - {ok, SigningKeys1}; + {ok, rabbit_oauth2_config:add_signing_key(KeyId, {Type, Value})}; {error, _} = Err -> Err end. -remove_signing_key(KeyId) -> - UaaEnv = application:get_env(?APP, key_config, []), - Keys0 = proplists:get_value(signing_keys, UaaEnv), - Keys1 = maps:remove(KeyId, Keys0), - update_uaa_jwt_signing_keys(UaaEnv, Keys1). - --spec update_uaa_jwt_signing_keys(map()) -> ok. -update_uaa_jwt_signing_keys(SigningKeys) -> - UaaEnv0 = application:get_env(?APP, key_config, []), - update_uaa_jwt_signing_keys(UaaEnv0, SigningKeys). - --spec update_uaa_jwt_signing_keys([term()], map()) -> ok. -update_uaa_jwt_signing_keys(UaaEnv0, SigningKeys) -> - UaaEnv1 = proplists:delete(signing_keys, UaaEnv0), - UaaEnv2 = [{signing_keys, SigningKeys} | UaaEnv1], - application:set_env(?APP, key_config, UaaEnv2). - --spec update_jwks_signing_keys() -> ok | {error, term()}. -update_jwks_signing_keys() -> - UaaEnv = application:get_env(?APP, key_config, []), - case proplists:get_value(jwks_url, UaaEnv) of - undefined -> - {error, no_jwks_url}; - JwksUrl -> - rabbit_log:debug("Retrieving signing keys from ~ts", [JwksUrl]), - case uaa_jwks:get(JwksUrl) of +-spec update_jwks_signing_keys(term()) -> ok | {error, term()}. +update_jwks_signing_keys(ResourceServerId) -> + case rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(ResourceServerId, [jwks_uri]) of + {error, _} = Error -> + rabbit_log:error("Failed to obtain a JWKS URL for resource_server_id '~tp'", [ResourceServerId]), + Error; + {ok, #oauth_provider{jwks_uri = JwksUrl, ssl_options = SslOptions}} -> + rabbit_log:debug("OAuth 2 JWT: downloading keys from ~tp (TLS options: ~p)", [JwksUrl, SslOptions]), + case uaa_jwks:get(JwksUrl, SslOptions) of {ok, {_, _, JwksBody}} -> KeyList = maps:get(<<"keys">>, jose:decode(erlang:iolist_to_binary(JwksBody)), []), Keys = maps:from_list(lists:map(fun(Key) -> {maps:get(<<"kid">>, Key, undefined), {json, Key}} end, KeyList)), - update_uaa_jwt_signing_keys(UaaEnv, Keys); + rabbit_log:debug("OAuth 2 JWT: downloaded keys ~tp", [Keys]), + case rabbit_oauth2_config:replace_signing_keys(ResourceServerId, Keys) of + {error, _} = Err -> Err; + _ -> ok + end; {error, _} = Err -> + rabbit_log:error("OAuth 2 JWT: failed to download keys: ~tp", [Err]), Err end end. --spec decode_and_verify(binary()) -> {boolean(), map()} | {error, term()}. +-spec decode_and_verify(binary()) -> {boolean(), binary(), map()} | {error, term()}. decode_and_verify(Token) -> - case uaa_jwt_jwt:get_key_id(Token) of - {ok, KeyId} -> - case get_jwk(KeyId) of - {ok, JWK} -> - uaa_jwt_jwt:decode_and_verify(JWK, Token); - {error, _} = Err -> - Err - end; + case uaa_jwt_jwt:resolve_resource_server_id(Token) of {error, _} = Err -> - Err + Err; + ResourceServerId -> + rabbit_log:debug("OAuth 2 JWT: resolved resource_server_id: '~tp'", [ResourceServerId]), + case uaa_jwt_jwt:get_key_id(ResourceServerId, Token) of + {ok, KeyId} -> + rabbit_log:debug("OAuth 2 JWT: signing_key_id : '~tp'", [KeyId]), + case get_jwk(KeyId, ResourceServerId) of + {ok, JWK} -> + case uaa_jwt_jwt:decode_and_verify(ResourceServerId, JWK, Token) of + {true, Payload} -> {true, ResourceServerId, Payload}; + {false, Payload} -> {false, ResourceServerId, Payload} + end; + {error, _} = Err -> + Err + end; + {error, _} = Err -> Err + end end. 
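%% Caller-side sketch of the revised contract (the names post_process/2 and
%% signature_verification_failed below are placeholders, not part of this module):
%% decode_and_verify/1 now resolves the resource server from the token's "aud"
%% claim and returns it alongside the verification result, roughly:
%%   case uaa_jwt:decode_and_verify(Token) of
%%       {error, Reason}                      -> {refused, Reason};
%%       {true, ResourceServerId, Payload}    -> post_process(ResourceServerId, Payload);
%%       {false, _ResourceServerId, _Payload} -> {refused, signature_verification_failed}
%%   end.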
--spec get_jwk(binary()) -> {ok, map()} | {error, term()}. -get_jwk(KeyId) -> - get_jwk(KeyId, true). +-spec get_jwk(binary(), binary()) -> {ok, map()} | {error, term()}. +get_jwk(KeyId, ResourceServerId) -> + get_jwk(KeyId, ResourceServerId, true). -get_jwk(KeyId, AllowUpdateJwks) -> - Keys = signing_keys(), - case maps:get(KeyId, Keys, undefined) of +get_jwk(KeyId, ResourceServerId, AllowUpdateJwks) -> + case rabbit_oauth2_config:get_signing_key(KeyId, ResourceServerId) of undefined -> if AllowUpdateJwks -> - case update_jwks_signing_keys() of + rabbit_log:debug("OAuth 2 JWT: signing key '~tp' not found. Downloading it... ", [KeyId]), + case update_jwks_signing_keys(ResourceServerId) of ok -> - get_jwk(KeyId, false); + get_jwk(KeyId, ResourceServerId, false); {error, no_jwks_url} -> {error, key_not_found}; {error, _} = Err -> Err end; true -> + rabbit_log:debug("OAuth 2 JWT: signing key '~tp' not found. Downloading is not allowed", [KeyId]), {error, key_not_found} end; {Type, Value} -> + rabbit_log:debug("OAuth 2 JWT: signing key found: '~tp', '~tp'", [Type, Value]), case Type of json -> uaa_jwt_jwk:make_jwk(Value); pem -> uaa_jwt_jwk:from_pem(Value); @@ -131,9 +125,6 @@ verify_signing_key(Type, Value) -> Err -> Err end. -signing_keys() -> - UaaEnv = application:get_env(?APP, key_config, []), - proplists:get_value(signing_keys, UaaEnv, #{}). -spec client_id(map()) -> binary() | undefined. client_id(DecodedToken) -> diff --git a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwk.erl b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwk.erl index 9bb1e27d12da..f1d2690c0d7e 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwk.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwk.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(uaa_jwt_jwk). diff --git a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl index 692dd4e4bf06..962a3b55daba 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl @@ -2,11 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(uaa_jwt_jwt). --export([decode/1, decode_and_verify/2, get_key_id/1]). +-export([decode/1, decode_and_verify/3, get_key_id/2, get_aud/1, resolve_resource_server_id/1]). -include_lib("jose/include/jose_jwt.hrl"). -include_lib("jose/include/jose_jws.hrl"). @@ -19,34 +19,63 @@ decode(Token) -> {error, {invalid_token, Type, Err, Stacktrace}} end. 
-decode_and_verify(Jwk, Token) -> - UaaEnv = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), +decode_and_verify(ResourceServerId, Jwk, Token) -> + KeyConfig = rabbit_oauth2_config:get_key_config(ResourceServerId), Verify = - case proplists:get_value(algorithms, UaaEnv) of - undefined -> - jose_jwt:verify(Jwk, Token); - Algs -> - jose_jwt:verify_strict(Jwk, Algs, Token) + case proplists:get_value(algorithms, KeyConfig) of + undefined -> jose_jwt:verify(Jwk, Token); + Algs -> jose_jwt:verify_strict(Jwk, Algs, Token) end, case Verify of {true, #jose_jwt{fields = Fields}, _} -> {true, Fields}; {false, #jose_jwt{fields = Fields}, _} -> {false, Fields} end. -get_key_id(Token) -> + +resolve_resource_server_id(Token) -> + case get_aud(Token) of + {error, _} = Error -> Error; + undefined -> + case rabbit_oauth2_config:is_verify_aud() of + true -> {error, no_matching_aud_found}; + false -> rabbit_oauth2_config:get_default_resource_server_id() + end; + {ok, Audience} -> + case rabbit_oauth2_config:find_audience_in_resource_server_ids(Audience) of + {ok, ResourceServerId} -> ResourceServerId; + {error, only_one_resource_server_as_audience_found_many} = Error -> Error; + {error, no_matching_aud_found} -> + case rabbit_oauth2_config:is_verify_aud() of + true -> {error, no_matching_aud_found}; + false -> rabbit_oauth2_config:get_default_resource_server_id() + end + end + end. + +get_key_id(ResourceServerId, Token) -> try case jose_jwt:peek_protected(Token) of #jose_jws{fields = #{<<"kid">> := Kid}} -> {ok, Kid}; - #jose_jws{} -> get_default_key() + #jose_jws{} -> get_default_key(ResourceServerId) + end + catch Type:Err:Stacktrace -> + {error, {invalid_token, Type, Err, Stacktrace}} + end. + +get_aud(Token) -> + try + case jose_jwt:peek_payload(Token) of + #jose_jwt{fields = #{<<"aud">> := Aud}} -> {ok, Aud}; + #jose_jwt{} -> undefined end catch Type:Err:Stacktrace -> {error, {invalid_token, Type, Err, Stacktrace}} end. -get_default_key() -> - UaaEnv = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), - case proplists:get_value(default_key, UaaEnv, undefined) of +get_default_key(ResourceServerId) -> + KeyConfig = rabbit_oauth2_config:get_key_config(ResourceServerId), + case proplists:get_value(default_key, KeyConfig, undefined) of undefined -> {error, no_key}; Val -> {ok, Val} end. diff --git a/deps/rabbitmq_auth_backend_oauth2/src/wildcard.erl b/deps/rabbitmq_auth_backend_oauth2/src/wildcard.erl index 6f8e5ba30ee5..ef0775c76966 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/wildcard.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/wildcard.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(wildcard). diff --git a/deps/rabbitmq_auth_backend_oauth2/test/add_signing_key_command_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/add_signing_key_command_SUITE.erl new file mode 100644 index 000000000000..d400f3fad8aa --- /dev/null +++ b/deps/rabbitmq_auth_backend_oauth2/test/add_signing_key_command_SUITE.erl @@ -0,0 +1,73 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(add_signing_key_command_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). + +-define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand'). + +all() -> + [validate_arguments, + validate_json_key, + validate_pem_key, + validate_pem_file_key + ]. + + +init_per_suite(Config) -> + rabbit_ct_helpers:run_setup_steps(Config, []). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config, []). + + +validate_arguments(_) -> + {validation_failure, too_many_args} = + ?COMMAND:validate([<<"one">>, <<"two">>], #{json => <<"{}">>}), + {validation_failure, not_enough_args} = + ?COMMAND:validate([], #{json => <<"{}">>}), + {validation_failure, {bad_argument, <<"No key specified">>}} = + ?COMMAND:validate([<<"foo">>], #{}), + {validation_failure, {bad_argument, <<"There can be only one key type">>}} = + ?COMMAND:validate([<<"foo">>], #{json => <<"{}">>, pem => <<"pem">>}), + {validation_failure, {bad_argument, <<"There can be only one key type">>}} = + ?COMMAND:validate([<<"foo">>], #{json => <<"{}">>, pem_file => <<"/tmp/key.pem">>}), + {validation_failure, {bad_argument, <<"There can be only one key type">>}} = + ?COMMAND:validate([<<"foo">>], #{pem => <<"pem">>, pem_file => <<"/tmp/key.pem">>}). + +validate_json_key(_) -> + {validation_failure, {bad_argument, <<"Invalid JSON">>}} = + ?COMMAND:validate([<<"foo">>], #{json => <<"foobar">>}), + {validation_failure, {bad_argument, <<"Json key should contain \"kty\" field">>}} = + ?COMMAND:validate([<<"foo">>], #{json => <<"{}">>}), + {validation_failure, {bad_argument, _}} = + ?COMMAND:validate([<<"foo">>], #{json => <<"{\"kty\": \"oct\"}">>}), + ValidJson = <<"{\"alg\":\"HS256\",\"k\":\"dG9rZW5rZXk\",\"kid\":\"token-key\",\"kty\":\"oct\",\"use\":\"sig\",\"value\":\"tokenkey\"}">>, + ok = ?COMMAND:validate([<<"foo">>], #{json => ValidJson}). + +validate_pem_key(Config) -> + {validation_failure, <<"Unable to read a key from the PEM string">>} = + ?COMMAND:validate([<<"foo">>], #{pem => <<"not a key">>}), + CertsDir = ?config(rmq_certsdir, Config), + Keyfile = filename:join([CertsDir, <<"client">>, <<"key.pem">>]), + {ok, Key} = file:read_file(Keyfile), + ok = ?COMMAND:validate([<<"foo">>], #{pem => Key}). + +validate_pem_file_key(Config) -> + {validation_failure, {bad_argument, <<"PEM file not found">>}} = + ?COMMAND:validate([<<"foo">>], #{pem_file => <<"non_existent_file">>}), + file:write_file("empty.pem", <<"">>), + {validation_failure, <<"Unable to read a key from the PEM file">>} = + ?COMMAND:validate([<<"foo">>], #{pem_file => <<"empty.pem">>}), + file:write_file("not_pem.pem", <<"">>), + {validation_failure, _} = + ?COMMAND:validate([<<"foo">>], #{pem_file => <<"not_pem.pem">>}), + CertsDir = ?config(rmq_certsdir, Config), + Keyfile = filename:join([CertsDir, <<"client">>, <<"key.pem">>]), + ok = ?COMMAND:validate([<<"foo">>], #{pem_file => Keyfile}). diff --git a/deps/rabbitmq_auth_backend_oauth2/test/add_uaa_key_command_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/add_uaa_key_command_SUITE.erl index e41b2e9a4685..24bfb2e42d1a 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/add_uaa_key_command_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/add_uaa_key_command_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(add_uaa_key_command_SUITE). -compile(export_all). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("common_test/include/ct.hrl"). -define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand'). @@ -72,4 +71,3 @@ validate_pem_file_key(Config) -> CertsDir = ?config(rmq_certsdir, Config), Keyfile = filename:join([CertsDir, <<"client">>, <<"key.pem">>]), ok = ?COMMAND:validate([<<"foo">>], #{pem_file => Keyfile}). - diff --git a/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE.erl index 6544fd2d11e4..750ef73d21c8 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE_data/rabbitmq_auth_backend_oauth2.snippets b/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE_data/rabbitmq_auth_backend_oauth2.snippets index 21aca91815a2..3d93e06d4d42 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE_data/rabbitmq_auth_backend_oauth2.snippets +++ b/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE_data/rabbitmq_auth_backend_oauth2.snippets @@ -12,6 +12,7 @@ auth_oauth2.signing_keys.id1 = test/config_schema_SUITE_data/certs/key.pem auth_oauth2.signing_keys.id2 = test/config_schema_SUITE_data/certs/cert.pem auth_oauth2.jwks_url = https://my-jwt-issuer/jwks.json + auth_oauth2.issuer = https://my-jwt-issuer auth_oauth2.https.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem auth_oauth2.https.peer_verification = verify_none auth_oauth2.https.depth = 5 @@ -28,6 +29,73 @@ {extra_scopes_source, <<"my_custom_scope_key">>}, {preferred_username_claims, [<<"user_name">>, <<"username">>, <<"email">>]}, {verify_aud, true}, + {issuer, "https://my-jwt-issuer"}, + {key_config, [ + {default_key, <<"id1">>}, + {signing_keys, + #{ + <<"id1">> => {pem, <<"I'm not a certificate">>}, + <<"id2">> => {pem, <<"I'm not a certificate">>} + } + }, + {jwks_url, "https://my-jwt-issuer/jwks.json"}, + {cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"}, + {peer_verification, verify_none}, + {depth, 5}, + {fail_if_no_peer_cert, false}, + {hostname_verification, wildcard}, + {crl_check, true}, + {algorithms, [<<"HS256">>, <<"RS256">>]} + ] + } + ]} + ],[] + }, + {oauth2_pem_config3, + "auth_oauth2.resource_server_id = new_resource_server_id + auth_oauth2.scope_prefix = new_resource_server_id. 
+ auth_oauth2.resource_server_type = new_resource_server_type + auth_oauth2.additional_scopes_key = my_custom_scope_key + auth_oauth2.preferred_username_claims.1 = user_name + auth_oauth2.preferred_username_claims.2 = username + auth_oauth2.preferred_username_claims.3 = email + auth_oauth2.verify_aud = true + auth_oauth2.default_key = id1 + auth_oauth2.signing_keys.id1 = test/config_schema_SUITE_data/certs/key.pem + auth_oauth2.signing_keys.id2 = test/config_schema_SUITE_data/certs/cert.pem + auth_oauth2.jwks_url = https://my-jwt-issuer/jwks.json + auth_oauth2.https.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem + auth_oauth2.https.peer_verification = verify_none + auth_oauth2.https.depth = 5 + auth_oauth2.https.fail_if_no_peer_cert = false + auth_oauth2.https.hostname_verification = wildcard + auth_oauth2.https.crl_check = true + auth_oauth2.algorithms.1 = HS256 + auth_oauth2.algorithms.2 = RS256 + auth_oauth2.resource_servers.1.id = rabbitmq-operations + auth_oauth2.resource_servers.1.scope_prefix = api:// + auth_oauth2.resource_servers.customers.id = rabbitmq-customers + auth_oauth2.resource_servers.customers.additional_scopes_key = roles", + [ + {rabbitmq_auth_backend_oauth2, [ + {resource_server_id,<<"new_resource_server_id">>}, + {scope_prefix,<<"new_resource_server_id.">>}, + {resource_server_type,<<"new_resource_server_type">>}, + {extra_scopes_source, <<"my_custom_scope_key">>}, + {preferred_username_claims, [<<"user_name">>, <<"username">>, <<"email">>]}, + {verify_aud, true}, + {resource_servers, + #{ + <<"rabbitmq-operations">> => [ + {id, <<"rabbitmq-operations">>}, + {scope_prefix, <<"api://">>} + ], + <<"rabbitmq-customers">> => [ + {id, <<"rabbitmq-customers">>}, + {additional_scopes_key, <<"roles">>} + ] + } + }, {key_config, [ {default_key, <<"id1">>}, {signing_keys, @@ -48,5 +116,51 @@ } ]} ],[] + }, + {oauth2_pem_config4, + "auth_oauth2.resource_server_id = new_resource_server_id + auth_oauth2.scope_prefix = new_resource_server_id. 
+ auth_oauth2.resource_server_type = new_resource_server_type + auth_oauth2.additional_scopes_key = my_custom_scope_key + auth_oauth2.preferred_username_claims.1 = user_name + auth_oauth2.preferred_username_claims.2 = username + auth_oauth2.preferred_username_claims.3 = email + auth_oauth2.verify_aud = true + auth_oauth2.oauth_providers.uaa.issuer = https://uaa + auth_oauth2.oauth_providers.keycloak.token_endpoint = https://keycloak/token + auth_oauth2.oauth_providers.keycloak.jwks_uri = https://keycloak/keys + auth_oauth2.oauth_providers.keycloak.authorization_endpoint = https://keycloak/authorize + auth_oauth2.oauth_providers.keycloak.end_session_endpoint = https://keycloak/logout + auth_oauth2.oauth_providers.keycloak.https.cacertfile = /mnt/certs/ca_certificate.pem + auth_oauth2.oauth_providers.keycloak.https.verify = verify_none", + [ + {rabbitmq_auth_backend_oauth2, [ + {resource_server_id,<<"new_resource_server_id">>}, + {scope_prefix,<<"new_resource_server_id.">>}, + {resource_server_type,<<"new_resource_server_type">>}, + {extra_scopes_source, <<"my_custom_scope_key">>}, + {preferred_username_claims, [<<"user_name">>, <<"username">>, <<"email">>]}, + {verify_aud, true}, + {oauth_providers, + #{ + <<"uaa">> => [ + {issuer, <<"https://uaa">>} + ], + <<"keycloak">> => [ + {https, [ + {verify, verify_none}, + {cacertfile, "/mnt/certs/ca_certificate.pem"} + ]}, + {end_session_endpoint, <<"https://keycloak/logout">>}, + {authorization_endpoint, <<"https://keycloak/authorize">>}, + {token_endpoint, <<"https://keycloak/token">>}, + {jwks_uri, <<"https://keycloak/keys">>} + ] + + } + } + ]} + ],[] } + ]. diff --git a/deps/rabbitmq_auth_backend_oauth2/test/jwks_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/jwks_SUITE.erl index cf568424fe71..ec72a0f46abf 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/jwks_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/jwks_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(jwks_SUITE). @@ -22,8 +22,8 @@ all() -> [ {group, happy_path}, {group, unhappy_path}, - {group, unvalidated_jwks_server}, - {group, no_peer_verification} + {group, no_peer_verification}, + {group, multi_resource} ]. groups() -> @@ -48,8 +48,14 @@ groups() -> test_failed_token_refresh_case1, test_failed_token_refresh_case2 ]}, - {unvalidated_jwks_server, [], [test_failed_connection_with_unvalidated_jwks_server]}, - {no_peer_verification, [], [{group, happy_path}, {group, unhappy_path}]} + {no_peer_verification, [], [ + {group, happy_path}, + {group, unhappy_path} + ]}, + {multi_resource, [], [ + test_m_successful_connection, + test_m_failed_connection_due_to_missing_key + ]} ]. 
%% @@ -82,8 +88,39 @@ init_per_group(no_peer_verification, Config) -> ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), rabbit_ct_helpers:set_config(Config, {key_config, KeyConfig}); +init_per_group(multi_resource, Config) -> + add_vhosts(Config), + ResourceServersConfig = + #{ + <<"rabbitmq1">> => [ + {id, <<"rabbitmq1">>}, + {oauth_provider_id, <<"one">>} + ], + <<"rabbitmq2">> => [ + {id, <<"rabbitmq2">>}, + {oauth_provider_id, <<"two">>} + ] + }, + OAuthProviders = + #{ + <<"one">> => [ + {issuer, strict_jwks_url(Config, "/")}, + {jwks_uri, strict_jwks_url(Config, "/jwks1")}, + {https, [{verify, verify_none}]} + ], + <<"two">> => [ + {issuer, strict_jwks_url(Config, "/")}, + {jwks_uri, strict_jwks_url(Config, "/jwks2")}, + {https, [{verify, verify_none}]} + ] + }, + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, resource_servers, ResourceServersConfig]), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders]), + Config; + init_per_group(_Group, Config) -> add_vhosts(Config), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, resource_server_id, ?RESOURCE_SERVER_ID]), Config. end_per_group(no_peer_verification, Config) -> @@ -138,12 +175,6 @@ init_per_testcase(Testcase, Config) when Testcase =:= test_failed_connection_wit rabbit_ct_helpers:testcase_started(Config, Testcase), Config; -init_per_testcase(Testcase, Config) when Testcase =:= test_failed_connection_with_unvalidated_jwks_server -> - KeyConfig = rabbit_ct_helpers:set_config(?config(key_config, Config), {jwks_url, ?config(non_strict_jwks_url, Config)}), - ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), - rabbit_ct_helpers:testcase_started(Config, Testcase), - Config; - init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase), Config. @@ -158,14 +189,13 @@ end_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_ Testcase =:= test_successful_connection_with_complex_claim_as_a_list orelse Testcase =:= test_successful_connection_with_complex_claim_as_a_binary -> rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>), - ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_auth_backend_oauth2, extra_scopes_source, undefined]), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, + [rabbitmq_auth_backend_oauth2, extra_scopes_source]), rabbit_ct_helpers:testcase_started(Config, Testcase), Config; end_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_with_algorithm_restriction orelse - Testcase =:= test_failed_connection_with_algorithm_restriction orelse - Testcase =:= test_failed_connection_with_unvalidated_jwks_server -> + Testcase =:= test_failed_connection_with_algorithm_restriction -> rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>), ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, ?config(key_config, Config)]), rabbit_ct_helpers:testcase_finished(Config, Testcase), @@ -183,32 +213,52 @@ preconfigure_node(Config) -> [rabbitmq_auth_backend_oauth2, resource_server_id, ?RESOURCE_SERVER_ID]), Config. 
-start_jwks_server(Config) -> +start_jwks_server(Config0) -> Jwk = ?UTIL_MOD:fixture_jwk(), + Jwk1 = ?UTIL_MOD:fixture_jwk(<<"token-key-1">>), + Jwk2 = ?UTIL_MOD:fixture_jwk(<<"token-key-2">>), + Jwk3 = ?UTIL_MOD:fixture_jwk(<<"token-key-3">>), %% Assume we don't have more than 100 ports allocated for tests - PortBase = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_ports_base), + PortBase = rabbit_ct_broker_helpers:get_node_config(Config0, 0, tcp_ports_base), JwksServerPort = PortBase + 100, + Config = rabbit_ct_helpers:set_config(Config0, [{jwksServerPort, JwksServerPort}]), %% Both URLs direct to the same JWKS server %% The NonStrictJwksUrl identity cannot be validated while StrictJwksUrl identity can be validated - NonStrictJwksUrl = "https://127.0.0.1:" ++ integer_to_list(JwksServerPort) ++ "/jwks", - StrictJwksUrl = "https://localhost:" ++ integer_to_list(JwksServerPort) ++ "/jwks", + NonStrictJwksUrl = non_strict_jwks_url(Config), + StrictJwksUrl = strict_jwks_url(Config), - ok = application:set_env(jwks_http, keys, [Jwk]), {ok, _} = application:ensure_all_started(ssl), {ok, _} = application:ensure_all_started(cowboy), CertsDir = ?config(rmq_certsdir, Config), - ok = jwks_http_app:start(JwksServerPort, CertsDir), + ok = jwks_http_app:start(JwksServerPort, CertsDir, + [ {"/jwks", [Jwk]}, + {"/jwks1", [Jwk1, Jwk3]}, + {"/jwks2", [Jwk2]} + ]), KeyConfig = [{jwks_url, StrictJwksUrl}, {peer_verification, verify_peer}, {cacertfile, filename:join([CertsDir, "testca", "cacert.pem"])}], ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), rabbit_ct_helpers:set_config(Config, - [{non_strict_jwks_url, NonStrictJwksUrl}, + [ + {non_strict_jwks_url, NonStrictJwksUrl}, {strict_jwks_url, StrictJwksUrl}, {key_config, KeyConfig}, - {fixture_jwk, Jwk}]). + {fixture_jwk, Jwk}, + {fixture_jwks_1, [Jwk1, Jwk3]}, + {fixture_jwks_2, [Jwk2]} + ]). +strict_jwks_url(Config) -> + strict_jwks_url(Config, "/jwks"). +strict_jwks_url(Config, Path) -> + "https://localhost:" ++ integer_to_list(?config(jwksServerPort, Config)) ++ Path. +non_strict_jwks_url(Config) -> + non_strict_jwks_url(Config, "/jwks"). +non_strict_jwks_url(Config, Path) -> + "https://127.0.0.1:" ++ integer_to_list(?config(jwksServerPort, Config)) ++ Path. + stop_jwks_server(Config) -> ok = jwks_http_app:stop(), @@ -222,9 +272,12 @@ generate_valid_token(Config, Scopes) -> generate_valid_token(Config, Scopes, Audience) -> Jwk = case rabbit_ct_helpers:get_config(Config, fixture_jwk) of - undefined -> ?UTIL_MOD:fixture_jwk(); - Value -> Value - end, + undefined -> ?UTIL_MOD:fixture_jwk(); + Value -> Value + end, + generate_valid_token(Config, Jwk, Scopes, Audience). + +generate_valid_token(_Config, Jwk, Scopes, Audience) -> Token = case Audience of undefined -> ?UTIL_MOD:fixture_token_with_scopes(Scopes); DefinedAudience -> maps:put(<<"aud">>, DefinedAudience, ?UTIL_MOD:fixture_token_with_scopes(Scopes)) @@ -264,18 +317,59 @@ preconfigure_token(Config) -> Token = generate_valid_token(Config), rabbit_ct_helpers:set_config(Config, {fixture_jwt, Token}). + %% %% Test Cases %% test_successful_connection_with_a_full_permission_token_and_all_defaults(Config) -> {_Algo, Token} = rabbit_ct_helpers:get_config(Config, fixture_jwt), + verify_queue_declare_with_token(Config, Token). 
+ +verify_queue_declare_with_token(Config, Token) -> Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token), {ok, Ch} = amqp_connection:open_channel(Conn), #'queue.declare_ok'{queue = _} = amqp_channel:call(Ch, #'queue.declare'{exclusive = true}), close_connection_and_channel(Conn, Ch). +test_m_successful_connection(Config) -> + {_Alg, Token1} = generate_valid_token( + Config, + lists:nth(1, ?config(fixture_jwks_1, Config)), + <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, + [<<"rabbitmq1">>] + ), + verify_queue_declare_with_token(Config, Token1), + + {_Alg2, Token2} = generate_valid_token( + Config, + lists:nth(2, ?config(fixture_jwks_1, Config)), + <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, + [<<"rabbitmq1">>] + ), + verify_queue_declare_with_token(Config, Token2), + + {_Alg3, Token3} = generate_valid_token( + Config, + lists:nth(1, ?config(fixture_jwks_2, Config)), + <<"rabbitmq2.configure:*/* rabbitmq2.write:*/* rabbitmq2.read:*/*">>, + [<<"rabbitmq2">>] + ), + verify_queue_declare_with_token(Config, Token3). + + +test_m_failed_connection_due_to_missing_key(Config) -> + {_Alg, Token} = generate_valid_token( + Config, + lists:nth(1, ?config(fixture_jwks_2, Config)), %% used signing key for rabbitmq2 instead of rabbitmq1 one + <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, + [<<"rabbitmq1">>] + ), + ?assertMatch({error, {auth_failure, _}}, + open_unmanaged_connection(Config, 0, <<"username">>, Token)). + + test_successful_connection_with_a_full_permission_token_and_explicitly_configured_vhost(Config) -> {_Algo, Token} = generate_valid_token(Config, [<<"rabbitmq.configure:vhost1/*">>, <<"rabbitmq.write:vhost1/*">>, @@ -290,7 +384,7 @@ test_successful_connection_with_simple_strings_for_aud_and_scope(Config) -> {_Algo, Token} = generate_valid_token( Config, <<"rabbitmq.configure:*/* rabbitmq.write:*/* rabbitmq.read:*/*">>, - <<"hare rabbitmq">> + [<<"hare">>, <<"rabbitmq">>] ), Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token), {ok, Ch} = amqp_connection:open_channel(Conn), @@ -323,7 +417,7 @@ test_successful_connection_with_complex_claim_as_a_list(Config) -> test_successful_connection_with_complex_claim_as_a_binary(Config) -> {_Algo, Token} = generate_valid_token_with_extra_fields( Config, - #{<<"additional_rabbitmq_scopes">> => <<"rabbitmq.configure:*/* rabbitmq.read:*/*" "rabbitmq.write:*/*">>} + #{<<"additional_rabbitmq_scopes">> => <<"rabbitmq.configure:*/* rabbitmq.read:*/* rabbitmq.write:*/*">>} ), Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token), {ok, Ch} = amqp_connection:open_channel(Conn), @@ -459,8 +553,3 @@ test_failed_connection_with_algorithm_restriction(Config) -> {_Algo, Token} = rabbit_ct_helpers:get_config(Config, fixture_jwt), ?assertMatch({error, {auth_failure, _}}, open_unmanaged_connection(Config, 0, <<"username">>, Token)). - -test_failed_connection_with_unvalidated_jwks_server(Config) -> - {_Algo, Token} = rabbit_ct_helpers:get_config(Config, fixture_jwt), - ?assertMatch({error, {auth_failure, _}}, - open_unmanaged_connection(Config, 0, <<"username">>, Token)). 
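For reference, the new multi_resource group above drives two resource servers against two OAuth providers purely through the application environment. A minimal sketch of that environment, assuming a placeholder <port> for the per-run test JWKS server and keeping the suite's test-only verify_none setting (not a production recommendation):

    %% Sketch of the env built in init_per_group(multi_resource, ...);
    %% <port> is a placeholder, the URLs mirror strict_jwks_url/2 in the suite.
    [{rabbitmq_auth_backend_oauth2, [
        {resource_servers, #{
            <<"rabbitmq1">> => [{id, <<"rabbitmq1">>}, {oauth_provider_id, <<"one">>}],
            <<"rabbitmq2">> => [{id, <<"rabbitmq2">>}, {oauth_provider_id, <<"two">>}]
        }},
        {oauth_providers, #{
            <<"one">> => [{issuer, "https://localhost:<port>/"},
                          {jwks_uri, "https://localhost:<port>/jwks1"},
                          {https, [{verify, verify_none}]}],
            <<"two">> => [{issuer, "https://localhost:<port>/"},
                          {jwks_uri, "https://localhost:<port>/jwks2"},
                          {https, [{verify, verify_none}]}]
        }}
    ]}].

With this shape, tokens signed with a key served under /jwks1 are accepted only for the rabbitmq1 audience and keys under /jwks2 only for rabbitmq2, which is what test_m_successful_connection and test_m_failed_connection_due_to_missing_key below assert.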
diff --git a/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_app.erl b/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_app.erl index c745e436f64d..d95c7f82f567 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_app.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_app.erl @@ -1,16 +1,17 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% -module(jwks_http_app). --export([start/2, stop/0]). +-export([start/3, stop/0]). -start(Port, CertsDir) -> - Dispatch = - cowboy_router:compile( - [ - {'_', [ - {"/jwks", jwks_http_handler, []} - ]} - ] - ), +start(Port, CertsDir, Mounts) -> + Endpoints = [ {Mount, jwks_http_handler, [{keys, Keys}]} || {Mount,Keys} <- Mounts ] ++ + [{"/.well-known/openid-configuration", openid_http_handler, []}], + Dispatch = cowboy_router:compile([{'_', Endpoints}]), {ok, _} = cowboy:start_tls(jwks_http_listener, [{port, Port}, {certfile, filename:join([CertsDir, "server", "cert.pem"])}, diff --git a/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_handler.erl b/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_handler.erl index 53af1d56d741..90751f18775a 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_handler.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_handler.erl @@ -1,10 +1,17 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% -module(jwks_http_handler). + -behavior(cowboy_handler). -export([init/2, terminate/3]). init(Req, State) -> - {ok, Keys} = application:get_env(jwks_http, keys), + Keys = proplists:get_value(keys, State, []), Body = rabbit_json:encode(#{keys => Keys}), Headers = #{<<"content-type">> => <<"application/json">>}, Req2 = cowboy_req:reply(200, Headers, Body, Req), diff --git a/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_sup.erl b/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_sup.erl index c0130cd0837a..bdbfa51f7982 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_sup.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/jwks_http_sup.erl @@ -1,4 +1,11 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% -module(jwks_http_sup). + -behaviour(supervisor). -export([start_link/0, init/1]). diff --git a/deps/rabbitmq_auth_backend_oauth2/test/oauth2_http_mock.erl b/deps/rabbitmq_auth_backend_oauth2/test/oauth2_http_mock.erl new file mode 100644 index 000000000000..fc4f87177e91 --- /dev/null +++ b/deps/rabbitmq_auth_backend_oauth2/test/oauth2_http_mock.erl @@ -0,0 +1,58 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. 
If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(oauth2_http_mock). + +-include_lib("eunit/include/eunit.hrl"). + +-export([init/2]). + +%%% CALLBACKS + +init(Req, #{request := ExpectedRequest, response := ExpectedResponse} = Expected) -> + ct:log("init oauth_http_mock Req:~p", [Req]), + match_request(Req, ExpectedRequest), + {Code, Headers, JsonPayload} = produce_expected_response(ExpectedResponse), + {ok, case JsonPayload of + undefined -> cowboy_req:reply(Code, Req); + _ -> cowboy_req:reply(Code, Headers, JsonPayload, Req) + end, Expected}. + +match_request_parameters_in_body(Req, #{parameters := Parameters}) -> + ?assertEqual(true, cowboy_req:has_body(Req)), + {ok, KeyValues, _Req2} = cowboy_req:read_urlencoded_body(Req), + [ ?assertEqual(Value, proplists:get_value(list_to_binary(Parameter), KeyValues)) + || {Parameter, Value} <- Parameters]. + +match_request(Req, #{method := Method} = ExpectedRequest) -> + ?assertEqual(Method, maps:get(method, Req)), + case maps:is_key(parameters, ExpectedRequest) of + true -> match_request_parameters_in_body(Req, ExpectedRequest); + false -> ok + end. + +produce_expected_response(ExpectedResponse) -> + case proplists:is_defined(content_type, ExpectedResponse) of + true -> + Payload = proplists:get_value(payload, ExpectedResponse), + case is_proplist(Payload) of + true -> + { proplists:get_value(code, ExpectedResponse), + #{<<"content-type">> => proplists:get_value(content_type, ExpectedResponse)}, + rabbit_json:encode(Payload) + }; + _ -> + { proplists:get_value(code, ExpectedResponse), + #{<<"content-type">> => proplists:get_value(content_type, ExpectedResponse)}, + Payload + } + end; + false -> {proplists:get_value(code, ExpectedResponse), undefined, undefined} + end. + + +is_proplist([{_Key, _Val}|_] = List) -> lists:all(fun({_K, _V}) -> true; (_) -> false end, List); +is_proplist(_) -> false. diff --git a/deps/rabbitmq_auth_backend_oauth2/test/openid_http_handler.erl b/deps/rabbitmq_auth_backend_oauth2/test/openid_http_handler.erl new file mode 100644 index 000000000000..192b1d72265a --- /dev/null +++ b/deps/rabbitmq_auth_backend_oauth2/test/openid_http_handler.erl @@ -0,0 +1,14 @@ +-module(openid_http_handler). +-behavior(cowboy_handler). + +-export([init/2, terminate/3]). + +init(Req, State) -> + OpenIdConfig = application:get_env(jwks_http, openid_config, #{}), + Body = rabbit_json:encode(OpenIdConfig), + Headers = #{<<"content-type">> => <<"application/json">>}, + Req2 = cowboy_req:reply(200, Headers, Body, Req), + {ok, Req2, State}. + +terminate(_Reason, _Req, _State) -> + ok. diff --git a/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl index 715806179b63..cea238a1e857 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. %% -module(rabbit_auth_backend_oauth2_test_util). @@ -40,12 +40,18 @@ sign_token(Token, Jwk, Jws) -> jose_jws:compact(Signed). fixture_jwk() -> + fixture_jwk(<<"token-key">>). + +fixture_jwk(TokenKey) -> + fixture_jwk(TokenKey, <<"dG9rZW5rZXk">>). + +fixture_jwk(TokenKey, K) -> #{<<"alg">> => <<"HS256">>, - <<"k">> => <<"dG9rZW5rZXk">>, - <<"kid">> => <<"token-key">>, + <<"k">> => K, + <<"kid">> => TokenKey, <<"kty">> => <<"oct">>, <<"use">> => <<"sig">>, - <<"value">> => <<"tokenkey">>}. + <<"value">> => TokenKey}. full_permission_scopes() -> [<<"rabbitmq.configure:*/*">>, @@ -78,7 +84,6 @@ fixture_token_with_scopes(Scopes) -> token_with_scopes_and_expiration(Scopes, Expiration) -> %% expiration is a timestamp with precision in seconds #{<<"exp">> => Expiration, - <<"kid">> => <<"token-key">>, <<"iss">> => <<"unit_test">>, <<"foo">> => <<"bar">>, <<"aud">> => [<<"rabbitmq">>], @@ -87,7 +92,6 @@ token_with_scopes_and_expiration(Scopes, Expiration) -> token_without_scopes() -> %% expiration is a timestamp with precision in seconds #{ - <<"kid">> => <<"token-key">>, <<"iss">> => <<"unit_test">>, <<"foo">> => <<"bar">>, <<"aud">> => [<<"rabbitmq">>] @@ -115,14 +119,12 @@ fixture_token_with_full_permissions() -> plain_token_without_scopes_and_aud() -> %% expiration is a timestamp with precision in seconds #{<<"exp">> => default_expiration_moment(), - <<"kid">> => <<"token-key">>, <<"iss">> => <<"unit_test">>, <<"foo">> => <<"bar">>}. token_with_scope_alias_in_scope_field(Value) -> %% expiration is a timestamp with precision in seconds #{<<"exp">> => default_expiration_moment(), - <<"kid">> => <<"token-key">>, <<"iss">> => <<"unit_test">>, <<"foo">> => <<"bar">>, <<"aud">> => [<<"rabbitmq">>], @@ -131,7 +133,6 @@ token_with_scope_alias_in_scope_field(Value) -> token_with_scope_alias_in_claim_field(Claims, Scopes) -> %% expiration is a timestamp with precision in seconds #{<<"exp">> => default_expiration_moment(), - <<"kid">> => <<"token-key">>, <<"iss">> => <<"unit_test">>, <<"foo">> => <<"bar">>, <<"aud">> => [<<"rabbitmq">>], diff --git a/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_config_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_config_SUITE.erl new file mode 100644 index 000000000000..b94f743baba0 --- /dev/null +++ b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_config_SUITE.erl @@ -0,0 +1,707 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_oauth2_config_SUITE). + +-compile(export_all). +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("oauth2_client/include/oauth2_client.hrl"). + +-define(RABBITMQ,<<"rabbitmq">>). +-define(RABBITMQ_RESOURCE_ONE,<<"rabbitmq1">>). +-define(RABBITMQ_RESOURCE_TWO,<<"rabbitmq2">>). +-define(AUTH_PORT, 8000). + + +all() -> + [ + {group, with_resource_server_id}, + {group, without_resource_server_id}, + {group, with_resource_servers}, + {group, with_resource_servers_and_resource_server_id}, + {group, inheritance_group} + + ]. 
+groups() -> + [ + {with_rabbitmq_node, [], [ + add_signing_keys_for_top_specific_resource_server, + add_signing_keys_for_top_level_resource_server, + + replace_signing_keys_for_top_level_resource_server, + replace_signing_keys_for_specific_resource_server + ] + }, + + {with_resource_server_id, [], [ + get_default_resource_server_id, + get_allowed_resource_server_ids_returns_resource_server_id, + find_audience_in_resource_server_ids_found_resource_server_id, + get_oauth_provider_should_fail, + {with_jwks_url, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri, + {with_oauth_providers_A_with_jwks_uri, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri, + {with_default_oauth_provider_A, [], [ + get_oauth_provider_should_return_oauth_provider_A_with_jwks_uri + ] + } + ] + }, + {with_oauth_providers_A_B_with_jwks_uri, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri, + {with_default_oauth_provider_B, [], [ + get_oauth_provider_should_return_oauth_provider_B_with_jwks_uri + ] + } + ] + } + ] + }, + {with_oauth_providers_A_with_jwks_uri, [], [ + get_oauth_provider_should_fail, + {with_default_oauth_provider_A, [], [ + get_oauth_provider_should_return_oauth_provider_A_with_jwks_uri + ] + } + ] + }, + {with_issuer, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints, + {with_oauth_providers_A_with_issuer, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints, + {with_default_oauth_provider_A, [], [ + get_oauth_provider_should_return_oauth_provider_A_with_all_discovered_endpoints + ] + } + ] + }, + {with_oauth_providers_A_B_with_issuer, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints, + {with_default_oauth_provider_B, [], [ + get_oauth_provider_should_return_oauth_provider_B_with_all_discovered_endpoints + ] + } + ] + } + ] + } + ] + }, + {without_resource_server_id, [], [ + get_default_resource_server_id_returns_error, + get_allowed_resource_server_ids_returns_empty_list + ] + }, + {with_resource_servers, [], [ + get_allowed_resource_server_ids_returns_resource_servers_ids, + find_audience_in_resource_server_ids_found_one_resource_servers, + index_resource_servers_by_id_else_by_key, + {with_jwks_url, [], [ + get_oauth_provider_for_both_resources_should_return_root_oauth_provider, + {with_oauth_providers_A_with_jwks_uri, [], [ + {with_default_oauth_provider_A, [], [ + get_oauth_provider_for_both_resources_should_return_oauth_provider_A + ] + } + ] + }, + {with_different_oauth_provider_for_each_resource, [], [ + {with_oauth_providers_A_B_with_jwks_uri, [], [ + get_oauth_provider_for_resource_one_should_return_oauth_provider_A, + get_oauth_provider_for_resource_two_should_return_oauth_provider_B + ]} + ] + } + ] + } + ] + }, + {with_resource_servers_and_resource_server_id, [], [ + get_allowed_resource_server_ids_returns_all_resource_servers_ids, + find_audience_in_resource_server_ids_found_resource_server_id, + find_audience_in_resource_server_ids_found_one_resource_servers, + find_audience_in_resource_server_ids_using_binary_audience + + ] + }, + + {inheritance_group, [], [ + get_key_config, + get_additional_scopes_key, + get_additional_scopes_key_when_not_defined, + is_verify_aud, + is_verify_aud_when_is_false, + get_default_preferred_username_claims, + get_preferred_username_claims, + get_scope_prefix, + get_scope_prefix_when_not_defined, + get_resource_server_type, + 
get_resource_server_type_when_not_defined, + has_scope_aliases, + has_scope_aliases_when_not_defined, + get_scope_aliases + ] + } + + ]. + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(with_rabbitmq_node, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, with_rabbitmq_node}, + {rmq_nodes_count, 1} + ]), + rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps()); + + +init_per_group(with_jwks_url, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, key_config, [{jwks_url,build_url_to_oauth_provider(<<"/keys">>)}]), + Config; +init_per_group(with_issuer, Config) -> + {ok, _} = application:ensure_all_started(inets), + {ok, _} = application:ensure_all_started(ssl), + application:ensure_all_started(cowboy), + CertsDir = ?config(rmq_certsdir, Config), + CaCertFile = filename:join([CertsDir, "testca", "cacert.pem"]), + SslOptions = ssl_options(verify_peer, false, CaCertFile), + + HttpOauthServerExpectations = get_openid_configuration_expectations(), + ListOfExpectations = maps:values(proplists:to_map(HttpOauthServerExpectations)), + + start_https_oauth_server(?AUTH_PORT, CertsDir, ListOfExpectations), + application:set_env(rabbitmq_auth_backend_oauth2, use_global_locks, false), + application:set_env(rabbitmq_auth_backend_oauth2, issuer, build_url_to_oauth_provider(<<"/">>)), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, SslOptions), + + [{ssl_options, SslOptions} | Config]; + +init_per_group(with_oauth_providers_A_with_jwks_uri, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + #{<<"A">> => [ + {issuer,build_url_to_oauth_provider(<<"/A">>) }, + {jwks_uri,build_url_to_oauth_provider(<<"/A/keys">>) } + ] } ), + Config; +init_per_group(with_oauth_providers_A_with_issuer, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + #{<<"A">> => [ + {issuer,build_url_to_oauth_provider(<<"/A">>) }, + {https, ?config(ssl_options, Config)} + ] } ), + Config; +init_per_group(with_oauth_providers_A_B_with_jwks_uri, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + #{ <<"A">> => [ + {issuer,build_url_to_oauth_provider(<<"/A">>) }, + {jwks_uri,build_url_to_oauth_provider(<<"/A/keys">>)} + ], + <<"B">> => [ + {issuer,build_url_to_oauth_provider(<<"/B">>) }, + {jwks_uri,build_url_to_oauth_provider(<<"/B/keys">>)} + ] }), + Config; +init_per_group(with_oauth_providers_A_B_with_issuer, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + #{ <<"A">> => [ + {issuer,build_url_to_oauth_provider(<<"/A">>) }, + {https, ?config(ssl_options, Config)} + ], + <<"B">> => [ + {issuer,build_url_to_oauth_provider(<<"/B">>) }, + {https, ?config(ssl_options, Config)} + ] }), + Config; + +init_per_group(with_default_oauth_provider_A, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, <<"A">>), + Config; + +init_per_group(with_default_oauth_provider_B, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, <<"B">>), + Config; + + + +init_per_group(with_resource_server_id, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, ?RABBITMQ), + Config; + +init_per_group(with_resource_servers_and_resource_server_id, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, 
resource_server_id, ?RABBITMQ), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, [{jwks_url,<<"https://oauth-for-rabbitmq">> }]), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + #{?RABBITMQ_RESOURCE_ONE => [ { key_config, [ + {jwks_url,<<"https://oauth-for-rabbitmq1">> } + ] + } + + ], + ?RABBITMQ_RESOURCE_TWO => [ { key_config, [ + {jwks_url,<<"https://oauth-for-rabbitmq2">> } + ] + } + ] + }), + Config; +init_per_group(with_different_oauth_provider_for_each_resource, Config) -> + {ok, ResourceServers} = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers), + Rabbit1 = maps:get(?RABBITMQ_RESOURCE_ONE, ResourceServers) ++ [ {oauth_provider_id, <<"A">>} ], + Rabbit2 = maps:get(?RABBITMQ_RESOURCE_TWO, ResourceServers) ++ [ {oauth_provider_id, <<"B">>} ], + ResourceServers1 = maps:update(?RABBITMQ_RESOURCE_ONE, Rabbit1, ResourceServers), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, maps:update(?RABBITMQ_RESOURCE_TWO, Rabbit2, ResourceServers1)), + Config; + +init_per_group(with_resource_servers, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + #{?RABBITMQ_RESOURCE_ONE => [ { key_config, [ + {jwks_url,<<"https://oauth-for-rabbitmq1">> } + ] + } + ], + ?RABBITMQ_RESOURCE_TWO => [ { key_config, [ + {jwks_url,<<"https://oauth-for-rabbitmq2">> } + ] + } + ], + <<"0">> => [ {id, <<"rabbitmq-0">> } ], + <<"1">> => [ {id, <<"rabbitmq-1">> } ] + + }), + Config; + +init_per_group(inheritance_group, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, ?RABBITMQ), + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_type, <<"rabbitmq-type">>), + application:set_env(rabbitmq_auth_backend_oauth2, scope_prefix, <<"some-prefix-">>), + application:set_env(rabbitmq_auth_backend_oauth2, extra_scopes_source, <<"roles">>), + application:set_env(rabbitmq_auth_backend_oauth2, scope_aliases, #{}), + + application:set_env(rabbitmq_auth_backend_oauth2, key_config, [ {jwks_url,<<"https://oauth-for-rabbitmq">> } ]), + + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + #{?RABBITMQ_RESOURCE_ONE => [ { key_config, [ {jwks_url,<<"https://oauth-for-rabbitmq1">> } ] }, + { extra_scopes_source, <<"extra-scope-1">>}, + { verify_aud, false}, + { preferred_username_claims, [<<"email-address">>] }, + { scope_prefix, <<"my-prefix:">> }, + { resource_server_type, <<"my-type">> }, + { scope_aliases, #{} } + ], + ?RABBITMQ_RESOURCE_TWO => [ {id, ?RABBITMQ_RESOURCE_TWO } ] + } + ), + Config; + +init_per_group(_any, Config) -> + Config. 
+ +end_per_group(with_rabbitmq_node, Config) -> + rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:teardown_steps()); + +end_per_group(with_resource_server_id, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + Config; +end_per_group(with_jwks_url, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, key_config), + Config; +end_per_group(with_issuer, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, issuer), + stop_http_auth_server(), + Config; +end_per_group(with_oauth_providers_A_with_jwks_uri, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; +end_per_group(with_oauth_providers_A_with_issuer, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; +end_per_group(with_oauth_providers_A_B_with_jwks_uri, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; +end_per_group(with_oauth_providers_A_B_with_issuer, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; + +end_per_group(with_oauth_providers_A, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; +end_per_group(with_oauth_providers_A_B, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; +end_per_group(with_default_oauth_provider_B, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, default_oauth_provider), + Config; +end_per_group(with_default_oauth_provider_A, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, default_oauth_provider), + Config; + +end_per_group(get_oauth_provider_for_resource_server_id, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + Config; + +end_per_group(with_resource_servers_and_resource_server_id, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + Config; + +end_per_group(with_resource_servers, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, resource_servers), + Config; + +end_per_group(with_different_oauth_provider_for_each_resource, Config) -> + {ok, ResourceServers} = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers), + Rabbit1 = proplists:delete(oauth_provider_id, maps:get(?RABBITMQ_RESOURCE_ONE, ResourceServers)), + Rabbit2 = proplists:delete(oauth_provider_id, maps:get(?RABBITMQ_RESOURCE_TWO, ResourceServers)), + ResourceServers1 = maps:update(?RABBITMQ_RESOURCE_ONE, Rabbit1, ResourceServers), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, maps:update(?RABBITMQ_RESOURCE_TWO, Rabbit2, ResourceServers1)), + Config; + +end_per_group(inheritance_group, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + application:unset_env(rabbitmq_auth_backend_oauth2, scope_prefix), + application:unset_env(rabbitmq_auth_backend_oauth2, extra_scopes_source), + + application:unset_env(rabbitmq_auth_backend_oauth2, key_config), + + application:unset_env(rabbitmq_auth_backend_oauth2, resource_servers), + Config; + +end_per_group(_any, Config) -> + Config. 
+ +init_per_testcase(get_preferred_username_claims, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, preferred_username_claims, [<<"username">>]), + Config; + +init_per_testcase(get_additional_scopes_key_when_not_defined, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, extra_scopes_source), + Config; +init_per_testcase(is_verify_aud_when_is_false, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, verify_aud, false), + Config; +init_per_testcase(get_scope_prefix_when_not_defined, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, scope_prefix), + Config; +init_per_testcase(get_resource_server_type_when_not_defined, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_type), + Config; +init_per_testcase(has_scope_aliases_when_not_defined, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, scope_aliases), + Config; + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(get_preferred_username_claims, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, preferred_username_claims), + Config; + + +end_per_testcase(_Testcase, Config) -> + Config. + +%% ----- + +call_add_signing_key(Config, Args) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, add_signing_key, Args). + +call_get_signing_keys(Config, Args) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, get_signing_keys, Args). + +call_get_signing_key(Config, Args) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, get_signing_key, Args). + +call_add_signing_keys(Config, Args) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, add_signing_keys, Args). + +call_replace_signing_keys(Config, Args) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, replace_signing_keys, Args). + +add_signing_keys_for_top_level_resource_server(Config) -> + #{<<"mykey-1">> := <<"some key 1">>} = call_add_signing_key(Config, [<<"mykey-1">>, <<"some key 1">>]), + #{<<"mykey-1">> := <<"some key 1">>} = call_get_signing_keys(Config, []), + + #{<<"mykey-1">> := <<"some key 1">>, <<"mykey-2">> := <<"some key 2">>} = call_add_signing_key(Config, [<<"mykey-2">>, <<"some key 2">>]), + #{<<"mykey-1">> := <<"some key 1">>, <<"mykey-2">> := <<"some key 2">>} = call_get_signing_keys(Config, []), + + ?assertEqual(<<"some key 1">>, call_get_signing_key(Config, [<<"mykey-1">>, ?RABBITMQ])). + +add_signing_keys_for_top_specific_resource_server(Config) -> + #{<<"mykey-3-1">> := <<"some key 3-1">>} = call_add_signing_key(Config, [<<"my-resource-server-3">>, <<"mykey-3-1">>, <<"some key 3-1">>]), + #{<<"mykey-4-1">> := <<"some key 4-1">>} = call_add_signing_key(Config, [<<"my-resource-server-4">>, <<"mykey-4-1">>, <<"some key 4-1">>]), + #{<<"mykey-3-1">> := <<"some key 3-1">>} = call_get_signing_keys(Config, [<<"my-resource-server-3">>]), + #{<<"mykey-4-1">> := <<"some key 4-1">>} = call_get_signing_keys(Config, [<<"my-resource-server-4">>]), + + #{<<"mykey-3-1">> := <<"some key 3-1">>, <<"mykey-3-2">> := <<"some key 3-2">>} = call_add_signing_key(Config, [<<"my-resource-server-3">>, <<"mykey-3-2">>, <<"some key 3-2">>]), + + #{<<"mykey-1">> := <<"some key 1">>} = call_add_signing_key(Config, [<<"mykey-1">>, <<"some key 1">>]), + #{<<"mykey-1">> := <<"some key 1">>} = call_get_signing_keys(Config, []), + + ?assertEqual(<<"some key 3-1">>, call_get_signing_key(Config, [<<"mykey-3-1">> , <<"my-resource-server-3">>])). 
+ +replace_signing_keys_for_top_level_resource_server(Config) -> + call_add_signing_key(Config, [<<"mykey-1">>, <<"some key 1">>]), + NewKeys = #{<<"key-2">> => <<"some key 2">>, <<"key-3">> => <<"some key 3">>}, + call_replace_signing_keys(Config, [NewKeys]), + #{<<"key-2">> := <<"some key 2">>, <<"key-3">> := <<"some key 3">>} = call_get_signing_keys(Config, []). + +replace_signing_keys_for_specific_resource_server(Config) -> + ResourceServerId = <<"my-resource-server-3">>, + #{<<"mykey-3-1">> := <<"some key 3-1">>} = call_add_signing_key(Config, [ResourceServerId, <<"mykey-3-1">>, <<"some key 3-1">>]), + NewKeys = #{<<"key-2">> => <<"some key 2">>, <<"key-3">> => <<"some key 3">>}, + call_replace_signing_keys(Config, [ResourceServerId, NewKeys]), + #{<<"key-2">> := <<"some key 2">>, <<"key-3">> := <<"some key 3">>} = call_get_signing_keys(Config, [ResourceServerId]). + +get_default_resource_server_id_returns_error(_Config) -> + {error, _} = rabbit_oauth2_config:get_default_resource_server_id(). + +get_default_resource_server_id(_Config) -> + ?assertEqual(?RABBITMQ, rabbit_oauth2_config:get_default_resource_server_id()). + +get_allowed_resource_server_ids_returns_empty_list(_Config) -> + [] = rabbit_oauth2_config:get_allowed_resource_server_ids(). + +get_allowed_resource_server_ids_returns_resource_server_id(_Config) -> + [?RABBITMQ] = rabbit_oauth2_config:get_allowed_resource_server_ids(). + +get_allowed_resource_server_ids_returns_all_resource_servers_ids(_Config) -> + [ <<"rabbitmq1">>, <<"rabbitmq2">>, ?RABBITMQ] = rabbit_oauth2_config:get_allowed_resource_server_ids(). + +get_allowed_resource_server_ids_returns_resource_servers_ids(_Config) -> + [<<"rabbitmq-0">>, <<"rabbitmq-1">>, <<"rabbitmq1">>, <<"rabbitmq2">> ] = + lists:sort(rabbit_oauth2_config:get_allowed_resource_server_ids()). + +index_resource_servers_by_id_else_by_key(_Config) -> + {error, no_matching_aud_found} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"0">>), + {ok, <<"rabbitmq-0">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids([<<"rabbitmq-0">>]), + {ok, <<"rabbitmq-0">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"rabbitmq-0">>). + +find_audience_in_resource_server_ids_returns_key_not_found(_Config) -> + {error, no_matching_aud_found} = rabbit_oauth2_config:find_audience_in_resource_server_ids(?RABBITMQ). + +find_audience_in_resource_server_ids_returns_found_too_many(_Config) -> + {error, only_one_resource_server_as_audience_found_many} = rabbit_oauth2_config:find_audience_in_resource_server_ids([?RABBITMQ, <<"rabbitmq1">>]). + +find_audience_in_resource_server_ids_found_one_resource_servers(_Config) -> + {ok, <<"rabbitmq1">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"rabbitmq1">>), + {ok, <<"rabbitmq1">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids([<<"rabbitmq1">>, <<"other">>]). + +find_audience_in_resource_server_ids_found_resource_server_id(_Config) -> + {ok, ?RABBITMQ} = rabbit_oauth2_config:find_audience_in_resource_server_ids(?RABBITMQ), + {ok, ?RABBITMQ} = rabbit_oauth2_config:find_audience_in_resource_server_ids([?RABBITMQ, <<"other">>]). + +find_audience_in_resource_server_ids_using_binary_audience(_Config) -> + {ok, ?RABBITMQ} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"rabbitmq other">>). 
+ +get_key_config(_Config) -> + RootKeyConfig = rabbit_oauth2_config:get_key_config(<<"rabbitmq-2">>), + ?assertEqual(<<"https://oauth-for-rabbitmq">>, proplists:get_value(jwks_url, RootKeyConfig)), + + KeyConfig = rabbit_oauth2_config:get_key_config(<<"rabbitmq1">>), + ?assertEqual(<<"https://oauth-for-rabbitmq1">>, proplists:get_value(jwks_url, KeyConfig)). + +get_additional_scopes_key(_Config) -> + ?assertEqual({ok, <<"roles">>}, rabbit_oauth2_config:get_additional_scopes_key()), + ?assertEqual({ok, <<"extra-scope-1">>}, rabbit_oauth2_config:get_additional_scopes_key(<<"rabbitmq1">> )), + ?assertEqual(rabbit_oauth2_config:get_additional_scopes_key(), rabbit_oauth2_config:get_additional_scopes_key(<<"rabbitmq2">>)), + ?assertEqual({ok, <<"roles">>}, rabbit_oauth2_config:get_additional_scopes_key(?RABBITMQ)). + +get_additional_scopes_key_when_not_defined(_Config) -> + ?assertEqual({error, not_found}, rabbit_oauth2_config:get_additional_scopes_key()), + ?assertEqual(rabbit_oauth2_config:get_additional_scopes_key(), rabbit_oauth2_config:get_additional_scopes_key(<<"rabbitmq2">>)). + +is_verify_aud(_Config) -> + ?assertEqual(true, rabbit_oauth2_config:is_verify_aud()), + ?assertEqual(rabbit_oauth2_config:is_verify_aud(?RABBITMQ), rabbit_oauth2_config:is_verify_aud()), + ?assertEqual(false, rabbit_oauth2_config:is_verify_aud(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:is_verify_aud(), rabbit_oauth2_config:is_verify_aud(<<"rabbitmq2">>)). + +is_verify_aud_when_is_false(_Config) -> + ?assertEqual(false, rabbit_oauth2_config:is_verify_aud()), + ?assertEqual(rabbit_oauth2_config:is_verify_aud(), rabbit_oauth2_config:is_verify_aud(<<"rabbitmq2">>)). + +get_default_preferred_username_claims(_Config) -> + ?assertEqual(rabbit_oauth2_config:get_default_preferred_username_claims(), rabbit_oauth2_config:get_preferred_username_claims()). + +get_preferred_username_claims(_Config) -> + ?assertEqual([<<"username">>] ++ rabbit_oauth2_config:get_default_preferred_username_claims(), + rabbit_oauth2_config:get_preferred_username_claims()), + ?assertEqual([<<"email-address">>] ++ rabbit_oauth2_config:get_default_preferred_username_claims(), + rabbit_oauth2_config:get_preferred_username_claims(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:get_preferred_username_claims(), + rabbit_oauth2_config:get_preferred_username_claims(<<"rabbitmq2">>)). + +get_scope_prefix_when_not_defined(_Config) -> + ?assertEqual(<<"rabbitmq.">>, rabbit_oauth2_config:get_scope_prefix()), + ?assertEqual(<<"rabbitmq2.">>, rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq2">>)). + +get_scope_prefix(_Config) -> + ?assertEqual(<<"some-prefix-">>, rabbit_oauth2_config:get_scope_prefix()), + ?assertEqual(<<"my-prefix:">>, rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:get_scope_prefix(), rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq2">>)). + +get_resource_server_type_when_not_defined(_Config) -> + ?assertEqual(<<>>, rabbit_oauth2_config:get_resource_server_type()), + ?assertEqual(<<>>, rabbit_oauth2_config:get_resource_server_type(<<"rabbitmq2">>)). + +get_resource_server_type(_Config) -> + ?assertEqual(<<"rabbitmq-type">>, rabbit_oauth2_config:get_resource_server_type()), + ?assertEqual(<<"my-type">>, rabbit_oauth2_config:get_resource_server_type(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:get_resource_server_type(), rabbit_oauth2_config:get_resource_server_type(<<"rabbitmq2">>)). 
+ +has_scope_aliases_when_not_defined(_Config) -> + ?assertEqual(false, rabbit_oauth2_config:has_scope_aliases(?RABBITMQ)), + ?assertEqual(true, rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:has_scope_aliases(?RABBITMQ), rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq2">>)). + +has_scope_aliases(_Config) -> + ?assertEqual(true, rabbit_oauth2_config:has_scope_aliases(?RABBITMQ)), + ?assertEqual(true, rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:has_scope_aliases(?RABBITMQ), rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq2">>)). + +get_scope_aliases(_Config) -> + ?assertEqual(#{}, rabbit_oauth2_config:get_scope_aliases(?RABBITMQ)), + ?assertEqual(#{}, rabbit_oauth2_config:get_scope_aliases(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:get_scope_aliases(?RABBITMQ), rabbit_oauth2_config:get_scope_aliases(<<"rabbitmq2">>)). + +get_oauth_provider_should_fail(_Config) -> + {error, _Message} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]). +get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri(_Config) -> + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri). +get_oauth_provider_for_both_resources_should_return_root_oauth_provider(_Config) -> + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_ONE, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_TWO, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri). +get_oauth_provider_for_resource_one_should_return_oauth_provider_A(_Config) -> + {ok, ResourceServers} = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers), + ct:log("ResourceServers : ~p", [ResourceServers]), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_ONE, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri). +get_oauth_provider_for_both_resources_should_return_oauth_provider_A(_Config) -> + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_ONE, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_TWO, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri). +get_oauth_provider_for_resource_two_should_return_oauth_provider_B(_Config) -> + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_TWO, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/B/keys">>), OAuthProvider#oauth_provider.jwks_uri). 
+ +get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints(_Config) -> + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri), + ?assertEqual(build_url_to_oauth_provider(<<"/">>), OAuthProvider#oauth_provider.issuer). +append_paths(Path1, Path2) -> + erlang:iolist_to_binary([Path1, Path2]). + +get_oauth_provider_should_return_oauth_provider_B_with_jwks_uri(_Config) -> + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/B/keys">>), OAuthProvider#oauth_provider.jwks_uri). + +get_oauth_provider_should_return_oauth_provider_B_with_all_discovered_endpoints(_Config) -> + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/B/keys">>), OAuthProvider#oauth_provider.jwks_uri), + ?assertEqual(build_url_to_oauth_provider(<<"/B">>), OAuthProvider#oauth_provider.issuer). + +get_oauth_provider_should_return_oauth_provider_A_with_jwks_uri(_Config) -> + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri). + +get_oauth_provider_should_return_oauth_provider_A_with_all_discovered_endpoints(_Config) -> + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri), + ?assertEqual(build_url_to_oauth_provider(<<"/A">>), OAuthProvider#oauth_provider.issuer). + +get_openid_configuration_expectations() -> + [ {get_root_openid_configuration, + + #{request => #{ + method => <<"GET">>, + path => <<"/.well-known/openid-configuration">> + }, + response => [ + {code, 200}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {issuer, build_url_to_oauth_provider(<<"/">>) }, + {jwks_uri, build_url_to_oauth_provider(<<"/keys">>)} + ]} + ] + } + }, + {get_A_openid_configuration, + + #{request => #{ + method => <<"GET">>, + path => <<"/A/.well-known/openid-configuration">> + }, + response => [ + {code, 200}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {issuer, build_url_to_oauth_provider(<<"/A">>) }, + {jwks_uri, build_url_to_oauth_provider(<<"/A/keys">>)} + ]} + ] + } + }, + {get_B_openid_configuration, + + #{request => #{ + method => <<"GET">>, + path => <<"/B/.well-known/openid-configuration">> + }, + response => [ + {code, 200}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {issuer, build_url_to_oauth_provider(<<"/B">>) }, + {jwks_uri, build_url_to_oauth_provider(<<"/B/keys">>)} + ]} + ] + } + } + ]. 
+ +start_https_oauth_server(Port, CertsDir, Expectations) when is_list(Expectations) -> + Dispatch = cowboy_router:compile([ + {'_', [{Path, oauth2_http_mock, Expected} || #{request := #{path := Path}} = Expected <- Expectations ]} + ]), + ct:log("start_https_oauth_server (port:~p) with expectation list : ~p -> dispatch: ~p", [Port, Expectations, Dispatch]), + {ok, Pid} = cowboy:start_tls( + mock_http_auth_listener, + [{port, Port}, + {certfile, filename:join([CertsDir, "server", "cert.pem"])}, + {keyfile, filename:join([CertsDir, "server", "key.pem"])} + ], + #{env => #{dispatch => Dispatch}}), + ct:log("Started on Port ~p and pid ~p", [ranch:get_port(mock_http_auth_listener), Pid]). + +build_url_to_oauth_provider(Path) -> + uri_string:recompose(#{scheme => "https", + host => "localhost", + port => rabbit_data_coercion:to_integer(?AUTH_PORT), + path => Path}). + +stop_http_auth_server() -> + cowboy:stop_listener(mock_http_auth_listener). + +-spec ssl_options(ssl:verify_type(), boolean(), file:filename()) -> list(). +ssl_options(PeerVerification, FailIfNoPeerCert, CaCertFile) -> + [{verify, PeerVerification}, + {depth, 10}, + {fail_if_no_peer_cert, FailIfNoPeerCert}, + {crl_check, false}, + {crl_cache, {ssl_crl_cache, {internal, [{http, 10000}]}}}, + {cacertfile, CaCertFile}]. diff --git a/deps/rabbitmq_auth_backend_oauth2/test/scope_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/scope_SUITE.erl index 65964871a540..7a7d2888ed17 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/scope_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/scope_SUITE.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(scope_SUITE). -compile(export_all). -include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). all() -> diff --git a/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl index 0c71607e5cbe..e17a76281411 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(system_SUITE). 
@@ -42,7 +42,12 @@ groups() -> test_failed_connection_with_expired_token, test_failed_connection_with_a_non_token, test_failed_connection_with_a_token_with_insufficient_vhost_permission, - test_failed_connection_with_a_token_with_insufficient_resource_permission + test_failed_connection_with_a_token_with_insufficient_resource_permission, + more_than_one_resource_server_id_not_allowed_in_one_token, + mqtt_expired_token, + mqtt_expirable_token, + web_mqtt_expirable_token, + amqp_expirable_token ]}, {token_refresh, [], [ @@ -213,11 +218,23 @@ init_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection rabbit_ct_helpers:testcase_started(Config, Testcase), Config; +init_per_testcase(multiple_resource_server_ids, Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, scope_prefix, <<"rmq.">> ]), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, resource_servers, #{ + <<"prod">> => [ ], + <<"dev">> => [ ] + }]), + rabbit_ct_helpers:testcase_started(Config, multiple_resource_server_ids), + Config; + init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase), Config. + %% %% Per-case Teardown %% @@ -270,6 +287,14 @@ end_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_ rabbit_ct_helpers:testcase_finished(Config, Testcase), Config; +end_per_testcase(multiple_resource_server_ids, Config) -> + rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, + [rabbitmq_auth_backend_oauth2, scope_prefix ]), + rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, + [rabbitmq_auth_backend_oauth2, resource_servers ]), + rabbit_ct_helpers:testcase_started(Config, multiple_resource_server_ids), + Config; + end_per_testcase(Testcase, Config) -> rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>), rabbit_ct_helpers:testcase_finished(Config, Testcase), @@ -363,7 +388,7 @@ test_successful_connection_with_simple_strings_for_aud_and_scope(Config) -> {_Algo, Token} = generate_valid_token( Config, <<"rabbitmq.configure:*/* rabbitmq.write:*/* rabbitmq.read:*/*">>, - <<"hare rabbitmq">> + [<<"hare">>, <<"rabbitmq">>] ), Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token), {ok, Ch} = amqp_connection:open_channel(Conn), @@ -401,15 +426,124 @@ mqtt(Config) -> {ok, Pub} = emqtt:start_link([{clientid, <<"mqtt-publisher">>} | Opts]), {ok, _} = emqtt:connect(Pub), {ok, _} = emqtt:publish(Pub, Topic, Payload, at_least_once), - receive - {publish, #{client_pid := Sub, - topic := Topic, - payload := Payload}} -> ok + receive {publish, #{client_pid := Sub, + topic := Topic, + payload := Payload}} -> ok after 1000 -> ct:fail("no publish received") end, ok = emqtt:disconnect(Sub), ok = emqtt:disconnect(Pub). +mqtt_expired_token(Config) -> + {_Algo, Token} = generate_expired_token(Config), + Opts = [{port, rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt)}, + {proto_ver, v5}, + {username, <<"">>}, + {password, Token}], + ClientId = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{clientid, ClientId} | Opts]), + true = unlink(C), + ?assertMatch({error, {bad_username_or_password, _}}, + emqtt:connect(C)). + +mqtt_expirable_token(Config) -> + mqtt_expirable_token0(tcp_port_mqtt, + [], + fun emqtt:connect/1, + Config). + +web_mqtt_expirable_token(Config) -> + mqtt_expirable_token0(tcp_port_web_mqtt, + [{ws_path, "/ws"}], + fun emqtt:ws_connect/1, + Config). 
+ +mqtt_expirable_token0(Port, AdditionalOpts, Connect, Config) -> + Topic = <<"test/topic">>, + Payload = <<"mqtt-test-message">>, + + Seconds = 4, + Millis = Seconds * 1000, + {_Algo, Token} = generate_expirable_token(Config, + [<<"rabbitmq.configure:*/*/*">>, + <<"rabbitmq.write:*/*/*">>, + <<"rabbitmq.read:*/*/*">>], + Seconds), + + Opts = [{port, rabbit_ct_broker_helpers:get_node_config(Config, 0, Port)}, + {proto_ver, v5}, + {username, <<"">>}, + {password, Token}] ++ AdditionalOpts, + {ok, Sub} = emqtt:start_link([{clientid, <<"my subscriber">>} | Opts]), + {ok, _} = Connect(Sub), + {ok, _, [1]} = emqtt:subscribe(Sub, Topic, at_least_once), + {ok, Pub} = emqtt:start_link([{clientid, <<"my publisher">>} | Opts]), + {ok, _} = Connect(Pub), + {ok, _} = emqtt:publish(Pub, Topic, Payload, at_least_once), + receive {publish, #{client_pid := Sub, + topic := Topic, + payload := Payload}} -> ok + after 1000 -> ct:fail("no publish received") + end, + + %% reason code "Maximum connect time" defined in + %% https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901208 + ReasonCode = 16#A0, + true = unlink(Sub), + true = unlink(Pub), + + %% In 4 seconds from now, we expect that RabbitMQ disconnects us because our token expired. + receive {disconnected, ReasonCode, _} -> ok + after Millis * 2 -> ct:fail("missing DISCONNECT packet from server") + end, + receive {disconnected, ReasonCode, _} -> ok + after Millis * 2 -> ct:fail("missing DISCONNECT packet from server") + end. + +amqp_expirable_token(Config) -> + {ok, _} = application:ensure_all_started(rabbitmq_amqp_client), + + Seconds = 4, + Millis = Seconds * 1000, + {_Algo, Token} = generate_expirable_token(Config, + [<<"rabbitmq.configure:*/*">>, + <<"rabbitmq.write:*/*">>, + <<"rabbitmq.read:*/*">>], + Seconds), + + %% Send and receive a message via AMQP 1.0. + QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + OpnConf = #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, <<"">>, Token}}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"my sender">>, Address), + receive {amqp10_event, {link, Sender, credited}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + Body = <<"hey">>, + Msg0 = amqp10_msg:new(<<"tag">>, Body), + ok = amqp10_client:send_msg(Sender, Msg0), + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"my receiver">>, Address), + {ok, Msg} = amqp10_client:get_msg(Receiver), + ?assertEqual([Body], amqp10_msg:body(Msg)), + + %% In 4 seconds from now, we expect that RabbitMQ disconnects us because our token expired. + receive {amqp10_event, + {connection, Connection, + {closed, {unauthorized_access, <<"credential expired">>}}}} -> + ok + after Millis * 2 -> + ct:fail("server did not close our connection") + end. 
+ test_successful_connection_with_complex_claim_as_a_map(Config) -> {_Algo, Token} = generate_valid_token_with_extra_fields( Config, @@ -626,3 +760,8 @@ test_failed_connection_with_non_existent_scope_alias_in_scope_field(Config) -> {_Algo, Token} = generate_valid_token(Config, <<"non-existent alias a8798s7doaisd79">>), ?assertMatch({error, not_allowed}, open_unmanaged_connection(Config, 0, <<"vhost2">>, <<"username">>, Token)). + + +more_than_one_resource_server_id_not_allowed_in_one_token(Config) -> + {_Algo, Token} = generate_valid_token(Config, <<"rmq.configure:*/*">>, [<<"prod">>, <<"dev">>]), + {error, _} = open_unmanaged_connection(Config, 0, <<"username">>, Token). diff --git a/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl index edda0445ffa6..2efc81f0fe98 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_SUITE). @@ -16,14 +16,11 @@ all() -> [ test_own_scope, - test_validate_payload_resource_server_id_mismatch, test_validate_payload_with_scope_prefix, test_validate_payload, test_validate_payload_without_scope, test_validate_payload_when_verify_aud_false, - test_successful_access_with_a_token, - test_successful_authentication_without_scopes, - test_successful_authorization_without_scopes, + test_unsuccessful_access_without_scopes, test_successful_access_with_a_token_with_variables_in_scopes, test_successful_access_with_a_parsed_token, @@ -31,26 +28,38 @@ all() -> test_unsuccessful_access_with_a_bogus_token, test_restricted_vhost_access_with_a_valid_token, test_insufficient_permissions_in_a_valid_token, - test_command_json, - test_command_pem, - test_username_from, - test_command_pem_no_kid, test_token_expiration, + test_invalid_signature, test_incorrect_kid, test_post_process_token_payload, test_post_process_token_payload_keycloak, test_post_process_payload_rich_auth_request, test_post_process_payload_rich_auth_request_using_regular_expression_with_cluster, - test_post_process_token_payload_complex_claims, - test_successful_access_with_a_token_that_uses_single_scope_alias_in_scope_field, - test_successful_access_with_a_token_that_uses_multiple_scope_aliases_in_scope_field, - test_successful_access_with_a_token_that_uses_single_scope_alias_in_scope_field_and_custom_scope_prefix, test_unsuccessful_access_with_a_token_that_uses_missing_scope_alias_in_scope_field, - test_successful_access_with_a_token_that_uses_single_scope_alias_in_extra_scope_source_field, - test_successful_access_with_a_token_that_uses_multiple_scope_aliases_in_extra_scope_source_field, test_unsuccessful_access_with_a_token_that_uses_missing_scope_alias_in_extra_scope_source_field, - test_default_ssl_options, - test_default_ssl_options_with_cacertfile + test_username_from, + {group, with_rabbitmq_node} + ]. 
+groups() -> + [ + {with_rabbitmq_node, [], [ + test_command_json, + test_command_pem, + test_command_pem_no_kid + ] + }, + {with_resource_server_id, [], [ + test_successful_access_with_a_token, + test_validate_payload_resource_server_id_mismatch, + test_successful_access_with_a_token_that_uses_single_scope_alias_in_scope_field, + test_successful_access_with_a_token_that_uses_multiple_scope_aliases_in_scope_field, + test_successful_authorization_without_scopes, + test_successful_authentication_without_scopes, + test_successful_access_with_a_token_that_uses_single_scope_alias_in_extra_scope_source_field, + test_successful_access_with_a_token_that_uses_multiple_scope_aliases_in_extra_scope_source_field, + test_post_process_token_payload_complex_claims, + test_successful_access_with_a_token_that_uses_single_scope_alias_in_scope_field_and_custom_scope_prefix + ]} ]. init_per_suite(Config) -> @@ -68,21 +77,45 @@ end_per_suite(Config) -> Env), rabbit_ct_helpers:run_teardown_steps(Config). +init_per_group(with_rabbitmq_node, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, signing_key_group}, + {rmq_nodes_count, 1} + ]), + Config2 = rabbit_ct_helpers:merge_app_env( + Config1, {rabbitmq_auth_backend_oauth2, [ + {resource_server_id, <<"rabbitmq">>}, + {key_config, [{default_key, <<"token-key">>}]} + ]}), + rabbit_ct_helpers:run_steps(Config2, rabbit_ct_broker_helpers:setup_steps()); + +init_per_group(with_resource_server_id, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), + Config; + +init_per_group(_, Config) -> + Config. + +end_per_group(with_rabbitmq_node, Config) -> + rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:teardown_steps()); + +end_per_group(_, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + Config. + init_per_testcase(test_post_process_token_payload_complex_claims, Config) -> application:set_env(rabbitmq_auth_backend_oauth2, extra_scopes_source, <<"additional_rabbitmq_scopes">>), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq-resource">>), Config; init_per_testcase(test_validate_payload_when_verify_aud_false, Config) -> application:set_env(rabbitmq_auth_backend_oauth2, verify_aud, false), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Config; init_per_testcase(test_post_process_payload_rich_auth_request, Config) -> application:set_env(rabbitmq_auth_backend_oauth2, resource_server_type, <<"rabbitmq-type">>), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Config; init_per_testcase(test_post_process_payload_rich_auth_request_using_regular_expression_with_cluster, Config) -> @@ -90,10 +123,6 @@ init_per_testcase(test_post_process_payload_rich_auth_request_using_regular_expr application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq-test">>), Config; -init_per_testcase(test_default_ssl_options_with_cacertfile, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, key_config, [{ cacertfile, filename:join(["testca", "cacert.pem"]) }] ), - Config; - init_per_testcase(_, Config) -> Config. 
@@ -102,10 +131,6 @@ end_per_testcase(test_post_process_token_payload_complex_claims, Config) -> application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, undefined), Config; -end_per_testcase(test_default_ssl_options_with_cacertfile, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, key_config, undefined), - Config; - end_per_testcase(_, Config) -> Config. @@ -139,8 +164,12 @@ post_process_token_payload(Audience, Scopes) -> Jwk = ?UTIL_MOD:fixture_jwk(), Token = maps:put(<<"aud">>, Audience, ?UTIL_MOD:fixture_token_with_scopes(Scopes)), {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(Token, Jwk), - {true, Payload} = uaa_jwt_jwt:decode_and_verify(Jwk, EncodedToken), - rabbit_auth_backend_oauth2:post_process_payload(Payload). + case rabbit_oauth2_config:find_audience_in_resource_server_ids(Audience) of + {ok, TargetResourceServerId} -> + {true, Payload} = uaa_jwt_jwt:decode_and_verify(TargetResourceServerId, Jwk, EncodedToken), + rabbit_auth_backend_oauth2:post_process_payload(TargetResourceServerId, Payload); + {error, _} = Error -> Error + end. test_post_process_token_payload_keycloak(_) -> Pairs = [ @@ -202,8 +231,8 @@ post_process_payload_with_keycloak_authorization(Authorization) -> Jwk = ?UTIL_MOD:fixture_jwk(), Token = maps:put(<<"authorization">>, Authorization, ?UTIL_MOD:fixture_token_with_scopes([])), {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(Token, Jwk), - {true, Payload} = uaa_jwt_jwt:decode_and_verify(Jwk, EncodedToken), - rabbit_auth_backend_oauth2:post_process_payload(Payload). + {true, Payload} = uaa_jwt_jwt:decode_and_verify(<<"rabbitmq">>, Jwk, EncodedToken), + rabbit_auth_backend_oauth2:post_process_payload(<<"rabbitmq">>, Payload). test_post_process_payload_rich_auth_request_using_regular_expression_with_cluster(_) -> @@ -244,7 +273,7 @@ test_post_process_payload_rich_auth_request_using_regular_expression_with_cluste lists:foreach( fun({Case, Permissions, ExpectedScope}) -> - Payload = post_process_payload_with_rich_auth_request(Permissions), + Payload = post_process_payload_with_rich_auth_request(<<"rabbitmq-test">>, Permissions), ?assertEqual(lists:sort(ExpectedScope), lists:sort(maps:get(<<"scope">>, Payload)), Case) end, Pairs). @@ -542,16 +571,16 @@ test_post_process_payload_rich_auth_request(_) -> lists:foreach( fun({Case, Permissions, ExpectedScope}) -> - Payload = post_process_payload_with_rich_auth_request(Permissions), + Payload = post_process_payload_with_rich_auth_request(<<"rabbitmq">>, Permissions), ?assertEqual(lists:sort(ExpectedScope), lists:sort(maps:get(<<"scope">>, Payload)), Case) end, Pairs). -post_process_payload_with_rich_auth_request(Permissions) -> +post_process_payload_with_rich_auth_request(ResourceServerId, Permissions) -> Jwk = ?UTIL_MOD:fixture_jwk(), Token = maps:put(<<"authorization_details">>, Permissions, ?UTIL_MOD:plain_token_without_scopes_and_aud()), {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(Token, Jwk), - {true, Payload} = uaa_jwt_jwt:decode_and_verify(Jwk, EncodedToken), - rabbit_auth_backend_oauth2:post_process_payload(Payload). + {true, Payload} = uaa_jwt_jwt:decode_and_verify(<<"rabbitmq">>, Jwk, EncodedToken), + rabbit_auth_backend_oauth2:post_process_payload(ResourceServerId, Payload). 
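+%% The helpers above now pass the target resource server id explicitly to
+%% uaa_jwt_jwt:decode_and_verify/3 and
+%% rabbit_auth_backend_oauth2:post_process_payload/2.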
test_post_process_token_payload_complex_claims(_) -> Pairs = [ @@ -612,22 +641,21 @@ test_post_process_token_payload_complex_claims(_) -> ], lists:foreach( fun({Authorization, ExpectedScope}) -> - Payload = post_process_payload_with_complex_claim_authorization(Authorization), + Payload = post_process_payload_with_complex_claim_authorization(<<"rabbitmq-resource">>, Authorization), ?assertEqual(ExpectedScope, maps:get(<<"scope">>, Payload)) end, Pairs). -post_process_payload_with_complex_claim_authorization(Authorization) -> +post_process_payload_with_complex_claim_authorization(ResourceServerId, Authorization) -> Jwk = ?UTIL_MOD:fixture_jwk(), Token = maps:put(<<"additional_rabbitmq_scopes">>, Authorization, ?UTIL_MOD:fixture_token_with_scopes([])), {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(Token, Jwk), {true, Payload} = uaa_jwt_jwt:decode_and_verify(Jwk, EncodedToken), - rabbit_auth_backend_oauth2:post_process_payload(Payload). + rabbit_auth_backend_oauth2:post_process_payload(ResourceServerId, Payload). test_successful_authentication_without_scopes(_) -> Jwk = ?UTIL_MOD:fixture_jwk(), UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Username = <<"username">>, Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk), @@ -639,7 +667,6 @@ test_successful_authorization_without_scopes(_) -> Jwk = ?UTIL_MOD:fixture_jwk(), UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Username = <<"username">>, Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk), @@ -654,7 +681,6 @@ test_successful_access_with_a_token(_) -> Jwk = ?UTIL_MOD:fixture_jwk(), UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), VHost = <<"vhost">>, Username = <<"username">>, @@ -662,8 +688,8 @@ test_successful_access_with_a_token(_) -> {ok, #auth_user{username = Username} = User} = rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]), - {ok, #auth_user{username = Username} = User} = - rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}), +% {ok, #auth_user{username = Username} = User} = +% rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}), ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)), assert_resource_access_granted(User, VHost, <<"foo">>, configure), @@ -680,7 +706,6 @@ test_successful_access_with_a_token_with_variables_in_scopes(_) -> Jwk = ?UTIL_MOD:fixture_jwk(), UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), VHost = <<"my-vhost">>, Username = <<"username">>, @@ -696,7 +721,6 @@ test_successful_access_with_a_parsed_token(_) -> Jwk = ?UTIL_MOD:fixture_jwk(), UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - 
application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Username = <<"username">>, Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk), @@ -711,7 +735,6 @@ test_successful_access_with_a_token_that_has_tag_scopes(_) -> Jwk = ?UTIL_MOD:fixture_jwk(), UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Username = <<"username">>, Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token( [<<"rabbitmq.tag:management">>, <<"rabbitmq.tag:policymaker">>]), Username), Jwk), @@ -723,7 +746,6 @@ test_successful_access_with_a_token_that_uses_single_scope_alias_in_scope_field( Jwk = ?UTIL_MOD:fixture_jwk(), UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Alias = <<"client-alias-1">>, application:set_env(rabbitmq_auth_backend_oauth2, scope_aliases, #{ Alias => [ @@ -742,7 +764,7 @@ test_successful_access_with_a_token_that_uses_single_scope_alias_in_scope_field( Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub( ?UTIL_MOD:token_with_scope_alias_in_scope_field(Alias), Username), Jwk), - {ok, #auth_user{username = Username, tags = [custom, management]} = AuthUser} = + {ok, #auth_user{username = Username} = AuthUser} = rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]), assert_vhost_access_granted(AuthUser, VHost), assert_vhost_access_denied(AuthUser, <<"some-other-vhost">>), @@ -764,7 +786,6 @@ test_successful_access_with_a_token_that_uses_single_scope_alias_in_scope_field_ Jwk = ?UTIL_MOD:fixture_jwk(), UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), application:set_env(rabbitmq_auth_backend_oauth2, scope_prefix, <<>>), Alias = <<"client-alias-1">>, application:set_env(rabbitmq_auth_backend_oauth2, scope_aliases, #{ @@ -784,7 +805,7 @@ test_successful_access_with_a_token_that_uses_single_scope_alias_in_scope_field_ Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub( ?UTIL_MOD:token_with_scope_alias_in_scope_field(Alias), Username), Jwk), - {ok, #auth_user{username = Username, tags = [custom, management]} = AuthUser} = + {ok, #auth_user{username = Username} = AuthUser} = rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]), assert_vhost_access_granted(AuthUser, VHost), assert_vhost_access_denied(AuthUser, <<"some-other-vhost">>), @@ -806,7 +827,6 @@ test_successful_access_with_a_token_that_uses_multiple_scope_aliases_in_scope_fi Jwk = ?UTIL_MOD:fixture_jwk(), UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Role1 = <<"client-aliases-1">>, Role2 = <<"client-aliases-2">>, Role3 = <<"client-aliases-3">>, @@ -831,7 +851,7 @@ test_successful_access_with_a_token_that_uses_multiple_scope_aliases_in_scope_fi Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub( ?UTIL_MOD:token_with_scope_alias_in_scope_field([Role1, Role2, Role3]), Username), Jwk), - {ok, 
#auth_user{username = Username, tags = [custom, management]} = AuthUser} = + {ok, #auth_user{username = Username} = AuthUser} = rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]), assert_vhost_access_granted(AuthUser, VHost), assert_vhost_access_denied(AuthUser, <<"some-other-vhost">>), @@ -890,7 +910,6 @@ test_successful_access_with_a_token_that_uses_single_scope_alias_in_extra_scope_ UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), application:set_env(rabbitmq_auth_backend_oauth2, extra_scopes_source, <<"claims">>), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Alias = <<"client-alias-1">>, application:set_env(rabbitmq_auth_backend_oauth2, scope_aliases, #{ Alias => [ @@ -928,7 +947,6 @@ test_successful_access_with_a_token_that_uses_multiple_scope_aliases_in_extra_sc UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), application:set_env(rabbitmq_auth_backend_oauth2, extra_scopes_source, <<"claims">>), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), Role1 = <<"client-aliases-1">>, Role2 = <<"client-aliases-2">>, Role3 = <<"client-aliases-3">>, @@ -1027,10 +1045,9 @@ test_unsuccessful_access_without_scopes(_) -> UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}], application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), - {ok, #auth_user{username = Username, tags = [], impl = CredentialsFun } = AuthUser} = + {ok, #auth_user{username = Username, tags = [], impl = _CredentialsFun } = AuthUser} = rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]), - ct:log("authuser ~p ~p ", [AuthUser, CredentialsFun()]), assert_vhost_access_denied(AuthUser, <<"vhost">>). test_restricted_vhost_access_with_a_valid_token(_) -> @@ -1067,6 +1084,18 @@ test_insufficient_permissions_in_a_valid_token(_) -> assert_resource_access_denied(User, VHost, <<"bar">>, write), assert_topic_access_refused(User, VHost, <<"bar">>, read, #{routing_key => <<"foo/#">>}). +test_invalid_signature(_) -> + Username = <<"username">>, + Jwk = ?UTIL_MOD:fixture_jwk(), + WrongJwk = ?UTIL_MOD:fixture_jwk("wrong", <<"GawgguFyGrWKav7AX4VKUg">>), + UaaEnv = [{signing_keys, #{<<"token-key">> => {map, WrongJwk}}}], + application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv), + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), + TokenData = ?UTIL_MOD:token_with_sub(?UTIL_MOD:expirable_token(), Username), + Token = ?UTIL_MOD:sign_token_hs(TokenData, Jwk), + ?assertMatch({refused, _, [signature_invalid]}, + rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}])). 
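+
+%% Note: the key configured above (WrongJwk) is not the key that signed the
+%% token, so authentication must be refused with signature_invalid.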
+ test_token_expiration(_) -> VHost = <<"vhost">>, Username = <<"username">>, @@ -1081,6 +1110,10 @@ test_token_expiration(_) -> assert_resource_access_granted(User, VHost, <<"foo">>, configure), assert_resource_access_granted(User, VHost, <<"foo">>, write), + Now = os:system_time(seconds), + ExpiryTs = rabbit_auth_backend_oauth2:expiry_timestamp(User), + ?assert(ExpiryTs > (Now - 10)), + ?assert(ExpiryTs < (Now + 10)), ?UTIL_MOD:wait_for_token_to_expire(), #{<<"exp">> := Exp} = TokenData, @@ -1094,50 +1127,30 @@ test_incorrect_kid(_) -> AltKid = <<"other-token-key">>, Username = <<"username">>, Jwk = ?UTIL_MOD:fixture_jwk(), - Jwk1 = Jwk#{<<"kid">> := AltKid}, application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), - Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk1), - - ?assertMatch({refused, "Authentication using an OAuth 2/JWT token failed: ~tp", [{error,key_not_found}]}, + Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk, AltKid), + ?assertMatch({refused, "Authentication using an OAuth 2/JWT token failed: ~tp", [{error,{missing_oauth_provider_attributes, [issuer]}}]}, rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token})). -test_command_json(_) -> +login_and_check_vhost_access(Username, Token, Vhost) -> + {ok, #auth_user{username = Username} = User} = + rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}), + + ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, Vhost)). + +test_command_json(Config) -> Username = <<"username">>, Jwk = ?UTIL_MOD:fixture_jwk(), Json = rabbit_json:encode(Jwk), + 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run( [<<"token-key">>], - #{node => node(), json => Json}), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), + #{node => rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), json => Json}), Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk), - {ok, #auth_user{username = Username} = User} = - rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}), - - ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)). + rabbit_ct_broker_helpers:rpc(Config, 0, unit_SUITE, login_and_check_vhost_access, [Username, Token, none]). 
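+
+%% The CLI command tests now perform the login and vhost-access check on the
+%% broker node via rabbit_ct_broker_helpers:rpc/5, reusing
+%% login_and_check_vhost_access/3 defined above.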
test_username_from(_) -> Pairs = [ - { <<"resolved username from DEFAULT_PREFERRED_USERNAME_CLAIMS 'sub' ">>, % Comment - [ ], % Given this configure preferred_username_claims - #{ % When we test this Token - <<"sub">> => <<"rabbit_user">> - }, - <<"rabbit_user">> % We expect username to be this one - }, - { <<"resolved username from DEFAULT_PREFERRED_USERNAME_CLAIMS when there are no preferred_username_claims">>, % Comment - <<>>, % Given this configure preferred_username_claims - #{ % When we test this Token - <<"sub">> => <<"rabbit_user">> - }, - <<"rabbit_user">> % We expect username to be this one - }, - { <<"resolved username from DEFAULT_PREFERRED_USERNAME_CLAIMS 'client_id' ">>, % Comment - [ ], % Given this configure preferred_username_claims - #{ % When we test this Token - <<"client_id">> => <<"rabbit_user">> - }, - <<"rabbit_user">> % We expect username to be this one - }, { <<"resolve username from 1st claim in the array of configured claims ">>, [<<"user_name">>, <<"email">>], #{ @@ -1154,7 +1167,7 @@ test_username_from(_) -> <<"rabbit_user">> }, { <<"resolve username from configured string claim ">>, - <<"email">>, + [<<"email">>], #{ <<"email">> => <<"rabbit_user">> }, @@ -1178,7 +1191,6 @@ test_username_from(_) -> test_command_pem_file(Config) -> Username = <<"username">>, - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), CertsDir = ?config(rmq_certsdir, Config), Keyfile = filename:join([CertsDir, "client", "key.pem"]), Jwk = jose_jwk:from_pem_file(Keyfile), @@ -1189,45 +1201,14 @@ test_command_pem_file(Config) -> 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run( [<<"token-key">>], - #{node => node(), pem_file => PublicKeyFile}), + #{node => rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), pem_file => PublicKeyFile}), Token = ?UTIL_MOD:sign_token_rsa(?UTIL_MOD:fixture_token(), Jwk, <<"token-key">>), - {ok, #auth_user{username = Username} = User} = - rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}), + rabbit_ct_broker_helpers:rpc(Config, 0, unit_SUITE, login_and_check_vhost_access, [Username, Token, none]). - ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)). - - -test_command_pem_file_no_kid(Config) -> - Username = <<"username">>, - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), - CertsDir = ?config(rmq_certsdir, Config), - Keyfile = filename:join([CertsDir, "client", "key.pem"]), - Jwk = jose_jwk:from_pem_file(Keyfile), - - PublicJwk = jose_jwk:to_public(Jwk), - PublicKeyFile = filename:join([CertsDir, "client", "public.pem"]), - jose_jwk:to_pem_file(PublicKeyFile, PublicJwk), - - 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run( - [<<"token-key">>], - #{node => node(), pem_file => PublicKeyFile}), - - %% Set default key - {ok, UaaEnv0} = application:get_env(rabbitmq_auth_backend_oauth2, key_config), - UaaEnv1 = proplists:delete(default_key, UaaEnv0), - UaaEnv2 = [{default_key, <<"token-key">>} | UaaEnv1], - application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv2), - - Token = ?UTIL_MOD:sign_token_no_kid(?UTIL_MOD:fixture_token(), Jwk), - {ok, #auth_user{username = Username} = User} = - rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}), - - ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)). 
test_command_pem(Config) -> Username = <<"username">>, - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), CertsDir = ?config(rmq_certsdir, Config), Keyfile = filename:join([CertsDir, "client", "key.pem"]), Jwk = jose_jwk:from_pem_file(Keyfile), @@ -1236,18 +1217,13 @@ test_command_pem(Config) -> 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run( [<<"token-key">>], - #{node => node(), pem => Pem}), + #{node => rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), pem => Pem}), Token = ?UTIL_MOD:sign_token_rsa(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk, <<"token-key">>), - {ok, #auth_user{username = Username} = User} = - rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}), - - ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)). - + rabbit_ct_broker_helpers:rpc(Config, 0, unit_SUITE, login_and_check_vhost_access, [Username, Token, none]). test_command_pem_no_kid(Config) -> Username = <<"username">>, - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), CertsDir = ?config(rmq_certsdir, Config), Keyfile = filename:join([CertsDir, "client", "key.pem"]), Jwk = jose_jwk:from_pem_file(Keyfile), @@ -1256,19 +1232,10 @@ test_command_pem_no_kid(Config) -> 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run( [<<"token-key">>], - #{node => node(), pem => Pem}), - - %% This is the default key - {ok, UaaEnv0} = application:get_env(rabbitmq_auth_backend_oauth2, key_config), - UaaEnv1 = proplists:delete(default_key, UaaEnv0), - UaaEnv2 = [{default_key, <<"token-key">>} | UaaEnv1], - application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv2), + #{node => rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), pem => Pem}), Token = ?UTIL_MOD:sign_token_no_kid(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk), - {ok, #auth_user{username = Username} = User} = - rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}), - - ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)). + rabbit_ct_broker_helpers:rpc(Config, 0, unit_SUITE, login_and_check_vhost_access, [Username, Token, none]). test_own_scope(_) -> @@ -1296,10 +1263,10 @@ test_validate_payload_resource_server_id_mismatch(_) -> ?assertEqual({refused, {invalid_aud, {resource_id_not_found_in_aud, ?RESOURCE_SERVER_ID, [<<"foo">>,<<"bar">>]}}}, - rabbit_auth_backend_oauth2:validate_payload(NoKnownResourceServerId, ?RESOURCE_SERVER_ID, ?DEFAULT_SCOPE_PREFIX)), + rabbit_auth_backend_oauth2:validate_payload(?RESOURCE_SERVER_ID, NoKnownResourceServerId, ?DEFAULT_SCOPE_PREFIX)), ?assertEqual({refused, {invalid_aud, {resource_id_not_found_in_aud, ?RESOURCE_SERVER_ID, []}}}, - rabbit_auth_backend_oauth2:validate_payload(EmptyAud, ?RESOURCE_SERVER_ID, ?DEFAULT_SCOPE_PREFIX)). + rabbit_auth_backend_oauth2:validate_payload(?RESOURCE_SERVER_ID, EmptyAud, ?DEFAULT_SCOPE_PREFIX)). test_validate_payload_with_scope_prefix(_) -> Scenarios = [ { <<>>, @@ -1317,7 +1284,7 @@ test_validate_payload_with_scope_prefix(_) -> lists:map(fun({ ScopePrefix, Token, ExpectedScopes}) -> ?assertEqual({ok, #{<<"aud">> => [?RESOURCE_SERVER_ID], <<"scope">> => ExpectedScopes } }, - rabbit_auth_backend_oauth2:validate_payload(Token, ?RESOURCE_SERVER_ID, ScopePrefix)) + rabbit_auth_backend_oauth2:validate_payload(?RESOURCE_SERVER_ID, Token, ScopePrefix)) end , Scenarios). 
@@ -1328,13 +1295,13 @@ test_validate_payload(_) -> <<"foobar">>, <<"rabbitmq.other.third">>]}, ?assertEqual({ok, #{<<"aud">> => [?RESOURCE_SERVER_ID], <<"scope">> => [<<"bar">>, <<"other.third">>]}}, - rabbit_auth_backend_oauth2:validate_payload(KnownResourceServerId, ?RESOURCE_SERVER_ID, ?DEFAULT_SCOPE_PREFIX)). + rabbit_auth_backend_oauth2:validate_payload(?RESOURCE_SERVER_ID, KnownResourceServerId, ?DEFAULT_SCOPE_PREFIX)). test_validate_payload_without_scope(_) -> KnownResourceServerId = #{<<"aud">> => [?RESOURCE_SERVER_ID] }, ?assertEqual({ok, #{<<"aud">> => [?RESOURCE_SERVER_ID] }}, - rabbit_auth_backend_oauth2:validate_payload(KnownResourceServerId, ?RESOURCE_SERVER_ID, ?DEFAULT_SCOPE_PREFIX)). + rabbit_auth_backend_oauth2:validate_payload(?RESOURCE_SERVER_ID, KnownResourceServerId, ?DEFAULT_SCOPE_PREFIX)). test_validate_payload_when_verify_aud_false(_) -> WithoutAud = #{ @@ -1343,7 +1310,7 @@ test_validate_payload_when_verify_aud_false(_) -> <<"foobar">>, <<"rabbitmq.other.third">>]}, ?assertEqual({ok, #{ <<"scope">> => [<<"bar">>, <<"other.third">>]}}, - rabbit_auth_backend_oauth2:validate_payload(WithoutAud, ?RESOURCE_SERVER_ID, ?DEFAULT_SCOPE_PREFIX)), + rabbit_auth_backend_oauth2:validate_payload(?RESOURCE_SERVER_ID, WithoutAud, ?DEFAULT_SCOPE_PREFIX)), WithAudWithUnknownResourceId = #{ <<"aud">> => [<<"unknown">>], @@ -1352,26 +1319,7 @@ test_validate_payload_when_verify_aud_false(_) -> <<"foobar">>, <<"rabbitmq.other.third">>]}, ?assertEqual({ok, #{<<"aud">> => [<<"unknown">>], <<"scope">> => [<<"bar">>, <<"other.third">>]}}, - rabbit_auth_backend_oauth2:validate_payload(WithAudWithUnknownResourceId, ?RESOURCE_SERVER_ID, ?DEFAULT_SCOPE_PREFIX)). - -test_default_ssl_options(_) -> - ?assertEqual([ - {verify, verify_none}, - {depth, 10}, - {fail_if_no_peer_cert, false}, - {crl_check, false}, - {crl_cache, {ssl_crl_cache, {internal, [{http, 10000}]}}} - ], uaa_jwks:ssl_options()). - -test_default_ssl_options_with_cacertfile(_) -> - ?assertEqual([ - {verify, verify_none}, - {depth, 10}, - {fail_if_no_peer_cert, false}, - {crl_check, false}, - {crl_cache, {ssl_crl_cache, {internal, [{http, 10000}]}}}, - {cacertfile, filename:join(["testca", "cacert.pem"])} - ], uaa_jwks:ssl_options()). + rabbit_auth_backend_oauth2:validate_payload(?RESOURCE_SERVER_ID, WithAudWithUnknownResourceId, ?DEFAULT_SCOPE_PREFIX)). %% %% Helpers diff --git a/deps/rabbitmq_auth_backend_oauth2/test/wildcard_match_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/wildcard_match_SUITE.erl index bfce800cf219..aa26857ce6cc 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/wildcard_match_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/wildcard_match_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(wildcard_match_SUITE). diff --git a/deps/rabbitmq_auth_mechanism_ssl/.gitignore b/deps/rabbitmq_auth_mechanism_ssl/.gitignore deleted file mode 100644 index 2e68166f1c84..000000000000 --- a/deps/rabbitmq_auth_mechanism_ssl/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? 
-*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -/rabbitmq_auth_mechanism_ssl.d diff --git a/deps/rabbitmq_auth_mechanism_ssl/BUILD.bazel b/deps/rabbitmq_auth_mechanism_ssl/BUILD.bazel index 778774f9e63b..6127cccd64ec 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/BUILD.bazel +++ b/deps/rabbitmq_auth_mechanism_ssl/BUILD.bazel @@ -1,17 +1,22 @@ +load("@rules_erlang//:eunit2.bzl", "eunit") load("@rules_erlang//:xref2.bzl", "xref") load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") +load("//:rabbitmq_home.bzl", "rabbitmq_home") +load("//:rabbitmq_run.bzl", "rabbitmq_run") load( "//:rabbitmq.bzl", "BROKER_VERSION_REQUIREMENTS_ANY", "RABBITMQ_DIALYZER_OPTS", "assert_suites", "rabbitmq_app", + "rabbitmq_integration_suite", ) load( ":app.bzl", "all_beam_files", "all_srcs", "all_test_beam_files", + "test_suite_beam_files", ) APP_NAME = "rabbitmq_auth_mechanism_ssl" @@ -26,7 +31,7 @@ APP_ENV = """[ all_beam_files(name = "all_beam_files") -all_test_beam_files() +all_test_beam_files(name = "all_test_beam_files") all_srcs(name = "all_srcs") @@ -70,6 +75,28 @@ dialyze( target = ":erlang_app", ) +rabbitmq_home( + name = "broker-for-tests-home", + testonly = True, + plugins = [ + ":test_erlang_app", + ], +) + +rabbitmq_run( + name = "rabbitmq-for-tests-run", + testonly = True, + home = ":broker-for-tests-home", +) + +rabbitmq_integration_suite( + name = "system_SUITE", + shard_count = 1, + runtime_deps = [ + "//deps/amqp10_client:erlang_app", + ], +) + assert_suites() alias( @@ -77,3 +104,10 @@ alias( actual = ":erlang_app", visibility = ["//visibility:public"], ) + +test_suite_beam_files(name = "test_suite_beam_files") + +eunit( + name = "eunit", + target = ":test_erlang_app", +) diff --git a/deps/rabbitmq_auth_mechanism_ssl/Makefile b/deps/rabbitmq_auth_mechanism_ssl/Makefile index 9b540fdaf716..f6705d7c3a6a 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/Makefile +++ b/deps/rabbitmq_auth_mechanism_ssl/Makefile @@ -14,6 +14,7 @@ endef LOCAL_DEPS = public_key DEPS = rabbit_common rabbit +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp10_client DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_auth_mechanism_ssl/README.md b/deps/rabbitmq_auth_mechanism_ssl/README.md index 96c4a5dd36f8..68aff0e462c0 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/README.md +++ b/deps/rabbitmq_auth_mechanism_ssl/README.md @@ -18,7 +18,7 @@ present a client certificate. ## Usage This mechanism must also be enabled in RabbitMQ's configuration file, -see [Authentication Mechanisms](https://www.rabbitmq.com/authentication.html) and +see [Authentication Mechanisms](https://www.rabbitmq.com/docs/access-control/) and [Configuration](https://www.rabbitmq.com/configure.html) guides for more details. @@ -26,8 +26,8 @@ A couple of examples: ``` ini auth_mechanisms.1 = PLAIN -auth_mechanisms.1 = AMQPLAIN -auth_mechanisms.1 = EXTERNAL +auth_mechanisms.2 = AMQPLAIN +auth_mechanisms.3 = EXTERNAL ``` to allow this mechanism in addition to the defaults, or: @@ -107,6 +107,6 @@ backends if so configured. ## Copyright & License -(c) 2007-2023 VMware, Inc. or its affiliates. +(c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. Released under the same license as RabbitMQ. 
diff --git a/deps/rabbitmq_auth_mechanism_ssl/app.bzl b/deps/rabbitmq_auth_mechanism_ssl/app.bzl index 6a95279a2cff..335857be922e 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/app.bzl +++ b/deps/rabbitmq_auth_mechanism_ssl/app.bzl @@ -75,4 +75,11 @@ def all_test_beam_files(name = "all_test_beam_files"): ) def test_suite_beam_files(name = "test_suite_beam_files"): - pass + erlang_bytecode( + name = "system_SUITE_beam_files", + testonly = True, + srcs = ["test/system_SUITE.erl"], + outs = ["test/system_SUITE.beam"], + app_name = "rabbitmq_auth_mechanism_ssl", + erlc_opts = "//:test_erlc_opts", + ) diff --git a/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl index 61b2aa2fedb5..11a7e79ee700 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl +++ b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% @@ -12,7 +12,6 @@ -export([description/0, should_offer/1, init/1, handle_response/2]). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("public_key/include/public_key.hrl"). -rabbit_boot_step({?MODULE, @@ -24,7 +23,9 @@ {cleanup, {rabbit_registry, unregister, [auth_mechanism, <<"EXTERNAL">>]}}]}). --record(state, {username = undefined}). +-record(state, { + username = undefined :: undefined | rabbit_types:username() | {refused, none, string(), [term()]} + }). description() -> [{description, <<"TLS peer verification-based authentication plugin. Used in combination with the EXTERNAL SASL mechanism.">>}]. diff --git a/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl_app.erl b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl_app.erl index 393634793a6f..71104f438ff1 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl_app.erl +++ b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_auth_mechanism_ssl_app). diff --git a/deps/rabbitmq_auth_mechanism_ssl/test/system_SUITE.erl b/deps/rabbitmq_auth_mechanism_ssl/test/system_SUITE.erl new file mode 100644 index 000000000000..402704fbfe89 --- /dev/null +++ b/deps/rabbitmq_auth_mechanism_ssl/test/system_SUITE.erl @@ -0,0 +1,124 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. + +-module(system_SUITE). + +-compile([export_all, + nowarn_export_all]). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +all() -> + [{group, external_enforced}]. 
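+
+%% With auth_mechanisms restricted to EXTERNAL (see init_per_group/2 below), a
+%% TLS client certificate whose common name matches an internal user can
+%% authenticate over AMQP 1.0, while SASL ANONYMOUS is rejected.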
+ +groups() -> + [ + {external_enforced, [shuffle], + [external_succeeds, + anonymous_fails] + } + ]. + +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + Config. + +end_per_suite(Config) -> + Config. + +init_per_group(_Group, Config0) -> + %% Command `deps/rabbitmq_ct_helpers/tools/tls-certs$ make` + %% will put our hostname as common name in the client cert. + Config1 = rabbit_ct_helpers:merge_app_env( + Config0, + {rabbit, + [ + %% Enforce EXTERNAL disallowing other mechanisms. + {auth_mechanisms, ['EXTERNAL']}, + {ssl_cert_login_from, common_name} + ]}), + Config = rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + {ok, UserString} = inet:gethostname(), + User = unicode:characters_to_binary(UserString), + ok = rabbit_ct_broker_helpers:add_user(Config, User), + Vhost = <<"test vhost">>, + ok = rabbit_ct_broker_helpers:add_vhost(Config, Vhost), + [{test_vhost, Vhost}, + {test_user, User}] ++ Config. + +end_per_group(_Group, Config) -> + ok = rabbit_ct_broker_helpers:delete_user(Config, ?config(test_user, Config)), + ok = rabbit_ct_broker_helpers:delete_vhost(Config, ?config(test_vhost, Config)), + rabbit_ct_helpers:run_teardown_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + ok = set_permissions(Config, <<>>, <<>>, <<"^some vhost permission">>), + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + ok = clear_permissions(Config), + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +external_succeeds(Config) -> + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls), + Host = ?config(rmq_hostname, Config), + Vhost = ?config(test_vhost, Config), + CACertFile = ?config(rmq_certsdir, Config) ++ "/testca/cacert.pem", + CertFile = ?config(rmq_certsdir, Config) ++ "/client/cert.pem", + KeyFile = ?config(rmq_certsdir, Config) ++ "/client/key.pem", + OpnConf = #{address => Host, + port => Port, + container_id => atom_to_binary(?FUNCTION_NAME), + hostname => <<"vhost:", Vhost/binary>>, + sasl => external, + tls_opts => {secure_port, [{cacertfile, CACertFile}, + {certfile, CertFile}, + {keyfile, KeyFile}]} + }, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail(missing_opened) + end, + ok = amqp10_client:close_connection(Connection). + +anonymous_fails(Config) -> + Mechansim = anon, + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl => Mechansim}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, Reason}}} -> + ?assertEqual({sasl_not_supported, Mechansim}, Reason) + after 5000 -> ct:fail(missing_closed) + end. + +connection_config(Config, Vhost) -> + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + #{address => Host, + port => Port, + container_id => <<"my container">>, + hostname => <<"vhost:", Vhost/binary>>}. + +set_permissions(Config, ConfigurePerm, WritePerm, ReadPerm) -> + ok = rabbit_ct_broker_helpers:set_permissions(Config, + ?config(test_user, Config), + ?config(test_vhost, Config), + ConfigurePerm, + WritePerm, + ReadPerm). 
+ +clear_permissions(Config) -> + User = ?config(test_user, Config), + Vhost = ?config(test_vhost, Config), + ok = rabbit_ct_broker_helpers:clear_permissions(Config, User, Vhost). diff --git a/deps/rabbitmq_aws/.gitignore b/deps/rabbitmq_aws/.gitignore index 9b81a19f05ca..c9da1ab1973d 100644 --- a/deps/rabbitmq_aws/.gitignore +++ b/deps/rabbitmq_aws/.gitignore @@ -1,30 +1,6 @@ -.rebar3 _* .eunit *.o -*.beam -*.plt -*.swp -*.swo -.erlang.cookie -ebin log -erl_crash.dump -.rebar -logs -_build -.idea cobertura.xml -rebar.lock -.erlang.mk -deps -rabbitmq_aws.d test/*xml -cover -/escript/ -/escript.lock -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -xrefr diff --git a/deps/rabbitmq_aws/README.md b/deps/rabbitmq_aws/README.md index b6ff70965709..bc112b49bcee 100644 --- a/deps/rabbitmq_aws/README.md +++ b/deps/rabbitmq_aws/README.md @@ -2,8 +2,6 @@ A fork of [gmr/httpc-aws](https://github.com/gmr/httpc-aws) for use in building RabbitMQ plugins that interact with Amazon Web Services APIs. -[![Build Status](https://travis-ci.org/gmr/rabbitmq-aws.svg?branch=master)](https://travis-ci.org/gmr/rabbitmq-aws) - ## Supported Erlang Versions [Same as RabbitMQ](http://www.rabbitmq.com/which-erlang.html) diff --git a/deps/rabbitmq_aws/app.bzl b/deps/rabbitmq_aws/app.bzl index c1da7e3fa851..07ea8396bad2 100644 --- a/deps/rabbitmq_aws/app.bzl +++ b/deps/rabbitmq_aws/app.bzl @@ -124,7 +124,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): testonly = True, srcs = ["test/rabbitmq_aws_json_tests.erl"], outs = ["test/rabbitmq_aws_json_tests.beam"], - hdrs = ["include/rabbitmq_aws.hrl"], app_name = "rabbitmq_aws", erlc_opts = "//:test_erlc_opts", ) @@ -168,7 +167,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): testonly = True, srcs = ["test/rabbitmq_aws_xml_tests.erl"], outs = ["test/rabbitmq_aws_xml_tests.beam"], - hdrs = ["include/rabbitmq_aws.hrl"], app_name = "rabbitmq_aws", erlc_opts = "//:test_erlc_opts", ) diff --git a/deps/rabbitmq_aws/include/rabbitmq_aws.hrl b/deps/rabbitmq_aws/include/rabbitmq_aws.hrl index 89d50b6d3935..a0ced292e9e1 100644 --- a/deps/rabbitmq_aws/include/rabbitmq_aws.hrl +++ b/deps/rabbitmq_aws/include/rabbitmq_aws.hrl @@ -1,7 +1,7 @@ %% ==================================================================== %% @author Gavin M. Roy %% @copyright 2016, Gavin M. Roy -%% @copyright 2016-2020 VMware, Inc. or its affiliates. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% @headerfile %% @private %% @doc rabbitmq_aws client library constants and records diff --git a/deps/rabbitmq_aws/priv/schema/rabbitmq_aws.schema b/deps/rabbitmq_aws/priv/schema/rabbitmq_aws.schema index 889830974e38..d0c497fa2545 100644 --- a/deps/rabbitmq_aws/priv/schema/rabbitmq_aws.schema +++ b/deps/rabbitmq_aws/priv/schema/rabbitmq_aws.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% % =============================== diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws.erl b/deps/rabbitmq_aws/src/rabbitmq_aws.erl index bce3bb2f3055..444121d76845 100644 --- a/deps/rabbitmq_aws/src/rabbitmq_aws.erl +++ b/deps/rabbitmq_aws/src/rabbitmq_aws.erl @@ -42,7 +42,7 @@ -spec get(Service :: string(), Path :: path()) -> result(). %% @doc Perform a HTTP GET request to the AWS API for the specified service. The -%% response will automatically be decoded if it is either in JSON or XML +%% response will automatically be decoded if it is either in JSON, or XML %% format. %% @end get(Service, Path) -> @@ -243,7 +243,7 @@ handle_msg(_Request, State) -> -spec endpoint(State :: state(), Host :: string(), Service :: string(), Path :: string()) -> string(). %% @doc Return the endpoint URL, either by constructing it with the service -%% information passed in or by using the passed in Host value. +%% information passed in, or by using the passed in Host value. %% @ednd endpoint(#state{region = Region}, undefined, Service, Path) -> lists:flatten(["https://", endpoint_host(Region, Service), Path]); @@ -294,11 +294,11 @@ get_content_type(Headers) -> end, parse_content_type(Value). --spec has_credentials() -> true | false. +-spec has_credentials() -> boolean(). has_credentials() -> gen_server:call(rabbitmq_aws, has_credentials). --spec has_credentials(state()) -> true | false. +-spec has_credentials(state()) -> boolean(). %% @doc check to see if there are credentials made available in the current state %% returning false if not or if they have expired. %% @end @@ -307,7 +307,7 @@ has_credentials(#state{access_key = Key}) when Key /= undefined -> true; has_credentials(_) -> false. --spec expired_credentials(Expiration :: calendar:datetime()) -> true | false. +-spec expired_credentials(Expiration :: calendar:datetime()) -> boolean(). %% @doc Indicates if the date that is passed in has expired. %% end expired_credentials(undefined) -> false; @@ -354,9 +354,8 @@ local_time() -> -spec maybe_decode_body(ContentType :: {nonempty_string(), nonempty_string()}, Body :: body()) -> list() | body(). -%% @doc Attempt to decode the response body based upon the mime type that is -%% presented. -%% @end. +%% @doc Attempt to decode the response body by its MIME +%% @end maybe_decode_body({"application", "x-amz-json-1.0"}, Body) -> rabbitmq_aws_json:decode(Body); maybe_decode_body({"application", "json"}, Body) -> @@ -387,10 +386,10 @@ perform_request(State, Service, Method, Headers, Path, Body, Options, Host) -> Headers, Path, Body, Options, Host). --spec perform_request_has_creds(true | false, State :: state(), - Service :: string(), Method :: method(), - Headers :: headers(), Path :: path(), Body :: body(), - Options :: http_options(), Host :: string() | undefined) +-spec perform_request_has_creds(HasCreds :: boolean(), State :: state(), + Service :: string(), Method :: method(), + Headers :: headers(), Path :: path(), Body :: body(), + Options :: http_options(), Host :: string() | undefined) -> {Result :: result(), NewState :: state()}. %% @doc Invoked after checking to see if there are credentials. If there are, %% validate they have not or will not expire, performing the request if not, @@ -403,10 +402,10 @@ perform_request_has_creds(false, State, _, _, _, _, _, _, _) -> perform_request_creds_error(State). 
--spec perform_request_creds_expired(true | false, State :: state(), - Service :: string(), Method :: method(), - Headers :: headers(), Path :: path(), Body :: body(), - Options :: http_options(), Host :: string() | undefined) +-spec perform_request_creds_expired(CredsExp :: boolean(), State :: state(), + Service :: string(), Method :: method(), + Headers :: headers(), Path :: path(), Body :: body(), + Options :: http_options(), Host :: string() | undefined) -> {Result :: result(), NewState :: state()}. %% @doc Invoked after checking to see if the current credentials have expired. %% If they haven't, perform the request, otherwise try and refresh the diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws_config.erl b/deps/rabbitmq_aws/src/rabbitmq_aws_config.erl index d5715650ceb6..3dbd71e61626 100644 --- a/deps/rabbitmq_aws/src/rabbitmq_aws_config.erl +++ b/deps/rabbitmq_aws/src/rabbitmq_aws_config.erl @@ -1,7 +1,7 @@ %% ==================================================================== %% @author Gavin M. Roy %% @copyright 2016, Gavin M. Roy -%% @copyright 2016-2023 VMware, Inc. or its affiliates. +%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% @private %% @doc rabbitmq_aws configuration functionality %% @end @@ -290,7 +290,7 @@ ini_file_data(Path) -> ini_file_data(Path, filelib:is_file(Path)). --spec ini_file_data(Path :: string(), FileExists :: true | false) +-spec ini_file_data(Path :: string(), FileExists :: boolean()) -> list() | {error, atom()}. %% @doc Return the parsed ini file for the specified path. %% @end diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws_json.erl b/deps/rabbitmq_aws/src/rabbitmq_aws_json.erl index 5741f524a092..731ce3152c07 100644 --- a/deps/rabbitmq_aws/src/rabbitmq_aws_json.erl +++ b/deps/rabbitmq_aws/src/rabbitmq_aws_json.erl @@ -14,6 +14,8 @@ %% @end decode(Value) when is_list(Value) -> decode(list_to_binary(Value)); +decode(<<>>) -> + []; decode(Value) when is_binary(Value) -> Decoded0 = rabbit_json:decode(Value), Decoded = maps:to_list(Decoded0), diff --git a/deps/rabbitmq_aws/test/rabbitmq_aws_json_tests.erl b/deps/rabbitmq_aws/test/rabbitmq_aws_json_tests.erl index 98910cf25771..c69049e81efd 100644 --- a/deps/rabbitmq_aws/test/rabbitmq_aws_json_tests.erl +++ b/deps/rabbitmq_aws/test/rabbitmq_aws_json_tests.erl @@ -2,8 +2,6 @@ -include_lib("eunit/include/eunit.hrl"). --include("rabbitmq_aws.hrl"). - parse_test_() -> [ {"string decoding", fun() -> diff --git a/deps/rabbitmq_aws/test/rabbitmq_aws_xml_tests.erl b/deps/rabbitmq_aws/test/rabbitmq_aws_xml_tests.erl index 41b9d4ceb052..02c044be900d 100644 --- a/deps/rabbitmq_aws/test/rabbitmq_aws_xml_tests.erl +++ b/deps/rabbitmq_aws/test/rabbitmq_aws_xml_tests.erl @@ -2,8 +2,6 @@ -include_lib("eunit/include/eunit.hrl"). --include("rabbitmq_aws.hrl"). - parse_test_() -> [ {"s3 error response", fun() -> diff --git a/deps/rabbitmq_cli/.gitignore b/deps/rabbitmq_cli/.gitignore index 316e39505d58..43c231de0dd8 100644 --- a/deps/rabbitmq_cli/.gitignore +++ b/deps/rabbitmq_cli/.gitignore @@ -1,13 +1 @@ -/_build -/cover -/deps -/escript -/log -/.erlang.mk/ -/ebin -/sbin -erl_crash.dump -mix.lock -*.ez -.sw? -.*.sw? 
+/deps/ diff --git a/deps/rabbitmq_cli/BUILD.bazel b/deps/rabbitmq_cli/BUILD.bazel index 98611acd4aad..69ac9bffdf1c 100644 --- a/deps/rabbitmq_cli/BUILD.bazel +++ b/deps/rabbitmq_cli/BUILD.bazel @@ -1,14 +1,38 @@ +load("@rules_elixir//:ex_unit_test.bzl", "ex_unit_test") +load("@rules_elixir//private:elixir_bytecode.bzl", "elixir_bytecode") +load( + "@rules_elixir//private:elixir_ebin_dir.bzl", + "elixir_ebin_dir", +) +load( + "@rules_elixir//private:erlang_app_filter_module_conflicts.bzl", + "erlang_app_filter_module_conflicts", +) +load("@rules_erlang//:app_file2.bzl", "app_file") load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") -load(":rabbitmqctl.bzl", "rabbitmqctl") -load(":rabbitmqctl_check_formatted.bzl", "rabbitmqctl_check_formatted_test") -load(":rabbitmqctl_test.bzl", "rabbitmqctl_test") +load("@rules_erlang//:erlang_app_info.bzl", "erlang_app_info") +load("@rules_erlang//:escript.bzl", "escript_archive") +load( + "//:rabbitmq.bzl", + "APP_VERSION", + "RABBITMQ_DIALYZER_OPTS", + "STARTS_BACKGROUND_BROKER_TAG", + "without", +) load("//:rabbitmq_home.bzl", "rabbitmq_home") load("//:rabbitmq_run.bzl", "rabbitmq_run") -load("//:rabbitmq.bzl", "RABBITMQ_DIALYZER_OPTS", "STARTS_BACKGROUND_BROKER_TAG", "without") load( - "@rabbitmq-server//bazel/elixir:mix_archive_build.bzl", + "//bazel/elixir:elixir_escript_main.bzl", + "elixir_escript_main", +) +load( + "//bazel/elixir:mix_archive_build.bzl", "mix_archive_build", ) +load( + "//bazel/elixir:mix_archive_extract.bzl", + "mix_archive_extract", +) mix_archive_build( name = "csv_ez", @@ -17,6 +41,16 @@ mix_archive_build( archives = ["@hex//:archive"], ) +mix_archive_extract( + name = "csv", + srcs = ["@csv//:sources"], + app_name = "csv", + archive = ":csv_ez", + deps = [ + "@rules_elixir//elixir", + ], +) + mix_archive_build( name = "json_ez", srcs = ["@json//:sources"], @@ -24,35 +58,230 @@ mix_archive_build( archives = ["@hex//:archive"], ) -# Note: All the various rabbitmq-* scripts are just copies of rabbitmqctl -rabbitmqctl( - name = "rabbitmqctl", - srcs = [ - "config/config.exs", - "mix.exs", - ] + glob([ - "lib/**/*.ex", - ]), - archives = [ - "@hex//:archive", +mix_archive_extract( + name = "json", + srcs = ["@json//:sources"], + app_name = "json", + archive = ":json_ez", + deps = [ + "@rules_elixir//elixir", + "@rules_elixir//elixir:logger", + ], +) + +mix_archive_build( + name = "amqp_ez", + testonly = True, + srcs = ["@amqp//:sources"], + out = "amqp.ez", + archives = ["@hex//:archive"], + setup = """\ +export DEPS_DIR="$ERL_LIBS" +""", + deps = [ + "//deps/amqp_client:erlang_app", + "//deps/rabbit_common:erlang_app", + ], +) + +mix_archive_build( + name = "temp_ez", + testonly = True, + srcs = ["@temp//:sources"], + out = "temp.ez", + archives = ["@hex//:archive"], +) + +mix_archive_build( + name = "x509_ez", + testonly = True, + srcs = ["@x509//:sources"], + out = "x509.ez", + archives = ["@hex//:archive"], +) + +APP_NAME = "rabbitmqctl" + +APP_ENV = """[{scopes,[{'rabbitmq-plugins',plugins}, + {rabbitmqctl,ctl}, + {'rabbitmq-diagnostics',diagnostics}, + {'rabbitmq-queues',queues}, + {'rabbitmq-streams',streams}, + {'rabbitmq-upgrade',upgrade}, + {'vmware-rabbitmq',vmware}]}]""" + +SRCS = glob([ + "lib/**/*.ex", +]) + +DEPS = [ + ":csv", + ":json", + "//deps/rabbit_common:erlang_app", + "@observer_cli//:erlang_app", + "@stdout_formatter//:erlang_app", +] + +elixir_bytecode( + name = "beam_files", + srcs = SRCS, + dest = "beam_files", + elixirc_opts = [ + "-e", + ":application.ensure_all_started(:mix)", + ], + env = { + 
"HOME": '"$(mktemp -d)"', + "MIX_ENV": "prod", + "DEPS_DIR": "$ERL_LIBS", + "ERL_COMPILER_OPTIONS": "deterministic", + "LANG": "en_US.UTF-8", + "LC_ALL": "en_US.UTF-8", + }, + setup = """\ +mkdir -p _build/$MIX_ENV/lib/csv +cp -RL $ERL_LIBS/csv/ebin _build/$MIX_ENV/lib/csv +""", + deps = DEPS, +) + +app_file( + name = "app_file", + out = "%s.app" % APP_NAME, + app_description = APP_NAME, + app_env = APP_ENV, + app_name = APP_NAME, + app_version = APP_VERSION, + modules = [":beam_files"], + # mix escripts do not include dependencies in the applications key + deps = [ + "@rules_elixir//elixir", + "@rules_elixir//elixir:logger", ], +) + +elixir_ebin_dir( + name = "ebin", + app_file = ":app_file", + beam_files_dir = ":beam_files", + dest = "ebin", +) + +erlang_app_filter_module_conflicts( + name = "elixir_without_rabbitmqctl_overlap", + src = "@rules_elixir//elixir", + dest = "unconsolidated", + without = [":ebin"], +) + +erlang_app_info( + name = "erlang_app", + srcs = SRCS, + hdrs = [], + app_name = APP_NAME, + beam = [":ebin"], license_files = glob(["LICENSE*"]), - source_deps = { - "@csv//:sources": "csv", - "@json//:sources": "json", + priv = [], + visibility = ["//visibility:public"], + deps = [ + ":elixir_without_rabbitmqctl_overlap", + "@rules_elixir//elixir:logger", + ] + DEPS, +) + +elixir_escript_main( + name = "escript_main", + out = "rabbitmqctl_escript.beam", + app = ":erlang_app", + env = { + "HOME": '"$(mktemp -d)"', + "LANG": "en_US.UTF-8", + "LC_ALL": "en_US.UTF-8", }, + main_module = "RabbitMQCtl", + mix_config = "config/config.exs", +) + +# Note: All the various rabbitmq-* scripts are just copies of rabbitmqctl +escript_archive( + name = "rabbitmqctl", + app = ":erlang_app", + beam = [":escript_main"], + drop_hrl = True, + flat = True, + headers = [ + "shebang", + '{emu_args, "-escript main rabbitmqctl_escript -hidden"}', + ], visibility = ["//visibility:public"], +) + +_TEST_MODULES = [ + "RabbitMQ.CLI.Ctl.Commands.DuckCommand", + "RabbitMQ.CLI.Ctl.Commands.GrayGooseCommand", + "RabbitMQ.CLI.Ctl.Commands.UglyDucklingCommand", + "RabbitMQ.CLI.Plugins.Commands.StorkCommand", + "RabbitMQ.CLI.Plugins.Commands.HeronCommand", + "RabbitMQ.CLI.Custom.Commands.CrowCommand", + "RabbitMQ.CLI.Custom.Commands.RavenCommand", + "RabbitMQ.CLI.Seagull.Commands.SeagullCommand", + "RabbitMQ.CLI.Seagull.Commands.PacificGullCommand", + "RabbitMQ.CLI.Seagull.Commands.HerringGullCommand", + "RabbitMQ.CLI.Seagull.Commands.HermannGullCommand", + "RabbitMQ.CLI.Wolf.Commands.CanisLupusCommand", + "RabbitMQ.CLI.Wolf.Commands.CanisLatransCommand", + "RabbitMQ.CLI.Wolf.Commands.CanisAureusCommand", +] + +app_file( + name = "test_app_file", + testonly = True, + out = "test/%s.app" % APP_NAME, + app_description = APP_NAME, + app_env = APP_ENV, + app_name = APP_NAME, + app_version = APP_VERSION, + modules = [":beam_files"], + synthetic_module_names = [ + "Elixir." 
+ name + for name in _TEST_MODULES + ], + # mix escripts do not include dependencies in the applications key deps = [ - "//deps/rabbit_common:erlang_app", - "@observer_cli//:erlang_app", - "@stdout_formatter//:erlang_app", + "@rules_elixir//elixir", + "@rules_elixir//elixir:logger", ], ) +elixir_ebin_dir( + name = "test_ebin", + testonly = True, + app_file = ":test_app_file", + beam_files_dir = ":beam_files", + dest = "test_ebin", +) + +erlang_app_info( + name = "test_erlang_app", + testonly = True, + srcs = SRCS, + hdrs = [], + app_name = APP_NAME, + beam = [":test_ebin"], + license_files = glob(["LICENSE*"]), + priv = [], + visibility = ["//visibility:public"], + deps = [ + ":elixir_without_rabbitmqctl_overlap", + "@rules_elixir//elixir:logger", + ] + DEPS, +) + rabbitmq_home( name = "broker-for-cli-tests-home", testonly = True, plugins = [ + ":test_erlang_app", "//deps/rabbit:erlang_app", "//deps/rabbitmq_federation:erlang_app", "//deps/rabbitmq_stomp:erlang_app", @@ -68,35 +297,6 @@ rabbitmq_run( visibility = ["//visibility:public"], ) -rabbitmqctl_check_formatted_test( - name = "check_formatted", - size = "small", - srcs = [ - ".formatter.exs", - "config/config.exs", - "mix.exs", - ] + glob([ - "lib/**/*.ex", - "test/**/*.exs", - ]), - data = glob(["test/fixtures/**/*"]), - target_compatible_with = select({ - "@platforms//os:macos": [ - "@platforms//os:macos", - "@elixir_config//:elixir_1_15", - ], - "//conditions:default": [ - "@platforms//os:linux", - "@elixir_config//:elixir_1_15", - ], - }), -) - -test_suite( - name = "rabbitmqctl_check_formatted", - tests = ["check_formatted"], -) - plt( name = "deps_plt", apps = [ @@ -107,17 +307,15 @@ plt( "public_key", "runtime_tools", ], - ez_deps = [ - ":csv_ez", - ":json_ez", - ], ignore_warnings = True, - libs = [":elixir"], + libs = ["@rules_elixir//elixir:elixir"], deps = [ - ":elixir", - "//bazel/elixir:logger", + ":csv", + ":json", "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", + "@rules_elixir//elixir", + "@rules_elixir//elixir:logger", ], ) @@ -127,37 +325,57 @@ dialyze( "-Wunknown", RABBITMQ_DIALYZER_OPTS, ), - libs = [":elixir"], + libs = ["@rules_elixir//elixir:elixir"], plt = ":deps_plt", ) -rabbitmqctl_test( +ex_unit_test( name = "tests", - size = "large", srcs = [ - ".formatter.exs", - "config/config.exs", - "mix.exs", + "test/test_helper.exs", ] + glob([ - "lib/**/*.ex", - "test/**/*.exs", + "test/**/*_test.exs", ]), - archives = [ - "@hex//:archive", - ], - data = glob(["test/fixtures/**/*"]), - flaky = True, - rabbitmq_run = ":rabbitmq-for-cli-tests-run", - source_deps = { - "@amqp//:sources": "amqp", - "@csv//:sources": "csv", - "@dialyxir//:sources": "dialyxir", - "@json//:sources": "json", - "@temp//:sources": "temp", - "@x509//:sources": "x509", + data = glob([ + "test/fixtures/**/*", + ]), + env = { + "MIX_ENV": "prod", + "DEPS_DIR": "$ERL_LIBS", + "ERL_COMPILER_OPTIONS": "deterministic", + "LANG": "en_US.UTF-8", + "LC_ALL": "en_US.UTF-8", }, + ez_deps = [ + ":amqp.ez", + ":temp.ez", + ":x509.ez", + ], + setup = """\ +# pretend that mix build the deps, as some modules add mix code paths in +# their module definitions +for app in amqp csv json temp x509; do + mkdir -p _build/$MIX_ENV/lib/$app + ln -s $ERL_LIBS/$app/ebin _build/$MIX_ENV/lib/$app/ebin +done + +# we need a running broker with certain plugins for this to pass +export TEST_TMPDIR=${TEST_UNDECLARED_OUTPUTS_DIR} +trap 'catch $?' 
EXIT +catch() { + pid=$(cat ${TEST_TMPDIR}/*/*.pid) + echo "stopping broker (pid ${pid})" + kill -TERM "${pid}" +} +$TEST_SRCDIR/$TEST_WORKSPACE/deps/rabbitmq_cli/rabbitmq-for-cli-tests-run \\ + start-background-broker\ +""", tags = [STARTS_BACKGROUND_BROKER_TAG], + tools = [ + ":rabbitmq-for-cli-tests-run", + ], deps = [ + ":test_erlang_app", "//deps/amqp_client:erlang_app", "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", @@ -170,3 +388,30 @@ test_suite( name = "rabbitmqctl_tests", tests = ["tests"], ) + +elixir_bytecode( + name = "compile_warnings_as_errors", + srcs = SRCS, + dest = "beam_files_werror", + elixirc_opts = [ + "--warnings-as-errors", + "-e", + ":application.ensure_all_started(:mix)", + ], + env = { + "HOME": '"$(mktemp -d)"', + "MIX_ENV": "prod", + "DEPS_DIR": "$ERL_LIBS", + "ERL_COMPILER_OPTIONS": "deterministic", + "LANG": "en_US.UTF-8", + "LC_ALL": "en_US.UTF-8", + }, + setup = """\ +mkdir -p _build/$MIX_ENV/lib/csv +cp -RL $ERL_LIBS/csv/ebin _build/$MIX_ENV/lib/csv +""", + tags = ["manual"], + deps = DEPS + [ + "//deps/rabbit:erlang_app", + ], +) diff --git a/deps/rabbitmq_cli/Makefile b/deps/rabbitmq_cli/Makefile index 49c4547acbe4..a76d414f08f0 100644 --- a/deps/rabbitmq_cli/Makefile +++ b/deps/rabbitmq_cli/Makefile @@ -2,14 +2,13 @@ PROJECT = rabbitmq_cli BUILD_DEPS = rabbit_common DEPS = csv json observer_cli stdout_formatter -TEST_DEPS = amqp amqp_client dialyxir temp x509 rabbit +TEST_DEPS = amqp amqp_client temp x509 rabbit -dep_amqp = hex 2.1.1 -dep_csv = hex 3.0.5 -dep_dialyxir = hex 0.5.1 +dep_amqp = hex 3.3.0 +dep_csv = hex 3.2.0 dep_json = hex 1.4.1 dep_temp = hex 0.4.7 -dep_x509 = hex 0.7.0 +dep_x509 = hex 0.8.8 DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk @@ -79,7 +78,8 @@ app:: $(ESCRIPTS) @: rabbitmqctl_srcs := mix.exs \ - $(shell find config lib -name "*.ex" -o -name "*.exs") + $(call core_find,config/,*.exs) \ + $(call core_find,lib/,*.ex) # Elixir dependencies are fetched and compiled as part of the alias # `mix make_all`. We do not fetch and build them in `make deps` because diff --git a/deps/rabbitmq_cli/README.md b/deps/rabbitmq_cli/README.md index dca99a26acf2..ab0e37c59574 100644 --- a/deps/rabbitmq_cli/README.md +++ b/deps/rabbitmq_cli/README.md @@ -1,7 +1,5 @@ # RabbitMQ CLI Tools -[![Build Status](https://travis-ci.org/rabbitmq/rabbitmq-cli.svg?branch=master)](https://travis-ci.org/rabbitmq/rabbitmq-cli) - This repository contains [RabbitMQ CLI tools](https://rabbitmq.com/cli.html) ([rabbitmqctl](https://www.rabbitmq.com/man/rabbitmqctl.1.man.html) and others). @@ -125,5 +123,5 @@ but not entirely trivial examples. The project is [licensed under the MPL](LICENSE-MPL-RabbitMQ), the same license as RabbitMQ. -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. diff --git a/deps/rabbitmq_cli/config/config.exs b/deps/rabbitmq_cli/config/config.exs index b17b656f69f9..83d3a608bbab 100644 --- a/deps/rabbitmq_cli/config/config.exs +++ b/deps/rabbitmq_cli/config/config.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
# This file is responsible for configuring your application # and its dependencies with the aid of the Mix.Config module. diff --git a/deps/rabbitmq_cli/lib/rabbit_common/records.ex b/deps/rabbitmq_cli/lib/rabbit_common/records.ex index 9249fbadbbf0..dbb4fe3f2d6d 100644 --- a/deps/rabbitmq_cli/lib/rabbit_common/records.ex +++ b/deps/rabbitmq_cli/lib/rabbit_common/records.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitCommon.Records do require Record diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/auto_complete.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/auto_complete.ex index f32756226ed6..9983125bf272 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/auto_complete.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/auto_complete.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.AutoComplete do alias RabbitMQ.CLI.Core.{CommandModules, Parser} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/command_behaviour.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/command_behaviour.ex index baa328519f19..387a36d39597 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/command_behaviour.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/command_behaviour.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.CommandBehaviour do alias RabbitMQ.CLI.Core.Helpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_default_switches_and_timeout.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_default_switches_and_timeout.ex index 2c0bf04c481d..ff09e82b8502 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_default_switches_and_timeout.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_default_switches_and_timeout.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Should be used by commands that require rabbit app to be stopped # but need no other execution environment validators. 
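An aside on the Bazel test app_file earlier in this diff: the synthetic_module_names attribute receives each test command name prefixed with "Elixir.", presumably so the generated .app resource lists the test-only commands under their Erlang-level names. The prefix itself is standard Elixir behaviour rather than anything specific to rules_elixir: every Elixir module name is, at the Erlang/OTP level, an atom beginning with "Elixir.". A minimal, runnable sketch of that naming convention (plain Elixir, using two names taken from the _TEST_MODULES list above; not part of the change itself):

test_modules = [
  "RabbitMQ.CLI.Ctl.Commands.DuckCommand",
  "RabbitMQ.CLI.Seagull.Commands.SeagullCommand"
]

for name <- test_modules do
  # String.to_atom/1 with the "Elixir." prefix yields exactly the atom the
  # corresponding Elixir alias compiles to; Module.concat/1 does the same.
  erlang_name = String.to_atom("Elixir." <> name)
  true = erlang_name == Module.concat([name])
  IO.inspect(erlang_name)
  #=> :"Elixir.RabbitMQ.CLI.Ctl.Commands.DuckCommand", and so on
end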
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_no_positional_arguments.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_no_positional_arguments.ex index 4286db3ecaf9..8aa887d200d1 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_no_positional_arguments.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_no_positional_arguments.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Should be used by commands that require rabbit app to be stopped # but need no other execution environment validators. diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positional_argument.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positional_argument.ex index 65dcd6922e71..1e8f6cdad77d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positional_argument.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positional_argument.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Should be used by commands that require rabbit app to be stopped # but need no other execution environment validators. diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positive_integer_argument.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positive_integer_argument.ex index ffe6f41d882b..4c33ca1cf05a 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positive_integer_argument.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positive_integer_argument.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Should be used by commands that require rabbit app to be stopped # but need no other execution environment validators. diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_two_positional_arguments.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_two_positional_arguments.ex index faf36f371f8c..f2f521eb1fb1 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_two_positional_arguments.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_two_positional_arguments.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Should be used by commands that require rabbit app to be stopped # but need no other execution environment validators. 
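The ansi.ex hunk just below replaces manual IO.ANSI.bright()/IO.ANSI.reset() string interpolation with a maybe_colorize/1 helper built on IO.ANSI.format/1. The helper appears to rely on two properties of that standard library function: it appends a reset automatically whenever it performs a conversion, and with its default second argument (IO.ANSI.enabled?/0) it omits escape sequences entirely when ANSI output is not enabled. A small, self-contained illustration of that behaviour (plain Elixir, not part of the change):

# Forcing the emit flag shows both behaviours of IO.ANSI.format/2; in the
# maybe_colorize/1 helper below the flag is left at its default,
# IO.ANSI.enabled?/0, so non-TTY output stays free of escape codes.
colored = IO.chardata_to_string(IO.ANSI.format([:bright, :red, "alarm"], true))
plain = IO.chardata_to_string(IO.ANSI.format([:bright, :red, "alarm"], false))

IO.inspect(colored) # bright and red escape codes around "alarm", plus a trailing reset
IO.inspect(plain)   #=> "alarm", with no escape codes at all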
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/alarms.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/alarms.ex index d3ae9fd36eab..5c612cc8b3b7 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/alarms.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/alarms.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Alarms do def alarm_lines(alarms, node_name) do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/ansi.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/ansi.ex index 1c3900a2b17a..a8df2682ff00 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/ansi.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/ansi.ex @@ -2,34 +2,40 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.ANSI do def bright(string) do - "#{IO.ANSI.bright()}#{string}#{IO.ANSI.reset()}" + maybe_colorize([:bright, string]) end def red(string) do - "#{IO.ANSI.red()}#{string}#{IO.ANSI.reset()}" + maybe_colorize([:red, string]) end def yellow(string) do - "#{IO.ANSI.yellow()}#{string}#{IO.ANSI.reset()}" + maybe_colorize([:yellow, string]) end def magenta(string) do - "#{IO.ANSI.magenta()}#{string}#{IO.ANSI.reset()}" + maybe_colorize([:magenta, string]) end def bright_red(string) do - "#{IO.ANSI.bright()}#{IO.ANSI.red()}#{string}#{IO.ANSI.reset()}" + maybe_colorize([:bright, :red, string]) end def bright_yellow(string) do - "#{IO.ANSI.bright()}#{IO.ANSI.yellow()}#{string}#{IO.ANSI.reset()}" + maybe_colorize([:bright, :yellow, string]) end def bright_magenta(string) do - "#{IO.ANSI.bright()}#{IO.ANSI.magenta()}#{string}#{IO.ANSI.reset()}" + maybe_colorize([:bright, :magenta, string]) + end + + defp maybe_colorize(ascii_esc_and_string) do + ascii_esc_and_string + |> IO.ANSI.format() + |> IO.chardata_to_string() end end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/code_path.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/code_path.ex index d30cf807d9c0..d5901f8c2f12 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/code_path.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/code_path.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.CodePath do alias RabbitMQ.CLI.Core.{Config, Paths, Platform} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/command_modules.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/command_modules.ex index 8e7f916ab026..bc9189092e16 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/command_modules.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/command_modules.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.CommandModules do alias RabbitMQ.CLI.Core.{Config, DataCoercion, Helpers} @@ -69,7 +69,7 @@ defmodule RabbitMQ.CLI.Core.CommandModules do {:ok, enabled_plugins_file} = PluginsHelpers.enabled_plugins_file(opts) require Logger - Logger.warn( + Logger.warning( "Unable to read the enabled plugins file.\n" <> " Reason: #{inspect(err)}\n" <> " Commands provided by plugins will not be available.\n" <> diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/config.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/config.ex index c72c2e4dadfd..ab5564cb1b1d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/config.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/config.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Config do alias RabbitMQ.CLI.{ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/data_coercion.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/data_coercion.ex index 123ef55ddde6..260e84d51292 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/data_coercion.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/data_coercion.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defprotocol RabbitMQ.CLI.Core.DataCoercion do def to_atom(data) diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/distribution.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/distribution.ex index 90875f9ca61f..ce1a5a51a503 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/distribution.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/distribution.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Distribution do alias RabbitMQ.CLI.Core.{Config, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/doc_guide.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/doc_guide.ex index fd74bb28d6a0..716c6a99430a 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/doc_guide.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/doc_guide.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.DocGuide.Macros do @moduledoc """ @@ -18,7 +18,7 @@ defmodule RabbitMQ.CLI.Core.DocGuide.Macros do quote do def unquote(fn_name)() do - unquote("https://#{domain}/#{path_segment}.html") + unquote("https://#{domain}/docs/#{path_segment}/") end end end @@ -47,13 +47,13 @@ defmodule RabbitMQ.CLI.Core.DocGuide do Macros.defguide("erlang_versions", path_segment: "which-erlang") Macros.defguide("feature_flags") Macros.defguide("firehose") - Macros.defguide("mirroring", path_segment: "ha") Macros.defguide("logging") Macros.defguide("management") Macros.defguide("memory_use") Macros.defguide("monitoring") Macros.defguide("networking") Macros.defguide("parameters") + Macros.defguide("passwords") Macros.defguide("plugins") Macros.defguide("prometheus") Macros.defguide("publishers") diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/erl_eval.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/erl_eval.ex index e3ef24b021d8..38283aac6bf2 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/erl_eval.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/erl_eval.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.ErlEval do def parse_expr(expr) do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/exit_codes.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/exit_codes.ex index 88f52a6ba267..5aa3f3ccc23b 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/exit_codes.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/exit_codes.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Lists predefined error exit codes used by RabbitMQ CLI tools. # The codes are adopted from [1], which (according to our team's research) diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/feature_flags.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/feature_flags.ex index f36d8c44842b..afecbf66f7d7 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/feature_flags.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/feature_flags.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.FeatureFlags do alias RabbitMQ.CLI.Core.ExitCodes diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/helpers.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/helpers.ex index fb1699a2b28d..6548722b5049 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/helpers.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/helpers.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Helpers do alias RabbitMQ.CLI.Core.{Config, DataCoercion, NodeName} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/input.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/input.ex index 7126b8fbf458..39f188260ff7 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/input.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/input.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Input do alias RabbitMQ.CLI.Core.Config diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/listeners.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/listeners.ex index 0fd8e68f7b76..94cf9f8e3b36 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/listeners.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/listeners.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Listeners do import Record, only: [defrecord: 3, extract: 2] diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/log_files.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/log_files.ex index d1d2fbf9d7fb..2649e75d3b27 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/log_files.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/log_files.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.LogFiles do @spec get_log_locations(atom, integer | :infinity) :: [String.t()] | {:badrpc, term} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/memory.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/memory.ex index ed5189816bdc..1ffecfd1d606 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/memory.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/memory.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Core.Memory do alias RabbitMQ.CLI.InformationUnit, as: IU diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_default_virtual_host.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_default_virtual_host.ex index b26874b6504d..2432d1686c4d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_default_virtual_host.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_default_virtual_host.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Should be used by commands that require rabbit app to be stopped # but need no other execution environment validators. diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_no_defaults.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_no_defaults.ex index 37ef11e35d5d..01a46040e000 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_no_defaults.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_no_defaults.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Should be used by commands that require rabbit app to be stopped # but need no other execution environment validators. diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/networking.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/networking.ex index 690f2f7f0717..9554134f4e8e 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/networking.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/networking.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Networking do @type address_family() :: :inet | :inet6 diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/node_name.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/node_name.ex index fe2b04c119bb..a9977488161e 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/node_name.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/node_name.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.NodeName do alias RabbitMQ.CLI.Core.Config diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/os_pid.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/os_pid.ex index 710ec6c9bffd..838775ddba6e 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/os_pid.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/os_pid.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.OsPid do @external_process_check_interval 1000 diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/output.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/output.ex index cd79d8bdcc2c..067ae1c8b2b1 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/output.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/output.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Output do def format_output(:ok, _, _) do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/parser.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/parser.ex index d734a5c0378f..90496c172521 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/parser.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/parser.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Parser do alias RabbitMQ.CLI.{CommandBehaviour, FormatterBehaviour} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/paths.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/paths.ex index 1bb1d0a4805f..93df49e9fdfd 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/paths.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/paths.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.Paths do alias RabbitMQ.CLI.Core.Config diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/platform.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/platform.ex index 6aca26a3c6c9..e91aa446d0ef 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/platform.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/platform.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Core.Platform do def path_separator() do @@ -19,10 +19,6 @@ defmodule RabbitMQ.CLI.Core.Platform do end end - def os_name({:unix, :linux}) do - "Linux" - end - def os_name({:unix, :darwin}) do "macOS" end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_running.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_running.ex index b1999505cbff..d546f5727b8f 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_running.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_running.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Should be used by commands that require rabbit app to be running # but need no other execution environment validators. diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_stopped.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_stopped.ex index 5398d0e69723..c0dae8a4c206 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_stopped.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_stopped.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Should be used by commands that require rabbit app to be stopped # but need no other execution environment validators. diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/validators.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/validators.ex index a4063d6b34aa..cea4c1ae95bd 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/validators.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/validators.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Provides common validation functions. defmodule RabbitMQ.CLI.Core.Validators do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/version.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/version.ex index 2f7bc6ffc916..bd9962d357b4 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/version.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/version.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Core.Version do @default_timeout 30_000 diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/virtual_hosts.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/virtual_hosts.ex index bc751cd5ec4a..610da981c90d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/virtual_hosts.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/virtual_hosts.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Core.VirtualHosts do def parse_tags(tags) do case tags do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/activate_free_disk_space_monitoring_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/activate_free_disk_space_monitoring_command.ex index 4f689a57a6bd..6333276892a7 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/activate_free_disk_space_monitoring_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/activate_free_disk_space_monitoring_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ActivateFreeDiskSpaceMonitoringCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_user_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_user_command.ex index 94f2f11dacce..44d3a0224c5a 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_user_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_user_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.AddUserCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers, Input} @@ -10,21 +10,37 @@ defmodule RabbitMQ.CLI.Ctl.Commands.AddUserCommand do @behaviour RabbitMQ.CLI.CommandBehaviour - use RabbitMQ.CLI.Core.MergesNoDefaults + def switches(), do: [pre_hashed_password: :boolean] + + def merge_defaults(args, opts) do + {args, Map.merge(%{pre_hashed_password: false}, opts)} + end def validate(args, _) when length(args) < 1, do: {:validation_failure, :not_enough_args} def validate(args, _) when length(args) > 2, do: {:validation_failure, :too_many_args} - def validate([_], _), do: :ok + # Password will be provided via standard input + def validate([_username], _), do: :ok def validate(["", _], _) do {:validation_failure, {:bad_argument, "user cannot be an empty string"}} end + def validate([_, base64_encoded_password_hash], %{pre_hashed_password: true}) do + case Base.decode64(base64_encoded_password_hash) do + {:ok, _password_hash} -> + :ok + + _ -> + {:validation_failure, + {:bad_argument, "Could not Base64 decode provided password hash value"}} + end + end + def validate([_, _], _), do: :ok use RabbitMQ.CLI.Core.RequiresRabbitAppRunning - def run([username], %{node: node_name} = opts) do + def run([username], %{node: node_name, pre_hashed_password: false} = opts) do # note: blank passwords are currently allowed, they make sense # e.g. when a user only authenticates using X.509 certificates. # Credential validators can be used to require passwords of a certain length @@ -43,6 +59,46 @@ defmodule RabbitMQ.CLI.Ctl.Commands.AddUserCommand do end end + def run([username], %{node: node_name, pre_hashed_password: true} = opts) do + case Input.infer_password("Hashed and salted password: ", opts) do + :eof -> + {:error, :not_enough_args} + + base64_encoded_password_hash -> + case Base.decode64(base64_encoded_password_hash) do + {:ok, password_hash} -> + :rabbit_misc.rpc_call( + node_name, + :rabbit_auth_backend_internal, + :add_user_with_pre_hashed_password_sans_validation, + [username, password_hash, Helpers.cli_acting_user()] + ) + + _ -> + {:error, ExitCodes.exit_dataerr(), + "Could not Base64 decode provided password hash value"} + end + end + end + + def run( + [username, base64_encoded_password_hash], + %{node: node_name, pre_hashed_password: true} + ) do + case Base.decode64(base64_encoded_password_hash) do + {:ok, password_hash} -> + :rabbit_misc.rpc_call( + node_name, + :rabbit_auth_backend_internal, + :add_user_with_pre_hashed_password_sans_validation, + [username, password_hash, Helpers.cli_acting_user()] + ) + + _ -> + {:error, ExitCodes.exit_dataerr(), "Could not Base64 decode provided password hash value"} + end + end + def run([username, password], %{node: node_name}) do :rabbit_misc.rpc_call( node_name, @@ -89,21 +145,30 @@ defmodule RabbitMQ.CLI.Ctl.Commands.AddUserCommand do use RabbitMQ.CLI.DefaultOutput - def usage, do: "add_user " + def usage, do: "add_user [] [ --pre-hashed-password]" def usage_additional() do [ ["", "Self-explanatory"], [ "", - "Password this user will authenticate with. Use a blank string to disable password-based authentication." + "Password this user will authenticate with. Use a blank string to disable password-based authentication. Mutually exclusive with " + ], + [ + "", + "A Base64-encoded password hash produced by the 'hash_password' command or a different method as described in the Passwords guide. Must be used in combination with --pre-hashed-password. 
Mutually exclusive with " + ], + [ + "--pre-hashed-password", + "Use to pass in a password hash instead of a clear text password. Disabled by default" ] ] end def usage_doc_guides() do [ - DocGuide.access_control() + DocGuide.access_control(), + DocGuide.passwords() ] end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_vhost_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_vhost_command.ex index 41eaaf8194a7..5e2b627a82de 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_vhost_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_vhost_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.AddVhostCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers, VirtualHosts} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/authenticate_user_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/authenticate_user_command.ex index df0b557a6dac..5dd416ffe3df 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/authenticate_user_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/authenticate_user_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.AuthenticateUserCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Input} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/autocomplete_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/autocomplete_command.ex index 6bf553c5e222..075d1a435f34 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/autocomplete_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/autocomplete_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.AutocompleteCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_online_nodes_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_online_nodes_command.ex index 83305ba704eb..f2bf9e946807 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_online_nodes_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_online_nodes_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.AwaitOnlineNodesCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_startup_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_startup_command.ex index 308bcb62466d..a5d6dfa48334 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_startup_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_startup_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.AwaitStartupCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cancel_sync_queue_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cancel_sync_queue_command.ex deleted file mode 100644 index e8a888707843..000000000000 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cancel_sync_queue_command.ex +++ /dev/null @@ -1,52 +0,0 @@ -## This Source Code Form is subject to the terms of the Mozilla Public -## License, v. 2.0. If a copy of the MPL was not distributed with this -## file, You can obtain one at https://mozilla.org/MPL/2.0/. -## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. - -defmodule RabbitMQ.CLI.Ctl.Commands.CancelSyncQueueCommand do - alias RabbitMQ.CLI.Core.DocGuide - - @behaviour RabbitMQ.CLI.CommandBehaviour - use RabbitMQ.CLI.DefaultOutput - - def merge_defaults(args, opts) do - {args, Map.merge(%{vhost: "/"}, opts)} - end - - use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument - - use RabbitMQ.CLI.Core.RequiresRabbitAppRunning - - def run([queue], %{vhost: vhost, node: node_name}) do - :rpc.call( - node_name, - :rabbit_mirror_queue_misc, - :cancel_sync_queue, - [:rabbit_misc.r(vhost, :queue, queue)], - :infinity - ) - end - - def usage, do: "cancel_sync_queue [--vhost ] " - - def usage_additional() do - [ - ["", "Queue name"] - ] - end - - def usage_doc_guides() do - [ - DocGuide.mirroring() - ] - end - - def help_section(), do: :replication - - def description(), do: "Instructs a synchronising mirrored queue to stop synchronising itself" - - def banner([queue], %{vhost: vhost, node: _node}) do - "Stopping synchronising queue '#{queue}' in vhost '#{vhost}' ..." - end -end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_cluster_node_type_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_cluster_node_type_command.ex index 0f385e4a3120..b01a09fc87ba 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_cluster_node_type_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_cluster_node_type_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.ChangeClusterNodeTypeCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_password_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_password_command.ex index 03e21816951d..e0b63db30158 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_password_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_password_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ChangePasswordCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers, Input} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_global_parameter_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_global_parameter_command.ex index 75c0c54b2d85..c2177244d396 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_global_parameter_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_global_parameter_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ClearGlobalParameterCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_operator_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_operator_policy_command.ex index e15fcad86c22..adc805472025 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_operator_policy_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_operator_policy_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ClearOperatorPolicyCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_parameter_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_parameter_command.ex index ef0620306fae..d12aeebecd2b 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_parameter_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_parameter_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.ClearParameterCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_password_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_password_command.ex index 7f91844d1fb5..7d80635b0cb2 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_password_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_password_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ClearPasswordCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_permissions_command.ex index 28671aafdcdd..19e7c455c759 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_permissions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_permissions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ClearPermissionsCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_policy_command.ex index f7114e4e570d..9118a58081c5 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_policy_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_policy_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ClearPolicyCommand do alias RabbitMQ.CLI.Core.{Helpers, DocGuide} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_topic_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_topic_permissions_command.ex index e83e2cde8b8b..8227266ff96a 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_topic_permissions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_topic_permissions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.ClearTopicPermissionsCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_user_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_user_limits_command.ex index 3264b03ce60b..a303069f2571 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_user_limits_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_user_limits_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ClearUserLimitsCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_vhost_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_vhost_limits_command.ex index e6ab0102e64c..24e90854d8b9 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_vhost_limits_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_vhost_limits_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ClearVhostLimitsCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_connections_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_connections_command.ex index 01170350180b..76b2465f4a7d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_connections_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_connections_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.CloseAllConnectionsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_user_connections_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_user_connections_command.ex index 8a673a29ff3b..e593f3a18e72 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_user_connections_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_user_connections_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.CloseAllUserConnectionsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_connection_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_connection_command.ex index 0f34f68edd69..6f8e4fede649 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_connection_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_connection_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.CloseConnectionCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cluster_status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cluster_status_command.ex index b683d0bcc9c5..bc5ca76ca0bc 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cluster_status_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cluster_status_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ClusterStatusCommand do alias RabbitMQ.CLI.Core.DocGuide @@ -32,7 +32,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ClusterStatusCommand do use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments use RabbitMQ.CLI.Core.RequiresRabbitAppRunning - def run([], %{node: node_name, timeout: timeout}) do + def run([], %{node: node_name, timeout: timeout} = opts) do status = case :rabbit_misc.rpc_call(node_name, :rabbit_db_cluster, :cli_cluster_status, []) do {:badrpc, {:EXIT, {:undef, _}}} -> @@ -72,7 +72,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ClusterStatusCommand do maintenance_status_by_node = Enum.map( nodes, - fn n -> maintenance_status_by_node(n, per_node_timeout(timeout, count)) end + fn n -> maintenance_status_by_node(n, per_node_timeout(timeout, count), opts) end ) cpu_cores_by_node = @@ -285,23 +285,21 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ClusterStatusCommand do end defp listeners_of(node, timeout) do - # This may seem inefficient since this call returns all known listeners - # in the cluster, so why do we run it on every node? See the badrpc clause, - # some nodes may be inavailable or partitioned from other nodes. This way we - # gather as complete a picture as possible. MK. 
+ node = to_atom(node) + listeners = case :rabbit_misc.rpc_call( - to_atom(node), + node, :rabbit_networking, - :active_listeners, - [], + :node_listeners, + [node], timeout ) do {:badrpc, _} -> [] xs -> xs end - {node, listeners_on(listeners, node)} + {node, listeners} end defp versions_by_node(node, timeout) do @@ -342,26 +340,26 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ClusterStatusCommand do }} end - defp maintenance_status_by_node(node, timeout) do + defp maintenance_status_by_node(node, timeout, opts) do target = to_atom(node) + formatter = Map.get(opts, :formatter) + + rpc_result = + :rabbit_misc.rpc_call(target, :rabbit_maintenance, :status_local_read, [target], timeout) result = - case :rabbit_misc.rpc_call( - target, - :rabbit_maintenance, - :status_local_read, - [target], - timeout - ) do - {:badrpc, _} -> "unknown" - :regular -> "not under maintenance" - :draining -> magenta("marked for maintenance") + case {rpc_result, formatter} do + {{:badrpc, _}, _} -> "unknown" + {:regular, _} -> "not under maintenance" + {:draining, "json"} -> "marked for maintenance" + {:draining, _} -> magenta("marked for maintenance") # forward compatibility: should we figure out a way to know when # draining completes (it involves inherently asynchronous cluster # operations such as quorum queue leader re-election), we'd introduce # a new state - :drained -> magenta("marked for maintenance") - value -> to_string(value) + {:drained, "json"} -> "marked for maintenance" + {:drained, _} -> magenta("marked for maintenance") + {value, _} -> to_string(value) end {node, result} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/deactivate_free_disk_space_monitoring_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/deactivate_free_disk_space_monitoring_command.ex index ad07c8620552..ca3e2c267236 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/deactivate_free_disk_space_monitoring_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/deactivate_free_disk_space_monitoring_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.DeactivateFreeDiskSpaceMonitoringCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex index 4fca52211a43..da124ae55564 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. alias RabbitMQ.CLI.Core.Helpers @@ -86,6 +86,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.DecodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Failed to decrypt the value. Things to check: is the passphrase correct? 
Are the cipher and hash algorithms the same as those used for encryption?"} end @@ -109,6 +110,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.DecodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Failed to decrypt the value. Things to check: is the passphrase correct? Are the cipher and hash algorithms the same as those used for encryption?"} end @@ -117,7 +119,7 @@ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang def banner(_, _) do - "Decrypting value..." + "Decrypting an advanced.config (Erlang term) value..." end def usage, @@ -125,7 +127,7 @@ def usage_additional() do [ - ["<value>", "config value to decode"], + ["<value>", "advanced.config (Erlang term) value to decode"], ["<passphrase>", "passphrase to use with the config value encryption key"], ["--cipher <cipher>", "cipher suite to use"], ["--hash <hash>", "hashing function to use"], @@ -141,7 +143,7 @@ def help_section(), do: :configuration - def description(), do: "Decrypts an encrypted configuration value" + def description(), do: "Decrypts an encrypted advanced.config value" # # Implementation diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decrypt_conf_value_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decrypt_conf_value_command.ex new file mode 100644 index 000000000000..6ac5958a96a1 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decrypt_conf_value_command.ex @@ -0,0 +1,172 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
+ +alias RabbitMQ.CLI.Core.Helpers + +defmodule RabbitMQ.CLI.Ctl.Commands.DecryptConfValueCommand do + alias RabbitMQ.CLI.Core.{DocGuide, Input} + + @behaviour RabbitMQ.CLI.CommandBehaviour + use RabbitMQ.CLI.DefaultOutput + + def switches() do + [ + cipher: :string, + hash: :string, + iterations: :integer + ] + end + + @atomized_keys [:cipher, :hash] + @prefix "encrypted:" + + def distribution(_), do: :none + + def merge_defaults(args, opts) do + with_defaults = + Map.merge( + %{ + cipher: :rabbit_pbe.default_cipher(), + hash: :rabbit_pbe.default_hash(), + iterations: :rabbit_pbe.default_iterations() + }, + opts + ) + + {args, Helpers.atomize_values(with_defaults, @atomized_keys)} + end + + def validate(args, _) when length(args) < 1 do + {:validation_failure, {:not_enough_args, "Please provide a value to decode and a passphrase"}} + end + + def validate(args, _) when length(args) > 2 do + {:validation_failure, :too_many_args} + end + + def validate(_args, opts) do + case {supports_cipher(opts.cipher), supports_hash(opts.hash), opts.iterations > 0} do + {false, _, _} -> + {:validation_failure, {:bad_argument, "The requested cipher is not supported"}} + + {_, false, _} -> + {:validation_failure, {:bad_argument, "The requested hash is not supported"}} + + {_, _, false} -> + {:validation_failure, + {:bad_argument, + "The requested number of iterations is incorrect (must be a positive integer)"}} + + {true, true, true} -> + :ok + end + end + + def run([value], %{cipher: cipher, hash: hash, iterations: iterations} = opts) do + case Input.consume_single_line_string_with_prompt("Passphrase: ", opts) do + :eof -> + {:error, :not_enough_args} + + passphrase -> + try do + term_value = Helpers.evaluate_input_as_term(value) + + term_to_decrypt = + case term_value do + prefixed_val when is_bitstring(prefixed_val) or is_list(prefixed_val) -> + tag_input_value_with_encrypted(prefixed_val) + + {:encrypted, _} = encrypted -> + encrypted + + _ -> + {:encrypted, term_value} + end + + result = :rabbit_pbe.decrypt_term(cipher, hash, iterations, passphrase, term_to_decrypt) + {:ok, result} + catch + _, _ -> + IO.inspect(__STACKTRACE__) + {:error, + "Failed to decrypt the value. Things to check: is the passphrase correct? Are the cipher and hash algorithms the same as those used for encryption?"} + end + end + end + + def run([value, passphrase], %{cipher: cipher, hash: hash, iterations: iterations}) do + try do + term_value = Helpers.evaluate_input_as_term(value) + + term_to_decrypt = + case term_value do + prefixed_val when is_bitstring(prefixed_val) or is_list(prefixed_val) -> + tag_input_value_with_encrypted(prefixed_val) + + {:encrypted, _} = encrypted -> + encrypted + + _ -> + {:encrypted, term_value} + end + + result = :rabbit_pbe.decrypt_term(cipher, hash, iterations, passphrase, term_to_decrypt) + {:ok, result} + catch + _, _ -> + IO.inspect(__STACKTRACE__) + {:error, + "Failed to decrypt the value. Things to check: is the passphrase correct? Are the cipher and hash algorithms the same as those used for encryption?"} + end + end + + def formatter(), do: RabbitMQ.CLI.Formatters.Erlang + + def banner(_, _) do + "Decrypting a rabbitmq.conf string value..." 
+ end + + def usage, + do: "decrypt_conf_value value passphrase [--cipher <cipher>] [--hash <hash>] [--iterations <iterations>]" + + def usage_additional() do + [ + ["<value>", "a double-quoted rabbitmq.conf string value to decode"], + ["<passphrase>", "passphrase to use with the config value encryption key"], + ["--cipher <cipher>", "cipher suite to use"], + ["--hash <hash>", "hashing function to use"], + ["--iterations <iterations>", "number of iteration to apply"] + ] + end + + def usage_doc_guides() do + [ + DocGuide.configuration() + ] + end + + def help_section(), do: :configuration + + def description(), do: "Decrypts an encrypted configuration value" + + # + # Implementation + # + + defp supports_cipher(cipher), do: Enum.member?(:rabbit_pbe.supported_ciphers(), cipher) + + defp supports_hash(hash), do: Enum.member?(:rabbit_pbe.supported_hashes(), hash) + + defp tag_input_value_with_encrypted(value) when is_bitstring(value) or is_list(value) do + bin_val = :rabbit_data_coercion.to_binary(value) + untagged_val = String.replace_prefix(bin_val, @prefix, "") + + {:encrypted, untagged_val} + end + defp tag_input_value_with_encrypted(value) do + {:encrypted, value} + end +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex index 3aabdbf760fb..2877f79e12d0 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.DeleteQueueCommand do alias RabbitMQ.CLI.Core.DocGuide @@ -103,10 +103,11 @@ defmodule RabbitMQ.CLI.Ctl.Commands.DeleteQueueCommand do Enum.join(Enum.concat([if_empty_str, if_unused_str]), "and ") <> "..." end - def usage(), do: "delete_queue <queue_name> [--if-empty|-e] [--if-unused|-u]" + def usage(), do: "delete_queue <queue_name> [--vhost <vhost>] [--if-empty|-e] [--if-unused|-u]" def usage_additional() do [ + ["--vhost", "Virtual host name"], ["<queue_name>", "name of the queue to delete"], ["--if-empty", "delete the queue if it is empty (has no messages ready for delivery)"], ["--if-unused", "delete the queue only if it has no consumers"] diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_user_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_user_command.ex index 940c380b4834..3b24451bf918 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_user_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_user_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
defmodule RabbitMQ.CLI.Ctl.Commands.DeleteUserCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_vhost_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_vhost_command.ex index faa33aef095d..02f741b62d0c 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_vhost_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_vhost_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.DeleteVhostCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex index 0530f4f96fec..b94074056070 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex @@ -2,59 +2,73 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.EnableFeatureFlagCommand do @behaviour RabbitMQ.CLI.CommandBehaviour - def merge_defaults(args, opts), do: {args, opts} + def switches(), do: [experimental: :boolean] + def aliases(), do: [e: :experimental] - def validate([], _), do: {:validation_failure, :not_enough_args} - def validate([_ | _] = args, _) when length(args) > 1, do: {:validation_failure, :too_many_args} + def merge_defaults(args, opts), do: { args, Map.merge(%{experimental: false}, opts) } - def validate([""], _), + def validate([], _opts), do: {:validation_failure, :not_enough_args} + def validate([_ | _] = args, _opts) when length(args) > 1, do: {:validation_failure, :too_many_args} + + def validate([""], _opts), do: {:validation_failure, {:bad_argument, "feature_flag cannot be an empty string."}} - def validate([_], _), do: :ok + def validate([_], _opts), do: :ok use RabbitMQ.CLI.Core.RequiresRabbitAppRunning - def run(["all"], %{node: node_name}) do - case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable_all, []) do - # Server does not support feature flags, consider none are available. - # See rabbitmq/rabbitmq-cli#344 for context. MK. 
- {:badrpc, {:EXIT, {:undef, _}}} -> {:error, :unsupported} - {:badrpc, _} = err -> err - other -> other + def run(["all"], %{node: node_name, experimental: experimental}) do + case experimental do + true -> + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), "`--experimental` flag is not allowed when enabling all feature flags.\nUse --experimental with a specific feature flag if you want to enable an experimental feature."} + false -> + case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable_all, []) do + {:badrpc, _} = err -> err + other -> other + end end end - def run([feature_flag], %{node: node_name}) do - case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable, [ - String.to_atom(feature_flag) - ]) do - # Server does not support feature flags, consider none are available. - # See rabbitmq/rabbitmq-cli#344 for context. MK. - {:badrpc, {:EXIT, {:undef, _}}} -> {:error, :unsupported} - {:badrpc, _} = err -> err - other -> other + def run([feature_flag], %{node: node_name, experimental: experimental}) do + case {experimental, :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :get_stability, [ + String.to_atom(feature_flag) + ])} do + {_, {:badrpc, _} = err} -> err + {false, :experimental} -> + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), "Feature flag #{feature_flag} is experimental. If you understand the risk, use --experimental to enable it."} + _ -> + case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable, [ + String.to_atom(feature_flag) + ]) do + {:badrpc, _} = err -> err + other -> other + end end end def output({:error, :unsupported}, %{node: node_name}) do {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), - "This feature flag is not supported by node #{node_name}"} + "This feature flag is not supported by node #{node_name}"} end use RabbitMQ.CLI.DefaultOutput - def usage, do: "enable_feature_flag <all | feature_flag>" + def usage, do: "enable_feature_flag [--experimental] <all | feature_flag>" def usage_additional() do [ [ "<feature_flag>", "name of the feature flag to enable, or \"all\" to enable all supported flags" + ], + [ + "--experimental", + "required to enable experimental feature flags (make sure you understand the risks!)" ] ] end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex index 885d6fda5554..8eb43e688c91 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers, Input} @@ -77,6 +77,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Error during cipher operation"} end end @@ -99,6 +100,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Error during cipher operation"} end end @@ -115,6 +117,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Error during cipher operation"} end end @@ -122,7 +125,7 @@ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang def banner(_, _) do - "Encrypting value ..." + "Encrypting value to be used in advanced.config..." end def usage, @@ -130,7 +133,7 @@ def usage_additional() do [ - ["<value>", "config value to encode"], + ["<value>", "value to encode, to be used in advanced.config"], ["<passphrase>", "passphrase to use with the config value encryption key"], ["--cipher <cipher>", "cipher suite to use"], ["--hash <hash>", "hashing function to use"], @@ -146,7 +149,7 @@ def help_section(), do: :configuration - def description(), do: "Encrypts a sensitive configuration value" + def description(), do: "Encrypts a sensitive configuration value to be used in the advanced.config file" # # Implementation diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encrypt_conf_value_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encrypt_conf_value_command.ex new file mode 100644 index 000000000000..914ad7debeb2 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encrypt_conf_value_command.ex @@ -0,0 +1,157 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
+ +defmodule RabbitMQ.CLI.Ctl.Commands.EncryptConfValueCommand do + alias RabbitMQ.CLI.Core.{DocGuide, Helpers, Input} + + @behaviour RabbitMQ.CLI.CommandBehaviour + use RabbitMQ.CLI.DefaultOutput + + def switches() do + [ + cipher: :string, + hash: :string, + iterations: :integer + ] + end + + @atomized_keys [:cipher, :hash] + + def distribution(_), do: :none + + def merge_defaults(args, opts) do + with_defaults = + Map.merge( + %{ + cipher: :rabbit_pbe.default_cipher(), + hash: :rabbit_pbe.default_hash(), + iterations: :rabbit_pbe.default_iterations() + }, + opts + ) + + {args, Helpers.atomize_values(with_defaults, @atomized_keys)} + end + + def validate(args, _) when length(args) > 2 do + {:validation_failure, :too_many_args} + end + + def validate(_args, opts) do + case {supports_cipher(opts.cipher), supports_hash(opts.hash), opts.iterations > 0} do + {false, _, _} -> + {:validation_failure, {:bad_argument, "The requested cipher is not supported."}} + + {_, false, _} -> + {:validation_failure, {:bad_argument, "The requested hash is not supported"}} + + {_, _, false} -> + {:validation_failure, {:bad_argument, "The requested number of iterations is incorrect"}} + + {true, true, true} -> + :ok + end + end + + def run([], %{cipher: cipher, hash: hash, iterations: iterations} = opts) do + case Input.consume_single_line_string_with_prompt("Value to encode: ", opts) do + :eof -> + {:error, :not_enough_args} + + value -> + case Input.consume_single_line_string_with_prompt("Passphrase: ", opts) do + :eof -> + {:error, :not_enough_args} + + passphrase -> + try do + term_value = Helpers.evaluate_input_as_term(value) + + {:encrypted, result} = + :rabbit_pbe.encrypt_term(cipher, hash, iterations, passphrase, term_value) + + {:ok, result} + catch + _, _ -> + {:error, "Error during cipher operation"} + end + end + end + end + + def run([value], %{cipher: cipher, hash: hash, iterations: iterations} = opts) do + case Input.consume_single_line_string_with_prompt("Passphrase: ", opts) do + :eof -> + {:error, :not_enough_args} + + passphrase -> + try do + term_value = Helpers.evaluate_input_as_term(value) + + {:encrypted, result} = + :rabbit_pbe.encrypt_term(cipher, hash, iterations, passphrase, term_value) + + {:ok, result} + catch + _, _ -> + IO.inspect(__STACKTRACE__) + {:error, "Error during cipher operation"} + end + end + end + + def run([value, passphrase], %{cipher: cipher, hash: hash, iterations: iterations}) do + try do + term_value = Helpers.evaluate_input_as_term(value) + + {:encrypted, result} = + :rabbit_pbe.encrypt_term(cipher, hash, iterations, passphrase, term_value) + + {:ok, result} + catch + _, _ -> + IO.inspect(__STACKTRACE__) + {:error, "Error during cipher operation"} + end + end + + def formatter(), do: RabbitMQ.CLI.Formatters.EncryptedConfValue + + def banner(_, _) do + "Encrypting value to be used in rabbitmq.conf..." 
+ end + + def usage, + do: "encrypt_conf_value value passphrase [--cipher <cipher>] [--hash <hash>] [--iterations <iterations>]" + + def usage_additional() do + [ + ["<value>", "config value to encode"], + ["<passphrase>", "passphrase to use with the config value encryption key"], + ["--cipher <cipher>", "cipher suite to use"], + ["--hash <hash>", "hashing function to use"], + ["--iterations <iterations>", "number of iteration to apply"] + ] + end + + def usage_doc_guides() do + [ + DocGuide.configuration() + ] + end + + def help_section(), do: :configuration + + def description(), do: "Encrypts a sensitive configuration value to be used in the advanced.config file" + + # + # Implementation + # + + defp supports_cipher(cipher), do: Enum.member?(:rabbit_pbe.supported_ciphers(), cipher) + + defp supports_hash(hash), do: Enum.member?(:rabbit_pbe.supported_hashes(), hash) +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/environment_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/environment_command.ex index 5e763403c9d9..634672cddfb5 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/environment_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/environment_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.EnvironmentCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_command.ex index 33782726e7cc..e1c2acacaf02 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.EvalCommand do alias RabbitMQ.CLI.Core.{DocGuide, ErlEval, ExitCodes, Input} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_file_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_file_command.ex index d85c8bfef6ed..0f98c5769436 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_file_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_file_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
defmodule RabbitMQ.CLI.Ctl.Commands.EvalFileCommand do alias RabbitMQ.CLI.Core.{DocGuide, ErlEval, ExitCodes} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/exec_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/exec_command.ex index 8184797a35c6..4c0b670df0da 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/exec_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/exec_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ExecCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/export_definitions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/export_definitions_command.ex index c009c79d187e..cc21e2a0a46d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/export_definitions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/export_definitions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ExportDefinitionsCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers} @@ -149,17 +149,9 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ExportDefinitionsCommand do # defp serialise(raw_map, "json") do - # make sure all runtime parameter values are maps, otherwise - # they will end up being a list of pairs (a keyword list/proplist) - # in the resulting JSON document - map = - Map.update!(raw_map, :parameters, fn params -> - Enum.map(params, fn param -> - Map.update!(param, "value", &:rabbit_data_coercion.to_map/1) - end) - end) - - {:ok, json} = JSON.encode(map) + # rabbit_definitions already takes care of transforming all + # proplists into maps + {:ok, json} = JSON.encode(raw_map) json end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_boot_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_boot_command.ex index 00c4101d6a85..680b196df2dd 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_boot_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_boot_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.ForceBootCommand do alias RabbitMQ.CLI.Core.{Config, DocGuide} @@ -43,6 +43,10 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ForceBootCommand do File.write(Path.join(dir, "force_load"), "") end + {:error, :not_supported} -> + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), + "This command is not supported by node #{node_name}"} + _ -> :ok end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_gc_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_gc_command.ex index 5c3948a361f5..fa871b498fdb 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_gc_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_gc_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ForceGcCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_reset_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_reset_command.ex index 97bb78e6ec46..83d8ab48e323 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_reset_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_reset_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ForceResetCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_standalone_khepri_boot.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_standalone_khepri_boot.ex new file mode 100644 index 000000000000..d0f648622348 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_standalone_khepri_boot.ex @@ -0,0 +1,45 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
+ +defmodule RabbitMQ.CLI.Ctl.Commands.ForceStandaloneKhepriBootCommand do + alias RabbitMQ.CLI.Core.DocGuide + + @behaviour RabbitMQ.CLI.CommandBehaviour + + use RabbitMQ.CLI.Core.MergesNoDefaults + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + + def run([], %{node: node_name}) do + ret = + :rabbit_misc.rpc_call(node_name, :rabbit_khepri, :force_shrink_member_to_current_member, []) + + case ret do + {:badrpc, {:EXIT, {:undef, _}}} -> + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), + "This command is not supported by node #{node_name}"} + + _ -> + ret + end + end + + use RabbitMQ.CLI.DefaultOutput + + def usage, do: "force_standalone_khepri_boot" + + def usage_doc_guides() do + [ + DocGuide.clustering() + ] + end + + def help_section(), do: :cluster_management + + def description(), + do: "Forces node to start as a standalone node" + + def banner(_, _), do: nil +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/forget_cluster_node_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/forget_cluster_node_command.ex index 0cee971813aa..edb7d37f89fa 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/forget_cluster_node_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/forget_cluster_node_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ForgetClusterNodeCommand do alias RabbitMQ.CLI.Core.{DocGuide, Distribution, Validators} @@ -72,7 +72,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ForgetClusterNodeCommand do {:error, "RabbitMQ on node #{node_to_remove} must be stopped with 'rabbitmqctl -n #{node_to_remove} stop_app' before it can be removed"} - {:error, {:failed_to_remove_node, ^atom_name, unavailable}} -> + {:error, {:failed_to_remove_node, ^atom_name, :unavailable}} -> {:error, "Node #{node_to_remove} must be running before it can be removed"} {:error, _} = error -> @@ -82,13 +82,60 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ForgetClusterNodeCommand do error :ok -> - case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :shrink_all, [atom_name]) do - {:error, _} -> - {:error, - "RabbitMQ failed to shrink some of the quorum queues on node #{node_to_remove}"} + qq_shrink_result = + :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :shrink_all, [atom_name]) - _ -> + stream_shrink_result = + case :rabbit_misc.rpc_call(node_name, :rabbit_stream_queue, :delete_all_replicas, [ + atom_name + ]) do + ## For backwards compatibility + {:badrpc, {:EXIT, {:undef, [{:rabbit_stream_queue, :delete_all_replicas, _, _}]}}} -> + [] + + any -> + any + end + + stream_coord_result = + :rabbit_misc.rpc_call(node_name, :rabbit_stream_coordinator, :forget_node, [atom_name]) + + is_error_fun = fn + {_, {:ok, _}} -> + false + + {_, :ok} -> + false + + {_, {:error, _, _}} -> + true + + {_, {:error, _}} -> + true + end + + has_qq_error = + not Enum.empty?(qq_shrink_result) and Enum.any?(qq_shrink_result, is_error_fun) + + has_stream_error = + not Enum.empty?(stream_shrink_result) and Enum.any?(stream_shrink_result, is_error_fun) + + errors = + append_err(stream_coord_result != :ok, "Stream coordinator", []) + + errors = + append_err(has_qq_error, "Quorum queues", errors) + + errors = + 
append_err(has_stream_error, "Streams", errors) + + case errors do + [] -> :ok + + _ -> + {:error, + "RabbitMQ failed to shrink some entities on node #{node_to_remove} : #{errors}"} end other -> @@ -134,6 +181,18 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ForgetClusterNodeCommand do # Implementation # + defp append_err(false, _err, errs) do + errs + end + + defp append_err(true, err, []) do + [err] + end + + defp append_err(true, err, errs) do + [err, ", " | errs] + end + defp become(node_name, opts) do :error_logger.tty(false) diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hash_password_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hash_password_command.ex index b71dcef34d8c..498cca8a1e34 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hash_password_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hash_password_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.HashPasswordCommand do alias RabbitMQ.CLI.Core.{Input} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/help_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/help_command.ex index eb80f0b40ee6..504c97a31d77 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/help_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/help_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. alias RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex index 5fc88efbeacd..3cc58a8c9127 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.HipeCompileCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/import_definitions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/import_definitions_command.ex index 665b821fcc01..044c763176e1 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/import_definitions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/import_definitions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ImportDefinitionsCommand do alias RabbitMQ.CLI.Core.{Config, DocGuide, ExitCodes, Helpers} @@ -121,7 +121,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ImportDefinitionsCommand do false -> {:ok, "Successfully started definition import. " <> - "This process is asynchronous and can take some time.\n"} + "This process is asynchronous and can take some time. Watch target node logs for completion.\n"} end end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/join_cluster_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/join_cluster_command.ex index 11b9695ac508..d19db7edbf46 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/join_cluster_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/join_cluster_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.JoinClusterCommand do alias RabbitMQ.CLI.Core.{Config, DocGuide, Helpers} @@ -28,8 +28,6 @@ defmodule RabbitMQ.CLI.Ctl.Commands.JoinClusterCommand do def validate([_], _), do: :ok def validate(_, _), do: {:validation_failure, :too_many_args} - use RabbitMQ.CLI.Core.RequiresRabbitAppStopped - def run([target_node], %{node: node_name, ram: ram, disc: disc} = opts) do node_type = case {ram, disc} do @@ -78,6 +76,21 @@ defmodule RabbitMQ.CLI.Ctl.Commands.JoinClusterCommand do "Error: cannot cluster node with itself: #{node_name}"} end + def output({:error, {:node_type_unsupported, db, node_type}}, %{node: _node_name}) do + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(), + "Error: `#{node_type}` node type is unsupported by the #{db} by database engine"} + end + + def output( + {:error, + {:khepri_mnesia_migration_ex, :all_mnesia_nodes_must_run, + %{all_nodes: nodes, running_nodes: running}}}, + _opts + ) do + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(), + "Error: all mnesia nodes must run to join the cluster, mnesia nodes: #{inspect(nodes)}, running nodes: #{inspect(running)}"} + end + use RabbitMQ.CLI.DefaultOutput def banner([target_node], %{node: node_name}) do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_bindings_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_bindings_command.ex index c72c77df29ed..b014d1ec9b4e 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_bindings_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_bindings_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.ListBindingsCommand do alias RabbitMQ.CLI.Ctl.{InfoKeys, RpcStream} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex index 01075a42dcdd..37c38955e07f 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## defmodule RabbitMQ.CLI.Ctl.Commands.ListChannelsCommand do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_ciphers_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_ciphers_command.ex index d1e820a4b446..63a0c1524e9f 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_ciphers_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_ciphers_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListCiphersCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex index 8dc09989cbb8..c5a362e8859c 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListConnectionsCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_consumers_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_consumers_command.ex index 913963dfbf80..5e575e19bcde 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_consumers_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_consumers_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.ListConsumersCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_deprecated_features_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_deprecated_features_command.ex new file mode 100644 index 000000000000..e1845cce274f --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_deprecated_features_command.ex @@ -0,0 +1,123 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2023 Broadcom. All Rights Reserved. The term “Broadcom” +## refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule RabbitMQ.CLI.Ctl.Commands.ListDeprecatedFeaturesCommand do + alias RabbitMQ.CLI.Core.{DocGuide, Validators} + alias RabbitMQ.CLI.Ctl.InfoKeys + + @behaviour RabbitMQ.CLI.CommandBehaviour + use RabbitMQ.CLI.DefaultOutput + + def formatter(), do: RabbitMQ.CLI.Formatters.Table + + @info_keys ~w(name deprecation_phase provided_by desc doc_url)a + + def info_keys(), do: @info_keys + + def scopes(), do: [:ctl, :diagnostics] + + def switches(), do: [used: :boolean] + + def merge_defaults([], opts) do + {["name", "deprecation_phase"], Map.merge(%{used: false}, opts)} + end + + def merge_defaults(args, opts) do + {args, Map.merge(%{used: false}, opts)} + end + + def validate(args, _) do + case InfoKeys.validate_info_keys(args, @info_keys) do + {:ok, _} -> :ok + err -> err + end + end + + def validate_execution_environment(args, opts) do + Validators.chain( + [ + &Validators.rabbit_is_loaded/2, + &Validators.rabbit_is_running/2 + ], + [args, opts] + ) + end + + def run([_ | _] = args, %{node: node_name, timeout: timeout, used: false}) do + case :rabbit_misc.rpc_call( + node_name, + :rabbit_depr_ff_extra, + :cli_info, + [:all], + timeout + ) do + # Server does not support deprecated features, consider none are available. + {:badrpc, {:EXIT, {:undef, _}}} -> [] + {:badrpc, _} = err -> err + val -> filter_by_arg(val, args) + end + end + + def run([_ | _] = args, %{node: node_name, timeout: timeout, used: true}) do + case :rabbit_misc.rpc_call( + node_name, + :rabbit_deprecated_feature_extra, + :cli_info, + [:used], + timeout + ) do + # Server does not support deprecated features, consider none are available. + {:badrpc, {:EXIT, {:undef, _}}} -> [] + {:badrpc, _} = err -> err + val -> filter_by_arg(val, args) + end + end + + def banner(_, %{used: false}), do: "Listing deprecated features ..." + def banner(_, %{used: true}), do: "Listing deprecated features in use ..." 
+ + def usage, do: "list_deprecated_features [--used] [<column> ...]" + + def usage_additional() do + [ + ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")], + ["--used", "returns deprecated features in use"] + ] + end + + def usage_doc_guides() do + [ + DocGuide.feature_flags() + ] + end + + def help_section(), do: :feature_flags + + def description(), do: "Lists deprecated features" + + # + # Implementation + # + + defp filter_by_arg(ff_info, _) when is_tuple(ff_info) do + # tuple means unexpected data + ff_info + end + + defp filter_by_arg(ff_info, [_ | _] = args) when is_list(ff_info) do + symbol_args = InfoKeys.prepare_info_keys(args) + + Enum.map( + ff_info, + fn ff -> + symbol_args + |> Enum.filter(fn arg -> ff[arg] != nil end) + |> Enum.map(fn arg -> {arg, ff[arg]} end) + end + ) + end +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_exchanges_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_exchanges_command.ex index 784055045788..38e0f7da6872 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_exchanges_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_exchanges_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListExchangesCommand do alias RabbitMQ.CLI.Ctl.{InfoKeys, RpcStream} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_feature_flags_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_feature_flags_command.ex index 30caba83394c..e9d287547f3c 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_feature_flags_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_feature_flags_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListFeatureFlagsCommand do alias RabbitMQ.CLI.Core.{DocGuide, Validators} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_global_parameters_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_global_parameters_command.ex index a73e7322bb50..154fe162e391 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_global_parameters_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_global_parameters_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
defmodule RabbitMQ.CLI.Ctl.Commands.ListGlobalParametersCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_hashes_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_hashes_command.ex index 2cbf813aad92..0ec885104f97 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_hashes_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_hashes_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListHashesCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_operator_policies_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_operator_policies_command.ex index b68dc32bf375..03a92b03bd96 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_operator_policies_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_operator_policies_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListOperatorPoliciesCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_parameters_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_parameters_command.ex index a67dba4ad719..8ab68774bf44 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_parameters_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_parameters_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListParametersCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_permissions_command.ex index 9097c96ee1e7..679f41bf182d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_permissions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_permissions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.ListPermissionsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_policies_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_policies_command.ex index 692956b6b475..0fd1c58e02d0 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_policies_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_policies_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListPoliciesCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_queues_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_queues_command.ex index 5788f61169b1..cb4b96232625 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_queues_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_queues_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand do require RabbitMQ.CLI.Ctl.InfoKeys @@ -23,13 +23,8 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand do message_bytes_unacknowledged message_bytes_ram message_bytes_persistent head_message_timestamp disk_reads disk_writes consumers consumer_utilisation consumer_capacity - memory slave_pids synchronised_slave_pids state type - leader members online - mirror_pids synchronised_mirror_pids)a - @info_key_aliases [ - {:mirror_pids, :slave_pids}, - {:synchronised_mirror_pids, :synchronised_slave_pids} - ] + memory state type + leader members online)a def description(), do: "Lists queues and their properties" @@ -67,7 +62,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand do end def validate(args, _opts) do - case InfoKeys.validate_info_keys(args, @info_keys, @info_key_aliases) do + case InfoKeys.validate_info_keys(args, @info_keys) do {:ok, _} -> :ok err -> err end @@ -91,7 +86,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand do other -> other end - info_keys = InfoKeys.prepare_info_keys(args, @info_key_aliases) + info_keys = InfoKeys.prepare_info_keys(args) broker_keys = InfoKeys.broker_keys(info_keys) Helpers.with_nodes_in_cluster(node_name, fn nodes -> diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_topic_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_topic_permissions_command.ex index 7b50b343ce8a..42afbe1abe99 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_topic_permissions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_topic_permissions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListTopicPermissionsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_unresponsive_queues_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_unresponsive_queues_command.ex index b9674dd7745e..78ce1d47c289 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_unresponsive_queues_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_unresponsive_queues_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListUnresponsiveQueuesCommand do require RabbitMQ.CLI.Ctl.InfoKeys @@ -14,9 +14,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListUnresponsiveQueuesCommand do @behaviour RabbitMQ.CLI.CommandBehaviour @info_keys ~w(name durable auto_delete - arguments pid recoverable_slaves - recoverable_mirrors)a - @info_key_aliases [recoverable_mirrors: :recoverable_slaves] + arguments pid)a def info_keys(), do: @info_keys @@ -41,7 +39,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListUnresponsiveQueuesCommand do end def validate(args, _opts) do - case InfoKeys.validate_info_keys(args, @info_keys, @info_key_aliases) do + case InfoKeys.validate_info_keys(args, @info_keys) do {:ok, _} -> :ok err -> err end @@ -56,7 +54,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListUnresponsiveQueuesCommand do queue_timeout: qtimeout, local: local_opt }) do - info_keys = InfoKeys.prepare_info_keys(args, @info_key_aliases) + info_keys = InfoKeys.prepare_info_keys(args) broker_keys = InfoKeys.broker_keys(info_keys) queue_timeout = qtimeout * 1000 diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_limits_command.ex index 1e01a13f644b..c5961cecbb94 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_limits_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_limits_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListUserLimitsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_permissions_command.ex index 1060e71e8fed..4c7f4db0a508 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_permissions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_permissions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListUserPermissionsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_topic_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_topic_permissions_command.ex index abca7c900fe6..5b8dbfd33c50 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_topic_permissions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_topic_permissions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListUserTopicPermissionsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_users_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_users_command.ex index 81e3180bde7f..1f242e89e600 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_users_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_users_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListUsersCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhost_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhost_limits_command.ex index 973452816bac..40e39858563a 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhost_limits_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhost_limits_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ListVhostLimitsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhosts_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhosts_command.ex index 36507eb38121..c9b4222e4d89 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhosts_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhosts_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.ListVhostsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/node_health_check_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/node_health_check_command.ex index a641b7be8282..85aa8cd10ac8 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/node_health_check_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/node_health_check_command.ex @@ -2,61 +2,25 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.NodeHealthCheckCommand do alias RabbitMQ.CLI.Core.DocGuide @behaviour RabbitMQ.CLI.CommandBehaviour - @default_timeout 70_000 - def scopes(), do: [:ctl, :diagnostics] use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout def merge_defaults(args, opts) do - timeout = - case opts[:timeout] do - nil -> @default_timeout - :infinity -> @default_timeout - other -> other - end - - {args, Map.merge(opts, %{timeout: timeout})} + {args, opts} end use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments use RabbitMQ.CLI.Core.RequiresRabbitAppRunning - def run([], %{node: node_name, timeout: timeout}) do - case :rabbit_misc.rpc_call(node_name, :rabbit_health_check, :node, [node_name, timeout]) do - :ok -> - :ok - - true -> - :ok - - {:badrpc, _} = err -> - err - - {:error_string, error_message} -> - {:healthcheck_failed, error_message} - - {:node_is_ko, error_message, _exit_code} -> - {:healthcheck_failed, error_message} - - other -> - other - end - end - - def output(:ok, _) do - {:ok, "Health check passed"} - end - - def output({:healthcheck_failed, message}, _) do - {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(), - "Error: health check failed. Message: #{message}"} + def run([], _opts) do + :ok end use RabbitMQ.CLI.DefaultOutput @@ -72,17 +36,14 @@ defmodule RabbitMQ.CLI.Ctl.Commands.NodeHealthCheckCommand do def help_section(), do: :deprecated def description() do - "DEPRECATED. Performs intrusive, opinionated health checks on a fully booted node. " <> - "See https://www.rabbitmq.com/monitoring.html#health-checks instead" + "DEPRECATED. This command is a no-op. " <> + "See https://www.rabbitmq.com/monitoring.html#health-checks" end - def banner(_, %{node: node_name, timeout: timeout}) do + def banner(_, _opts) do [ - "This command is DEPRECATED and will be removed in a future version.", - "It performs intrusive, opinionated health checks and requires a fully booted node.", - "Use one of the options covered in https://www.rabbitmq.com/monitoring.html#health-checks instead.", - "Timeout: #{trunc(timeout / 1000)} seconds ...", - "Checking health of node #{node_name} ..." + "This command is DEPRECATED and is a no-op. It will be removed in a future version. ", + "Use one of the options covered in https://www.rabbitmq.com/monitoring.html#health-checks instead." ] end end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/ping_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/ping_command.ex index cc250a734a4b..aee69d7f7c24 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/ping_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/ping_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.PingCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/purge_queue_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/purge_queue_command.ex index 7343830fbb36..532d95184fd2 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/purge_queue_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/purge_queue_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.PurgeQueueCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reconcile_vhosts_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reconcile_vhosts_command.ex new file mode 100644 index 000000000000..ee8ac42254b1 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reconcile_vhosts_command.ex @@ -0,0 +1,57 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +alias RabbitMQ.CLI.Core.ExitCodes + +defmodule RabbitMQ.CLI.Ctl.Commands.ReconcileVhostsCommand do + alias RabbitMQ.CLI.Core.DocGuide + + @behaviour RabbitMQ.CLI.CommandBehaviour + + use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout + + def merge_defaults(args, opts), do: {args, opts} + + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + + def run([], %{node: node_name, timeout: timeout}) do + :rabbit_misc.rpc_call(node_name, :rabbit_vhosts, :reconcile_once, [], timeout) + end + + def output({:ok, _pid}, _opts) do + {:ok, "Will reconcile all virtual hosts in the cluster. This operation is asynchronous."} + end + + def output({:error, err}, _output) do + {:error, ExitCodes.exit_software(), + ["Failed to start virtual host reconciliation", "Reason: #{inspect(err)}"]} + end + + use RabbitMQ.CLI.DefaultOutput + + def usage, do: "reconcile_vhosts" + + def usage_additional() do + [] + end + + def usage_doc_guides() do + [ + DocGuide.virtual_hosts(), + DocGuide.monitoring() + ] + end + + def help_section(), do: :virtual_hosts + + def description(), + do: "Makes sure all virtual hosts were initialized on all reachable cluster nodes" + + def banner(_, _) do + "Will try to initiate virtual host reconciliation on all reachable cluster nodes..." 
+ end +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/remove_classic_queue_mirroring_from_policies_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/remove_classic_queue_mirroring_from_policies_command.ex new file mode 100644 index 000000000000..e4b1c9e1b52e --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/remove_classic_queue_mirroring_from_policies_command.ex @@ -0,0 +1,40 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule RabbitMQ.CLI.Ctl.Commands.RemoveClassicQueueMirroringFromPoliciesCommand do + @behaviour RabbitMQ.CLI.CommandBehaviour + + use RabbitMQ.CLI.Core.MergesNoDefaults + use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + + def run([], %{node: node_name, timeout: timeout}) do + :rabbit_misc.rpc_call( + node_name, + :rabbit_mirror_queue_misc, + :remove_classic_queue_mirroring_from_policies_for_cli, + [], + timeout + ) + end + + use RabbitMQ.CLI.DefaultOutput + + def usage, do: "remove_classic_queue_mirroring_from_policies" + + def usage_doc_guides() do + [] + end + + def help_section(), do: :operations + + def description, + do: "Removes keys that enable classic queue mirroring from all regular and operator policies" + + def banner([], %{}), + do: + "Will remove keys that enable classic queue mirroring from all regular and operator policies" +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rename_cluster_node_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rename_cluster_node_command.ex index 1d9afcb1e7fc..6f926da49d92 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rename_cluster_node_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rename_cluster_node_command.ex @@ -2,12 +2,11 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.RenameClusterNodeCommand do require Integer - alias RabbitMQ.CLI.Core.{DocGuide, Validators} - import RabbitMQ.CLI.Core.DataCoercion + alias RabbitMQ.CLI.Core.DocGuide @behaviour RabbitMQ.CLI.CommandBehaviour @@ -21,28 +20,8 @@ defmodule RabbitMQ.CLI.Ctl.Commands.RenameClusterNodeCommand do :ok end - def validate_execution_environment(args, opts) do - Validators.chain( - [ - &validate_args_count_even/2, - &Validators.node_is_not_running/2, - &Validators.data_dir_is_set/2, - &Validators.feature_flags_file_is_set/2, - &Validators.rabbit_is_loaded/2 - ], - [args, opts] - ) - end - - def run(nodes, %{node: node_name}) do - node_pairs = make_node_pairs(nodes) - - try do - :rabbit_mnesia_rename.rename(node_name, node_pairs) - catch - _, reason -> - {:rename_failed, reason} - end + def run(_nodes, %{node: _node_name}) do + :ok end use RabbitMQ.CLI.DefaultOutput @@ -51,58 +30,19 @@ defmodule RabbitMQ.CLI.Ctl.Commands.RenameClusterNodeCommand do "rename_cluster_node [oldnode2] [newnode2] ..." 
end - def usage_additional() do - [ - ["", "Original node name"], - ["", "New node name"] - ] - end - def usage_doc_guides() do [ DocGuide.clustering() ] end - def help_section(), do: :cluster_management - - def description(), do: "Renames cluster nodes in the local database" - - def banner(args, _) do - [ - "Renaming cluster nodes: \n ", - for {node_from, node_to} <- make_node_pairs(args) do - "#{node_from} -> #{node_to} \n" - end - ] - |> List.flatten() - |> Enum.join() - end - - # - # Implementation - # - - defp validate_args_count_even(args, _) do - case agrs_count_even?(args) do - true -> - :ok - - false -> - {:validation_failure, - {:bad_argument, "Argument list should contain even number of nodes"}} - end - end - - defp agrs_count_even?(args) do - Integer.is_even(length(args)) - end + def help_section(), do: :deprecated - defp make_node_pairs([]) do - [] + def description() do + "DEPRECATED. This command is a no-op. Node renaming is incompatible with Raft-based features such as quorum queues, streams, Khepri. " end - defp make_node_pairs([from, to | rest]) do - [{to_atom(from), to_atom(to)} | make_node_pairs(rest)] + def banner(_, _opts) do + "DEPRECATED. This command is a no-op. Node renaming is incompatible with Raft-based features such as quorum queues, streams, Khepri. " end end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/report_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/report_command.ex index 008cba7f07ea..9bf3ca254ea0 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/report_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/report_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ReportCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reset_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reset_command.ex index fa3bcf53eab7..6b51e5d10de7 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reset_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reset_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ResetCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/restart_vhost_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/restart_vhost_command.ex index 9b0d80268c58..24a94f390582 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/restart_vhost_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/restart_vhost_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. alias RabbitMQ.CLI.Core.ExitCodes diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/resume_listeners_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/resume_listeners_command.ex index 7b0067dc1254..bef769436a78 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/resume_listeners_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/resume_listeners_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ResumeListenersCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rotate_logs_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rotate_logs_command.ex index e88df859a667..13573bd95fb1 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rotate_logs_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rotate_logs_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.RotateLogsCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_cluster_name_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_cluster_name_command.ex index 5bef54ec9ddf..bcb77f3113e6 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_cluster_name_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_cluster_name_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetClusterNameCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_disk_free_limit_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_disk_free_limit_command.ex index 135cc5a939ed..27eb54d5f434 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_disk_free_limit_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_disk_free_limit_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.SetDiskFreeLimitCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_global_parameter_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_global_parameter_command.ex index e549f518b1dd..9c09a9751d4d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_global_parameter_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_global_parameter_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetGlobalParameterCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_log_level_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_log_level_command.ex index 4f8bd83bb8bc..67316f4fcc99 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_log_level_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_log_level_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetLogLevelCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_operator_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_operator_policy_command.ex index c5d241b931b9..d5b5d8b6c7ad 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_operator_policy_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_operator_policy_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetOperatorPolicyCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_parameter_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_parameter_command.ex index ba10ab45c080..2f0ef54276a2 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_parameter_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_parameter_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.SetParameterCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_command.ex index b5b5b25478a6..23fe3f7c93a0 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetPermissionsCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_globally.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_globally.ex index f2f7ffc44c25..44b79ab9e3a9 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_globally.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_globally.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetPermissionsGloballyCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_policy_command.ex index 749c1ee36d03..191e0894e719 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_policy_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_policy_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetPolicyCommand do alias RabbitMQ.CLI.Core.{Helpers, DocGuide} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_topic_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_topic_permissions_command.ex index a8089dc9fc48..5eed3f600fdc 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_topic_permissions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_topic_permissions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.SetTopicPermissionsCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_limits_command.ex index 603a8008e779..a38e8e68223d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_limits_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_limits_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetUserLimitsCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_tags_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_tags_command.ex index bc43c7502cc0..ee3aabc6936f 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_tags_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_tags_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetUserTagsCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_limits_command.ex index 975b4fc4c5bc..9be2dde90315 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_limits_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_limits_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetVhostLimitsCommand do alias RabbitMQ.CLI.Core.{DocGuide, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_tags_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_tags_command.ex index 23349dbe6eb9..a73548636588 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_tags_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_tags_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.SetVhostTagsCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vm_memory_high_watermark_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vm_memory_high_watermark_command.ex index be96e015550b..ba4e16bf71db 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vm_memory_high_watermark_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vm_memory_high_watermark_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SetVmMemoryHighWatermarkCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/shutdown_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/shutdown_command.ex index 2b61638a0640..a74e5d6e9da4 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/shutdown_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/shutdown_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.ShutdownCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/start_app_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/start_app_command.ex index 6362c1141869..125575b16d78 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/start_app_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/start_app_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.StartAppCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/status_command.ex index 3ef2a4f6df85..c37c0971ceee 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/status_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/status_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
 defmodule RabbitMQ.CLI.Ctl.Commands.StatusCommand do
   alias RabbitMQ.CLI.Core.DocGuide
@@ -39,7 +39,8 @@ defmodule RabbitMQ.CLI.Ctl.Commands.StatusCommand do
         :ok
 
       false ->
-        {:validation_failure, "unit '#{unit}' is not supported. Please use one of: bytes, mb, gb"}
+        {:validation_failure,
+         "unit '#{unit}' is not supported. Please use one of: bytes, mb, mib, gb, gib, tb, tib"}
     end
   end
@@ -100,7 +101,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.StatusCommand do
     product_version_section ++
       [
         "RabbitMQ version: #{m[:rabbitmq_version]}",
-        "RabbitMQ release series support status: #{m[:release_series_support_status]}",
+        "RabbitMQ release series support status: see https://www.rabbitmq.com/release-information",
         "Node name: #{node_name}",
         "Erlang configuration: #{m[:erlang_version]}",
         "Crypto library: #{m[:crypto_lib_version]}",
@@ -165,8 +166,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.StatusCommand do
     file_descriptors = [
       "\n#{bright("File Descriptors")}\n",
-      "Total: #{m[:file_descriptors][:total_used]}, limit: #{m[:file_descriptors][:total_limit]}",
-      "Sockets: #{m[:file_descriptors][:sockets_used]}, limit: #{m[:file_descriptors][:sockets_limit]}"
+      "Total: #{m[:file_descriptors][:total_used]}, limit: #{m[:file_descriptors][:total_limit]}"
     ]
 
     disk_space_section = [
@@ -212,7 +212,10 @@ defmodule RabbitMQ.CLI.Ctl.Commands.StatusCommand do
   def usage_additional() do
     [
-      ["--unit <unit>", "byte multiple (bytes, megabytes, gigabytes) to use"],
+      [
+        "--unit <unit>",
+        "byte multiple (bytes, megabytes, gigabytes) to use"
+      ],
       ["--formatter <formatter>", "alternative formatter (JSON, Erlang terms)"]
     ]
   end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_app_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_app_command.ex
index b83c4ef7ebbe..0c679562d62d 100644
--- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_app_command.ex
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_app_command.ex
@@ -2,7 +2,7 @@
 ## License, v. 2.0. If a copy of the MPL was not distributed with this
 ## file, You can obtain one at https://mozilla.org/MPL/2.0/.
 ##
-## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
 
 defmodule RabbitMQ.CLI.Ctl.Commands.StopAppCommand do
   @behaviour RabbitMQ.CLI.CommandBehaviour
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_command.ex
index b0249b214f3b..3908e051db0b 100644
--- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_command.ex
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_command.ex
@@ -2,7 +2,7 @@
 ## License, v. 2.0. If a copy of the MPL was not distributed with this
 ## file, You can obtain one at https://mozilla.org/MPL/2.0/.
 ##
-## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
defmodule RabbitMQ.CLI.Ctl.Commands.StopCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/suspend_listeners_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/suspend_listeners_command.ex index 93a567538476..2e1cc66ee334 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/suspend_listeners_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/suspend_listeners_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.SuspendListenersCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/sync_queue_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/sync_queue_command.ex deleted file mode 100644 index f3e6a8236961..000000000000 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/sync_queue_command.ex +++ /dev/null @@ -1,56 +0,0 @@ -## This Source Code Form is subject to the terms of the Mozilla Public -## License, v. 2.0. If a copy of the MPL was not distributed with this -## file, You can obtain one at https://mozilla.org/MPL/2.0/. -## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. - -defmodule RabbitMQ.CLI.Ctl.Commands.SyncQueueCommand do - alias RabbitMQ.CLI.Core.DocGuide - - @behaviour RabbitMQ.CLI.CommandBehaviour - - def merge_defaults(args, opts) do - {args, Map.merge(%{vhost: "/"}, opts)} - end - - use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument - use RabbitMQ.CLI.Core.RequiresRabbitAppRunning - - def run([queue], %{vhost: vhost, node: node_name}) do - :rpc.call( - node_name, - :rabbit_mirror_queue_misc, - :sync_queue, - [:rabbit_misc.r(vhost, :queue, queue)], - :infinity - ) - end - - use RabbitMQ.CLI.DefaultOutput - - def usage do - "sync_queue [--vhost ] " - end - - def usage_additional() do - [ - ["", "Name of the queue to synchronise"] - ] - end - - def usage_doc_guides() do - [ - DocGuide.mirroring() - ] - end - - def help_section(), do: :replication - - def description(), - do: - "Instructs a mirrored queue with unsynchronised mirrors (follower replicas) to synchronise them" - - def banner([queue], %{vhost: vhost, node: _node}) do - "Synchronising queue '#{queue}' in vhost '#{vhost}' ..." - end -end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_off_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_off_command.ex index b305214ec8b6..19ffc796e9c6 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_off_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_off_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Ctl.Commands.TraceOffCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_on_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_on_command.ex index ec4d519c72ad..43cb4ac108d4 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_on_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_on_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.TraceOnCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_cluster_nodes_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_cluster_nodes_command.ex index 7d565897e09e..d443f4b27cf5 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_cluster_nodes_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_cluster_nodes_command.ex @@ -2,10 +2,10 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.UpdateClusterNodesCommand do - alias RabbitMQ.CLI.Core.{Config, DocGuide, Helpers} + alias RabbitMQ.CLI.Core.DocGuide @behaviour RabbitMQ.CLI.CommandBehaviour @@ -13,23 +13,12 @@ defmodule RabbitMQ.CLI.Ctl.Commands.UpdateClusterNodesCommand do use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument use RabbitMQ.CLI.Core.RequiresRabbitAppStopped - def run([seed_node], options = %{node: node_name}) do - long_or_short_names = Config.get_option(:longnames, options) - seed_node_normalised = Helpers.normalise_node(seed_node, long_or_short_names) - - case :rabbit_misc.rpc_call(node_name, :rabbit_db_cluster, :update_cluster_nodes, [ - seed_node_normalised - ]) do - {:badrpc, {:EXIT, {:undef, _}}} -> - :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :update_cluster_nodes, [ - seed_node_normalised - ]) - - ret0 -> - ret0 - end + def run([_seed_node], _opts) do + :ok end + use RabbitMQ.CLI.DefaultOutput + def usage() do "update_cluster_nodes " end @@ -46,25 +35,13 @@ defmodule RabbitMQ.CLI.Ctl.Commands.UpdateClusterNodesCommand do ] end - def help_section(), do: :cluster_management - - def description(), - do: - "Instructs a cluster member node to sync the list of known cluster members from " + def help_section(), do: :deprecated - def banner([seed_node], %{node: node_name}) do - "Will seed #{node_name} from #{seed_node} on next start" + def description() do + "DEPRECATED. This command is a no-op. Node update is incompatible with Raft-based features such as quorum queues, streams, Khepri. " end - def output({:error, :mnesia_unexpectedly_running}, %{node: node_name}) do - {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(), - RabbitMQ.CLI.DefaultOutput.mnesia_running_error(node_name)} + def banner(_, _opts) do + "DEPRECATED. This command is a no-op. 
Node update is incompatible with Raft-based features such as quorum queues, streams, Khepri. " end - - def output({:error, :cannot_cluster_node_with_itself}, %{node: node_name}) do - {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(), - "Error: cannot cluster node with itself: #{node_name}"} - end - - use RabbitMQ.CLI.DefaultOutput end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_vhost_metadata_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_vhost_metadata_command.ex index e0ac6d3efb0b..c01a4904bb4d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_vhost_metadata_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_vhost_metadata_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.UpdateVhostMetadataCommand do alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers, VirtualHosts} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/version_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/version_command.ex index 7f3dc6f6d6e9..cc36c9eb21d6 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/version_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/version_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.VersionCommand do alias RabbitMQ.CLI.Core.{Validators, Version} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/wait_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/wait_command.ex index 9e38f7a43ef3..e35e315481c3 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/wait_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/wait_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.Commands.WaitCommand do alias RabbitMQ.CLI.Core.{Helpers, Validators} @@ -245,6 +245,9 @@ defmodule RabbitMQ.CLI.Ctl.Commands.WaitCommand do timeout, fn -> case :file.read_file(pid_file) do + {:ok, <<>>} -> + {:error, :loop} + {:ok, bin} -> case Integer.parse(bin) do :error -> diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/info_keys.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/info_keys.ex index 345ad4649513..42933c6529fa 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/info_keys.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/info_keys.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.InfoKeys do import RabbitCommon.Records diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/rpc_stream.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/rpc_stream.ex index cd2a44dbe0c5..7addb7c56cf5 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/rpc_stream.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/rpc_stream.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Ctl.RpcStream do alias RabbitMQ.CLI.Ctl.InfoKeys diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/default_output.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/default_output.ex index 81199ce76a98..fa2b03a7222f 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/default_output.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/default_output.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. alias RabbitMQ.CLI.Formatters.FormatterHelpers defmodule RabbitMQ.CLI.DefaultOutput do @@ -17,7 +17,7 @@ defmodule RabbitMQ.CLI.DefaultOutput do end def output(result, opts \\ %{}) do - format_output(normalize_output(result, opts)) + format_output(format_khepri_output(normalize_output(result, opts), opts)) end def mnesia_running_error(node_name) do @@ -25,6 +25,11 @@ defmodule RabbitMQ.CLI.DefaultOutput do "Please stop RabbitMQ with 'rabbitmqctl stop_app' first." end + def khepri_timeout_error(node_name) do + "Khepri has timed out on node #{node_name}.\n" <> + "Khepri cluster could be in minority." 
+ end + defp normalize_output(:ok, %{node: node_name, formatter: "json"}) do {:ok, %{"result" => "ok", "node" => node_name}} end @@ -63,6 +68,28 @@ defmodule RabbitMQ.CLI.DefaultOutput do defp normalize_output({unknown, _} = input, _opts) when is_atom(unknown), do: {:error, input} defp normalize_output(result, _opts) when not is_atom(result), do: {:ok, result} + defp format_khepri_output({:error, :timeout}, %{node: node_name}) do + # Khepri >= 0.14.0 + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_tempfail(), khepri_timeout_error(node_name)} + end + + defp format_khepri_output({:error, {:timeout, {:rabbitmq_metadata, _}}}, %{node: node_name}) do + # Khepri < 0.14.0 + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_tempfail(), khepri_timeout_error(node_name)} + end + + defp format_khepri_output({:error, :timeout_waiting_for_leader}, %{node: node_name}) do + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_tempfail(), khepri_timeout_error(node_name)} + end + + defp format_khepri_output({:error, :timeout_waiting_for_khepri_projections}, %{node: node_name}) do + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_tempfail(), khepri_timeout_error(node_name)} + end + + defp format_khepri_output(result, _opts) do + result + end + defp format_output({:error, _} = result) do result end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/alarms_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/alarms_command.ex index 800f85c29f45..4c7cd5d289d9 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/alarms_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/alarms_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.AlarmsCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/certificates_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/certificates_command.ex index 733492a1df65..3a1432dab3c6 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/certificates_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/certificates_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.CertificatesCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_alarms_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_alarms_command.ex index 4130fe26d0cb..47c41676ba3b 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_alarms_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_alarms_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckAlarmsCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_certificate_expiration_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_certificate_expiration_command.ex index 20ee83a975a3..89c65af29713 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_certificate_expiration_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_certificate_expiration_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckCertificateExpirationCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_if_any_deprecated_features_are_used_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_if_any_deprecated_features_are_used_command.ex new file mode 100644 index 000000000000..25463173b66a --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_if_any_deprecated_features_are_used_command.ex @@ -0,0 +1,98 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
+ +defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckIfAnyDeprecatedFeaturesAreUsedCommand do + @behaviour RabbitMQ.CLI.CommandBehaviour + + def scopes(), do: [:ctl, :diagnostics] + + use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout + use RabbitMQ.CLI.Core.MergesNoDefaults + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + + def run([], opts) do + are_deprecated_features_used = %{ + :classic_queue_mirroring => is_used_classic_queue_mirroring(opts) + } + + deprecated_features_list = + Enum.reduce( + are_deprecated_features_used, + [], + fn + {_feat, _result}, {:badrpc, _} = acc -> + acc + + {feat, result}, acc -> + case result do + {:badrpc, _} = err -> err + {:error, _} = err -> err + true -> [feat | acc] + false -> acc + end + end + ) + + # health checks return true if they pass + case deprecated_features_list do + {:badrpc, _} = err -> err + {:error, _} = err -> err + [] -> true + xs when is_list(xs) -> {false, deprecated_features_list} + end + end + + def is_used_classic_queue_mirroring(%{node: node_name, timeout: timeout}) do + :rabbit_misc.rpc_call( + node_name, + :rabbit_mirror_queue_misc, + :are_cmqs_used, + [:none], + timeout + ) + end + + def output(true, %{formatter: "json"}) do + {:ok, %{"result" => "ok"}} + end + + def output(true, %{silent: true}) do + {:ok, :check_passed} + end + + def output(true, %{}) do + {:ok, "Cluster reported no deprecated features in use"} + end + + def output({false, deprecated_features_list}, %{formatter: "json"}) do + {:error, :check_failed, + %{ + "result" => "error", + "deprecated_features" => deprecated_features_list, + "message" => "Cluster reported deprecated features in use" + }} + end + + def output({false, _deprecated_features_list}, %{silent: true}) do + {:error, :check_failed} + end + + def output({false, deprecated_features_list}, _) do + {:error, :check_failed, deprecated_features_list} + end + + use RabbitMQ.CLI.DefaultOutput + + def usage, do: "check_if_any_deprecated_features_are_used" + + def help_section(), do: :observability_and_health_checks + + def description(), + do: "Generate a report listing all deprecated features in use" + + def banner(_, %{node: _node_name}), do: "Checking if any deprecated features are used ..." +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_if_cluster_has_classic_queue_mirroring_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_if_cluster_has_classic_queue_mirroring_policy_command.ex new file mode 100644 index 000000000000..16392be9f397 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_if_cluster_has_classic_queue_mirroring_policy_command.ex @@ -0,0 +1,126 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckIfClusterHasClassicQueueMirroringPolicyCommand do + @moduledoc """ + Exits with a non-zero code if there are policies enabling classic queue mirroring. + + This command is meant to be used as a pre-upgrade (pre-shutdown) check before classic queue + mirroring is removed. 
+ """ + + @behaviour RabbitMQ.CLI.CommandBehaviour + + import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0] + + def scopes(), do: [:diagnostics, :queues] + + use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout + use RabbitMQ.CLI.Core.MergesNoDefaults + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + + def run([], %{node: node_name, timeout: timeout}) do + policies = + :rabbit_misc.rpc_call( + node_name, + :rabbit_mirror_queue_misc, + :list_policies_with_classic_queue_mirroring_for_cli, + [], + timeout + ) + + op_policies = + :rabbit_misc.rpc_call( + node_name, + :rabbit_mirror_queue_misc, + :list_operator_policies_with_classic_queue_mirroring_for_cli, + [], + timeout + ) + + case {policies, op_policies} do + {[], []} -> + true + + {_, _} when is_list(policies) and is_list(op_policies) -> + {false, policies, op_policies} + + {{:badrpc, _} = left, _} -> + left + + {_, {:badrpc, _} = right} -> + right + + other -> + other + end + end + + def output(true, %{formatter: "json"}) do + {:ok, %{"result" => "ok"}} + end + + def output(true, %{silent: true}) do + {:ok, :check_passed} + end + + def output(true, %{}) do + {:ok, "Cluster reported no policies that enable classic queue mirroring"} + end + + def output({false, ps, op_ps}, %{formatter: "json"}) + when is_list(ps) and is_list(op_ps) do + {:error, :check_failed, + %{ + "result" => "error", + "policies" => ps, + "operator_policies" => op_ps, + "message" => "Cluster reported policies enabling classic queue mirroring" + }} + end + + def output({false, ps, op_ps}, %{silent: true}) when is_list(ps) and is_list(op_ps) do + {:error, :check_failed} + end + + def output({false, ps, op_ps}, _) when is_list(ps) and is_list(op_ps) do + lines = policy_lines(ps) + op_lines = op_policy_lines(op_ps) + + {:error, :check_failed, Enum.join(Enum.concat(lines, op_lines), line_separator())} + end + + use RabbitMQ.CLI.DefaultOutput + + def help_section(), do: :observability_and_health_checks + + def description() do + "Health check that exits with a non-zero code if there are policies that enable classic queue mirroring" + end + + def usage, do: "check_if_cluster_has_classic_queue_mirroring_policy" + + def banner([], _) do + "Checking if cluster has any classic queue mirroring policy ..." + end + + # + # Implementation + # + + def policy_lines(ps) do + for p <- ps do + "Policy #{p[:name]} enables classic queue mirroring" + end + end + + def op_policy_lines(ps) do + for p <- ps do + "Operator policy #{p[:name]} enables classic queue mirroring" + end + end +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_local_alarms_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_local_alarms_command.ex index d967d4379a66..61cec04d8c08 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_local_alarms_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_local_alarms_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckLocalAlarmsCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_connectivity_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_connectivity_command.ex index 4f25c8418e16..21e19a4405f4 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_connectivity_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_connectivity_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckPortConnectivityCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_listener_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_listener_command.ex index 0552cfd1a97e..9b9c3b182e8b 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_listener_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_listener_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckPortListenerCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_protocol_listener_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_protocol_listener_command.ex index 20ae31668479..544bc5aebe5d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_protocol_listener_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_protocol_listener_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckProtocolListenerCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_running_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_running_command.ex index 15ff34d7f48a..b385a1ab914e 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_running_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_running_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckRunningCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_virtual_hosts_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_virtual_hosts_command.ex index 82733cc31271..19977eb64377 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_virtual_hosts_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_virtual_hosts_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckVirtualHostsCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/cipher_suites_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/cipher_suites_command.ex index 4aab0554bcf0..a815d0e972cb 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/cipher_suites_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/cipher_suites_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.CipherSuitesCommand do alias RabbitMQ.CLI.Core.Helpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/command_line_arguments_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/command_line_arguments_command.ex index 10e7635e70fe..91875eaaf817 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/command_line_arguments_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/command_line_arguments_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.CommandLineArgumentsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/consume_event_stream_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/consume_event_stream_command.ex index 8bbd5df12444..3efe3acfbf0b 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/consume_event_stream_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/consume_event_stream_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Diagnostics.Commands.ConsumeEventStreamCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/disable_auth_attempt_source_tracking_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/disable_auth_attempt_source_tracking_command.ex index 8d1d058f30a2..a09b7b3c58a3 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/disable_auth_attempt_source_tracking_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/disable_auth_attempt_source_tracking_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.DisableAuthAttemptSourceTrackingCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/discover_peers_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/discover_peers_command.ex index de7f73296079..2fb72eec9d9a 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/discover_peers_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/discover_peers_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.DiscoverPeersCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/enable_auth_attempt_source_tracking_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/enable_auth_attempt_source_tracking_command.ex index 323eaaa5f11b..2930dc6653dc 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/enable_auth_attempt_source_tracking_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/enable_auth_attempt_source_tracking_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.EnableAuthAttemptSourceTrackingCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_hash_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_hash_command.ex index cdf3d02be4e6..b596d37c9328 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_hash_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_hash_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ErlangCookieHashCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_sources_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_sources_command.ex index 1e6219de1afd..07803f81c405 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_sources_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_sources_command.ex @@ -2,12 +2,13 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ErlangCookieSourcesCommand do @behaviour RabbitMQ.CLI.CommandBehaviour import RabbitMQ.CLI.Core.ANSI + import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0] use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout use RabbitMQ.CLI.Core.MergesNoDefaults @@ -67,7 +68,7 @@ defmodule RabbitMQ.CLI.Diagnostics.Commands.ErlangCookieSourcesCommand do def output(result, _opts) do cookie_file_lines = [ - "#{bright("Cookie File")}\n", + "#{bright("Cookie File")}", "Effective user: #{result[:effective_user] || "(none)"}", "Effective home directory: #{result[:home_dir] || "(none)"}", "Cookie file path: #{result[:cookie_file_path]}", @@ -78,20 +79,20 @@ defmodule RabbitMQ.CLI.Diagnostics.Commands.ErlangCookieSourcesCommand do ] switch_lines = [ - "\n#{bright("Cookie CLI Switch")}\n", + "#{line_separator()}#{bright("Cookie CLI Switch")}", "--erlang-cookie value set? #{result[:switch_cookie_set]}", "--erlang-cookie value length: #{result[:switch_cookie_value_length] || 0}" ] os_env_lines = [ - "\n#{bright("Env variable ")} #{bright_red("(Deprecated)")}\n", + "#{line_separator()}#{bright("Env variable ")} #{bright_red("(Deprecated)")}", "RABBITMQ_ERLANG_COOKIE value set? #{result[:os_env_cookie_set]}", "RABBITMQ_ERLANG_COOKIE value length: #{result[:os_env_cookie_value_length] || 0}" ] lines = cookie_file_lines ++ switch_lines ++ os_env_lines - {:ok, lines} + {:ok, Enum.join(lines, line_separator())} end def help_section(), do: :configuration @@ -102,7 +103,7 @@ defmodule RabbitMQ.CLI.Diagnostics.Commands.ErlangCookieSourcesCommand do def usage, do: "erlang_cookie_sources" - def formatter(), do: RabbitMQ.CLI.Formatters.StringPerLine + def formatter(), do: RabbitMQ.CLI.Formatters.String # # Implementation diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_version_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_version_command.ex index 48923d5ee6ab..5d5165bf268e 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_version_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_version_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. 
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ErlangVersionCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_booting_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_booting_command.ex index c1fc626c3495..91e6f7e33d2c 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_booting_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_booting_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.IsBootingCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_running_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_running_command.ex index 65212baeec71..a0c3a81d0607 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_running_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_running_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.IsRunningCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_network_interfaces_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_network_interfaces_command.ex index 54542901cb4b..7b26cb972524 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_network_interfaces_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_network_interfaces_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ListNetworkInterfacesCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_node_auth_attempt_stats_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_node_auth_attempt_stats_command.ex index 845cc224e961..7947066485d6 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_node_auth_attempt_stats_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_node_auth_attempt_stats_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ListNodeAuthAttemptStatsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_policies_that_match.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_policies_that_match.ex new file mode 100644 index 000000000000..7422c71ce9ce --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_policies_that_match.ex @@ -0,0 +1,113 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule RabbitMQ.CLI.Diagnostics.Commands.ListPoliciesThatMatchCommand do + alias RabbitMQ.CLI.Core.DocGuide + + @behaviour RabbitMQ.CLI.CommandBehaviour + def scopes(), do: [:diagnostics] + + def switches(), do: [object_type: :string] + use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument + + def merge_defaults(args, opts) do + {args, Map.merge(%{vhost: "/", object_type: "queue"}, opts)} + end + + use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + + def run([queue], %{node: node_name, vhost: vhost, object_type: object_type, timeout: timeout}) do + resource = + :rabbit_misc.rpc_call( + node_name, + :rabbit_misc, + :r, + [vhost, String.to_atom(object_type), queue], + timeout + ) + + res = + case object_type do + "exchange" -> + resource + + "queue" -> + :rabbit_misc.rpc_call( + node_name, + :rabbit_amqqueue, + :lookup, + [resource], + timeout + ) + end + + case res do + {:ok, q} -> + list_policies_that_match(node_name, q, timeout) + + {:resource, _, :exchange, _} = ex -> + list_policies_that_match(node_name, ex, timeout) + + _ -> + res + end + end + + def output([], %{node: _node_name, formatter: "json"}) do + {:ok, %{"result" => "ok", "policies" => []}} + end + + def output({:error, :not_found}, %{node: _node_name, formatter: "json"}) do + {:ok, + %{"result" => "error", "message" => "object (queue, exchange) not found", "policies" => []}} + end + + def output(value, %{node: _node_name, formatter: "json"}) when is_list(value) do + {:ok, %{"result" => "ok", "policies" => value}} + end + + use RabbitMQ.CLI.DefaultOutput + + def formatter(), do: RabbitMQ.CLI.Formatters.PrettyTable + + def usage, do: "list_policies_that_match [--object-type <object type>] <name>" + + def usage_additional() do + [ + ["<name>", "The name of the queue/exchange"], + ["--object-type <object type>", "the type of object to match (default: queue)"] + ] + end + + def usage_doc_guides() do + [ + DocGuide.queues() + ] + end + + def help_section(), do: :policies + + def description(), + do: + "Lists all policies matching a queue/exchange (only the highest priority policy is active)" + + def banner([name], %{vhost: vhost, object_type: object_type}) do + "Listing policies that match #{object_type} '#{name}' in vhost '#{vhost}' ..." 
+ end + + # + # Implementation + # + + defp list_policies_that_match(node_name, name, timeout) do + res = :rabbit_misc.rpc_call(node_name, :rabbit_policy, :match_all, [name], timeout) + + case res do + {:ok, _message_count} -> :ok + _ -> res + end + end +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/listeners_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/listeners_command.ex index aebfaa35ea17..77a6e8c4a4d1 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/listeners_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/listeners_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ListenersCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_location_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_location_command.ex index 2eb66dae9bee..d76e4e7c4d94 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_location_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_location_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.LogLocationCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_command.ex index 0f06ee8632b6..c9ca5026dfae 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.LogTailCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_stream_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_stream_command.ex index 32f347513f2b..3870492995bc 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_stream_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_stream_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Diagnostics.Commands.LogTailStreamCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/maybe_stuck_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/maybe_stuck_command.ex index db1188fe6677..e73cb2b1a126 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/maybe_stuck_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/maybe_stuck_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.MaybeStuckCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/memory_breakdown_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/memory_breakdown_command.ex index 2707b97cf9eb..66be12057d5b 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/memory_breakdown_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/memory_breakdown_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.MemoryBreakdownCommand do alias RabbitMQ.CLI.InformationUnit, as: IU diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/metadata_store_status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/metadata_store_status_command.ex new file mode 100644 index 000000000000..35e1f2f78402 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/metadata_store_status_command.ex @@ -0,0 +1,34 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule RabbitMQ.CLI.Diagnostics.Commands.MetadataStoreStatusCommand do + @behaviour RabbitMQ.CLI.CommandBehaviour + def scopes(), do: [:diagnostics] + + def merge_defaults(args, opts), do: {args, Map.merge(%{vhost: "/"}, opts)} + + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + + def run([] = _args, %{node: node_name}) do + :rabbit_misc.rpc_call(node_name, :rabbit_khepri, :status, []) + end + + use RabbitMQ.CLI.DefaultOutput + + def formatter(), do: RabbitMQ.CLI.Formatters.PrettyTable + + def usage() do + "metadata_store_status" + end + + def help_section(), do: :observability_and_health_checks + + def description(), do: "Displays quorum status of Khepri metadata store" + + def banner([], %{node: node_name}), + do: "Status of metadata store on node #{node_name} ..." 
+end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/observer_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/observer_command.ex index c0f226386065..9c22a6dba5f9 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/observer_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/observer_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ObserverCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/os_env_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/os_env_command.ex index cee60360f841..c34d1d99daf0 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/os_env_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/os_env_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.OsEnvCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/remote_shell_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/remote_shell_command.ex index e25e3fd0bd05..2896c112ea92 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/remote_shell_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/remote_shell_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.RemoteShellCommand do @behaviour RabbitMQ.CLI.CommandBehaviour @@ -38,7 +38,7 @@ defmodule RabbitMQ.CLI.Diagnostics.Commands.RemoteShellCommand do case :shell.start_interactive({node_name, {:shell, :start, []}}) do :ok -> :ok {:error, :already_started} -> :ok - {error, _} -> {:error, {:badrpc, :nodedown}} + {:error, _} -> {:error, {:badrpc, :nodedown}} end :timer.sleep(:infinity) diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/reset_node_auth_attempt_metrics_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/reset_node_auth_attempt_metrics_command.ex index dda2344a6fcd..a01cca151aca 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/reset_node_auth_attempt_metrics_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/reset_node_auth_attempt_metrics_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. 
All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ResetNodeAuthAttemptMetricsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolve_hostname_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolve_hostname_command.ex index 1847a5b4190c..5cfdefde851e 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolve_hostname_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolve_hostname_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ResolveHostnameCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolver_info_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolver_info_command.ex index 9dea234c49a7..c6ba75a3a716 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolver_info_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolver_info_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ResolverInfoCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/runtime_thread_stats_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/runtime_thread_stats_command.ex index d6116106b51c..e41dba74643a 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/runtime_thread_stats_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/runtime_thread_stats_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.RuntimeThreadStatsCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/schema_info_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/schema_info_command.ex index b59f45f57791..d99194cf60e5 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/schema_info_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/schema_info_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.SchemaInfoCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/server_version_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/server_version_command.ex index 5418dd29d11d..6e51020fcaef 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/server_version_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/server_version_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.ServerVersionCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/tls_versions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/tls_versions_command.ex index 0e6c099e51e9..aac855ea8e00 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/tls_versions_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/tls_versions_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Commands.TlsVersionsCommand do @behaviour RabbitMQ.CLI.CommandBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/diagnostics_helpers.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/diagnostics_helpers.ex index c24e72ca0e95..db2840e772a2 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/diagnostics_helpers.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/diagnostics_helpers.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Diagnostics.Helpers do def test_connection(hostname_or_ip, port, timeout) do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatter_behaviour.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatter_behaviour.ex index 684950936ec6..66b1833942d1 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatter_behaviour.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatter_behaviour.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Formats returned values e.g. 
to human-readable text or JSON. defmodule RabbitMQ.CLI.FormatterBehaviour do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/csv.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/csv.ex index 759dad4d1210..1a13f55573b3 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/csv.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/csv.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. alias RabbitMQ.CLI.Formatters.FormatterHelpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/encrypted_conf_value.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/encrypted_conf_value.ex new file mode 100644 index 000000000000..7eabc77b3a7a --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/encrypted_conf_value.ex @@ -0,0 +1,26 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +## Prints values from a command as strings(if possible) +defmodule RabbitMQ.CLI.Formatters.EncryptedConfValue do + alias RabbitMQ.CLI.Core.Helpers + alias RabbitMQ.CLI.Formatters.FormatterHelpers + + @behaviour RabbitMQ.CLI.FormatterBehaviour + + def format_output(output, _) do + Helpers.string_or_inspect("encrypted:#{output}") + end + + def format_stream(stream, options) do + Stream.map( + stream, + FormatterHelpers.without_errors_1(fn el -> + format_output(el, options) + end) + ) + end +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/erlang.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/erlang.ex index 5919512b234c..003e77b1adc1 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/erlang.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/erlang.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Formatters.Erlang do @behaviour RabbitMQ.CLI.FormatterBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/formatter_helpers.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/formatter_helpers.ex index edd703f55123..d1b47f1006f2 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/formatter_helpers.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/formatter_helpers.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Formatters.FormatterHelpers do import RabbitCommon.Records diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/inspect.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/inspect.ex index 7cc34b2ef04a..3c1b09ea966a 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/inspect.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/inspect.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. alias RabbitMQ.CLI.Formatters.FormatterHelpers defmodule RabbitMQ.CLI.Formatters.Inspect do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json.ex index ad46b1be9bf3..348d6e8ea740 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Basic JSON formatter. Supports 1-level of # collection using start/finish_collection. diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json_stream.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json_stream.ex index 7f29998a8631..ba91714cd0d2 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json_stream.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json_stream.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Basic JSON formatter. Supports 1-level of # collection using start/finish_collection. diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/msacc.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/msacc.ex index 02ce7b3abd2b..b35e9316b467 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/msacc.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/msacc.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Formatters.Msacc do @behaviour RabbitMQ.CLI.FormatterBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/plugins.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/plugins.ex index 19411b86f80b..f72600ab2008 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/plugins.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/plugins.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. alias RabbitMQ.CLI.Formatters.FormatterHelpers defmodule RabbitMQ.CLI.Formatters.Plugins do @@ -146,10 +146,18 @@ defmodule RabbitMQ.CLI.Formatters.Plugins do ] end + defp augment_version(%{version: version, running: false}) do + to_string(version) + end + defp augment_version(%{version: version, running_version: nil}) do to_string(version) end + defp augment_version(%{version: version, running_version: ""}) do + to_string(version) + end + defp augment_version(%{version: version, running_version: version}) do to_string(version) end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/pretty_table.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/pretty_table.ex index 9e2c64649c1c..155e85874231 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/pretty_table.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/pretty_table.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Formatters.PrettyTable do @behaviour RabbitMQ.CLI.FormatterBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/report.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/report.ex index 98fefae6430a..7817a88cfb34 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/report.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/report.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Formatters.Report do alias RabbitMQ.CLI.Formatters.FormatterHelpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string.ex index 2631e0874236..60404a2ca2c3 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
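The extra augment_version/1 clauses added to the Plugins formatter above can be read as follows (a conceptual sketch only; the function is private to that module, and the maps are illustrative):

# A plugin that is not running, or whose running_version is nil or empty,
# is displayed with its installed version alone:
augment_version(%{version: "4.0.0", running: false})       # => "4.0.0"
augment_version(%{version: "4.0.0", running_version: ""})  # => "4.0.0"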
## Prints values from a command as strings(if possible) defmodule RabbitMQ.CLI.Formatters.String do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string_per_line.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string_per_line.ex index 85ae2ed81c42..1533a8d0fc2c 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string_per_line.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string_per_line.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Formatters.StringPerLine do @doc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/table.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/table.ex index 2672d65a4a0a..2cdac5b0bb07 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/table.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/table.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. alias RabbitMQ.CLI.Formatters.FormatterHelpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/information_unit.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/information_unit.ex index ceced30d4aea..cd6d335d9baa 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/information_unit.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/information_unit.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.InformationUnit do require MapSet @@ -11,18 +11,42 @@ defmodule RabbitMQ.CLI.InformationUnit do @megabyte_bytes @kilobyte_bytes * 1000 @gigabyte_bytes @megabyte_bytes * 1000 @terabyte_bytes @gigabyte_bytes * 1000 + @petabyte_bytes @terabyte_bytes * 1000 + + @kibibyte_bytes 1024 + @mebibyte_bytes @kibibyte_bytes * 1024 + @gibibyte_bytes @mebibyte_bytes * 1024 + @tebibyte_bytes @gibibyte_bytes * 1024 + @pebibyte_bytes @tebibyte_bytes * 1024 def known_units() do MapSet.new([ "bytes", + "k", "kb", + "ki", + "kib", "kilobytes", + "m", "mb", + "mi", + "mib", "megabytes", + "g", "gb", + "gi", + "gib", "gigabytes", + "t", "tb", - "terabytes" + "ti", + "tib", + "terabytes", + "p", + "pb", + "pi", + "pib", + "petabytes" ]) end @@ -50,23 +74,64 @@ defmodule RabbitMQ.CLI.InformationUnit do Float.round(bytes / @kilobyte_bytes, 4) end + defp do_convert(bytes, "k"), do: do_convert(bytes, "kb") + + defp do_convert(bytes, "ki") do + Float.round(bytes / @kibibyte_bytes, 4) + end + + defp do_convert(bytes, "kib"), do: do_convert(bytes, "ki") defp do_convert(bytes, "kilobytes"), do: do_convert(bytes, "kb") defp do_convert(bytes, "mb") do Float.round(bytes / @megabyte_bytes, 4) end + defp do_convert(bytes, "m"), do: do_convert(bytes, "mb") + + defp do_convert(bytes, "mi") do + Float.round(bytes / @mebibyte_bytes, 4) + end + + defp do_convert(bytes, "mib"), do: do_convert(bytes, "mi") defp do_convert(bytes, "megabytes"), do: do_convert(bytes, "mb") defp do_convert(bytes, "gb") do Float.round(bytes / @gigabyte_bytes, 4) end + defp do_convert(bytes, "g"), do: do_convert(bytes, "gb") + + defp do_convert(bytes, "gi") do + Float.round(bytes / @gigabyte_bytes, 4) + end + + defp do_convert(bytes, "gib"), do: do_convert(bytes, "gi") defp do_convert(bytes, "gigabytes"), do: do_convert(bytes, "gb") defp do_convert(bytes, "tb") do Float.round(bytes / @terabyte_bytes, 4) end + defp do_convert(bytes, "t"), do: do_convert(bytes, "tb") + + defp do_convert(bytes, "ti") do + Float.round(bytes / @tebibyte_bytes, 4) + end + + defp do_convert(bytes, "tib"), do: do_convert(bytes, "ti") defp do_convert(bytes, "terabytes"), do: do_convert(bytes, "tb") + + defp do_convert(bytes, "pb") do + Float.round(bytes / @petabyte_bytes, 4) + end + + defp do_convert(bytes, "p"), do: do_convert(bytes, "pb") + + defp do_convert(bytes, "pi") do + Float.round(bytes / @pebibyte_bytes, 4) + end + + defp do_convert(bytes, "pib"), do: do_convert(bytes, "pi") + defp do_convert(bytes, "petabytes"), do: do_convert(bytes, "pb") end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/directories_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/directories_command.ex index b1df7716463f..0cf711b4d18b 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/directories_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/directories_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
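The InformationUnit additions above distinguish decimal suffixes (kb, mb, gb, tb, pb) from binary ones (ki/kib, mi/mib, and so on). A small sketch, assuming the module's existing public convert/2 wrapper (not shown in this hunk) downcases the unit and delegates to do_convert/2:

alias RabbitMQ.CLI.InformationUnit, as: IU

IU.convert(5_000_000, "mb")   # => 5.0     (1 MB  = 1_000_000 bytes)
IU.convert(5_000_000, "mib")  # => 4.7684  (1 MiB = 1_048_576 bytes)
IU.convert(2_048, "ki")       # => 2.0     (1 KiB = 1_024 bytes)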
defmodule RabbitMQ.CLI.Plugins.Commands.DirectoriesCommand do alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/disable_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/disable_command.ex index 4c4ae384faae..f96897df7793 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/disable_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/disable_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Plugins.Commands.DisableCommand do alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/enable_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/enable_command.ex index 9f687df4f05c..779e2ef54f05 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/enable_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/enable_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Plugins.Commands.EnableCommand do alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/is_enabled.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/is_enabled.ex index a18037720f7c..56f622aff81c 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/is_enabled.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/is_enabled.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Plugins.Commands.IsEnabledCommand do alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/list_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/list_command.ex index 632b64c432df..7168f0ed6dbe 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/list_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/list_command.ex @@ -2,12 +2,12 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Plugins.Commands.ListCommand do import RabbitCommon.Records - alias RabbitMQ.CLI.Core.{DocGuide, Validators} + alias RabbitMQ.CLI.Core.{Config, DocGuide, Validators} alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers import RabbitMQ.CLI.Core.{CodePath, Paths} @@ -61,8 +61,14 @@ defmodule RabbitMQ.CLI.Plugins.Commands.ListCommand do :ok false -> - names = Enum.join(Enum.to_list(missing), ", ") - IO.puts("WARNING - plugins currently enabled but missing: #{names}\n") + case Config.output_less?(opts) do + false -> + names = Enum.join(Enum.to_list(missing), ", ") + IO.puts("WARNING - plugins currently enabled but missing: #{names}\n") + + true -> + :ok + end end implicit = :rabbit_plugins.dependencies(false, enabled, all) @@ -116,7 +122,7 @@ defmodule RabbitMQ.CLI.Plugins.Commands.ListCommand do ["--verbose", "output more information"], [ "--minimal", - "only print plugin names. Most useful in compbination with --silent and --enabled." + "only print plugin names. Most useful in combination with --silent and --enabled." ], ["--enabled", "only list enabled plugins"], ["--implicitly-enabled", "include plugins enabled as dependencies of other plugins"] @@ -172,8 +178,8 @@ defmodule RabbitMQ.CLI.Plugins.Commands.ListCommand do %{ name: name, - version: version, - running_version: running[name], + version: to_string(version), + running_version: to_string(running[name]), enabled: enabled_mode, running: Keyword.has_key?(running, name) } diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/set_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/set_command.ex index 42ed68c0c728..fc5a993b2930 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/set_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/set_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Plugins.Commands.SetCommand do alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/error_output.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/error_output.ex index ad21bb68661c..8509504481e8 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/error_output.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/error_output.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # Default output implementation for plugin commands defmodule RabbitMQ.CLI.Plugins.ErrorOutput do diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/plugins_helpers.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/plugins_helpers.ex index 59341f42c815..31073749305f 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/plugins_helpers.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/plugins_helpers.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. 
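Two behavioural notes on the ListCommand hunk above: the "enabled but missing" warning is now suppressed when Config.output_less?/1 is true, and the version fields are wrapped in to_string/1 because they come back from the node as Erlang charlists, which otherwise render poorly in machine-readable output. A one-line illustration (the version value is made up):

to_string(~c"4.0.0")  # => "4.0.0" instead of a charlist rendered as a list of integers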
## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Plugins.Helpers do import RabbitMQ.CLI.Core.DataCoercion @@ -195,7 +195,9 @@ defmodule RabbitMQ.CLI.Plugins.Helpers do all_plugin_names = Enum.map(all, &plugin_name/1) missing = MapSet.difference(MapSet.new(plugins), MapSet.new(all_plugin_names)) - case Enum.empty?(missing) do + hard_write = Map.get(opts, :hard_write, false) + + case Enum.empty?(missing) or hard_write do true -> case :rabbit_file.write_term_file(to_charlist(plugins_file), [plugins]) do :ok -> diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/printer_behaviour.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/printer_behaviour.ex index e8bf9425e80f..1d7b1e51c052 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/printer_behaviour.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/printer_behaviour.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.PrinterBehaviour do @callback init(options :: map()) :: {:ok, printer_state :: any} | {:error, error :: any} diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/file.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/file.ex index 498e2039b963..67f84666913d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/file.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/file.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Printers.File do @behaviour RabbitMQ.CLI.PrinterBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io.ex index 5fbe53407f37..9c26e24b5bdf 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Printers.StdIO do @behaviour RabbitMQ.CLI.PrinterBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io_raw.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io_raw.ex index df95e5c11626..801ab7affd15 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io_raw.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io_raw.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Printers.StdIORaw do @behaviour RabbitMQ.CLI.PrinterBehaviour diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/add_member_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/add_member_command.ex index e4d96715bc5f..d621b729142a 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/add_member_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/add_member_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.AddMemberCommand do alias RabbitMQ.CLI.Core.{DocGuide, Validators} @@ -10,21 +10,51 @@ defmodule RabbitMQ.CLI.Queues.Commands.AddMemberCommand do @behaviour RabbitMQ.CLI.CommandBehaviour - @default_timeout 5_000 + defp default_opts, do: %{vhost: "/", membership: "promotable", timeout: 5_000} def merge_defaults(args, opts) do - timeout = - case opts[:timeout] do - nil -> @default_timeout - :infinity -> @default_timeout - other -> other - end + default = default_opts() + + opts = + Map.update( + opts, + :timeout, + :infinity, + &case &1 do + :infinity -> default.timeout + other -> other + end + ) + + {args, Map.merge(default, opts)} + end - {args, Map.merge(%{vhost: "/", timeout: timeout}, opts)} + def switches(), + do: [ + timeout: :integer, + membership: :string + ] + + def aliases(), do: [t: :timeout] + + def validate(args, _) when length(args) < 2 do + {:validation_failure, :not_enough_args} end - use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout - use RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments + def validate(args, _) when length(args) > 2 do + {:validation_failure, :too_many_args} + end + + def validate(_, %{membership: m}) + when not (m == "promotable" or + m == "non_voter" or + m == "voter") do + {:validation_failure, "voter status '#{m}' is not recognised."} + end + + def validate(_, _) do + :ok + end def validate_execution_environment(args, opts) do Validators.chain( @@ -39,13 +69,19 @@ defmodule RabbitMQ.CLI.Queues.Commands.AddMemberCommand do ) end - def run([name, node] = _args, %{vhost: vhost, node: node_name, timeout: timeout}) do - case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :add_member, [ - vhost, - name, - to_atom(node), - timeout - ]) do + def run( + [name, node] = _args, + %{vhost: vhost, node: node_name, timeout: timeout, membership: membership} + ) do + args = [vhost, name, to_atom(node)] + + args = + case to_atom(membership) do + :promotable -> args ++ [timeout] + other -> args ++ [other, timeout] + end + + case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :add_member, args) do {:error, :classic_queue_not_supported} -> {:error, "Cannot add members to a classic queue"} @@ -59,12 +95,13 @@ defmodule RabbitMQ.CLI.Queues.Commands.AddMemberCommand do use RabbitMQ.CLI.DefaultOutput - def usage, do: "add_member [--vhost ] " + def usage, do: "add_member [--vhost ] [--membership ]" def usage_additional do [ ["", "quorum queue name"], - ["", "node to add a new replica on"] + ["", "node to add a new replica on"], + ["--membership ", "add a promotable 
non-voter (default) or full voter"] ] end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_new_quorum_queue_replicas_have_finished_initial_sync.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_new_quorum_queue_replicas_have_finished_initial_sync.ex new file mode 100644 index 000000000000..e893792ae616 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_new_quorum_queue_replicas_have_finished_initial_sync.ex @@ -0,0 +1,95 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNewQuorumQueueReplicasHaveFinishedInitialSyncCommand do + @moduledoc """ + Exits with a non-zero code if there are quorum queues + that run "non-voter" (not yet done with their initial sync, promotable to voters) + replicas on the current node. + + This command is used to verify if a new cluster node hosts only + fully synchronized. + """ + + @behaviour RabbitMQ.CLI.CommandBehaviour + + import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0] + + def scopes(), do: [:diagnostics, :queues] + + use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout + use RabbitMQ.CLI.Core.MergesNoDefaults + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + + def run([], %{node: node_name, timeout: timeout}) do + case :rabbit_misc.rpc_call( + node_name, + :rabbit_quorum_queue, + :list_with_local_promotable_for_cli, + [], + timeout + ) do + [] -> {:ok, []} + qs when is_list(qs) -> {:ok, qs} + other -> other + end + end + + def output({:ok, []}, %{formatter: "json"}) do + {:ok, %{"result" => "ok"}} + end + + def output({:ok, []}, %{silent: true}) do + {:ok, :check_passed} + end + + def output({:ok, []}, %{node: node_name}) do + {:ok, "Node #{node_name} reported no queues with promotable replicas"} + end + + def output({:ok, qs}, %{node: node_name, formatter: "json"}) when is_list(qs) do + {:error, :check_failed, + %{ + "result" => "error", + "queues" => qs, + "message" => "Node #{node_name} reported local queues promotable replicas" + }} + end + + def output({:ok, qs}, %{silent: true}) when is_list(qs) do + {:error, :check_failed} + end + + def output({:ok, qs}, %{node: node_name}) when is_list(qs) do + lines = queue_lines(qs, node_name) + + {:error, :check_failed, Enum.join(lines, line_separator())} + end + + use RabbitMQ.CLI.DefaultOutput + + def help_section(), do: :observability_and_health_checks + + def description() do + "Health check that exits with a non-zero code if there are queues " <> + "that run promotable replicas on the current node." + end + + def usage, do: "check_if_new_quorum_queue_replicas_have_finished_initial_sync" + + def banner([], %{node: node_name}) do + "Checking if node #{node_name} runs promotable replicas of any queues ..." + end + + # + # Implementation + # + + def queue_lines(qs, node_name) do + for q <- qs, do: "#{q["readable_name"]} hasn't finished synchronization with #{node_name}." 
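A sketch of the new validate/2 clauses in the add_member command earlier in this patch (queue and node names are illustrative; only "promotable", "non_voter" and "voter" are accepted for --membership):

alias RabbitMQ.CLI.Queues.Commands.AddMemberCommand

AddMemberCommand.validate(["qq.1", "rabbit@node2"], %{membership: "voter"})
# => :ok
AddMemberCommand.validate(["qq.1", "rabbit@node2"], %{membership: "observer"})
# => {:validation_failure, "voter status 'observer' is not recognised."}
AddMemberCommand.validate(["qq.1"], %{membership: "voter"})
# => {:validation_failure, :not_enough_args}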
+ end +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_mirror_sync_critical_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_mirror_sync_critical_command.ex index 47081be13184..3b9d66f311e2 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_mirror_sync_critical_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_mirror_sync_critical_command.ex @@ -2,10 +2,12 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommand do @moduledoc """ + DEPRECATED: this command does nothing in RabbitMQ 4.0 and newer. + Exits with a non-zero code if there are classic mirrored queues that don't have any in sync mirrors online and would potentially lose data if the target node is shut down. @@ -15,8 +17,6 @@ defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommand do @behaviour RabbitMQ.CLI.CommandBehaviour - import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0] - def scopes(), do: [:diagnostics, :queues] use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout @@ -24,104 +24,30 @@ defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommand do use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments use RabbitMQ.CLI.Core.RequiresRabbitAppRunning - def run([], %{node: node_name, timeout: timeout}) do - case :rabbit_misc.rpc_call(node_name, :rabbit_nodes, :is_single_node_cluster, [], timeout) do - # if target node is the only one in the cluster, the check makes little sense - # and false positives can be misleading - true -> - {:ok, :single_node_cluster} - - false -> - case :rabbit_misc.rpc_call( - node_name, - :rabbit_amqqueue, - :list_local_mirrored_classic_without_synchronised_mirrors_for_cli, - [], - timeout - ) do - [] -> {:ok, []} - qs when is_list(qs) -> {:ok, qs} - other -> other - end - - other -> - other - end - end - - def output({:ok, :single_node_cluster}, %{formatter: "json"}) do - {:ok, - %{ - "result" => "ok", - "message" => - "Target node seems to be the only one in a single node cluster, the check does not apply" - }} + def run([], _opts) do + :ok end - def output({:ok, []}, %{formatter: "json"}) do + def output(:ok, %{formatter: "json"}) do {:ok, %{"result" => "ok"}} end - def output({:ok, :single_node_cluster}, %{silent: true}) do - {:ok, :check_passed} - end - - def output({:ok, []}, %{silent: true}) do - {:ok, :check_passed} - end - - def output({:ok, :single_node_cluster}, %{node: node_name}) do - {:ok, - "Node #{node_name} seems to be the only one in a single node cluster, the check does not apply"} - end - - def output({:ok, []}, %{node: node_name}) do - {:ok, - "Node #{node_name} reported no classic mirrored queues without online synchronised mirrors"} - end - - def output({:ok, qs}, %{node: node_name, formatter: "json"}) when is_list(qs) do - {:error, :check_failed, - %{ - "result" => "error", - "queues" => qs, - "message" => - "Node #{node_name} reported local classic mirrored queues without online synchronised mirrors" - }} - end - - def output({:ok, qs}, %{silent: true}) when is_list(qs) do - {:error, :check_failed} - end - - def 
output({:ok, qs}, %{node: node_name}) when is_list(qs) do - lines = queue_lines(qs, node_name) - - {:error, :check_failed, Enum.join(lines, line_separator())} + def output(:ok, _opts) do + {:ok, "ok"} end use RabbitMQ.CLI.DefaultOutput - def help_section(), do: :observability_and_health_checks + def help_section(), do: :deprecated def description() do - "Health check that exits with a non-zero code if there are classic mirrored queues " <> - "without online synchronised mirrors (queues that would potentially lose data if the target node is shut down)" + "DEPRECATED. Mirrored queues were removed in RabbitMQ 4.0. This command is a no-op." end def usage, do: "check_if_node_is_mirror_sync_critical" - def banner([], %{node: node_name}) do - "Checking if node #{node_name} is critical for data safety of any classic mirrored queues ..." + def banner([], _) do + "This command is DEPRECATED and is a no-op. It will be removed in a future version." end - # - # Implementation - # - - def queue_lines(qs, node_name) do - for q <- qs do - "#{q["readable_name"]} would lose its only synchronised replica (master) if node #{node_name} is stopped" - end - end end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_quorum_critical_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_quorum_critical_command.ex index 4b1ad529e835..665f80ddff3e 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_quorum_critical_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_quorum_critical_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsQuorumCriticalCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/delete_member_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/delete_member_command.ex index ef3eb5cc7a24..c891f1ac50df 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/delete_member_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/delete_member_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.DeleteMemberCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/grow_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/grow_command.ex index f6ab616a62b0..36a4a084f8e4 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/grow_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/grow_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.GrowCommand do alias RabbitMQ.CLI.Core.{DocGuide, Validators} @@ -10,12 +10,14 @@ defmodule RabbitMQ.CLI.Queues.Commands.GrowCommand do @behaviour RabbitMQ.CLI.CommandBehaviour - defp default_opts, do: %{vhost_pattern: ".*", queue_pattern: ".*", errors_only: false} + defp default_opts, + do: %{vhost_pattern: ".*", queue_pattern: ".*", membership: "promotable", errors_only: false} def switches(), do: [ vhost_pattern: :string, queue_pattern: :string, + membership: :string, errors_only: :boolean ] @@ -31,17 +33,21 @@ defmodule RabbitMQ.CLI.Queues.Commands.GrowCommand do {:validation_failure, :too_many_args} end - def validate([_, s], _) do - case s do - "all" -> - :ok + def validate([_, s], _) + when not (s == "all" or + s == "even") do + {:validation_failure, "strategy '#{s}' is not recognised."} + end - "even" -> - :ok + def validate(_, %{membership: m}) + when not (m == "promotable" or + m == "non_voter" or + m == "voter") do + {:validation_failure, "voter status '#{m}' is not recognised."} + end - _ -> - {:validation_failure, "strategy '#{s}' is not recognised."} - end + def validate(_, _) do + :ok end def validate_execution_environment(args, opts) do @@ -58,14 +64,18 @@ defmodule RabbitMQ.CLI.Queues.Commands.GrowCommand do node: node_name, vhost_pattern: vhost_pat, queue_pattern: queue_pat, + membership: membership, errors_only: errors_only }) do - case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :grow, [ - to_atom(node), - vhost_pat, - queue_pat, - to_atom(strategy) - ]) do + args = [to_atom(node), vhost_pat, queue_pat, to_atom(strategy)] + + args = + case to_atom(membership) do + :promotable -> args + other -> args ++ [other] + end + + case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :grow, args) do {:error, _} = error -> error @@ -97,7 +107,8 @@ defmodule RabbitMQ.CLI.Queues.Commands.GrowCommand do def formatter(), do: RabbitMQ.CLI.Formatters.Table def usage, - do: "grow [--vhost-pattern ] [--queue-pattern ]" + do: + "grow [--vhost-pattern ] [--queue-pattern ] [--membership ]" def usage_additional do [ @@ -108,6 +119,7 @@ defmodule RabbitMQ.CLI.Queues.Commands.GrowCommand do ], ["--queue-pattern ", "regular expression to match queue names"], ["--vhost-pattern ", "regular expression to match virtual host names"], + ["--membership ", "add a promotable non-voter (default) or full voter"], ["--errors-only", "only list queues which reported an error"] ] end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/list_operator_policies_with_classic_queue_mirroring_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/list_operator_policies_with_classic_queue_mirroring_command.ex new file mode 100644 index 000000000000..a801afb2a8f7 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/list_operator_policies_with_classic_queue_mirroring_command.ex @@ -0,0 +1,39 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
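The grow command above accepts the same --membership values as add_member; a sketch of how the option changes the argument list passed over RPC (node names and patterns are illustrative):

node_name = :rabbit@existing_node                      # CLI target node
base_args = [:rabbit@new_node, ".*", ".*", :even]      # --membership promotable (default): no extra argument
voter_args = base_args ++ [:voter]                     # --membership voter: membership atom appended
:rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :grow, voter_args)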
+ +defmodule RabbitMQ.CLI.Queues.Commands.ListOperatorPoliciesWithClassicQueueMirroringCommand do + @behaviour RabbitMQ.CLI.CommandBehaviour + + def scopes(), do: [:diagnostics, :queues] + use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout + use RabbitMQ.CLI.Core.MergesNoDefaults + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + + def run([], %{node: node_name, timeout: timeout}) do + :rabbit_misc.rpc_call( + node_name, + :rabbit_mirror_queue_misc, + :list_operator_policies_with_classic_queue_mirroring_for_cli, + [], + timeout + ) + end + + use RabbitMQ.CLI.DefaultOutput + + def formatter(), do: RabbitMQ.CLI.Formatters.Table + + def usage, do: "list_operator_policies_with_classic_queue_mirroring [--no-table-headers]" + + def help_section(), do: :observability_and_health_checks + + def description() do + "List all operator policies that enable classic queue mirroring" + end + + def banner(_, _), do: "Listing operator policies with classic queue mirroring ..." +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/list_policies_with_classic_queue_mirroring_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/list_policies_with_classic_queue_mirroring_command.ex new file mode 100644 index 000000000000..b75eeba91e46 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/list_policies_with_classic_queue_mirroring_command.ex @@ -0,0 +1,39 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule RabbitMQ.CLI.Queues.Commands.ListPoliciesWithClassicQueueMirroringCommand do + @behaviour RabbitMQ.CLI.CommandBehaviour + + def scopes(), do: [:diagnostics, :queues] + use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout + use RabbitMQ.CLI.Core.MergesNoDefaults + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + + def run([], %{node: node_name, timeout: timeout}) do + :rabbit_misc.rpc_call( + node_name, + :rabbit_mirror_queue_misc, + :list_policies_with_classic_queue_mirroring_for_cli, + [], + timeout + ) + end + + use RabbitMQ.CLI.DefaultOutput + + def formatter(), do: RabbitMQ.CLI.Formatters.Table + + def usage, do: "list_policies_with_classic_queue_mirroring [--no-table-headers]" + + def help_section(), do: :observability_and_health_checks + + def description() do + "List all policies that enable classic queue mirroring" + end + + def banner(_, _), do: "Listing policies with classic queue mirroring ..." +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/peek_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/peek_command.ex index f8ea0b95e20d..db64845ee82b 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/peek_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/peek_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Queues.Commands.PeekCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/quorum_status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/quorum_status_command.ex index 58f1fd543cc7..4bfda4b51507 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/quorum_status_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/quorum_status_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.QuorumStatusCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/rebalance_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/rebalance_command.ex index 9c2444472c57..e9312df0b6a6 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/rebalance_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/rebalance_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do alias RabbitMQ.CLI.Core.DocGuide @@ -12,7 +12,6 @@ defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do @known_types [ "all", - "classic", "quorum", "stream" ] @@ -45,7 +44,7 @@ defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do :ok false -> - {:error, "type #{type} is not supported. Try one of all, classic, quorum, stream."} + {:error, "type #{type} is not supported. Try one of all, quorum, stream."} end end @@ -58,11 +57,11 @@ defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do def usage, do: - "rebalance < all | classic | quorum | stream > [--vhost-pattern ] [--queue-pattern ]" + "rebalance < all | quorum | stream > [--vhost-pattern ] [--queue-pattern ]" def usage_additional do [ - ["", "queue type, must be one of: all, classic, quorum, stream"], + ["", "queue type, must be one of: all, quorum, stream"], ["--queue-pattern ", "regular expression to match queue names"], ["--vhost-pattern ", "regular expression to match virtual host names"] ] @@ -83,10 +82,6 @@ defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do "Re-balancing leaders of all replicated queues..." end - def banner([:classic], _) do - "Re-balancing leaders of replicated (mirrored, non-exclusive) classic queues..." - end - def banner([:quorum], _) do "Re-balancing leaders of quorum queues..." end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/reclaim_quorum_memory_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/reclaim_quorum_memory_command.ex index 03e7457f1393..bb4330069cec 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/reclaim_quorum_memory_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/reclaim_quorum_memory_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.ReclaimQuorumMemoryCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/shrink_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/shrink_command.ex index 85cf539db37a..22e595a986e7 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/shrink_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/shrink_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.ShrinkCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex index ec065e6eaf51..9c9c03a748ba 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Streams.Commands.AddReplicaCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/coordinator_status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/coordinator_status_command.ex new file mode 100644 index 000000000000..f111a1d5d248 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/coordinator_status_command.ex @@ -0,0 +1,52 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
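The coordinator_status command added below calls :rabbit_stream_coordinator.status/0 on the target node and renders the result with the PrettyTable formatter. A sketch of invoking it directly (the node name is illustrative):

alias RabbitMQ.CLI.Queues.Commands.CoordinatorStatusCommand

CoordinatorStatusCommand.run([], %{node: :rabbit@host1})
# => whatever :rabbit_stream_coordinator.status/0 reports for the coordinator members, or
# => {:error, "Cannot get coordinator status as coordinator not started or unavailable"}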
+ +defmodule RabbitMQ.CLI.Queues.Commands.CoordinatorStatusCommand do + alias RabbitMQ.CLI.Core.DocGuide + + @behaviour RabbitMQ.CLI.CommandBehaviour + def scopes(), do: [:diagnostics, :streams] + + def merge_defaults(args, opts), do: {args, opts} + + use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments + use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + + def run([] = _args, %{node: node_name}) do + case :rabbit_misc.rpc_call(node_name, :rabbit_stream_coordinator, :status, []) do + {:error, :coordinator_not_started_or_available} -> + {:error, "Cannot get coordinator status as coordinator not started or unavailable"} + + other -> + other + end + end + + use RabbitMQ.CLI.DefaultOutput + + def formatter(), do: RabbitMQ.CLI.Formatters.PrettyTable + + def usage() do + "coordinator_status" + end + + def usage_additional do + [] + end + + def usage_doc_guides() do + [ + DocGuide.streams() + ] + end + + def help_section(), do: :observability_and_health_checks + + def description(), do: "Displays raft status of the stream coordinator" + + def banner([], %{node: _node_name}), + do: "Status of stream coordinator ..." +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/delete_replica_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/delete_replica_command.ex index f0064834f8cd..d5d23b21ef7d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/delete_replica_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/delete_replica_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Streams.Commands.DeleteReplicaCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/restart_stream_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/restart_stream_command.ex index 2ea682c05d04..f8fd76c29345 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/restart_stream_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/restart_stream_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Streams.Commands.RestartStreamCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/set_stream_retention_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/set_stream_retention_policy_command.ex index 319709516ae4..efd97a9f79f7 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/set_stream_retention_policy_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/set_stream_retention_policy_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. 
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Streams.Commands.SetStreamRetentionPolicyCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/stream_status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/stream_status_command.ex index 4011525a045d..31b9ec35a176 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/stream_status_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/stream_status_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Streams.Commands.StreamStatusCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/time_unit.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/time_unit.ex index 96d86b2c1081..cf66197dbc1d 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/time_unit.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/time_unit.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.TimeUnit do require MapSet diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_quorum_plus_one_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_quorum_plus_one_command.ex index e3d99b2d3412..03a61b83f506 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_quorum_plus_one_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_quorum_plus_one_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineQuorumPlusOneCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_synchronized_mirror_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_synchronized_mirror_command.ex deleted file mode 100644 index 306fde2e4f4c..000000000000 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_synchronized_mirror_command.ex +++ /dev/null @@ -1,113 +0,0 @@ -## This Source Code Form is subject to the terms of the Mozilla Public -## License, v. 2.0. If a copy of the MPL was not distributed with this -## file, You can obtain one at https://mozilla.org/MPL/2.0/. -## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
- -defmodule RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineSynchronizedMirrorCommand do - alias RabbitMQ.CLI.Core.DocGuide - import RabbitMQ.CLI.Core.Config, only: [output_less?: 1] - - @behaviour RabbitMQ.CLI.CommandBehaviour - - @default_timeout 120_000 - - use RabbitMQ.CLI.Core.RequiresRabbitAppRunning - use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments - - def merge_defaults(args, opts) do - timeout = - case opts[:timeout] do - nil -> @default_timeout - :infinity -> @default_timeout - val -> val - end - - {args, Map.put(opts, :timeout, timeout)} - end - - def run([], %{node: node_name, timeout: timeout}) do - rpc_timeout = timeout + 500 - - case :rabbit_misc.rpc_call(node_name, :rabbit_nodes, :is_single_node_cluster, [], rpc_timeout) do - # if target node is the only one in the cluster, the command makes little sense - # and false positives can be misleading - true -> - {:ok, :single_node_cluster} - - false -> - case :rabbit_misc.rpc_call( - node_name, - :rabbit_upgrade_preparation, - :await_online_synchronised_mirrors, - [timeout], - rpc_timeout - ) do - {:error, _} = err -> - err - - {:error, _, _} = err -> - err - - {:badrpc, _} = err -> - err - - true -> - :ok - - false -> - {:error, - "time is up, no synchronised mirror came online for at least some classic mirrored queues"} - end - - other -> - other - end - end - - def output({:ok, :single_node_cluster}, %{formatter: "json"}) do - {:ok, - %{ - "result" => "ok", - "message" => - "Target node seems to be the only one in a single node cluster, the check does not apply" - }} - end - - def output({:error, msg}, %{node: node_name, formatter: "json"}) do - {:error, %{"result" => "error", "node" => node_name, "message" => msg}} - end - - def output({:ok, :single_node_cluster}, opts) do - case output_less?(opts) do - true -> - :ok - - false -> - {:ok, - "Target node seems to be the only one in a single node cluster, the command does not apply"} - end - end - - use RabbitMQ.CLI.DefaultOutput - - def usage, do: "await_online_synchronized_mirror" - - def usage_doc_guides() do - [ - DocGuide.mirroring(), - DocGuide.upgrade() - ] - end - - def help_section, do: :upgrade - - def description() do - "Waits for all classic mirrored queues hosted on the target node to have at least one synchronized mirror online. " <> - "This makes sure that if target node is shut down, there will be an up-to-date mirror to promote." - end - - def banner([], %{timeout: timeout}) do - "Will wait for a synchronised mirror be online for all classic mirrored queues for #{round(timeout / 1000)} seconds..." - end -end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/drain_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/drain_command.ex index 31a2d15b0b0a..96b865dd8ffa 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/drain_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/drain_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Upgrade.Commands.DrainCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/post_upgrade_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/post_upgrade_command.ex index 89913cb48cb3..e495f9ae81e0 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/post_upgrade_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/post_upgrade_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Upgrade.Commands.PostUpgradeCommand do alias RabbitMQ.CLI.Core.DocGuide diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/revive_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/revive_command.ex index af95294270c3..ae3f3c6277e9 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/revive_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/revive_command.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Upgrade.Commands.ReviveCommand do @moduledoc """ diff --git a/deps/rabbitmq_cli/lib/rabbitmqctl.ex b/deps/rabbitmq_cli/lib/rabbitmqctl.ex index fc9fd2321f39..d91a7403d50c 100644 --- a/deps/rabbitmq_cli/lib/rabbitmqctl.ex +++ b/deps/rabbitmq_cli/lib/rabbitmqctl.ex @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQCtl do alias RabbitMQ.CLI.Core.{ diff --git a/deps/rabbitmq_cli/mix.exs b/deps/rabbitmq_cli/mix.exs index 5f08edcdc096..e810ce44bb3b 100644 --- a/deps/rabbitmq_cli/mix.exs +++ b/deps/rabbitmq_cli/mix.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQCtl.MixfileBase do use Mix.Project @@ -10,11 +10,16 @@ defmodule RabbitMQCtl.MixfileBase do def project do [ app: :rabbitmqctl, - version: "3.13.0-dev", - elixir: ">= 1.13.4 and < 1.16.0", + version: "4.0.0-dev", + elixir: ">= 1.13.4 and < 1.18.0", build_embedded: Mix.env() == :prod, start_permanent: Mix.env() == :prod, - escript: [main_module: RabbitMQCtl, emu_args: "-hidden", path: "escript/rabbitmqctl"], + escript: [ + main_module: RabbitMQCtl, + emu_args: "-hidden", + path: "escript/rabbitmqctl" + ], + prune_code_paths: false, deps: deps(Mix.env()), aliases: aliases(), xref: [ @@ -38,7 +43,6 @@ defmodule RabbitMQCtl.MixfileBase do :rabbit_log, :rabbit_misc, :rabbit_mnesia, - :rabbit_mnesia_rename, :rabbit_nodes_common, :rabbit_pbe, :rabbit_plugins, @@ -172,10 +176,6 @@ defmodule RabbitMQCtl.MixfileBase do :amqp, path: Path.join(deps_dir, "amqp") }, - { - :dialyxir, - path: Path.join(deps_dir, "dialyxir"), runtime: false - }, { :rabbit, path: Path.join(deps_dir, "rabbit"), diff --git a/deps/rabbitmq_cli/rabbitmqctl.bzl b/deps/rabbitmq_cli/rabbitmqctl.bzl index 2fc4d9790815..fd8e0c4aec1e 100644 --- a/deps/rabbitmq_cli/rabbitmqctl.bzl +++ b/deps/rabbitmq_cli/rabbitmqctl.bzl @@ -1,4 +1,10 @@ load("@bazel_skylib//lib:shell.bzl", "shell") +load( + "@rules_elixir//private:elixir_toolchain.bzl", + "elixir_dirs", + "erlang_dirs", + "maybe_install_erlang", +) load( "@rules_erlang//:erlang_app_info.bzl", "ErlangAppInfo", @@ -12,12 +18,6 @@ load( "@rules_erlang//private:util.bzl", "additional_file_dest_relative_path", ) -load( - "//bazel/elixir:elixir_toolchain.bzl", - "elixir_dirs", - "erlang_dirs", - "maybe_install_erlang", -) ElixirAppInfo = provider( doc = "Compiled Elixir Application", @@ -34,6 +34,19 @@ ElixirAppInfo = provider( }, ) +def _copy(ctx, src, dst): + ctx.actions.run_shell( + inputs = [src], + outputs = [dst], + command = """set -euo pipefail + +cp -RL "{src}" "{dst}" +""".format( + src = src.path, + dst = dst.path, + ), + ) + def deps_dir_contents(ctx, deps, dir): files = [] for dep in deps: @@ -51,20 +64,18 @@ def deps_dir_contents(ctx, deps, dir): lib_info.app_name, rp, )) - ctx.actions.symlink( - output = f, - target_file = src, - ) - files.extend([f, src]) + _copy(ctx, src, f) + files.append(f) for beam in lib_info.beam: if not beam.is_directory: f = ctx.actions.declare_file(path_join( - dir, lib_info.app_name, "ebin", beam.basename)) - ctx.actions.symlink( - output = f, - target_file = beam, - ) - files.extend([f, beam]) + dir, + lib_info.app_name, + "ebin", + beam.basename, + )) + _copy(ctx, beam, f) + files.append(f) else: fail("unexpected directory in", lib_info) return files @@ -144,8 +155,8 @@ done cp escript/rabbitmqctl ${{ABS_ESCRIPT_PATH}} -cp _build/${{MIX_ENV}}/lib/rabbitmqctl/ebin/* ${{ABS_EBIN_DIR}} -cp _build/${{MIX_ENV}}/lib/rabbitmqctl/consolidated/* ${{ABS_CONSOLIDATED_DIR}} +cp -RL _build/${{MIX_ENV}}/lib/rabbitmqctl/ebin/* ${{ABS_EBIN_DIR}} +cp -RL _build/${{MIX_ENV}}/lib/rabbitmqctl/consolidated/* ${{ABS_CONSOLIDATED_DIR}} # remove symlinks from the _build directory since it # is not used, and bazel does not allow them @@ -238,7 +249,7 @@ rabbitmqctl_private = rule( "source_deps": attr.label_keyed_string_dict(), }, toolchains = [ - "//bazel/elixir:toolchain_type", + "@rules_elixir//:toolchain_type", ], provides = [ElixirAppInfo], executable = True, @@ -362,7 +373,7 @@ elixir_app_to_erlang_app = rule( ), }, toolchains = [ - "//bazel/elixir:toolchain_type", + "@rules_elixir//:toolchain_type", ], provides = [ErlangAppInfo], ) @@ -396,7 
+407,7 @@ def rabbitmqctl( elixir_app_to_erlang_app( name = "elixir", - elixir_as_app = Label("//bazel/elixir:erlang_app"), + elixir_as_app = Label("@rules_elixir//elixir:elixir"), elixir_app = ":" + name, mode = "elixir", visibility = visibility, @@ -404,7 +415,7 @@ def rabbitmqctl( elixir_app_to_erlang_app( name = "erlang_app", - elixir_as_app = Label("//bazel/elixir:erlang_app"), + elixir_as_app = Label("@rules_elixir//elixir:elixir"), elixir_app = ":" + name, mode = "app", visibility = visibility, diff --git a/deps/rabbitmq_cli/rabbitmqctl_check_formatted.bzl b/deps/rabbitmq_cli/rabbitmqctl_check_formatted.bzl deleted file mode 100644 index b6f166a82970..000000000000 --- a/deps/rabbitmq_cli/rabbitmqctl_check_formatted.bzl +++ /dev/null @@ -1,135 +0,0 @@ -load( - "@rules_erlang//:util.bzl", - "path_join", - "windows_path", -) -load( - "//bazel/elixir:elixir_toolchain.bzl", - "elixir_dirs", - "erlang_dirs", - "maybe_install_erlang", -) - -def _impl(ctx): - (erlang_home, _, erlang_runfiles) = erlang_dirs(ctx) - (elixir_home, elixir_runfiles) = elixir_dirs(ctx, short_path = True) - - package_dir = path_join( - ctx.label.workspace_root, - ctx.label.package, - ) - - if not ctx.attr.is_windows: - output = ctx.actions.declare_file(ctx.label.name) - script = """set -euo pipefail - -{maybe_install_erlang} - -if [[ "{elixir_home}" == /* ]]; then - ABS_ELIXIR_HOME="{elixir_home}" -else - ABS_ELIXIR_HOME=$PWD/{elixir_home} -fi - -export PATH="$ABS_ELIXIR_HOME"/bin:"{erlang_home}"/bin:${{PATH}} - -export LANG="en_US.UTF-8" -export LC_ALL="en_US.UTF-8" - -INITIAL_DIR="$(pwd)" - -if [ ! -f ${{INITIAL_DIR}}/{package_dir}/test/test_helper.exs ]; then - echo "test_helper.exs cannot be found. 'bazel clean' might fix this." - exit 1 -fi - -cp -r ${{INITIAL_DIR}}/{package_dir}/config ${{TEST_UNDECLARED_OUTPUTS_DIR}} -cp -r ${{INITIAL_DIR}}/{package_dir}/lib ${{TEST_UNDECLARED_OUTPUTS_DIR}} -cp -r ${{INITIAL_DIR}}/{package_dir}/test ${{TEST_UNDECLARED_OUTPUTS_DIR}} -cp ${{INITIAL_DIR}}/{package_dir}/mix.exs ${{TEST_UNDECLARED_OUTPUTS_DIR}} -cp ${{INITIAL_DIR}}/{package_dir}/.formatter.exs ${{TEST_UNDECLARED_OUTPUTS_DIR}} - -cd ${{TEST_UNDECLARED_OUTPUTS_DIR}} - -export IS_BAZEL=true -export HOME=${{PWD}} -export MIX_ENV=test -export ERL_COMPILER_OPTIONS=deterministic -set -x -"${{ABS_ELIXIR_HOME}}"/bin/mix format --check-formatted -""".format( - maybe_install_erlang = maybe_install_erlang(ctx, short_path = True), - erlang_home = erlang_home, - elixir_home = elixir_home, - package_dir = package_dir, - ) - else: - output = ctx.actions.declare_file(ctx.label.name + ".bat") - script = """@echo off -:: set LANG="en_US.UTF-8" -:: set LC_ALL="en_US.UTF-8" - -set PATH="{elixir_home}\\bin";"{erlang_home}\\bin";%PATH% - -set OUTPUTS_DIR=%TEST_UNDECLARED_OUTPUTS_DIR:/=\\% - -:: robocopy exits non-zero when files are copied successfully -:: https://social.msdn.microsoft.com/Forums/en-US/d599833c-dcea-46f5-85e9-b1f028a0fefe/robocopy-exits-with-error-code-1?forum=tfsbuild -robocopy {package_dir}\\config %OUTPUTS_DIR%\\config /E /NFL /NDL /NJH /NJS /nc /ns /np -robocopy {package_dir}\\lib %OUTPUTS_DIR%\\lib /E /NFL /NDL /NJH /NJS /nc /ns /np -robocopy {package_dir}\\test %OUTPUTS_DIR%\\test /E /NFL /NDL /NJH /NJS /nc /ns /np -copy {package_dir}\\mix.exs %OUTPUTS_DIR%\\mix.exs || goto :error -copy {package_dir}\\.formatter.exs %OUTPUTS_DIR%\\.formatter.exs || goto :error - -cd %OUTPUTS_DIR% || goto :error - -set ERL_COMPILER_OPTIONS=deterministic -set MIX_ENV=test -"{elixir_home}\\bin\\mix" format --check-formatted || goto 
:error -goto :EOF -:error -exit /b 1 -""".format( - erlang_home = windows_path(erlang_home), - elixir_home = windows_path(elixir_home), - package_dir = windows_path(ctx.label.package), - ) - - ctx.actions.write( - output = output, - content = script, - ) - - runfiles = ctx.runfiles( - files = ctx.files.srcs + ctx.files.data, - ).merge_all([ - erlang_runfiles, - elixir_runfiles, - ]) - - return [DefaultInfo( - runfiles = runfiles, - executable = output, - )] - -rabbitmqctl_check_formatted_private_test = rule( - implementation = _impl, - attrs = { - "is_windows": attr.bool(mandatory = True), - "srcs": attr.label_list(allow_files = [".ex", ".exs"]), - "data": attr.label_list(allow_files = True), - }, - toolchains = [ - "//bazel/elixir:toolchain_type", - ], - test = True, -) - -def rabbitmqctl_check_formatted_test(**kwargs): - rabbitmqctl_check_formatted_private_test( - is_windows = select({ - "@bazel_tools//src/conditions:host_windows": True, - "//conditions:default": False, - }), - **kwargs - ) diff --git a/deps/rabbitmq_cli/rabbitmqctl_test.bzl b/deps/rabbitmq_cli/rabbitmqctl_test.bzl deleted file mode 100644 index a4f05a0dea0e..000000000000 --- a/deps/rabbitmq_cli/rabbitmqctl_test.bzl +++ /dev/null @@ -1,237 +0,0 @@ -load("@bazel_skylib//lib:shell.bzl", "shell") -load( - "@rules_erlang//:erlang_app_info.bzl", - "ErlangAppInfo", -) -load( - "@rules_erlang//:util.bzl", - "path_join", - "windows_path", -) -load( - "@rules_erlang//private:util.bzl", - "additional_file_dest_relative_path", -) -load( - "//bazel/elixir:elixir_toolchain.bzl", - "elixir_dirs", - "erlang_dirs", - "maybe_install_erlang", -) -load( - ":rabbitmqctl.bzl", - "deps_dir_contents", -) - -def _impl(ctx): - (erlang_home, _, erlang_runfiles) = erlang_dirs(ctx) - (elixir_home, elixir_runfiles) = elixir_dirs(ctx, short_path = True) - - deps_dir = ctx.label.name + "_deps" - - deps_dir_files = deps_dir_contents( - ctx, - ctx.attr.deps, - deps_dir, - ) - - for dep, app_name in ctx.attr.source_deps.items(): - for src in dep.files.to_list(): - if not src.is_directory: - rp = additional_file_dest_relative_path(dep.label, src) - f = ctx.actions.declare_file(path_join( - deps_dir, - app_name, - rp, - )) - ctx.actions.symlink( - output = f, - target_file = src, - ) - deps_dir_files.append(f) - - package_dir = path_join( - ctx.label.workspace_root, - ctx.label.package, - ) - - precompiled_deps = " ".join([ - dep[ErlangAppInfo].app_name - for dep in ctx.attr.deps - ]) - - if not ctx.attr.is_windows: - output = ctx.actions.declare_file(ctx.label.name) - script = """set -euo pipefail - -{maybe_install_erlang} - -if [[ "{elixir_home}" == /* ]]; then - ABS_ELIXIR_HOME="{elixir_home}" -else - ABS_ELIXIR_HOME=$PWD/{elixir_home} -fi - -export PATH="$ABS_ELIXIR_HOME"/bin:"{erlang_home}"/bin:${{PATH}} - -export LANG="en_US.UTF-8" -export LC_ALL="en_US.UTF-8" - -INITIAL_DIR="$(pwd)" - -if [ ! -f ${{INITIAL_DIR}}/{package_dir}/test/test_helper.exs ]; then - echo "test_helper.exs cannot be found. 'bazel clean' might fix this." 
- exit 1 -fi - -cp -r ${{INITIAL_DIR}}/{package_dir}/config ${{TEST_UNDECLARED_OUTPUTS_DIR}} -cp -r ${{INITIAL_DIR}}/{package_dir}/lib ${{TEST_UNDECLARED_OUTPUTS_DIR}} -cp -r ${{INITIAL_DIR}}/{package_dir}/test ${{TEST_UNDECLARED_OUTPUTS_DIR}} -cp ${{INITIAL_DIR}}/{package_dir}/mix.exs ${{TEST_UNDECLARED_OUTPUTS_DIR}} -cp ${{INITIAL_DIR}}/{package_dir}/.formatter.exs ${{TEST_UNDECLARED_OUTPUTS_DIR}} - -cd ${{TEST_UNDECLARED_OUTPUTS_DIR}} - -export IS_BAZEL=true -export HOME=${{PWD}} -export DEPS_DIR=$TEST_SRCDIR/$TEST_WORKSPACE/{package_dir}/{deps_dir} -export MIX_ENV=test -export ERL_COMPILER_OPTIONS=deterministic -for archive in {archives}; do - "${{ABS_ELIXIR_HOME}}"/bin/mix archive.install --force $INITIAL_DIR/$archive -done -"${{ABS_ELIXIR_HOME}}"/bin/mix deps.compile -"${{ABS_ELIXIR_HOME}}"/bin/mix compile - -export TEST_TMPDIR=${{TEST_UNDECLARED_OUTPUTS_DIR}} - -# we need a running broker with certain plugins for this to pass -trap 'catch $?' EXIT -catch() {{ - pid=$(cat ${{TEST_TMPDIR}}/*/*.pid) - kill -TERM "${{pid}}" -}} -cd ${{INITIAL_DIR}} -./{rabbitmq_run_cmd} start-background-broker -cd ${{TEST_UNDECLARED_OUTPUTS_DIR}} - -# The test cases will need to be able to load code from the deps -# directly, so we set ERL_LIBS -export ERL_LIBS=$DEPS_DIR - -# run the actual tests -set +u -set -x -"${{ABS_ELIXIR_HOME}}"/bin/mix test --trace --max-failures 1 ${{TEST_FILE}} -""".format( - maybe_install_erlang = maybe_install_erlang(ctx, short_path = True), - erlang_home = erlang_home, - elixir_home = elixir_home, - package_dir = package_dir, - deps_dir = deps_dir, - archives = " ".join([shell.quote(a.short_path) for a in ctx.files.archives]), - precompiled_deps = precompiled_deps, - rabbitmq_run_cmd = ctx.attr.rabbitmq_run[DefaultInfo].files_to_run.executable.short_path, - ) - else: - output = ctx.actions.declare_file(ctx.label.name + ".bat") - script = """@echo off -echo Erlang Version: {erlang_version} - -:: set LANG="en_US.UTF-8" -:: set LC_ALL="en_US.UTF-8" - -set PATH="{elixir_home}\\bin";"{erlang_home}\\bin";%PATH% - -set OUTPUTS_DIR=%TEST_UNDECLARED_OUTPUTS_DIR:/=\\% - -:: robocopy exits non-zero when files are copied successfully -:: https://social.msdn.microsoft.com/Forums/en-US/d599833c-dcea-46f5-85e9-b1f028a0fefe/robocopy-exits-with-error-code-1?forum=tfsbuild -robocopy {package_dir}\\config %OUTPUTS_DIR%\\config /E /NFL /NDL /NJH /NJS /nc /ns /np -robocopy {package_dir}\\lib %OUTPUTS_DIR%\\lib /E /NFL /NDL /NJH /NJS /nc /ns /np -robocopy {package_dir}\\test %OUTPUTS_DIR%\\test /E /NFL /NDL /NJH /NJS /nc /ns /np -copy {package_dir}\\mix.exs %OUTPUTS_DIR%\\mix.exs || goto :error -copy {package_dir}\\.formatter.exs %OUTPUTS_DIR%\\.formatter.exs || goto :error - -cd %OUTPUTS_DIR% || goto :error - -set DEPS_DIR=%TEST_SRCDIR%/%TEST_WORKSPACE%/{package_dir}/{deps_dir} -set DEPS_DIR=%DEPS_DIR:/=\\% -set ERL_COMPILER_OPTIONS=deterministic -set MIX_ENV=test -for %%a in ({archives}) do ( - set ARCH=%TEST_SRCDIR%/%TEST_WORKSPACE%/%%a - set ARCH=%ARCH:/=\\% - "{elixir_home}\\bin\\mix" archive.install --force %ARCH% || goto :error -) -"{elixir_home}\\bin\\mix" deps.compile || goto :error -"{elixir_home}\\bin\\mix" compile || goto :error - -REM need to start the background broker here -set TEST_TEMPDIR=%OUTPUTS_DIR% - -set ERL_LIBS=%DEPS_DIR% - -"{elixir_home}\\bin\\mix" test --trace --max-failures 1 || goto :error -goto :EOF -:error -exit /b 1 -""".format( - erlang_home = windows_path(erlang_home), - elixir_home = windows_path(elixir_home), - package_dir = windows_path(ctx.label.package), - 
deps_dir = deps_dir, - archives = " ".join([shell.quote(a.short_path) for a in ctx.files.archives]), - precompiled_deps = precompiled_deps, - rabbitmq_run_cmd = ctx.attr.rabbitmq_run[DefaultInfo].files_to_run.executable.short_path, - ) - - ctx.actions.write( - output = output, - content = script, - ) - - runfiles = ctx.runfiles( - files = ctx.files.srcs + ctx.files.data + ctx.files.archives, - transitive_files = depset(deps_dir_files), - ).merge_all([ - erlang_runfiles, - elixir_runfiles, - ctx.attr.rabbitmq_run[DefaultInfo].default_runfiles, - ]) - - return [DefaultInfo( - runfiles = runfiles, - executable = output, - )] - -rabbitmqctl_private_test = rule( - implementation = _impl, - attrs = { - "is_windows": attr.bool(mandatory = True), - "srcs": attr.label_list(allow_files = [".ex", ".exs"]), - "data": attr.label_list(allow_files = True), - "deps": attr.label_list(providers = [ErlangAppInfo]), - "archives": attr.label_list( - allow_files = [".ez"], - ), - "source_deps": attr.label_keyed_string_dict(), - "rabbitmq_run": attr.label( - executable = True, - cfg = "target", - ), - }, - toolchains = [ - "//bazel/elixir:toolchain_type", - ], - test = True, -) - -def rabbitmqctl_test(**kwargs): - rabbitmqctl_private_test( - is_windows = select({ - "@bazel_tools//src/conditions:host_windows": True, - "//conditions:default": False, - }), - **kwargs - ) diff --git a/deps/rabbitmq_cli/test/core/args_processing_test.exs b/deps/rabbitmq_cli/test/core/args_processing_test.exs index 2cfe0c3cbe07..45d1ddef893f 100644 --- a/deps/rabbitmq_cli/test/core/args_processing_test.exs +++ b/deps/rabbitmq_cli/test/core/args_processing_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ArgsProcessingTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/core/auto_complete_test.exs b/deps/rabbitmq_cli/test/core/auto_complete_test.exs index 9a3801f83d45..e57501df9a1f 100644 --- a/deps/rabbitmq_cli/test/core/auto_complete_test.exs +++ b/deps/rabbitmq_cli/test/core/auto_complete_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule AutoCompleteTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/core/command_modules_test.exs b/deps/rabbitmq_cli/test/core/command_modules_test.exs index f9fdad028a12..938e9e3a2cc8 100644 --- a/deps/rabbitmq_cli/test/core/command_modules_test.exs +++ b/deps/rabbitmq_cli/test/core/command_modules_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule CommandModulesTest do use ExUnit.Case, async: false @@ -20,6 +20,9 @@ defmodule CommandModulesTest do end test "command modules has existing commands" do + # true = Enum.take(RabbitMQ.CLI.Core.CommandModules.ctl_modules(), -15) + # true = Enum.find_index(RabbitMQ.CLI.Core.CommandModules.ctl_modules(), fn x -> x == RabbitMQ.CLI.Ctl.Commands.DuckCommand end) + assert @subject.load_commands(:all, %{})["duck"] == RabbitMQ.CLI.Ctl.Commands.DuckCommand end diff --git a/deps/rabbitmq_cli/test/core/default_output_test.exs b/deps/rabbitmq_cli/test/core/default_output_test.exs index 12ee0be26f2a..9c103a86a076 100644 --- a/deps/rabbitmq_cli/test/core/default_output_test.exs +++ b/deps/rabbitmq_cli/test/core/default_output_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule DefaultOutputTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/core/distribution_test.exs b/deps/rabbitmq_cli/test/core/distribution_test.exs index e7bfd3fe1e69..d14d574b8b09 100644 --- a/deps/rabbitmq_cli/test/core/distribution_test.exs +++ b/deps/rabbitmq_cli/test/core/distribution_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. alias RabbitMQ.CLI.Core.Distribution diff --git a/deps/rabbitmq_cli/test/core/helpers_test.exs b/deps/rabbitmq_cli/test/core/helpers_test.exs index a0ee0fa1d0ae..0130961a0385 100644 --- a/deps/rabbitmq_cli/test/core/helpers_test.exs +++ b/deps/rabbitmq_cli/test/core/helpers_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule HelpersTest do alias RabbitMQ.CLI.Core.{Config, Helpers} diff --git a/deps/rabbitmq_cli/test/core/information_unit_test.exs b/deps/rabbitmq_cli/test/core/information_unit_test.exs index 28f0b9cb0f82..7f0340208076 100644 --- a/deps/rabbitmq_cli/test/core/information_unit_test.exs +++ b/deps/rabbitmq_cli/test/core/information_unit_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule InformationUnitTest do use ExUnit.Case, async: true diff --git a/deps/rabbitmq_cli/test/core/json_stream_test.exs b/deps/rabbitmq_cli/test/core/json_stream_test.exs index 68bbcfe7b6ee..ccbe0c54b65f 100644 --- a/deps/rabbitmq_cli/test/core/json_stream_test.exs +++ b/deps/rabbitmq_cli/test/core/json_stream_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule JsonStreamTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/core/listeners_test.exs b/deps/rabbitmq_cli/test/core/listeners_test.exs index 63baef4941cd..b84534d1189d 100644 --- a/deps/rabbitmq_cli/test/core/listeners_test.exs +++ b/deps/rabbitmq_cli/test/core/listeners_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CoreListenersTest do use ExUnit.Case, async: true diff --git a/deps/rabbitmq_cli/test/core/node_name_test.exs b/deps/rabbitmq_cli/test/core/node_name_test.exs index ef5d6abfc04e..dcd8917d984e 100644 --- a/deps/rabbitmq_cli/test/core/node_name_test.exs +++ b/deps/rabbitmq_cli/test/core/node_name_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule NodeNameTest do use ExUnit.Case, async: true diff --git a/deps/rabbitmq_cli/test/core/os_pid_test.exs b/deps/rabbitmq_cli/test/core/os_pid_test.exs index 694ad6ec784a..243e5a33ce91 100644 --- a/deps/rabbitmq_cli/test/core/os_pid_test.exs +++ b/deps/rabbitmq_cli/test/core/os_pid_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule OsPidTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/core/parser_test.exs b/deps/rabbitmq_cli/test/core/parser_test.exs index 9f38be7229c6..f58303a1537e 100644 --- a/deps/rabbitmq_cli/test/core/parser_test.exs +++ b/deps/rabbitmq_cli/test/core/parser_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
## Mock command for command specific parser defmodule RabbitMQ.CLI.Seagull.Commands.HerringGullCommand do diff --git a/deps/rabbitmq_cli/test/core/table_formatter_test.exs b/deps/rabbitmq_cli/test/core/table_formatter_test.exs index 09d4ba3ab363..b5d059ffd5e1 100644 --- a/deps/rabbitmq_cli/test/core/table_formatter_test.exs +++ b/deps/rabbitmq_cli/test/core/table_formatter_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule TableFormatterTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/activate_disk_free_space_monitoring_command_test.exs b/deps/rabbitmq_cli/test/ctl/activate_disk_free_space_monitoring_command_test.exs index fdc7805bd8b4..f8f861a6cf79 100644 --- a/deps/rabbitmq_cli/test/ctl/activate_disk_free_space_monitoring_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/activate_disk_free_space_monitoring_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ActivateDiskFreeSpaceMonitoringCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/add_user_command_test.exs b/deps/rabbitmq_cli/test/ctl/add_user_command_test.exs index 33749447128c..f94b4138a02a 100644 --- a/deps/rabbitmq_cli/test/ctl/add_user_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/add_user_command_test.exs @@ -2,13 +2,15 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule AddUserCommandTest do use ExUnit.Case, async: false import TestHelper @command RabbitMQ.CLI.Ctl.Commands.AddUserCommand + @hash_password_command RabbitMQ.CLI.Ctl.Commands.HashPasswordCommand + @authenticate_user_command RabbitMQ.CLI.Ctl.Commands.AuthenticateUserCommand setup_all do RabbitMQ.CLI.Core.Distribution.start() @@ -18,7 +20,7 @@ defmodule AddUserCommandTest do setup context do on_exit(context, fn -> delete_user(context[:user]) end) - {:ok, opts: %{node: get_rabbit_hostname()}} + {:ok, opts: %{node: get_rabbit_hostname(), pre_hashed_password: false}} end test "validate: no positional arguments fails" do @@ -55,6 +57,17 @@ defmodule AddUserCommandTest do assert @command.validate([context[:user], context[:password]], context[:opts]) == :ok end + @tag user: "someone" + test "validate: pre-hashed with a non-Base64-encoded value returns an error", context do + hashed = "this is not a Base64-encoded value" + opts = Map.merge(context[:opts], %{pre_hashed_password: true}) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate([context[:user], hashed], opts) + ) + end + @tag user: "someone", password: "password" test "run: request to a non-existent node returns a badrpc", context do opts = %{node: :jake@thedog, timeout: 200} @@ -62,9 +75,30 @@ defmodule AddUserCommandTest do end @tag user: "someone", password: "password" - test "run: default case completes successfully", context do + test "run: happy path completes successfully", context do assert @command.run([context[:user], context[:password]], context[:opts]) == :ok assert list_users() |> Enum.count(fn record -> record[:user] == context[:user] end) == 1 + + assert @authenticate_user_command.run([context[:user], context[:password]], context[:opts]) + end + + @tag user: "someone" + test "run: a pre-hashed request to a non-existent node returns a badrpc", context do + opts = %{node: :jake@thedog, timeout: 200} + hashed = "BMT6cj/MsI+4UOBtsPPQWpQfk7ViRLj4VqpMTxu54FU3qa1G" + assert match?({:badrpc, _}, @command.run([context[:user], hashed], opts)) + end + + @tag user: "someone" + test "run: pre-hashed happy path completes successfully", context do + pwd = "guest10" + hashed = @hash_password_command.hash_password(pwd) + opts = Map.merge(%{pre_hashed_password: true}, context[:opts]) + + assert @command.run([context[:user], hashed], opts) == :ok + assert list_users() |> Enum.count(fn record -> record[:user] == context[:user] end) == 1 + + assert @authenticate_user_command.run([context[:user], pwd], opts) end @tag user: "someone", password: "password" diff --git a/deps/rabbitmq_cli/test/ctl/add_vhost_command_test.exs b/deps/rabbitmq_cli/test/ctl/add_vhost_command_test.exs index 3f1da1ef8945..4c0d29db881e 100644 --- a/deps/rabbitmq_cli/test/ctl/add_vhost_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/add_vhost_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule AddVhostCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/authenticate_user_command_test.exs b/deps/rabbitmq_cli/test/ctl/authenticate_user_command_test.exs index 00acf3010cbe..bfbb9aa13118 100644 --- a/deps/rabbitmq_cli/test/ctl/authenticate_user_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/authenticate_user_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule AuthenticateUserCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/autocomplete_command_test.exs b/deps/rabbitmq_cli/test/ctl/autocomplete_command_test.exs index afc3a1385b97..8bd19867e0dd 100644 --- a/deps/rabbitmq_cli/test/ctl/autocomplete_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/autocomplete_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule AutocompleteCommandTest do use ExUnit.Case, async: true diff --git a/deps/rabbitmq_cli/test/ctl/await_online_nodes_command_test.exs b/deps/rabbitmq_cli/test/ctl/await_online_nodes_command_test.exs index 44788782c1a8..de1756d4e1c4 100644 --- a/deps/rabbitmq_cli/test/ctl/await_online_nodes_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/await_online_nodes_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule AwaitOnlineNodesCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/await_startup_command_test.exs b/deps/rabbitmq_cli/test/ctl/await_startup_command_test.exs index 7b739ddac0f6..30af6b15345e 100644 --- a/deps/rabbitmq_cli/test/ctl/await_startup_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/await_startup_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -# Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +# Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule AwaitStartupCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/cancel_sync_command_test.exs b/deps/rabbitmq_cli/test/ctl/cancel_sync_command_test.exs deleted file mode 100644 index 5c67836cceb8..000000000000 --- a/deps/rabbitmq_cli/test/ctl/cancel_sync_command_test.exs +++ /dev/null @@ -1,65 +0,0 @@ -## This Source Code Form is subject to the terms of the Mozilla Public -## License, v. 2.0. 
If a copy of the MPL was not distributed with this -## file, You can obtain one at https://mozilla.org/MPL/2.0/. -## -## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved. - -defmodule CancelSyncQueueCommandTest do - use ExUnit.Case, async: false - import TestHelper - - @command RabbitMQ.CLI.Ctl.Commands.CancelSyncQueueCommand - - @vhost "/" - - setup_all do - RabbitMQ.CLI.Core.Distribution.start() - - start_rabbitmq_app() - - on_exit([], fn -> - start_rabbitmq_app() - end) - - :ok - end - - setup do - {:ok, - opts: %{ - node: get_rabbit_hostname(), - vhost: @vhost - }} - end - - test "validate: specifying no queue name is reported as an error", context do - assert @command.validate([], context[:opts]) == - {:validation_failure, :not_enough_args} - end - - test "validate: specifying two queue names is reported as an error", context do - assert @command.validate(["q1", "q2"], context[:opts]) == - {:validation_failure, :too_many_args} - end - - test "validate: specifying three queue names is reported as an error", context do - assert @command.validate(["q1", "q2", "q3"], context[:opts]) == - {:validation_failure, :too_many_args} - end - - test "validate: specifying one queue name succeeds", context do - assert @command.validate(["q1"], context[:opts]) == :ok - end - - test "run: request to a non-existent RabbitMQ node returns a nodedown" do - opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200} - assert match?({:badrpc, _}, @command.run(["q1"], opts)) - end - - test "banner", context do - s = @command.banner(["q1"], context[:opts]) - - assert s =~ ~r/Stopping synchronising queue/ - assert s =~ ~r/q1/ - end -end diff --git a/deps/rabbitmq_cli/test/ctl/change_cluster_node_type_command_test.exs b/deps/rabbitmq_cli/test/ctl/change_cluster_node_type_command_test.exs index 834a0f8712a8..20b8cc17a2d8 100644 --- a/deps/rabbitmq_cli/test/ctl/change_cluster_node_type_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/change_cluster_node_type_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ChangeClusterNodeTypeCommandTest do use ExUnit.Case, async: false @@ -59,10 +59,21 @@ defmodule ChangeClusterNodeTypeCommandTest do # end test "run: request to a node with running RabbitMQ app fails", context do - assert match?( - {:error, :mnesia_unexpectedly_running}, - @command.run(["ram"], context[:opts]) - ) + node = RabbitMQ.CLI.Core.Helpers.normalise_node(context[:node], :shortnames) + + case :rabbit_misc.rpc_call(node, :rabbit_khepri, :is_enabled, []) do + true -> + assert match?( + :ok, + @command.run(["ram"], context[:opts]) + ) + + false -> + assert match?( + {:error, :mnesia_unexpectedly_running}, + @command.run(["ram"], context[:opts]) + ) + end end test "run: request to an unreachable node returns a badrpc", _context do diff --git a/deps/rabbitmq_cli/test/ctl/change_password_command_test.exs b/deps/rabbitmq_cli/test/ctl/change_password_command_test.exs index 48215a4d79f9..d834022d1e48 100644 --- a/deps/rabbitmq_cli/test/ctl/change_password_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/change_password_command_test.exs @@ -8,7 +8,7 @@ ## The Original Code is RabbitMQ. 
## ## The Initial Developer of the Original Code is GoPivotal, Inc. -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ChangePasswordCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/clear_global_parameter_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_global_parameter_command_test.exs index b768c7c22534..5c63657cf8e5 100644 --- a/deps/rabbitmq_cli/test/ctl/clear_global_parameter_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/clear_global_parameter_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ClearGlobalParameterCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/clear_operator_policy_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_operator_policy_command_test.exs index d4178844505e..4f87a069179c 100644 --- a/deps/rabbitmq_cli/test/ctl/clear_operator_policy_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/clear_operator_policy_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ClearOperatorPolicyCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/clear_parameter_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_parameter_command_test.exs index ae9998b2847d..8ac884bcd339 100644 --- a/deps/rabbitmq_cli/test/ctl/clear_parameter_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/clear_parameter_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ClearParameterCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/clear_password_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_password_command_test.exs index d2ef8a35b4f3..bcf128a58a48 100644 --- a/deps/rabbitmq_cli/test/ctl/clear_password_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/clear_password_command_test.exs @@ -8,7 +8,7 @@ ## The Original Code is RabbitMQ. ## ## The Initial Developer of the Original Code is GoPivotal, Inc. -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ClearPasswordCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/clear_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_permissions_command_test.exs index 3177c25bf4ca..e4ac252f9fbe 100644 --- a/deps/rabbitmq_cli/test/ctl/clear_permissions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/clear_permissions_command_test.exs @@ -8,7 +8,7 @@ ## The Original Code is RabbitMQ. ## ## The Initial Developer of the Original Code is GoPivotal, Inc. -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ClearPermissionsTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/clear_policy_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_policy_command_test.exs index 8d82f1ece71c..cf1c39739117 100644 --- a/deps/rabbitmq_cli/test/ctl/clear_policy_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/clear_policy_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ClearPolicyCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/clear_topic_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_topic_permissions_command_test.exs index 154e487f8243..3aba4b882599 100644 --- a/deps/rabbitmq_cli/test/ctl/clear_topic_permissions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/clear_topic_permissions_command_test.exs @@ -8,7 +8,7 @@ ## The Original Code is RabbitMQ. ## ## The Initial Developer of the Original Code is GoPivotal, Inc. -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ClearTopicPermissionsTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/clear_user_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_user_limits_command_test.exs index 0e504596295d..34af06f625d3 100644 --- a/deps/rabbitmq_cli/test/ctl/clear_user_limits_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/clear_user_limits_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ClearUserLimitsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/clear_vhost_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_vhost_limits_command_test.exs index e084eed993e5..f36c8f5463a9 100644 --- a/deps/rabbitmq_cli/test/ctl/clear_vhost_limits_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/clear_vhost_limits_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. 
## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ClearVhostLimitsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/close_all_connections_command_test.exs b/deps/rabbitmq_cli/test/ctl/close_all_connections_command_test.exs index ef199ad10c98..2540ae970cd4 100644 --- a/deps/rabbitmq_cli/test/ctl/close_all_connections_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/close_all_connections_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CloseAllConnectionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/close_all_user_connections_command_test.exs b/deps/rabbitmq_cli/test/ctl/close_all_user_connections_command_test.exs index 0a870a2ad0ba..5acc40ee3dba 100644 --- a/deps/rabbitmq_cli/test/ctl/close_all_user_connections_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/close_all_user_connections_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CloseAllUserConnectionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/close_connection_command_test.exs b/deps/rabbitmq_cli/test/ctl/close_connection_command_test.exs index d760aca90dcf..831d8c4e118a 100644 --- a/deps/rabbitmq_cli/test/ctl/close_connection_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/close_connection_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CloseConnectionCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/cluster_status_command_test.exs b/deps/rabbitmq_cli/test/ctl/cluster_status_command_test.exs index a0d75059e3e8..d8a761fc6c6a 100644 --- a/deps/rabbitmq_cli/test/ctl/cluster_status_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/cluster_status_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ClusterStatusCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/deactivate_disk_free_space_monitoring_command_test.exs b/deps/rabbitmq_cli/test/ctl/deactivate_disk_free_space_monitoring_command_test.exs index 0e3fc7622bf5..079a3b581e0a 100644 --- a/deps/rabbitmq_cli/test/ctl/deactivate_disk_free_space_monitoring_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/deactivate_disk_free_space_monitoring_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule DeactivateDiskFreeSpaceMonitoringCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/decode_command_test.exs b/deps/rabbitmq_cli/test/ctl/decode_command_test.exs index 197b3dcf796a..e6d4c354929e 100644 --- a/deps/rabbitmq_cli/test/ctl/decode_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/decode_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule DecodeCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/decrypt_conf_value_command_test.exs b/deps/rabbitmq_cli/test/ctl/decrypt_conf_value_command_test.exs new file mode 100644 index 000000000000..e6dff24dbc21 --- /dev/null +++ b/deps/rabbitmq_cli/test/ctl/decrypt_conf_value_command_test.exs @@ -0,0 +1,83 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
+ +defmodule DecryptConfValueCommandTest do + use ExUnit.Case, async: false + @command RabbitMQ.CLI.Ctl.Commands.DecryptConfValueCommand + + setup _context do + {:ok, + opts: %{ + cipher: :rabbit_pbe.default_cipher(), + hash: :rabbit_pbe.default_hash(), + iterations: :rabbit_pbe.default_iterations() + }} + end + + test "validate: providing exactly 2 positional arguments passes", context do + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end + + test "validate: providing no positional arguments fails", context do + assert match?( + {:validation_failure, {:not_enough_args, _}}, + @command.validate([], context[:opts]) + ) + end + + test "validate: providing one positional argument passes", context do + assert :ok == @command.validate(["value"], context[:opts]) + end + + test "validate: providing three or more positional argument fails", context do + assert match?( + {:validation_failure, :too_many_args}, + @command.validate(["value", "secret", "incorrect"], context[:opts]) + ) + end + + test "validate: hash and cipher must be supported", context do + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{cipher: :funny_cipher}) + ) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{hash: :funny_hash}) + ) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{cipher: :funny_cipher, hash: :funny_hash}) + ) + ) + + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end + + test "validate: number of iterations must greater than 0", context do + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: 0})) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: -1})) + ) + + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end +end diff --git a/deps/rabbitmq_cli/test/ctl/delete_queue_command_test.exs b/deps/rabbitmq_cli/test/ctl/delete_queue_command_test.exs index 6e9036dd6501..db38a5536ba2 100644 --- a/deps/rabbitmq_cli/test/ctl/delete_queue_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/delete_queue_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule DeleteQueueCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/delete_user_command_test.exs b/deps/rabbitmq_cli/test/ctl/delete_user_command_test.exs index 7d17a632130b..979053feb272 100644 --- a/deps/rabbitmq_cli/test/ctl/delete_user_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/delete_user_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. defmodule DeleteUserCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/delete_vhost_command_test.exs b/deps/rabbitmq_cli/test/ctl/delete_vhost_command_test.exs index 872bdf151e09..ea27e9b632b8 100644 --- a/deps/rabbitmq_cli/test/ctl/delete_vhost_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/delete_vhost_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule DeleteVhostCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs b/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs index f9f9b725e679..92264641344d 100644 --- a/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs +++ b/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule EnableFeatureFlagCommandTest do use ExUnit.Case, async: false @@ -10,6 +10,8 @@ defmodule EnableFeatureFlagCommandTest do @command RabbitMQ.CLI.Ctl.Commands.EnableFeatureFlagCommand @feature_flag :ff_from_enable_ff_testsuite + @experimental_flag :ff_from_enable_ff_testsuite_experimental + @usage_exit_code RabbitMQ.CLI.Core.ExitCodes.exit_usage() setup_all do RabbitMQ.CLI.Core.Distribution.start() @@ -22,6 +24,11 @@ defmodule EnableFeatureFlagCommandTest do desc: ~c"My feature flag", provided_by: :EnableFeatureFlagCommandTest, stability: :stable + }, + @experimental_flag => %{ + desc: ~c"An **experimental** feature!", + provided_by: :EnableFeatureFlagCommandTest, + stability: :experimental } } @@ -35,7 +42,9 @@ defmodule EnableFeatureFlagCommandTest do { :ok, - opts: %{node: get_rabbit_hostname()}, feature_flag: @feature_flag + opts: %{node: get_rabbit_hostname(), experimental: false}, + feature_flag: @feature_flag, + experimental_flag: @experimental_flag } end @@ -59,10 +68,20 @@ defmodule EnableFeatureFlagCommandTest do end test "run: attempt to use an unreachable node returns a nodedown" do - opts = %{node: :jake@thedog, timeout: 200} + opts = %{node: :jake@thedog, timeout: 200, experimental: false} assert match?({:badrpc, _}, @command.run(["na"], opts)) end + test "run: enabling an experimental flag requires '--experimental'", context do + experimental_flag = Atom.to_string(context[:experimental_flag]) + assert match?( + {:error, @usage_exit_code, _}, + @command.run([experimental_flag], context[:opts]) + ) + opts = Map.put(context[:opts], :experimental, true) + assert @command.run([experimental_flag], opts) == :ok + end + test "run: enabling the same feature flag twice is idempotent", context do enable_feature_flag(context[:feature_flag]) assert @command.run([Atom.to_string(context[:feature_flag])], context[:opts]) == :ok @@ -75,6 +94,12 @@ defmodule EnableFeatureFlagCommandTest do assert list_feature_flags(:enabled) |> Map.has_key?(context[:feature_flag]) end + test "run: enabling all feature flags with 
'--experimental' returns an error", context do + enable_feature_flag(context[:feature_flag]) + opts = Map.put(context[:opts], :experimental, true) + assert match?({:error, @usage_exit_code, _}, @command.run(["all"], opts)) + end + test "banner", context do assert @command.banner([context[:feature_flag]], context[:opts]) =~ ~r/Enabling feature flag \"#{context[:feature_flag]}\" \.\.\./ diff --git a/deps/rabbitmq_cli/test/ctl/encode_command_test.exs b/deps/rabbitmq_cli/test/ctl/encode_command_test.exs index 36dbb2be0ae9..451270cf0a64 100644 --- a/deps/rabbitmq_cli/test/ctl/encode_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/encode_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule EncodeCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/encrypt_conf_value_command_test.exs b/deps/rabbitmq_cli/test/ctl/encrypt_conf_value_command_test.exs new file mode 100644 index 000000000000..e65f3b99a22a --- /dev/null +++ b/deps/rabbitmq_cli/test/ctl/encrypt_conf_value_command_test.exs @@ -0,0 +1,78 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule EncryptConfValueCommandTest do + use ExUnit.Case, async: false + + @command RabbitMQ.CLI.Ctl.Commands.EncryptConfValueCommand + + setup _context do + {:ok, + opts: %{ + cipher: :rabbit_pbe.default_cipher(), + hash: :rabbit_pbe.default_hash(), + iterations: :rabbit_pbe.default_iterations() + }} + end + + test "validate: providing exactly 2 positional arguments passes", context do + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end + + test "validate: providing zero or one positional argument passes", context do + assert :ok == @command.validate([], context[:opts]) + assert :ok == @command.validate(["value"], context[:opts]) + end + + test "validate: providing three or more positional argument fails", context do + assert match?( + {:validation_failure, :too_many_args}, + @command.validate(["value", "secret", "incorrect"], context[:opts]) + ) + end + + test "validate: hash and cipher must be supported", context do + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{cipher: :funny_cipher}) + ) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{hash: :funny_hash}) + ) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{cipher: :funny_cipher, hash: :funny_hash}) + ) + ) + + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end + + test "validate: number of iterations must greater than 0", context do + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: 0})) + ) + + 
assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: -1})) + ) + + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end +end diff --git a/deps/rabbitmq_cli/test/ctl/environment_command_test.exs b/deps/rabbitmq_cli/test/ctl/environment_command_test.exs index 638ff08c5c70..5c0274c6abea 100644 --- a/deps/rabbitmq_cli/test/ctl/environment_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/environment_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule EnvironmentCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/eval_command_test.exs b/deps/rabbitmq_cli/test/ctl/eval_command_test.exs index 057f3c1067dd..0c60a58977b5 100644 --- a/deps/rabbitmq_cli/test/ctl/eval_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/eval_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule EvalCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/eval_file_command_test.exs b/deps/rabbitmq_cli/test/ctl/eval_file_command_test.exs index d4e9042ee458..78d25e6fd866 100644 --- a/deps/rabbitmq_cli/test/ctl/eval_file_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/eval_file_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule EvalFileCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/exec_command_test.exs b/deps/rabbitmq_cli/test/ctl/exec_command_test.exs index 8fdc886d8e30..d195a3501b77 100644 --- a/deps/rabbitmq_cli/test/ctl/exec_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/exec_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ExecCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/export_definitions_command_test.exs b/deps/rabbitmq_cli/test/ctl/export_definitions_command_test.exs index d1be4f6897ad..ce508ddfc394 100644 --- a/deps/rabbitmq_cli/test/ctl/export_definitions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/export_definitions_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ExportDefinitionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/force_boot_command_test.exs b/deps/rabbitmq_cli/test/ctl/force_boot_command_test.exs index 7fe3c49f6439..aa5377102590 100644 --- a/deps/rabbitmq_cli/test/ctl/force_boot_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/force_boot_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ForceBootCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/force_gc_command_test.exs b/deps/rabbitmq_cli/test/ctl/force_gc_command_test.exs index 43aaf80555b4..7870f3f503a2 100644 --- a/deps/rabbitmq_cli/test/ctl/force_gc_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/force_gc_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ForceGcCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/force_reset_command_test.exs b/deps/rabbitmq_cli/test/ctl/force_reset_command_test.exs index 38664c78319b..481e42583520 100644 --- a/deps/rabbitmq_cli/test/ctl/force_reset_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/force_reset_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ForceResetCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/forget_cluster_node_command_test.exs b/deps/rabbitmq_cli/test/ctl/forget_cluster_node_command_test.exs index e21de2c469d1..74d8a9f377a0 100644 --- a/deps/rabbitmq_cli/test/ctl/forget_cluster_node_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/forget_cluster_node_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ForgetClusterNodeCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/hash_password_command_test.exs b/deps/rabbitmq_cli/test/ctl/hash_password_command_test.exs index fb30534dcc6c..31a1ced6f9f8 100644 --- a/deps/rabbitmq_cli/test/ctl/hash_password_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/hash_password_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule HashPasswordCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/help_command_test.exs b/deps/rabbitmq_cli/test/ctl/help_command_test.exs index 27ce1d22440a..df6829683faf 100644 --- a/deps/rabbitmq_cli/test/ctl/help_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/help_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule HelpCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/import_definitions_command_test.exs b/deps/rabbitmq_cli/test/ctl/import_definitions_command_test.exs index d918db8db39d..fd05a59723bf 100644 --- a/deps/rabbitmq_cli/test/ctl/import_definitions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/import_definitions_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ImportDefinitionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/info_keys_test.exs b/deps/rabbitmq_cli/test/ctl/info_keys_test.exs index 76fbc6546194..53a60ec23c1b 100644 --- a/deps/rabbitmq_cli/test/ctl/info_keys_test.exs +++ b/deps/rabbitmq_cli/test/ctl/info_keys_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule InfoKeysTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/ctl/join_cluster_command_test.exs b/deps/rabbitmq_cli/test/ctl/join_cluster_command_test.exs index f2e9a17b7d85..816e291f033d 100644 --- a/deps/rabbitmq_cli/test/ctl/join_cluster_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/join_cluster_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule JoinClusterCommandTest do use ExUnit.Case, async: false @@ -67,14 +67,6 @@ defmodule JoinClusterCommandTest do start_rabbitmq_app() end - # TODO - test "run: request to an active node fails", context do - assert match?( - {:error, :mnesia_unexpectedly_running}, - @command.run([context[:opts][:node]], context[:opts]) - ) - end - test "run: request to a non-existent node returns a badrpc", context do opts = %{ node: :jake@thedog, diff --git a/deps/rabbitmq_cli/test/ctl/list_channels_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_channels_command_test.exs index 6d97de645280..d5dc6140edfe 100644 --- a/deps/rabbitmq_cli/test/ctl/list_channels_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_channels_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListChannelsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_ciphers_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_ciphers_command_test.exs index 03f968f34d81..ef4d449d7c79 100644 --- a/deps/rabbitmq_cli/test/ctl/list_ciphers_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_ciphers_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListCiphersCommandTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/ctl/list_deprecated_features_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_deprecated_features_command_test.exs new file mode 100644 index 000000000000..d7bbf0f89529 --- /dev/null +++ b/deps/rabbitmq_cli/test/ctl/list_deprecated_features_command_test.exs @@ -0,0 +1,145 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2023 Broadcom. All Rights Reserved. The term “Broadcom” +## refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule ListDeprecatedFeaturesCommandTest do + use ExUnit.Case, async: false + import TestHelper + + @command RabbitMQ.CLI.Ctl.Commands.ListDeprecatedFeaturesCommand + + @df1 :df1_from_list_df_testsuite + @df2 :df2_from_list_df_testsuite + + setup_all do + RabbitMQ.CLI.Core.Distribution.start() + + # Define an arbitrary deprecated feature for the test. 
+ node = get_rabbit_hostname() + + new_deprecated_features = %{ + @df1 => %{ + desc: ~c"My deprecated feature #1", + provided_by: :ListDeprecatedFeaturesCommandTest, + deprecation_phase: :permitted_by_default + }, + @df2 => %{ + desc: ~c"My deprecated feature #2", + provided_by: :ListDeprecatedFeaturesCommandTest, + deprecation_phase: :removed + } + } + + :ok = + :rabbit_misc.rpc_call( + node, + :rabbit_feature_flags, + :inject_test_feature_flags, + [new_deprecated_features] + ) + + name_result = [ + [{:name, @df1}], + [{:name, @df2}] + ] + + full_result = [ + [{:name, @df1}, {:deprecation_phase, :permitted_by_default}], + [{:name, @df2}, {:deprecation_phase, :removed}] + ] + + { + :ok, + name_result: name_result, full_result: full_result + } + end + + setup context do + { + :ok, + opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout], used: false} + } + end + + test "merge_defaults with no arguments uses the default info keys" do + assert match?({["name", "deprecation_phase"], %{}}, @command.merge_defaults([], %{})) + end + + test "validate: return bad_info_key on a single bad arg", context do + assert @command.validate(["quack"], context[:opts]) == + {:validation_failure, {:bad_info_key, [:quack]}} + end + + test "validate: multiple bad args return a list of bad info key values", context do + result = @command.validate(["quack", "oink"], context[:opts]) + assert match?({:validation_failure, {:bad_info_key, _}}, result) + {_, {_, keys}} = result + assert :lists.sort(keys) == [:oink, :quack] + end + + test "validate: return bad_info_key on mix of good and bad args", context do + assert @command.validate(["quack", "name"], context[:opts]) == + {:validation_failure, {:bad_info_key, [:quack]}} + + assert @command.validate(["name", "oink"], context[:opts]) == + {:validation_failure, {:bad_info_key, [:oink]}} + + assert @command.validate(["name", "oink", "deprecation_phase"], context[:opts]) == + {:validation_failure, {:bad_info_key, [:oink]}} + end + + test "run: on a bad RabbitMQ node, return a badrpc" do + opts = %{node: :jake@thedog, timeout: 200, used: false} + assert match?({:badrpc, _}, @command.run(["name"], opts)) + end + + @tag test_timeout: :infinity + test "run: with the name tag, print just the names", context do + matches_found = @command.run(["name"], context[:opts]) + + assert Enum.all?(context[:name_result], fn feature_name -> + Enum.find(matches_found, fn found -> found == feature_name end) + end) + end + + @tag test_timeout: :infinity + test "run: with the name tag, print just the names for used features", context do + opts = %{node: get_rabbit_hostname(), timeout: context[:test_timeout], used: true} + matches_found = @command.run(["name"], opts) + + assert Enum.empty?(matches_found) + end + + @tag test_timeout: :infinity + test "run: duplicate args do not produce duplicate entries", context do + # checks to ensure that all expected deprecated features are in the results + matches_found = @command.run(["name", "name"], context[:opts]) + + assert Enum.all?(context[:name_result], fn feature_name -> + Enum.find(matches_found, fn found -> found == feature_name end) + end) + end + + @tag test_timeout: 30000 + test "run: sufficiently long timeouts don't interfere with results", context do + matches_found = @command.run(["name", "deprecation_phase"], context[:opts]) + + assert Enum.all?(context[:full_result], fn feature_name -> + Enum.find(matches_found, fn found -> found == feature_name end) + end) + end + + @tag test_timeout: 0, username: "guest" + test "run:
timeout causes command to return a bad RPC", context do + assert @command.run(["name", "state"], context[:opts]) == + {:badrpc, :timeout} + end + + @tag test_timeout: :infinity + test "banner", context do + assert @command.banner([], context[:opts]) =~ ~r/Listing deprecated features \.\.\./ + end +end diff --git a/deps/rabbitmq_cli/test/ctl/list_exchanges_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_exchanges_command_test.exs index 1c5f2c8acec0..e8d146ae3139 100644 --- a/deps/rabbitmq_cli/test/ctl/list_exchanges_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_exchanges_command_test.exs @@ -96,7 +96,7 @@ defmodule ListExchangesCommandTest do test "run: default options test", context do exchange_name = "test_exchange" - declare_exchange(exchange_name, @vhost) + {:ok, _} = declare_exchange(exchange_name, @vhost) assert MapSet.new(run_command_to_list(@command, [["name", "type"], context[:opts]])) == MapSet.new( @@ -106,8 +106,8 @@ defmodule ListExchangesCommandTest do end test "run: list multiple exchanges", context do - declare_exchange("test_exchange_1", @vhost, :direct) - declare_exchange("test_exchange_2", @vhost, :fanout) + {:ok, _} = declare_exchange("test_exchange_1", @vhost, :direct) + {:ok, _} = declare_exchange("test_exchange_2", @vhost, :fanout) non_default_exchanges = run_command_to_list(@command, [["name", "type"], context[:opts]]) @@ -124,8 +124,8 @@ defmodule ListExchangesCommandTest do end test "run: info keys filter single key", context do - declare_exchange("test_exchange_1", @vhost) - declare_exchange("test_exchange_2", @vhost) + {:ok, _} = declare_exchange("test_exchange_1", @vhost) + {:ok, _} = declare_exchange("test_exchange_2", @vhost) non_default_exchanges = run_command_to_list(@command, [["name"], context[:opts]]) @@ -138,8 +138,8 @@ defmodule ListExchangesCommandTest do end test "run: info keys add additional keys", context do - declare_exchange("durable_exchange", @vhost, :direct, true) - declare_exchange("auto_delete_exchange", @vhost, :fanout, false, true) + {:ok, _} = declare_exchange("durable_exchange", @vhost, :direct, true) + {:ok, _} = declare_exchange("auto_delete_exchange", @vhost, :fanout, false, true) non_default_exchanges = run_command_to_list(@command, [["name", "type", "durable", "auto_delete"], context[:opts]]) @@ -162,8 +162,8 @@ defmodule ListExchangesCommandTest do delete_vhost(other_vhost) end) - declare_exchange("test_exchange_1", @vhost) - declare_exchange("test_exchange_2", other_vhost) + {:ok, _} = declare_exchange("test_exchange_1", @vhost) + {:ok, _} = declare_exchange("test_exchange_2", other_vhost) non_default_exchanges1 = run_command_to_list(@command, [["name"], context[:opts]]) diff --git a/deps/rabbitmq_cli/test/ctl/list_feature_flags_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_feature_flags_command_test.exs index ef0ce087e4df..c1d8baf9611a 100644 --- a/deps/rabbitmq_cli/test/ctl/list_feature_flags_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_feature_flags_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ListFeatureFlagsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_global_parameters_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_global_parameters_command_test.exs index 272b7cfd1e09..988b297e8bdf 100644 --- a/deps/rabbitmq_cli/test/ctl/list_global_parameters_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_global_parameters_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListGlobalParametersCommandTest do use ExUnit.Case, async: false @@ -81,12 +81,12 @@ defmodule ListGlobalParametersCommandTest do # Checks each element of the first parameter against the expected context values defp assert_parameter_list(params, context) do - [param | _] = params + exp = + MapSet.new( + name: context[:key], + value: context[:value] + ) - assert MapSet.new(param) == - MapSet.new( - name: context[:key], - value: context[:value] - ) + assert List.foldl(params, false, fn param, acc -> MapSet.new(param) == exp or acc end) end end diff --git a/deps/rabbitmq_cli/test/ctl/list_hashes_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_hashes_command_test.exs index 681965f97c38..bbd3811f1da4 100644 --- a/deps/rabbitmq_cli/test/ctl/list_hashes_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_hashes_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListHashesCommandTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/ctl/list_operator_policies_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_operator_policies_command_test.exs index aef97528c329..d00aa896b827 100644 --- a/deps/rabbitmq_cli/test/ctl/list_operator_policies_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_operator_policies_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListOperatorPoliciesCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_parameters_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_parameters_command_test.exs index 8008961f3661..7f8ca9791886 100644 --- a/deps/rabbitmq_cli/test/ctl/list_parameters_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_parameters_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ListParametersCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_permissions_command_test.exs index 2999743aad38..75b8bfc6ade1 100644 --- a/deps/rabbitmq_cli/test/ctl/list_permissions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_permissions_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListPermissionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_policies_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_policies_command_test.exs index aa34bc1229e9..f45e2fcf4f7b 100644 --- a/deps/rabbitmq_cli/test/ctl/list_policies_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_policies_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListPoliciesCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_topic_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_topic_permissions_command_test.exs index 3ea57104acb6..ff3d0b3cc90d 100644 --- a/deps/rabbitmq_cli/test/ctl/list_topic_permissions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_topic_permissions_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListTopicPermissionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_user_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_user_limits_command_test.exs index 4b15da293942..43d5c9851c59 100644 --- a/deps/rabbitmq_cli/test/ctl/list_user_limits_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_user_limits_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListUserLimitsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_user_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_user_permissions_command_test.exs index 3409cc1253f4..563c365ea820 100644 --- a/deps/rabbitmq_cli/test/ctl/list_user_permissions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_user_permissions_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListUserPermissionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_user_topic_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_user_topic_permissions_command_test.exs index 6650908dbfb1..b9bdb901fba6 100644 --- a/deps/rabbitmq_cli/test/ctl/list_user_topic_permissions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_user_topic_permissions_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListUserTopicPermissionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_users_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_users_command_test.exs index 8f9a0ca8bff3..4039823f16b8 100644 --- a/deps/rabbitmq_cli/test/ctl/list_users_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_users_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListUsersCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_vhost_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_vhost_limits_command_test.exs index 55f43acf94c9..39bae8d272e7 100644 --- a/deps/rabbitmq_cli/test/ctl/list_vhost_limits_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_vhost_limits_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListVhostLimitsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/list_vhosts_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_vhosts_command_test.exs index 6d4301d05e21..3cd3453d4440 100644 --- a/deps/rabbitmq_cli/test/ctl/list_vhosts_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/list_vhosts_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ListVhostsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/node_health_check_command_test.exs b/deps/rabbitmq_cli/test/ctl/node_health_check_command_test.exs index e2d98a641356..f63a0ff532f1 100644 --- a/deps/rabbitmq_cli/test/ctl/node_health_check_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/node_health_check_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule NodeHealthCheckCommandTest do use ExUnit.Case, async: false @@ -38,27 +38,11 @@ defmodule NodeHealthCheckCommandTest do assert @command.validate([], context[:opts]) == :ok end - test "run: request to a named, active node succeeds", context do + test "run: is a no-op", context do assert @command.run([], context[:opts]) end - test "run: request to a named, active node with an alarm in effect fails", context do - set_vm_memory_high_watermark(0.0000000000001) - # give VM memory monitor check some time to kick in - :timer.sleep(1500) - {:healthcheck_failed, _message} = @command.run([], context[:opts]) - - reset_vm_memory_high_watermark() - :timer.sleep(1500) - assert @command.run([], context[:opts]) == :ok - end - - test "run: request to a non-existent node returns a badrpc" do - assert match?({:badrpc, _}, @command.run([], %{node: :jake@thedog, timeout: 200})) - end - test "banner", context do - assert @command.banner([], context[:opts]) |> Enum.join("\n") =~ ~r/Checking health/ - assert @command.banner([], context[:opts]) |> Enum.join("\n") =~ ~r/#{get_rabbit_hostname()}/ + assert @command.banner([], context[:opts]) |> Enum.join("\n") =~ ~r/DEPRECATED/ end end diff --git a/deps/rabbitmq_cli/test/ctl/ping_command_test.exs b/deps/rabbitmq_cli/test/ctl/ping_command_test.exs index fd13d37ac67e..465f22159ebc 100644 --- a/deps/rabbitmq_cli/test/ctl/ping_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/ping_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule PingCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/purge_queue_command_test.exs b/deps/rabbitmq_cli/test/ctl/purge_queue_command_test.exs index 87717b634ed6..7c3ad0f88d7b 100644 --- a/deps/rabbitmq_cli/test/ctl/purge_queue_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/purge_queue_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule PurgeQueueCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/reconcile_vhosts_command_test.exs b/deps/rabbitmq_cli/test/ctl/reconcile_vhosts_command_test.exs new file mode 100644 index 000000000000..a42927d09ea8 --- /dev/null +++ b/deps/rabbitmq_cli/test/ctl/reconcile_vhosts_command_test.exs @@ -0,0 +1,98 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule ReconcileVhostsCommandTest do + use ExUnit.Case, async: false + import TestHelper + + @command RabbitMQ.CLI.Ctl.Commands.ReconcileVhostsCommand + + setup_all do + RabbitMQ.CLI.Core.Distribution.start() + :ok + end + + @vhost "vhost_to_reconcile" + @timeout 10000 + + setup do + {:ok, + opts: %{ + node: get_rabbit_hostname(), + vhost: @vhost, + timeout: @timeout + }} + end + + test "validate: specifying arguments is reported as an error", context do + assert @command.validate(["a"], context[:opts]) == + {:validation_failure, :too_many_args} + + assert @command.validate(["a", "b"], context[:opts]) == + {:validation_failure, :too_many_args} + + assert @command.validate(["a", "b", "c"], context[:opts]) == + {:validation_failure, :too_many_args} + end + + test "run: request to a non-existent node returns a badrpc", _context do + opts = %{node: :jake@thedog, vhost: @vhost, timeout: @timeout} + + assert match?( + {:badrpc, _}, + @command.run([], opts) + ) + end + + test "banner", context do + assert @command.banner([], context[:opts]) =~ + ~r/Will try to initiate virtual host reconciliation/ + end + + test "run: initiates an async operation and returns ok", context do + setup_vhosts() + vhost = context[:opts][:vhost] + node_name = context[:opts][:node] + force_vhost_failure(node_name, vhost) + assert :ok == @command.run([], context[:opts]) + :timer.sleep(1000) + assert match?({:ok, _}, :rpc.call(node_name, :rabbit_vhost_sup_sup, :get_vhost_sup, [vhost])) + end + + # + # Implementation + # + + defp setup_vhosts do + add_vhost(@vhost) + # give the vhost a chance to fully start and initialise + :timer.sleep(1000) + + on_exit(fn -> + delete_vhost(@vhost) + end) + end + + defp force_vhost_failure(node_name, vhost) do + case :rpc.call(node_name, :rabbit_vhost_sup_sup, :get_vhost_sup, [vhost]) do + {:ok, sup} -> + case :lists.keyfind(:msg_store_persistent, 1, :supervisor.which_children(sup)) do + {_, pid, _, _} -> + Process.exit(pid, :foo) + :timer.sleep(5000) + force_vhost_failure(node_name, vhost) + + false -> + Process.exit(sup, :foo) + :timer.sleep(5000) + force_vhost_failure(node_name, vhost) + end + + {:error, {:vhost_supervisor_not_running, _}} -> + :ok + end + end +end diff --git a/deps/rabbitmq_cli/test/ctl/sync_queue_command_test.exs b/deps/rabbitmq_cli/test/ctl/remove_classic_queue_mirroring_from_policies_command_test.exs similarity index 55% rename from deps/rabbitmq_cli/test/ctl/sync_queue_command_test.exs rename to deps/rabbitmq_cli/test/ctl/remove_classic_queue_mirroring_from_policies_command_test.exs index 780ff43f86e9..447a98c874a6 100644 --- a/deps/rabbitmq_cli/test/ctl/sync_queue_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/remove_classic_queue_mirroring_from_policies_command_test.exs @@ -2,13 +2,13 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -defmodule SyncQueueCommandTest do +defmodule RemoveClassicQueueMirroringFromPoliciesCommandTest do use ExUnit.Case, async: false import TestHelper - @command RabbitMQ.CLI.Ctl.Commands.SyncQueueCommand + @command RabbitMQ.CLI.Ctl.Commands.RemoveClassicQueueMirroringFromPoliciesCommand @vhost "/" @@ -32,13 +32,12 @@ defmodule SyncQueueCommandTest do }} end - test "validate: specifying no queue name is reported as an error", context do - assert @command.validate([], context[:opts]) == - {:validation_failure, :not_enough_args} + test "validate: specifying no arguments succeeds", context do + assert @command.validate([], context[:opts]) == :ok end - test "validate: specifying two queue names is reported as an error", context do - assert @command.validate(["q1", "q2"], context[:opts]) == + test "validate: specifying a positional argument is reported as an error", context do + assert @command.validate(["q1"], context[:opts]) == {:validation_failure, :too_many_args} end @@ -47,19 +46,14 @@ defmodule SyncQueueCommandTest do {:validation_failure, :too_many_args} end - test "validate: specifying one queue name succeeds", context do - assert @command.validate(["q1"], context[:opts]) == :ok - end - test "run: request to a non-existent RabbitMQ node returns a nodedown" do opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200} - assert match?({:badrpc, _}, @command.run(["q1"], opts)) + assert match?({:badrpc, _}, @command.run([], opts)) end test "banner", context do - s = @command.banner(["q1"], context[:opts]) + s = @command.banner([], context[:opts]) - assert s =~ ~r/Synchronising queue/ - assert s =~ ~r/q1/ + assert s =~ ~r/Will remove/ end end diff --git a/deps/rabbitmq_cli/test/ctl/rename_cluster_node_command_test.exs b/deps/rabbitmq_cli/test/ctl/rename_cluster_node_command_test.exs index 1dd9046ecd16..ea48db3f7075 100644 --- a/deps/rabbitmq_cli/test/ctl/rename_cluster_node_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/rename_cluster_node_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
defmodule RenameClusterNodeCommandTest do use ExUnit.Case, async: false @@ -38,65 +38,8 @@ defmodule RenameClusterNodeCommandTest do )} end - test "validate: specifying no nodes fails validation", context do - assert @command.validate([], context[:opts]) == - {:validation_failure, :not_enough_args} - end - - test "validate: specifying one node only fails validation", context do - assert @command.validate(["a"], context[:opts]) == - {:validation_failure, :not_enough_args} - end - - test "validate_execution_environment: specifying an uneven number of arguments fails validation", - context do - assert match?( - {:validation_failure, {:bad_argument, _}}, - @command.validate_execution_environment(["a", "b", "c"], context[:opts]) - ) - end - - test "validate_execution_environment: request to a running node fails", _context do - node = get_rabbit_hostname() - - assert match?( - {:validation_failure, :node_running}, - @command.validate_execution_environment([to_string(node), "other_node@localhost"], %{ - node: node - }) - ) - end - - test "validate_execution_environment: not providing node data dir fails validation", - context do - opts_without_data_dir = Map.delete(context[:opts], :data_dir) - Application.put_env(:rabbit, :data_dir, "/tmp") - on_exit(fn -> Application.delete_env(:rabbit, :data_dir) end) - - assert :ok == - @command.validate( - ["some_node@localhost", "other_node@localhost"], - opts_without_data_dir - ) - - Application.delete_env(:rabbit, :data_dir) - System.put_env("RABBITMQ_MNESIA_DIR", "/tmp") - on_exit(fn -> System.delete_env("RABBITMQ_MNESIA_DIR") end) - - assert :ok == - @command.validate( - ["some_node@localhost", "other_node@localhost"], - opts_without_data_dir - ) - - System.delete_env("RABBITMQ_MNESIA_DIR") - - assert :ok == - @command.validate(["some_node@localhost", "other_node@localhost"], context[:opts]) - end - test "banner", context do assert @command.banner(["a", "b"], context[:opts]) =~ - ~r/Renaming cluster nodes: \n a -> b/ + ~r/DEPRECATED. This command is a no-op./ end end diff --git a/deps/rabbitmq_cli/test/ctl/report_command_test.exs b/deps/rabbitmq_cli/test/ctl/report_command_test.exs index 1a59bc27c10e..908ef377dad1 100644 --- a/deps/rabbitmq_cli/test/ctl/report_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/report_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ReportTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/reset_command_test.exs b/deps/rabbitmq_cli/test/ctl/reset_command_test.exs index 168a3d19ed3b..e8935ae7f414 100644 --- a/deps/rabbitmq_cli/test/ctl/reset_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/reset_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ResetCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/restart_vhost_command_test.exs b/deps/rabbitmq_cli/test/ctl/restart_vhost_command_test.exs index 725c5832006e..542a6c523555 100644 --- a/deps/rabbitmq_cli/test/ctl/restart_vhost_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/restart_vhost_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RestartVhostCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/resume_listeners_command_test.exs b/deps/rabbitmq_cli/test/ctl/resume_listeners_command_test.exs index 7c9db72cc9e8..a629172535be 100644 --- a/deps/rabbitmq_cli/test/ctl/resume_listeners_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/resume_listeners_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## defmodule ResumeListenersCommandTest do diff --git a/deps/rabbitmq_cli/test/ctl/set_cluster_name_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_cluster_name_command_test.exs index 53bc14ab6472..7fa70f055b0f 100644 --- a/deps/rabbitmq_cli/test/ctl/set_cluster_name_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_cluster_name_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetClusterNameCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_disk_free_limit_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_disk_free_limit_command_test.exs index b5f31b9e5a4c..b1157b6eee2d 100644 --- a/deps/rabbitmq_cli/test/ctl/set_disk_free_limit_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_disk_free_limit_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetDiskFreeLimitCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_global_parameter_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_global_parameter_command_test.exs index 683ee1397aac..9f31ecccea89 100644 --- a/deps/rabbitmq_cli/test/ctl/set_global_parameter_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_global_parameter_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. 
## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetGlobalParameterCommandTest do use ExUnit.Case, async: false @@ -79,9 +79,14 @@ defmodule SetGlobalParameterCommandTest do # Checks each element of the first parameter against the expected context values defp assert_parameter_fields(context) do - result_param = list_global_parameters() |> List.first() + result_params = list_global_parameters() - assert result_param[:value] == context[:value] - assert result_param[:name] == context[:key] + exp = + MapSet.new( + name: context[:key], + value: context[:value] + ) + + assert List.foldl(result_params, false, fn param, acc -> MapSet.new(param) == exp or acc end) end end diff --git a/deps/rabbitmq_cli/test/ctl/set_log_level_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_log_level_command_test.exs index db8761fe5b37..7faabdd695ff 100644 --- a/deps/rabbitmq_cli/test/ctl/set_log_level_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_log_level_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetLogLevelCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_operator_policy_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_operator_policy_command_test.exs index dc5ad5d9760c..fcc45eef3027 100644 --- a/deps/rabbitmq_cli/test/ctl/set_operator_policy_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_operator_policy_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetOperatorPolicyCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_parameter_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_parameter_command_test.exs index f0386640e652..8ad7962c207a 100644 --- a/deps/rabbitmq_cli/test/ctl/set_parameter_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_parameter_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetParameterCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_permissions_command_test.exs index 3c62c501c789..2c301e392140 100644 --- a/deps/rabbitmq_cli/test/ctl/set_permissions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_permissions_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetPermissionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_permissions_globally_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_permissions_globally_command_test.exs index 43b57631be12..5ce13340a098 100644 --- a/deps/rabbitmq_cli/test/ctl/set_permissions_globally_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_permissions_globally_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetPermissionsGloballyCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_policy_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_policy_command_test.exs index 1281ed3ad992..deee77f86abe 100644 --- a/deps/rabbitmq_cli/test/ctl/set_policy_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_policy_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule SetPolicyCommandTest do use ExUnit.Case, async: false @@ -156,25 +156,7 @@ defmodule SetPolicyCommandTest do test "ha policy validation", context do vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]}) context = Map.put(context, :opts, vhost_opts) - pass_validation(context, "{\"ha-mode\":\"all\"}") - fail_validation(context, "{\"ha-mode\":\"made_up\"}") - - fail_validation(context, "{\"ha-mode\":\"nodes\"}") - fail_validation(context, "{\"ha-mode\":\"nodes\",\"ha-params\":2}") - fail_validation(context, "{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}") - pass_validation(context, "{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}") - fail_validation(context, "{\"ha-params\":[\"a\",\"b\"]}") - - fail_validation(context, "{\"ha-mode\":\"exactly\"}") - fail_validation(context, "{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}") - pass_validation(context, "{\"ha-mode\":\"exactly\",\"ha-params\":2}") - fail_validation(context, "{\"ha-params\":2}") - - pass_validation(context, "{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}") - pass_validation(context, "{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}") - fail_validation(context, "{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}") - fail_validation(context, "{\"ha-sync-mode\":\"manual\"}") - fail_validation(context, "{\"ha-sync-mode\":\"automatic\"}") + fail_validation(context, "{\"ha-mode\":\"all\"}") end @tag pattern: "ha_", key: "ha_policy_test", vhost: @vhost diff --git a/deps/rabbitmq_cli/test/ctl/set_topic_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_topic_permissions_command_test.exs index e23fa84c157f..684df5fb1944 100644 --- a/deps/rabbitmq_cli/test/ctl/set_topic_permissions_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_topic_permissions_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetTopicPermissionsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_user_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_user_limits_command_test.exs index bb9da3588dc4..5859a115fab2 100644 --- a/deps/rabbitmq_cli/test/ctl/set_user_limits_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_user_limits_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetUserLimitsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_user_tags_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_user_tags_command_test.exs index b2f3b33825d3..01b092622667 100644 --- a/deps/rabbitmq_cli/test/ctl/set_user_tags_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_user_tags_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetUserTagsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_vhost_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_vhost_limits_command_test.exs index 6a1bec75aac7..a3b1eaf0d00b 100644 --- a/deps/rabbitmq_cli/test/ctl/set_vhost_limits_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_vhost_limits_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetVhostLimitsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_vhost_tags_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_vhost_tags_command_test.exs index 3a662bd21cc7..d590312c01c1 100644 --- a/deps/rabbitmq_cli/test/ctl/set_vhost_tags_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_vhost_tags_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetVhostTagsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/set_vm_memory_high_watermark_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_vm_memory_high_watermark_command_test.exs index 16073387fc37..bf2a12d5a72f 100644 --- a/deps/rabbitmq_cli/test/ctl/set_vm_memory_high_watermark_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/set_vm_memory_high_watermark_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetVmMemoryHighWatermarkCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/shutdown_command_test.exs b/deps/rabbitmq_cli/test/ctl/shutdown_command_test.exs index 264469f2ae1c..6f3f9c3c867c 100644 --- a/deps/rabbitmq_cli/test/ctl/shutdown_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/shutdown_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ShutdownCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/start_app_command_test.exs b/deps/rabbitmq_cli/test/ctl/start_app_command_test.exs index f64c5dbe788f..78f22080a19a 100644 --- a/deps/rabbitmq_cli/test/ctl/start_app_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/start_app_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule StartAppCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/status_command_test.exs b/deps/rabbitmq_cli/test/ctl/status_command_test.exs index 64cec4948d98..61709aae3255 100644 --- a/deps/rabbitmq_cli/test/ctl/status_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/status_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule StatusCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/stop_app_command_test.exs b/deps/rabbitmq_cli/test/ctl/stop_app_command_test.exs index 2c7cc04c393a..e8750123a89e 100644 --- a/deps/rabbitmq_cli/test/ctl/stop_app_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/stop_app_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule StopAppCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/stop_command_test.exs b/deps/rabbitmq_cli/test/ctl/stop_command_test.exs index 1a3f9c65a969..d4dec11b8ab7 100644 --- a/deps/rabbitmq_cli/test/ctl/stop_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/stop_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule StopCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/suspend_listeners_command_test.exs b/deps/rabbitmq_cli/test/ctl/suspend_listeners_command_test.exs index 1779f3c28a32..0b07ea322779 100644 --- a/deps/rabbitmq_cli/test/ctl/suspend_listeners_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/suspend_listeners_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## defmodule SuspendListenersCommandTest do diff --git a/deps/rabbitmq_cli/test/ctl/trace_off_command_test.exs b/deps/rabbitmq_cli/test/ctl/trace_off_command_test.exs index d62862ef7d00..285c951b8c9d 100644 --- a/deps/rabbitmq_cli/test/ctl/trace_off_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/trace_off_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule TraceOffCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/trace_on_command_test.exs b/deps/rabbitmq_cli/test/ctl/trace_on_command_test.exs index a17f5a4f249f..302e86b9458a 100644 --- a/deps/rabbitmq_cli/test/ctl/trace_on_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/trace_on_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule TraceOnCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/ctl/update_cluster_nodes_command_test.exs b/deps/rabbitmq_cli/test/ctl/update_cluster_nodes_command_test.exs index f35d734f67ca..cbc63f1ca3ec 100644 --- a/deps/rabbitmq_cli/test/ctl/update_cluster_nodes_command_test.exs +++ b/deps/rabbitmq_cli/test/ctl/update_cluster_nodes_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule UpdateClusterNodesCommandTest do
  use ExUnit.Case, async: false
@@ -29,61 +29,15 @@ defmodule UpdateClusterNodesCommandTest do
     }}
  end

-  test "validate: providing too few arguments fails validation", context do
-    assert @command.validate([], context[:opts]) ==
-             {:validation_failure, :not_enough_args}
-  end
-
-  test "validate: providing too many arguments fails validation", context do
-    assert @command.validate(["a", "b", "c"], context[:opts]) ==
-             {:validation_failure, :too_many_args}
-  end
-
-  test "run: specifying self as seed node fails validation", context do
-    stop_rabbitmq_app()
-
+  test "run: no op", context do
    assert match?(
-             {:error, :cannot_cluster_node_with_itself},
+             :ok,
             @command.run([context[:opts][:node]], context[:opts])
           )
-
-    start_rabbitmq_app()
-  end
-
-  test "run: request to an unreachable node returns a badrpc", context do
-    opts = %{
-      node: :jake@thedog,
-      timeout: 200
-    }
-
-    assert match?(
-             {:badrpc, :nodedown},
-             @command.run([context[:opts][:node]], opts)
-           )
-  end
-
-  test "run: specifying an unreachable node as seed returns a badrpc", context do
-    stop_rabbitmq_app()
-
-    assert match?(
-             {:badrpc_multi, _, [_]},
-             @command.run([:jake@thedog], context[:opts])
-           )
-
-    start_rabbitmq_app()
  end

  test "banner", context do
    assert @command.banner(["a"], context[:opts]) =~
-             ~r/Will seed #{get_rabbit_hostname()} from a on next start/
-  end
-
-  test "output mnesia is running error", context do
-    exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_software()
-
-    assert match?(
-             {:error, ^exit_code, "Mnesia is still running on node " <> _},
-             @command.output({:error, :mnesia_unexpectedly_running}, context[:opts])
-           )
+             ~r/DEPRECATED. This command is a no-op./
  end
end
diff --git a/deps/rabbitmq_cli/test/ctl/update_vhost_metadata_command_test.exs b/deps/rabbitmq_cli/test/ctl/update_vhost_metadata_command_test.exs
index f9d7f1e6eee0..dd44f709bb04 100644
--- a/deps/rabbitmq_cli/test/ctl/update_vhost_metadata_command_test.exs
+++ b/deps/rabbitmq_cli/test/ctl/update_vhost_metadata_command_test.exs
@@ -2,7 +2,7 @@
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
##
-## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.

defmodule UpdateVhostMetadataCommandTest do
  use ExUnit.Case, async: false
diff --git a/deps/rabbitmq_cli/test/ctl/version_command_test.exs b/deps/rabbitmq_cli/test/ctl/version_command_test.exs
index 2d9e052999b1..8a84288b9a16 100644
--- a/deps/rabbitmq_cli/test/ctl/version_command_test.exs
+++ b/deps/rabbitmq_cli/test/ctl/version_command_test.exs
@@ -2,7 +2,7 @@
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
##
-## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.

defmodule VersionCommandTest do
  use ExUnit.Case
diff --git a/deps/rabbitmq_cli/test/ctl/wait_command_test.exs b/deps/rabbitmq_cli/test/ctl/wait_command_test.exs
index c7e1d21c8f09..cce47a393726 100644
--- a/deps/rabbitmq_cli/test/ctl/wait_command_test.exs
+++ b/deps/rabbitmq_cli/test/ctl/wait_command_test.exs
@@ -2,7 +2,7 @@
## License, v. 2.0.
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule WaitCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/alarms_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/alarms_command_test.exs index 95ee0bbb1664..acf7704c2d24 100644 --- a/deps/rabbitmq_cli/test/diagnostics/alarms_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/alarms_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule AlarmsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/check_alarms_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_alarms_command_test.exs index 912badb821ff..a9fbbf3db4a3 100644 --- a/deps/rabbitmq_cli/test/diagnostics/check_alarms_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/check_alarms_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CheckAlarmsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/check_if_any_deprecated_features_are_used_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_if_any_deprecated_features_are_used_command_test.exs new file mode 100644 index 000000000000..4f05e67052f8 --- /dev/null +++ b/deps/rabbitmq_cli/test/diagnostics/check_if_any_deprecated_features_are_used_command_test.exs @@ -0,0 +1,69 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
+
+defmodule CheckIfAnyDeprecatedFeaturesAreUsedCommandTest do
+  use ExUnit.Case, async: false
+  import TestHelper
+
+  @command RabbitMQ.CLI.Diagnostics.Commands.CheckIfAnyDeprecatedFeaturesAreUsedCommand
+  @policy_name "cmq-policy-8373"
+  @policy_value "{\"ha-mode\":\"all\"}"
+
+  setup_all do
+    RabbitMQ.CLI.Core.Distribution.start()
+
+    start_rabbitmq_app()
+
+    on_exit([], fn ->
+      start_rabbitmq_app()
+      clear_policy("/", @policy_name)
+    end)
+
+    :ok
+  end
+
+  setup context do
+    {:ok,
+     opts: %{
+       node: get_rabbit_hostname(),
+       timeout: context[:test_timeout] || 30000
+     }}
+  end
+
+  test "merge_defaults: nothing to do" do
+    assert @command.merge_defaults([], %{}) == {[], %{}}
+  end
+
+  test "validate: treats positional arguments as a failure" do
+    assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+  end
+
+  test "validate: treats empty positional arguments and default switches as a success" do
+    assert @command.validate([], %{}) == :ok
+  end
+
+  @tag test_timeout: 3000
+  test "run: targeting an unreachable node throws a badrpc", context do
+    assert match?(
+             {:badrpc, _},
+             @command.run([], Map.merge(context[:opts], %{node: :jake@thedog}))
+           )
+  end
+
+  test "run: when there are no policies that enable CMQ mirroring, reports success", context do
+    clear_policy("/", @policy_name)
+    assert @command.run([], context[:opts])
+  end
+
+  test "output: when the result is true, returns successfully", context do
+    assert match?({:ok, _}, @command.output(true, context[:opts]))
+  end
+
+  # this is a check command
+  test "output: when the result is false, returns an error", context do
+    assert match?({:error, _}, @command.output(false, context[:opts]))
+  end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_if_cluster_has_classic_queue_mirroring_policy_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_if_cluster_has_classic_queue_mirroring_policy_command_test.exs
new file mode 100644
index 000000000000..bcc926e5558b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/check_if_cluster_has_classic_queue_mirroring_policy_command_test.exs
@@ -0,0 +1,69 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
+
+defmodule CheckIfClusterHasClassicQueueMirroringPolicyCommandTest do
+  use ExUnit.Case, async: false
+  import TestHelper
+
+  @command RabbitMQ.CLI.Diagnostics.Commands.CheckIfClusterHasClassicQueueMirroringPolicyCommand
+  @policy_name "cmq-policy-8373"
+  @policy_value "{\"ha-mode\":\"all\"}"
+
+  setup_all do
+    RabbitMQ.CLI.Core.Distribution.start()
+
+    start_rabbitmq_app()
+
+    on_exit([], fn ->
+      start_rabbitmq_app()
+      clear_policy("/", @policy_name)
+    end)
+
+    :ok
+  end
+
+  setup context do
+    {:ok,
+     opts: %{
+       node: get_rabbit_hostname(),
+       timeout: context[:test_timeout] || 30000
+     }}
+  end
+
+  test "merge_defaults: nothing to do" do
+    assert @command.merge_defaults([], %{}) == {[], %{}}
+  end
+
+  test "validate: treats positional arguments as a failure" do
+    assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+  end
+
+  test "validate: treats empty positional arguments and default switches as a success" do
+    assert @command.validate([], %{}) == :ok
+  end
+
+  @tag test_timeout: 3000
+  test "run: targeting an unreachable node throws a badrpc", context do
+    assert match?(
+             {:badrpc, _},
+             @command.run([], Map.merge(context[:opts], %{node: :jake@thedog}))
+           )
+  end
+
+  test "run: when there are no policies that enable CMQ mirroring, reports success", context do
+    clear_policy("/", @policy_name)
+    assert @command.run([], context[:opts])
+  end
+
+  test "output: when the result is true, returns successfully", context do
+    assert match?({:ok, _}, @command.output(true, context[:opts]))
+  end
+
+  # this is a check command
+  test "output: when the result is false, returns an error", context do
+    assert match?({:error, _}, @command.output(false, context[:opts]))
+  end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_local_alarms_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_local_alarms_command_test.exs
index 8d9baed68b18..0cdfa3df534f 100644
--- a/deps/rabbitmq_cli/test/diagnostics/check_local_alarms_command_test.exs
+++ b/deps/rabbitmq_cli/test/diagnostics/check_local_alarms_command_test.exs
@@ -2,7 +2,7 @@
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
##
-## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.

defmodule CheckLocalAlarmsCommandTest do
  use ExUnit.Case, async: false
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_port_connectivity_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_port_connectivity_command_test.exs
index 4d2f82f0f1fb..5a83d299e81c 100644
--- a/deps/rabbitmq_cli/test/diagnostics/check_port_connectivity_command_test.exs
+++ b/deps/rabbitmq_cli/test/diagnostics/check_port_connectivity_command_test.exs
@@ -2,7 +2,7 @@
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
##
-## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
defmodule CheckPortConnectivityCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/check_port_listener_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_port_listener_command_test.exs index 2468161f8353..6f6b88ec87e7 100644 --- a/deps/rabbitmq_cli/test/diagnostics/check_port_listener_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/check_port_listener_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CheckPortListenerCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/check_protocol_listener_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_protocol_listener_command_test.exs index 5d993f6c55b3..5933e9fb7764 100644 --- a/deps/rabbitmq_cli/test/diagnostics/check_protocol_listener_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/check_protocol_listener_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CheckProtocolListenerCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/check_running_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_running_command_test.exs index fc1e2c53108a..fd1d4a52943e 100644 --- a/deps/rabbitmq_cli/test/diagnostics/check_running_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/check_running_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CheckRunningCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/check_virtual_hosts_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_virtual_hosts_command_test.exs index c7f208f5bea8..095f08562779 100644 --- a/deps/rabbitmq_cli/test/diagnostics/check_virtual_hosts_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/check_virtual_hosts_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule CheckVirtualHostsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/cipher_suites_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/cipher_suites_command_test.exs index 5c0df2fc14a5..54c6f3951d25 100644 --- a/deps/rabbitmq_cli/test/diagnostics/cipher_suites_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/cipher_suites_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CipherSuitesCommandTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/diagnostics/command_line_arguments_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/command_line_arguments_command_test.exs index 40b24c65cc49..0f6071a89b3a 100644 --- a/deps/rabbitmq_cli/test/diagnostics/command_line_arguments_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/command_line_arguments_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule CommandLineArgumentsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/consume_event_stream_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/consume_event_stream_command_test.exs index bdf690902e46..e5ad2fee4c6d 100644 --- a/deps/rabbitmq_cli/test/diagnostics/consume_event_stream_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/consume_event_stream_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ConsumeEventStreamCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/disable_auth_attempt_source_tracking_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/disable_auth_attempt_source_tracking_command_test.exs index b4668efce8f5..e3c9b185bc56 100644 --- a/deps/rabbitmq_cli/test/diagnostics/disable_auth_attempt_source_tracking_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/disable_auth_attempt_source_tracking_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule DisbleAuthAttemptSourceTrackingCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/discover_peers_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/discover_peers_command_test.exs index 06439c815300..791d07dc484f 100644 --- a/deps/rabbitmq_cli/test/diagnostics/discover_peers_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/discover_peers_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule DiscoverPeersCommandTest do use ExUnit.Case, async: false @@ -37,6 +37,7 @@ defmodule DiscoverPeersCommandTest do @tag test_timeout: 15000 test "run: returns a list of nodes when the backend isn't configured", context do - assert match?({:ok, {[], _}}, @command.run([], context[:opts])) + this_node = node() + assert match?({:ok, {[this_node], _}}, @command.run([], context[:opts])) end end diff --git a/deps/rabbitmq_cli/test/diagnostics/enable_auth_attempt_source_tracking_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/enable_auth_attempt_source_tracking_command_test.exs index 97c41428ec5b..b3a56ad63217 100644 --- a/deps/rabbitmq_cli/test/diagnostics/enable_auth_attempt_source_tracking_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/enable_auth_attempt_source_tracking_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule EnableAuthAttemptSourceTrackingCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_hash_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_hash_command_test.exs index 110f52b59c42..5db8243281ed 100644 --- a/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_hash_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_hash_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ErlangCookieHashCommandTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_sources_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_sources_command_test.exs index 6bed8b574c17..fa6b19a43708 100644 --- a/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_sources_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_sources_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ErlangCookieSourcesCommandTest do use ExUnit.Case, async: true diff --git a/deps/rabbitmq_cli/test/diagnostics/erlang_version_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/erlang_version_command_test.exs index 68f9c9bab35f..5d1e73433f0b 100644 --- a/deps/rabbitmq_cli/test/diagnostics/erlang_version_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/erlang_version_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ErlangVersionCommandTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/diagnostics/is_booting_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/is_booting_command_test.exs index 1e86a78aa8b4..ce3e64d6fc70 100644 --- a/deps/rabbitmq_cli/test/diagnostics/is_booting_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/is_booting_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule IsBootingCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/is_running_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/is_running_command_test.exs index dfc10434d023..48dffa7cc16a 100644 --- a/deps/rabbitmq_cli/test/diagnostics/is_running_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/is_running_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule IsRunningCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/list_network_interfaces_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/list_network_interfaces_command_test.exs index 87054aad3f77..ebb1395025fa 100644 --- a/deps/rabbitmq_cli/test/diagnostics/list_network_interfaces_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/list_network_interfaces_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ListNetworkInterfacesCommandTest do
  use ExUnit.Case, async: false
diff --git a/deps/rabbitmq_cli/test/diagnostics/list_node_auth_attempt_stats_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/list_node_auth_attempt_stats_command_test.exs
index bb811f50cd5b..6edf086fbe03 100644
--- a/deps/rabbitmq_cli/test/diagnostics/list_node_auth_attempt_stats_command_test.exs
+++ b/deps/rabbitmq_cli/test/diagnostics/list_node_auth_attempt_stats_command_test.exs
@@ -2,7 +2,7 @@
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
##
-## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.

defmodule ListNodeAuthAttemptStatsCommandTest do
  use ExUnit.Case, async: false
diff --git a/deps/rabbitmq_cli/test/diagnostics/list_policies_that_match_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/list_policies_that_match_command_test.exs
new file mode 100644
index 000000000000..762396ca6f05
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/list_policies_that_match_command_test.exs
@@ -0,0 +1,162 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
+
+defmodule ListPoliciesThatMatchCommandTest do
+  use ExUnit.Case, async: false
+  import TestHelper
+
+  @command RabbitMQ.CLI.Diagnostics.Commands.ListPoliciesThatMatchCommand
+
+  @vhost "test1"
+
+  setup_all do
+    RabbitMQ.CLI.Core.Distribution.start()
+
+    add_vhost(@vhost)
+
+    enable_federation_plugin()
+
+    on_exit([], fn ->
+      delete_vhost(@vhost)
+    end)
+
+    :ok
+  end
+
+  setup context do
+    on_exit(context, fn ->
+      clear_policy(context[:vhost], context[:key])
+    end)
+
+    {
+      :ok,
+      opts: %{
+        node: get_rabbit_hostname(),
+        vhost: context[:vhost],
+        object_type: context[:object_type],
+        timeout: context[:timeout] || :infinity
+      }
+    }
+  end
+
+  @tag vhost: @vhost
+  test "merge_defaults: a well-formed command with no vhost runs against the default" do
+    assert match?({_, %{vhost: "/"}}, @command.merge_defaults([], %{}))
+  end
+
+  @tag vhost: @vhost
+  test "merge_defaults: does not change defined vhost" do
+    assert match?(
+             {[], %{vhost: "test_vhost"}},
+             @command.merge_defaults([], %{vhost: "test_vhost"})
+           )
+  end
+
+  @tag vhost: @vhost
+  test "merge_defaults: default object_type is \"queue\"" do
+    assert match?({_, %{object_type: "queue"}}, @command.merge_defaults([], %{}))
+
+    assert match?(
+             {_, %{object_type: "exchange"}},
+             @command.merge_defaults([], %{object_type: "exchange"})
+           )
+  end
+
+  @tag vhost: @vhost
+  test "validate: providing too few arguments fails validation" do
+    assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+  end
+
+  @tag vhost: @vhost
+  test "validate: providing too many arguments fails validation" do
+    assert @command.validate(["too", "many"], %{}) ==
+             {:validation_failure, :too_many_args}
+  end
+
+  @tag vhost: @vhost, object_type: "queue"
+  test "run: it returns only the matching policies", context do
+    policies = [
+      %{
+        vhost: @vhost,
+        name: "matching-p1",
+        pattern: "^foo.*",
+        definition: "{\"max-length\":1}",
+        apply_to: "all",
+        priority: 1
+      },
+      %{
+        vhost: @vhost,
+        name: "non-matching-p10",
+        pattern: "^bar.*",
+        definition: "{\"max-length\":10}",
+        apply_to: "all",
+        priority: 10
+      },
+      %{
+        vhost: @vhost,
+        name: "matching-p0",
+        pattern: "^foo.*",
+        definition: "{\"max-length\":0}",
+        apply_to: "all",
+        priority: 0
+      },
+      %{
+        vhost: @vhost,
+        name: "matching-p2",
+        pattern: "^foo.*",
+        definition: "{\"max-length\":1}",
+        apply_to: "all",
+        priority: 2
+      },
+      %{
+        vhost: @vhost,
+        name: "non-matching-p20",
+        pattern: "^foo.*",
+        definition: "{\"max-length\":20}",
+        apply_to: "quorum_queues",
+        priority: 20
+      }
+    ]
+
+    policies
+    |> Enum.map(fn p ->
+      set_policy(
+        context[:vhost],
+        p[:name],
+        p[:pattern],
+        p[:definition],
+        p[:priority],
+        p[:apply_to]
+      )
+
+      on_exit(fn ->
+        clear_policy(context[:vhost], p[:name])
+      end)
+    end)
+
+    declare_queue("foo", context[:vhost])
+    result = for policy <- @command.run(["foo"], context[:opts]), do: Map.new(policy)
+
+    expected = ["matching-p2", "matching-p1", "matching-p0"]
+    assert Enum.map(result, fn map -> map.name end) == expected
+  end
+
+  @tag vhost: @vhost, object_type: "queue", vhost: @vhost
+  test "banner_queue", context do
+    opts = Map.merge(context[:opts], %{vhost: context[:vhost], object_type: "queue"})
+
+    assert @command.banner(["my_queue"], opts) ==
+             "Listing policies that match #{context[:object_type]} 'my_queue' in vhost '#{context[:vhost]}' ..."
+  end
+
+  @tag vhost: @vhost, object_type: "exchange", vhost: @vhost
+  test "banner_exchange", context do
+    opts = Map.merge(context[:opts], %{vhost: context[:vhost], object_type: "exchange"})
+
+    assert @command.banner(["my_exchange"], opts) ==
+             "Listing policies that match #{context[:object_type]} 'my_exchange' in vhost '#{context[:vhost]}' ..."
+  end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/listeners_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/listeners_command_test.exs
index 757398b4c530..1901335db017 100644
--- a/deps/rabbitmq_cli/test/diagnostics/listeners_command_test.exs
+++ b/deps/rabbitmq_cli/test/diagnostics/listeners_command_test.exs
@@ -2,7 +2,7 @@
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
##
-## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.

defmodule ListenersCommandTest do
  use ExUnit.Case, async: false
diff --git a/deps/rabbitmq_cli/test/diagnostics/log_location_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/log_location_command_test.exs
index 3d02816d0709..f87d8346ca01 100644
--- a/deps/rabbitmq_cli/test/diagnostics/log_location_command_test.exs
+++ b/deps/rabbitmq_cli/test/diagnostics/log_location_command_test.exs
@@ -2,7 +2,7 @@
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at https://mozilla.org/MPL/2.0/.
##
-## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved.
+## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved.
defmodule LogLocationCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/log_tail_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/log_tail_command_test.exs index 95961e533269..e5e9c27fb62c 100644 --- a/deps/rabbitmq_cli/test/diagnostics/log_tail_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/log_tail_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule LogTailCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/log_tail_stream_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/log_tail_stream_command_test.exs index 70eca1851313..baeca8325d6f 100644 --- a/deps/rabbitmq_cli/test/diagnostics/log_tail_stream_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/log_tail_stream_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule LogTailStreamCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/maybe_stuck_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/maybe_stuck_command_test.exs index b9bd1d36269e..db4b0075e8a4 100644 --- a/deps/rabbitmq_cli/test/diagnostics/maybe_stuck_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/maybe_stuck_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule MaybeStuckCommandTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/diagnostics/memory_breakdown_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/memory_breakdown_command_test.exs index 0027635e5c66..471277229d0f 100644 --- a/deps/rabbitmq_cli/test/diagnostics/memory_breakdown_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/memory_breakdown_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule MemoryBreakdownCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/observer_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/observer_command_test.exs index ac5f78a0703c..da49e06f2592 100644 --- a/deps/rabbitmq_cli/test/diagnostics/observer_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/observer_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ObserverCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/os_env_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/os_env_command_test.exs index de51e37ce7e6..886f862f411d 100644 --- a/deps/rabbitmq_cli/test/diagnostics/os_env_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/os_env_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule OsEnvCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/remote_shell_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/remote_shell_command_test.exs index 4ebe63f1f262..d26452f324ca 100644 --- a/deps/rabbitmq_cli/test/diagnostics/remote_shell_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/remote_shell_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RemoteShellCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/resolve_hostname_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/resolve_hostname_command_test.exs index b8d3ed37a1f8..60ae8320b854 100644 --- a/deps/rabbitmq_cli/test/diagnostics/resolve_hostname_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/resolve_hostname_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ResolveHostnameCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/resolver_info_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/resolver_info_command_test.exs index cc581b5a0f42..0b0458177939 100644 --- a/deps/rabbitmq_cli/test/diagnostics/resolver_info_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/resolver_info_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule ResolverInfoCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/diagnostics/runtime_thread_stats_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/runtime_thread_stats_command_test.exs index 924ea9cfbdb2..059b08c1ecd1 100644 --- a/deps/rabbitmq_cli/test/diagnostics/runtime_thread_stats_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/runtime_thread_stats_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RuntimeThreadStatsCommandTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/diagnostics/server_version_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/server_version_command_test.exs index 2d8552a08166..6647034bd4be 100644 --- a/deps/rabbitmq_cli/test/diagnostics/server_version_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/server_version_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ServerVersionCommandTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/diagnostics/tls_versions_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/tls_versions_command_test.exs index 4b23d2270964..d9bedd61edd4 100644 --- a/deps/rabbitmq_cli/test/diagnostics/tls_versions_command_test.exs +++ b/deps/rabbitmq_cli/test/diagnostics/tls_versions_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule TlsVersionsCommandTest do use ExUnit.Case diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app index 3531fc40da97..94f286b72257 100644 --- a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app +++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app @@ -6,5 +6,5 @@ {applications, [kernel,stdlib,rabbit]}, {mod, {mock_rabbitmq_plugins_01_app, []}}, {env, []}, - {broker_version_requirements, ["3.9.0", "3.10.0", "3.11.0", "3.12.0", "3.13.0"]} + {broker_version_requirements, ["3.9.0", "3.10.0", "3.11.0", "3.12.0", "3.13.0", "4.0.0"]} ]}. 
diff --git a/deps/rabbitmq_cli/test/json_formatting.exs b/deps/rabbitmq_cli/test/json_formatting_test.exs similarity index 91% rename from deps/rabbitmq_cli/test/json_formatting.exs rename to deps/rabbitmq_cli/test/json_formatting_test.exs index 70cb6a5a04b9..8fdc616d8ba4 100644 --- a/deps/rabbitmq_cli/test/json_formatting.exs +++ b/deps/rabbitmq_cli/test/json_formatting_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule JSONFormattingTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/plugins/directories_command_test.exs b/deps/rabbitmq_cli/test/plugins/directories_command_test.exs index 47638a6cb965..e786124a0fc5 100644 --- a/deps/rabbitmq_cli/test/plugins/directories_command_test.exs +++ b/deps/rabbitmq_cli/test/plugins/directories_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule DirectoriesCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/plugins/disable_plugins_command_test.exs b/deps/rabbitmq_cli/test/plugins/disable_plugins_command_test.exs index b3c174412e1a..f8b5ef5a644b 100644 --- a/deps/rabbitmq_cli/test/plugins/disable_plugins_command_test.exs +++ b/deps/rabbitmq_cli/test/plugins/disable_plugins_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule DisablePluginsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/plugins/enable_plugins_command_test.exs b/deps/rabbitmq_cli/test/plugins/enable_plugins_command_test.exs index da5a4cc2ef14..424a9ade1aad 100644 --- a/deps/rabbitmq_cli/test/plugins/enable_plugins_command_test.exs +++ b/deps/rabbitmq_cli/test/plugins/enable_plugins_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule EnablePluginsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/plugins/is_enabled_command_test.exs b/deps/rabbitmq_cli/test/plugins/is_enabled_command_test.exs index 83426dbf281d..3fdc83350735 100644 --- a/deps/rabbitmq_cli/test/plugins/is_enabled_command_test.exs +++ b/deps/rabbitmq_cli/test/plugins/is_enabled_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule PluginIsEnabledCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/plugins/list_plugins_command_test.exs b/deps/rabbitmq_cli/test/plugins/list_plugins_command_test.exs index 2f804de090dc..4bd6fb764c80 100644 --- a/deps/rabbitmq_cli/test/plugins/list_plugins_command_test.exs +++ b/deps/rabbitmq_cli/test/plugins/list_plugins_command_test.exs @@ -2,11 +2,12 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ListPluginsCommandTest do use ExUnit.Case, async: false import TestHelper + import ExUnit.CaptureIO @command RabbitMQ.CLI.Plugins.Commands.ListCommand @@ -38,7 +39,7 @@ defmodule ListPluginsCommandTest do {:ok, [enabled_plugins]} = :file.consult(plugins_file) IO.puts( - "plugins list tests will assume tnat #{Enum.join(enabled_plugins, ",")} is the list of enabled plugins to revert to" + "plugins list tests will assume that #{Enum.join(enabled_plugins, ",")} is the list of enabled plugins to revert to" ) opts = %{ @@ -444,4 +445,126 @@ defmodule ListPluginsCommandTest do assert_plugin_states(actual_plugins2, expected_plugins2) end + + test "run: lists all plugins with missing plugins warning control using --silent", context do + opts = + context[:opts] + |> Map.put_new(:silent, true) + |> Map.put_new(:hard_write, true) + + context = Map.replace(context, :opts, opts) + + missing_plugin = :rabbitmq_non_existent + + reset_enabled_plugins_to_preconfigured_defaults(context) + + set_enabled_plugins( + [:rabbitmq_federation, missing_plugin], + :online, + context[:opts][:node], + context[:opts] + ) + + on_exit(fn -> + opts = + context[:opts] + |> Map.delete(:silent) + |> Map.delete(:hard_write) + + context = Map.replace(context, :opts, opts) + + set_enabled_plugins( + [:rabbitmq_stomp, :rabbitmq_federation], + :online, + context[:opts][:node], + context[:opts] + ) + end) + + expected_plugins = [ + %{name: :rabbitmq_federation, enabled: :enabled, running: true}, + %{name: :rabbitmq_stomp, enabled: :not_enabled, running: false} + ] + + opts = context[:opts] + + assert capture_io(fn -> + %{ + plugins: actual_plugins + } = @command.run([".*"], opts) + + assert_plugin_states(actual_plugins, expected_plugins) + end) =~ ~s// + + opts = Map.replace(opts, :silent, false) + + assert capture_io(fn -> + %{ + plugins: actual_plugins + } = @command.run([".*"], opts) + + assert_plugin_states(actual_plugins, expected_plugins) + end) =~ ~s/WARNING - plugins currently enabled but missing: #{missing_plugin}\n/ + end + + test "run: lists all plugins with missing plugins warning control using --quiet", context do + opts = + context[:opts] + |> Map.put_new(:quiet, true) + |> Map.put_new(:hard_write, true) + + context = Map.replace(context, :opts, opts) + + missing_plugin = :rabbitmq_non_existent + + reset_enabled_plugins_to_preconfigured_defaults(context) + + set_enabled_plugins( + [:rabbitmq_federation, missing_plugin], + 
:online, + context[:opts][:node], + context[:opts] + ) + + on_exit(fn -> + opts = + context[:opts] + |> Map.delete(:quiet) + |> Map.delete(:hard_write) + + context = Map.replace(context, :opts, opts) + + set_enabled_plugins( + [:rabbitmq_stomp, :rabbitmq_federation], + :online, + context[:opts][:node], + context[:opts] + ) + end) + + expected_plugins = [ + %{name: :rabbitmq_federation, enabled: :enabled, running: true}, + %{name: :rabbitmq_stomp, enabled: :not_enabled, running: false} + ] + + opts = context[:opts] + + assert capture_io(fn -> + %{ + plugins: actual_plugins + } = @command.run([".*"], opts) + + assert_plugin_states(actual_plugins, expected_plugins) + end) =~ ~s// + + opts = Map.replace(opts, :quiet, false) + + assert capture_io(fn -> + %{ + plugins: actual_plugins + } = @command.run([".*"], opts) + + assert_plugin_states(actual_plugins, expected_plugins) + end) =~ ~s/WARNING - plugins currently enabled but missing: #{missing_plugin}\n/ + end end diff --git a/deps/rabbitmq_cli/test/plugins/plugins_formatter_test.exs b/deps/rabbitmq_cli/test/plugins/plugins_formatter_test.exs index 937506646051..eda17b519905 100644 --- a/deps/rabbitmq_cli/test/plugins/plugins_formatter_test.exs +++ b/deps/rabbitmq_cli/test/plugins/plugins_formatter_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule PluginsFormatterTest do use ExUnit.Case, async: false @@ -76,7 +76,7 @@ defmodule PluginsFormatterTest do enabled: :implicit, running: true, version: ~c"3.7.0", - running_version: nil + running_version: "" }, %{ name: :mock_rabbitmq_plugins_01, diff --git a/deps/rabbitmq_cli/test/plugins/set_plugins_command_test.exs b/deps/rabbitmq_cli/test/plugins/set_plugins_command_test.exs index c6513b561e56..e25af5c1f584 100644 --- a/deps/rabbitmq_cli/test/plugins/set_plugins_command_test.exs +++ b/deps/rabbitmq_cli/test/plugins/set_plugins_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule SetPluginsCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/queues/add_member_command_test.exs b/deps/rabbitmq_cli/test/queues/add_member_command_test.exs index 90a9d0b8a7c2..e96dcde2f42d 100644 --- a/deps/rabbitmq_cli/test/queues/add_member_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/add_member_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Queues.Commands.AddMemberCommandTest do use ExUnit.Case, async: false @@ -20,6 +20,8 @@ defmodule RabbitMQ.CLI.Queues.Commands.AddMemberCommandTest do {:ok, opts: %{ node: get_rabbit_hostname(), + membership: "voter", + vhost: "/", timeout: context[:test_timeout] || 30000 }} end @@ -42,17 +44,36 @@ defmodule RabbitMQ.CLI.Queues.Commands.AddMemberCommandTest do ) == {:validation_failure, :too_many_args} end + test "validate: when membership promotable is provided, returns a success" do + assert @command.validate(["quorum-queue-a", "rabbit@new-node"], %{membership: "promotable"}) == + :ok + end + + test "validate: when membership voter is provided, returns a success" do + assert @command.validate(["quorum-queue-a", "rabbit@new-node"], %{membership: "voter"}) == :ok + end + + test "validate: when membership non_voter is provided, returns a success" do + assert @command.validate(["quorum-queue-a", "rabbit@new-node"], %{membership: "non_voter"}) == + :ok + end + + test "validate: when wrong membership is provided, returns failure" do + assert @command.validate(["quorum-queue-a", "rabbit@new-node"], %{membership: "banana"}) == + {:validation_failure, "voter status 'banana' is not recognised."} + end + test "validate: treats two positional arguments and default switches as a success" do assert @command.validate(["quorum-queue-a", "rabbit@new-node"], %{}) == :ok end @tag test_timeout: 3000 - test "run: targeting an unreachable node throws a badrpc" do + test "run: targeting an unreachable node throws a badrpc", context do assert match?( {:badrpc, _}, @command.run( ["quorum-queue-a", "rabbit@new-node"], - %{node: :jake@thedog, vhost: "/", timeout: 200} + Map.merge(context[:opts], %{node: :jake@thedog}) ) ) end diff --git a/deps/rabbitmq_cli/test/queues/check_if_node_is_mirror_sync_critical_command_test.exs b/deps/rabbitmq_cli/test/queues/check_if_new_quorum_queue_replicas_have_finished_initial_sync_test.exs similarity index 76% rename from deps/rabbitmq_cli/test/queues/check_if_node_is_mirror_sync_critical_command_test.exs rename to deps/rabbitmq_cli/test/queues/check_if_new_quorum_queue_replicas_have_finished_initial_sync_test.exs index d75017a5d45c..7ca352c521d3 100644 --- a/deps/rabbitmq_cli/test/queues/check_if_node_is_mirror_sync_critical_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/check_if_new_quorum_queue_replicas_have_finished_initial_sync_test.exs @@ -2,13 +2,13 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
-defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommandTest do +defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNewQuorumQueueReplicasHaveFinishedInitialSyncCommandTest do use ExUnit.Case, async: false import TestHelper - @command RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommand + @command RabbitMQ.CLI.Queues.Commands.CheckIfNewQuorumQueueReplicasHaveFinishedInitialSyncCommand setup_all do RabbitMQ.CLI.Core.Distribution.start() diff --git a/deps/rabbitmq_cli/test/queues/check_if_node_is_quorum_critical_command_test.exs b/deps/rabbitmq_cli/test/queues/check_if_node_is_quorum_critical_command_test.exs index 519ef400a152..2abe4b466542 100644 --- a/deps/rabbitmq_cli/test/queues/check_if_node_is_quorum_critical_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/check_if_node_is_quorum_critical_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsQuorumCriticalCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/queues/delete_member_command_test.exs b/deps/rabbitmq_cli/test/queues/delete_member_command_test.exs index 1c1d7cb6eed7..efc1968eebb1 100644 --- a/deps/rabbitmq_cli/test/queues/delete_member_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/delete_member_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.DeleteMemberCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/queues/grow_command_test.exs b/deps/rabbitmq_cli/test/queues/grow_command_test.exs index fbf35d536b6b..807d41a90581 100644 --- a/deps/rabbitmq_cli/test/queues/grow_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/grow_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Queues.Commands.GrowCommandTest do use ExUnit.Case, async: false @@ -23,13 +23,20 @@ defmodule RabbitMQ.CLI.Queues.Commands.GrowCommandTest do timeout: context[:test_timeout] || 30000, vhost_pattern: ".*", queue_pattern: ".*", + membership: "promotable", errors_only: false }} end test "merge_defaults: defaults to reporting complete results" do assert @command.merge_defaults([], %{}) == - {[], %{vhost_pattern: ".*", queue_pattern: ".*", errors_only: false}} + {[], + %{ + vhost_pattern: ".*", + queue_pattern: ".*", + errors_only: false, + membership: "promotable" + }} end test "validate: when no arguments are provided, returns a failure" do @@ -58,13 +65,30 @@ defmodule RabbitMQ.CLI.Queues.Commands.GrowCommandTest do {:validation_failure, :too_many_args} end + test "validate: when membership promotable is provided, returns a success" do + assert @command.validate(["quorum-queue-a", "all"], %{membership: "promotable"}) == :ok + end + + test "validate: when membership voter is provided, returns a success" do + assert @command.validate(["quorum-queue-a", "all"], %{membership: "voter"}) == :ok + end + + test "validate: when membership non_voter is provided, returns a success" do + assert @command.validate(["quorum-queue-a", "all"], %{membership: "non_voter"}) == :ok + end + + test "validate: when wrong membership is provided, returns failure" do + assert @command.validate(["quorum-queue-a", "all"], %{membership: "banana"}) == + {:validation_failure, "voter status 'banana' is not recognised."} + end + @tag test_timeout: 3000 test "run: targeting an unreachable node throws a badrpc", context do assert match?( {:badrpc, _}, @command.run( ["quorum-queue-a", "all"], - Map.merge(context[:opts], %{node: :jake@thedog, timeout: 200}) + Map.merge(context[:opts], %{node: :jake@thedog}) ) ) end diff --git a/deps/rabbitmq_cli/test/queues/list_operator_policies_with_classic_queue_mirroring_command_test.exs b/deps/rabbitmq_cli/test/queues/list_operator_policies_with_classic_queue_mirroring_command_test.exs new file mode 100644 index 000000000000..4886d2a327a5 --- /dev/null +++ b/deps/rabbitmq_cli/test/queues/list_operator_policies_with_classic_queue_mirroring_command_test.exs @@ -0,0 +1,53 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
+ +defmodule RabbitMQ.CLI.Queues.Commands.ListOperatorPoliciesWithClassicQueueMirroringCommandTest do + use ExUnit.Case, async: false + import TestHelper + + @command RabbitMQ.CLI.Queues.Commands.ListOperatorPoliciesWithClassicQueueMirroringCommand + + setup_all do + RabbitMQ.CLI.Core.Distribution.start() + + :ok + end + + setup context do + {:ok, + opts: %{ + node: get_rabbit_hostname(), + timeout: context[:test_timeout] || 30000 + }} + end + + test "validate: treats no arguments as a success" do + assert @command.validate([], %{}) == :ok + end + + test "validate: accepts no positional arguments" do + assert @command.validate(["arg"], %{}) == {:validation_failure, :too_many_args} + end + + test "validate: when two or more arguments are provided, returns a failure" do + assert @command.validate(["arg1", "arg2"], %{}) == + {:validation_failure, :too_many_args} + + assert @command.validate(["arg1", "arg2", "arg3"], %{}) == + {:validation_failure, :too_many_args} + end + + @tag test_timeout: 3000 + test "run: targeting an unreachable node throws a badrpc" do + assert match?( + {:badrpc, _}, + @command.run( + [], + %{node: :jake@thedog, vhost: "/", timeout: 200} + ) + ) + end +end diff --git a/deps/rabbitmq_cli/test/queues/list_policies_with_classic_queue_mirroring_command_test.exs b/deps/rabbitmq_cli/test/queues/list_policies_with_classic_queue_mirroring_command_test.exs new file mode 100644 index 000000000000..667a2cf0c338 --- /dev/null +++ b/deps/rabbitmq_cli/test/queues/list_policies_with_classic_queue_mirroring_command_test.exs @@ -0,0 +1,53 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule RabbitMQ.CLI.Queues.Commands.ListPoliciesWithClassicQueueMirroringCommandTest do + use ExUnit.Case, async: false + import TestHelper + + @command RabbitMQ.CLI.Queues.Commands.ListPoliciesWithClassicQueueMirroringCommand + + setup_all do + RabbitMQ.CLI.Core.Distribution.start() + + :ok + end + + setup context do + {:ok, + opts: %{ + node: get_rabbit_hostname(), + timeout: context[:test_timeout] || 30000 + }} + end + + test "validate: treats no arguments as a success" do + assert @command.validate([], %{}) == :ok + end + + test "validate: accepts no positional arguments" do + assert @command.validate(["arg"], %{}) == {:validation_failure, :too_many_args} + end + + test "validate: when two or more arguments are provided, returns a failure" do + assert @command.validate(["arg1", "arg2"], %{}) == + {:validation_failure, :too_many_args} + + assert @command.validate(["arg1", "arg2", "arg3"], %{}) == + {:validation_failure, :too_many_args} + end + + @tag test_timeout: 3000 + test "run: targeting an unreachable node throws a badrpc" do + assert match?( + {:badrpc, _}, + @command.run( + [], + %{node: :jake@thedog, vhost: "/", timeout: 200} + ) + ) + end +end diff --git a/deps/rabbitmq_cli/test/queues/peek_command_test.exs b/deps/rabbitmq_cli/test/queues/peek_command_test.exs index acdf16e4075c..c82a7ed7325e 100644 --- a/deps/rabbitmq_cli/test/queues/peek_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/peek_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. 
## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.PeekCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/queues/quorum_status_command_test.exs b/deps/rabbitmq_cli/test/queues/quorum_status_command_test.exs index 78efea441653..53fbfe36b3ed 100644 --- a/deps/rabbitmq_cli/test/queues/quorum_status_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/quorum_status_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.QuorumStatusCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/queues/reclaim_quorum_memory_command_test.exs b/deps/rabbitmq_cli/test/queues/reclaim_quorum_memory_command_test.exs index 32f22b055119..964e2d190cfe 100644 --- a/deps/rabbitmq_cli/test/queues/reclaim_quorum_memory_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/reclaim_quorum_memory_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.ReclaimQuorumMemoryCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/queues/shrink_command_test.exs b/deps/rabbitmq_cli/test/queues/shrink_command_test.exs index 9c853fd0e56b..fac60634fe74 100644 --- a/deps/rabbitmq_cli/test/queues/shrink_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/shrink_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQ.CLI.Queues.Commands.ShrinkCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/queues/stream_status_command_test.exs b/deps/rabbitmq_cli/test/queues/stream_status_command_test.exs index 7b8453c2a80a..2a372bfec383 100644 --- a/deps/rabbitmq_cli/test/queues/stream_status_command_test.exs +++ b/deps/rabbitmq_cli/test/queues/stream_status_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule RabbitMQ.CLI.Streams.Commands.StreamStatusCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/rabbitmqctl_test.exs b/deps/rabbitmq_cli/test/rabbitmqctl_test.exs index 411fc154da8b..52ed2e10b2a3 100644 --- a/deps/rabbitmq_cli/test/rabbitmqctl_test.exs +++ b/deps/rabbitmq_cli/test/rabbitmqctl_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule RabbitMQCtlTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/streams/add_replica_command_test.exs b/deps/rabbitmq_cli/test/streams/add_replica_command_test.exs index cdba4a07d76a..d2213f5d18bf 100644 --- a/deps/rabbitmq_cli/test/streams/add_replica_command_test.exs +++ b/deps/rabbitmq_cli/test/streams/add_replica_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## defmodule RabbitMQ.CLI.Streams.Commands.AddReplicaCommandTest do diff --git a/deps/rabbitmq_cli/test/streams/delete_replica_command_test.exs b/deps/rabbitmq_cli/test/streams/delete_replica_command_test.exs index 83bf7d636db5..396c6ad4b239 100644 --- a/deps/rabbitmq_cli/test/streams/delete_replica_command_test.exs +++ b/deps/rabbitmq_cli/test/streams/delete_replica_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## defmodule RabbitMQ.CLI.Streams.Commands.DeleteReplicaCommandTest do diff --git a/deps/rabbitmq_cli/test/streams/set_stream_retention_policy_command_test.exs b/deps/rabbitmq_cli/test/streams/set_stream_retention_policy_command_test.exs index 6a9a47f08964..4d492a5984a6 100644 --- a/deps/rabbitmq_cli/test/streams/set_stream_retention_policy_command_test.exs +++ b/deps/rabbitmq_cli/test/streams/set_stream_retention_policy_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## defmodule RabbitMQ.CLI.Streams.Commands.SetStreamRetentionPolicyCommandTest do diff --git a/deps/rabbitmq_cli/test/test_helper.exs b/deps/rabbitmq_cli/test/test_helper.exs index 7573bd81b9aa..13e5503f6bfd 100644 --- a/deps/rabbitmq_cli/test/test_helper.exs +++ b/deps/rabbitmq_cli/test/test_helper.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. 
## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ten_minutes = 10 * 60 * 1000 @@ -12,7 +12,14 @@ ExUnit.configure( timeout: ten_minutes ) -ExUnit.start() +if System.get_env("BAZEL_TEST") == "1" do + ExUnit.configure(seed: 0) + :application.ensure_all_started(:mix) + :application.ensure_all_started(:rabbitmqctl) + ExUnit.start(trace: true) +else + ExUnit.start() +end # Elixir 1.15 compiler optimizations seem to require that we explicitly add to the code path true = Code.append_path(Path.join([System.get_env("DEPS_DIR"), "rabbit_common", "ebin"])) @@ -215,8 +222,8 @@ defmodule TestHelper do :rpc.call(get_rabbit_hostname(), :rabbit_policy, :list_formatted, [vhost]) end - def set_policy(vhost, name, pattern, value) do - {:ok, decoded} = :rabbit_json.try_decode(value) + def set_policy(vhost, name, pattern, definition) do + {:ok, decoded} = :rabbit_json.try_decode(definition) parsed = :maps.to_list(decoded) :ok = @@ -231,6 +238,22 @@ defmodule TestHelper do ]) end + def set_policy(vhost, name, pattern, definition, priority, apply_to) do + {:ok, decoded} = :rabbit_json.try_decode(definition) + parsed = :maps.to_list(decoded) + + :ok = + :rpc.call(get_rabbit_hostname(), :rabbit_policy, :set, [ + vhost, + name, + pattern, + parsed, + priority, + apply_to, + "acting-user" + ]) + end + def clear_policy(vhost, key) do :rpc.call(get_rabbit_hostname(), :rabbit_policy, :delete, [vhost, key, "acting-user"]) end @@ -526,7 +549,7 @@ defmodule TestHelper do end def await_no_client_connections_with_iterations(node, n) when n > 0 do - case :rpc.call(node, :rabbit_networking, :connections_local, []) do + case :rpc.call(node, :rabbit_networking, :local_connections, []) do [] -> :ok @@ -545,13 +568,13 @@ defmodule TestHelper do end def close_all_connections(node) do - # we intentionally use connections_local/0 here because connections/0, + # we intentionally use local_connections/0 here because connections/0, # the cluster-wide version, loads some bits around cluster membership # that are not normally ready with a single node. MK. # # when/if we decide to test # this project against a cluster of nodes this will need revisiting. MK. - for pid <- :rpc.call(node, :rabbit_networking, :connections_local, []) do + for pid <- :rpc.call(node, :rabbit_networking, :local_connections, []) do :rpc.call(node, :rabbit_networking, :close_connection, [pid, :force_closed]) end diff --git a/deps/rabbitmq_cli/test/upgrade/await_online_quorum_plus_one_command_test.exs b/deps/rabbitmq_cli/test/upgrade/await_online_quorum_plus_one_command_test.exs index 8879f73e71b9..62b163bda1aa 100644 --- a/deps/rabbitmq_cli/test/upgrade/await_online_quorum_plus_one_command_test.exs +++ b/deps/rabbitmq_cli/test/upgrade/await_online_quorum_plus_one_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
defmodule AwaitOnlineQuorumPlusOneCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/upgrade/await_online_synchronized_mirror_command_test.exs b/deps/rabbitmq_cli/test/upgrade/await_online_synchronized_mirror_command_test.exs deleted file mode 100644 index c5c084c00171..000000000000 --- a/deps/rabbitmq_cli/test/upgrade/await_online_synchronized_mirror_command_test.exs +++ /dev/null @@ -1,44 +0,0 @@ -## This Source Code Form is subject to the terms of the Mozilla Public -## License, v. 2.0. If a copy of the MPL was not distributed with this -## file, You can obtain one at https://mozilla.org/MPL/2.0/. -## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. - -defmodule AwaitOnlineSynchronizedMirrorsCommandTest do - use ExUnit.Case, async: false - import TestHelper - - @command RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineSynchronizedMirrorCommand - - setup_all do - RabbitMQ.CLI.Core.Distribution.start() - - :ok - end - - setup context do - {:ok, - opts: %{ - node: get_rabbit_hostname(), - timeout: context[:test_timeout] || 5000 - }} - end - - test "merge_defaults: overrides a timeout" do - assert @command.merge_defaults([], %{}) == {[], %{timeout: 120_000}} - end - - test "validate: accepts no positional arguments" do - assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args} - end - - test "validate: succeeds with no positional arguments" do - assert @command.validate([], %{}) == :ok - end - - @tag test_timeout: 3000 - test "run: targeting an unreachable node throws a badrpc", context do - opts = %{node: :jake@thedog, timeout: 200} - assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], opts))) - end -end diff --git a/deps/rabbitmq_cli/test/upgrade/drain_command_test.exs b/deps/rabbitmq_cli/test/upgrade/drain_command_test.exs index 59a7501ea5bd..49955228543a 100644 --- a/deps/rabbitmq_cli/test/upgrade/drain_command_test.exs +++ b/deps/rabbitmq_cli/test/upgrade/drain_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule DrainCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/upgrade/post_upgrade_command_test.exs b/deps/rabbitmq_cli/test/upgrade/post_upgrade_command_test.exs index 87b06763182c..594fca53adef 100644 --- a/deps/rabbitmq_cli/test/upgrade/post_upgrade_command_test.exs +++ b/deps/rabbitmq_cli/test/upgrade/post_upgrade_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule PostUpgradeCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_cli/test/upgrade/revive_command_test.exs b/deps/rabbitmq_cli/test/upgrade/revive_command_test.exs index 6bc343a7a322..8d34a9512e24 100644 --- a/deps/rabbitmq_cli/test/upgrade/revive_command_test.exs +++ b/deps/rabbitmq_cli/test/upgrade/revive_command_test.exs @@ -2,7 +2,7 @@ ## License, v. 2.0. 
If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. defmodule ReviveCommandTest do use ExUnit.Case, async: false diff --git a/deps/rabbitmq_codegen/.gitignore b/deps/rabbitmq_codegen/.gitignore index 7ced2f9af7c3..7a24000ff668 100644 --- a/deps/rabbitmq_codegen/.gitignore +++ b/deps/rabbitmq_codegen/.gitignore @@ -1,11 +1,5 @@ -*~ -.sw? -.*.sw? -*.beam *.pyc erl_crash.dump /build/ -/cover/ /dist/ -/ebin/ /tmp/ diff --git a/deps/rabbitmq_codegen/amqp-rabbitmq-0.8.json b/deps/rabbitmq_codegen/amqp-rabbitmq-0.8.json index c0b30a5d448e..2e654b066540 100644 --- a/deps/rabbitmq_codegen/amqp-rabbitmq-0.8.json +++ b/deps/rabbitmq_codegen/amqp-rabbitmq-0.8.json @@ -4,7 +4,7 @@ "minor-version": 0, "port": 5672, "copyright": [ - "Copyright (C) 2008-2020 VMware, Inc. or its affiliates.\n", + "Copyright (C) 2007-2024 Broadcom Inc. and its subsidiaries. All rights reserved.\n", "\n", "Permission is hereby granted, free of charge, to any person\n", "obtaining a copy of this file (the \"Software\"), to deal in the\n", diff --git a/deps/rabbitmq_codegen/amqp-rabbitmq-0.9.1.json b/deps/rabbitmq_codegen/amqp-rabbitmq-0.9.1.json index 43f3cf64cd55..a757c57703ef 100644 --- a/deps/rabbitmq_codegen/amqp-rabbitmq-0.9.1.json +++ b/deps/rabbitmq_codegen/amqp-rabbitmq-0.9.1.json @@ -5,7 +5,7 @@ "revision": 1, "port": 5672, "copyright": [ - "Copyright (C) 2008-2020 VMware, Inc. or its affiliates.\n", + "Copyright (C) 2007-2024 Broadcom Inc. and its subsidiaries. All rights reserved.\n", "\n", "Permission is hereby granted, free of charge, to any person\n", "obtaining a copy of this file (the \"Software\"), to deal in the\n", diff --git a/deps/rabbitmq_codegen/amqp_codegen.py b/deps/rabbitmq_codegen/amqp_codegen.py index 39fb82523946..d2d48d14f46d 100644 --- a/deps/rabbitmq_codegen/amqp_codegen.py +++ b/deps/rabbitmq_codegen/amqp_codegen.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## from __future__ import nested_scopes, print_function diff --git a/deps/rabbitmq_codegen/credit_extension.json b/deps/rabbitmq_codegen/credit_extension.json index 0fedebb0e25b..e5aad8ec411c 100644 --- a/deps/rabbitmq_codegen/credit_extension.json +++ b/deps/rabbitmq_codegen/credit_extension.json @@ -9,7 +9,7 @@ "hence you are strongly discouraged from building clients ", "which use it."], "copyright": [ - "Copyright (C) 2008-2020 VMware, Inc. or its affiliates.\n", + "Copyright (C) 2007-2024 Broadcom Inc. and its subsidiaries. 
All rights reserved.\n", "\n", "Permission is hereby granted, free of charge, to any person\n", "obtaining a copy of this file (the \"Software\"), to deal in the\n", diff --git a/deps/rabbitmq_codegen/demo_extension.json b/deps/rabbitmq_codegen/demo_extension.json index 8eff39474eeb..86b4e2968d9f 100644 --- a/deps/rabbitmq_codegen/demo_extension.json +++ b/deps/rabbitmq_codegen/demo_extension.json @@ -2,7 +2,7 @@ "extension": { "name": "demo", "version": "1.0", - "copyright": "Copyright (C) 2009-2020 VMware, Inc. or its affiliates." + "copyright": "Copyright (c) 2007-2024 Broadcom Inc. and its subsidiaries. All rights reserved." }, "domains": [ ["foo-domain", "shortstr"] diff --git a/deps/rabbitmq_codegen/license_info b/deps/rabbitmq_codegen/license_info index b64ea5bec933..cbb8425d3a0f 100644 --- a/deps/rabbitmq_codegen/license_info +++ b/deps/rabbitmq_codegen/license_info @@ -1,4 +1,4 @@ The files amqp-rabbitmq-0.8.json and amqp-rabbitmq-0.9.1.json are -"Copyright (C) 2008-2020 VMware, Inc. or its affiliates. and are covered by the MIT +"Copyright (c) 2007-2024 Broadcom Inc. and its subsidiaries. All rights reserved. and are covered by the MIT license. diff --git a/deps/rabbitmq_consistent_hash_exchange/.gitignore b/deps/rabbitmq_consistent_hash_exchange/.gitignore index f39007c3cb62..e5aeb813b32c 100644 --- a/deps/rabbitmq_consistent_hash_exchange/.gitignore +++ b/deps/rabbitmq_consistent_hash_exchange/.gitignore @@ -1,19 +1 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ /debug/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr - -rabbitmq_consistent_hash_exchange.d diff --git a/deps/rabbitmq_consistent_hash_exchange/BUILD.bazel b/deps/rabbitmq_consistent_hash_exchange/BUILD.bazel index 933db1f24920..182b31c0656f 100644 --- a/deps/rabbitmq_consistent_hash_exchange/BUILD.bazel +++ b/deps/rabbitmq_consistent_hash_exchange/BUILD.bazel @@ -24,8 +24,6 @@ APP_DESCRIPTION = "Consistent Hash Exchange Type" all_beam_files(name = "all_beam_files") -all_test_beam_files(name = "all_test_beam_files") - all_srcs(name = "all_srcs") test_suite_beam_files(name = "test_suite_beam_files") @@ -43,6 +41,8 @@ rabbitmq_app( deps = [ "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", ], ) @@ -61,7 +61,7 @@ plt( ], for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) @@ -82,7 +82,9 @@ broker_for_integration_suites() rabbitmq_integration_suite( name = "rabbit_exchange_type_consistent_hash_SUITE", - shard_count = 3, + runtime_deps = [ + "//deps/rabbitmq_amqp_client:erlang_app", + ], ) assert_suites() @@ -92,3 +94,5 @@ alias( actual = ":erlang_app", visibility = ["//visibility:public"], ) + +all_test_beam_files(name = "all_test_beam_files") diff --git a/deps/rabbitmq_consistent_hash_exchange/Makefile b/deps/rabbitmq_consistent_hash_exchange/Makefile index 4d02f402ccb5..9dbafcaaa69b 100644 --- a/deps/rabbitmq_consistent_hash_exchange/Makefile +++ b/deps/rabbitmq_consistent_hash_exchange/Makefile @@ -5,8 +5,10 @@ define PROJECT_APP_EXTRA_KEYS {broker_version_requirements, []} endef -DEPS = rabbit_common rabbit -TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client +DEPS = rabbit_common rabbit khepri khepri_mnesia_migration +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client 
rabbitmq_amqp_client + +PLT_APPS += mnesia rabbitmqctl DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_consistent_hash_exchange/README.md b/deps/rabbitmq_consistent_hash_exchange/README.md index f1a377e5374c..7bc3208df736 100644 --- a/deps/rabbitmq_consistent_hash_exchange/README.md +++ b/deps/rabbitmq_consistent_hash_exchange/README.md @@ -84,7 +84,7 @@ ring partitions, and thus queues according to their binding weights. #### One Binding Per Queue This exchange type **assumes a single binding between a queue and an exchange**. -Starting with RabbitMQ `3.10.6` and `3.9.21` this will be enforced in the code: +This will be enforced in the code: when multiple bindings are created, only the first one will actually update the ring. This limitation makes most semantic sense: the purpose is to achieve @@ -376,7 +376,7 @@ exchange to route based on a named header instead. To do this, declare the exchange with a string argument called "hash-header" naming the header to be used. -When a `"hash-header"` is specified, the chosen header **must be provided**. +When a `"hash-header"` is specified, the chosen header should be provided. If published messages do not contain the header, they will all get routed to the same **arbitrarily chosen** queue. @@ -573,12 +573,13 @@ ok. ### Routing on a Message Property -In addition to a value in the header property, you can also route on the +Instead of a value in the header property, you can route on the ``message_id``, ``correlation_id``, or ``timestamp`` message properties. To do so, declare the exchange with a string argument called ``"hash-property"`` naming the property to be used. +The `"hash-header"` and `"hash-property"` are mutually exclusive. -When a `"hash-property"` is specified, the chosen property **must be provided**. +When a `"hash-property"` is specified, the chosen property should be provided. If published messages do not contain the property, they will all get routed to the same **arbitrarily chosen** queue. @@ -744,7 +745,7 @@ test() -> amqp_channel:call(Chan, #'exchange.declare'{ exchange = <<"e">>, type = <<"x-consistent-hash">>, - arguments = {<<"hash-property">>, longstr, <<"message_id">>} + arguments = {<<"hash-property">>, longstr, <<"message_id">>} }), [amqp_channel:call(Chan, #'queue.declare'{queue = Q}) || Q <- Queues], [amqp_channel:call(Chan, #'queue.bind'{queue = Q, @@ -829,13 +830,9 @@ queue weight can be provided at the time of binding. The state of the hash space is distributed across all cluster nodes. -## Continuous Integration - -[![Build Status](https://travis-ci.org/rabbitmq/rabbitmq-consistent-hash-exchange.svg?branch=master)](https://travis-ci.org/rabbitmq/rabbitmq-consistent-hash-exchange) - ## Copyright and License -(c) 2013-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Released under the Mozilla Public License 2.0, same as RabbitMQ. See [LICENSE](./LICENSE) for details. 
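The README hunks above describe routing on a message property via the `"hash-property"` exchange argument and note that the hashed value should be supplied on every published message. The following is a minimal editor-added sketch, not part of the patch itself, based on the README's own `test/0` example: it assumes the Erlang `amqp_client` application is available and that `Chan` is an already-open channel; the exchange name `chx` and the queue names `q1`/`q2` are illustrative only.

```erlang
%% Minimal sketch: declare a consistent hash exchange that hashes on the
%% correlation_id property, bind two queues with equal weight, and publish
%% messages that carry the hashed property. Assumes an open channel `Chan';
%% all names here are illustrative.
-module(chx_property_routing_example).

-include_lib("amqp_client/include/amqp_client.hrl").

-export([declare_and_publish/1]).

declare_and_publish(Chan) ->
    %% Hash on the correlation_id property instead of the routing key.
    #'exchange.declare_ok'{} =
        amqp_channel:call(Chan, #'exchange.declare'{
                                   exchange  = <<"chx">>,
                                   type      = <<"x-consistent-hash">>,
                                   arguments = [{<<"hash-property">>, longstr,
                                                 <<"correlation_id">>}]}),
    %% Bind two queues; the binding's routing key is its weight.
    [begin
         #'queue.declare_ok'{} = amqp_channel:call(Chan, #'queue.declare'{queue = Q}),
         #'queue.bind_ok'{} = amqp_channel:call(Chan, #'queue.bind'{queue = Q,
                                                                    exchange = <<"chx">>,
                                                                    routing_key = <<"1">>})
     end || Q <- [<<"q1">>, <<"q2">>]],
    %% Publish messages that carry the hashed property so they spread across
    %% the bound queues; omitting it would send everything to one arbitrarily
    %% chosen queue, as the README notes.
    [amqp_channel:call(Chan,
                       #'basic.publish'{exchange = <<"chx">>},
                       #amqp_msg{props = #'P_basic'{correlation_id = integer_to_binary(N)},
                                 payload = <<>>})
     || N <- lists:seq(1, 100)],
    ok.
```

This mirrors what the new `custom_header_undefined` and `amqp_dead_letter` test cases further down in this patch exercise from the server side.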
diff --git a/deps/rabbitmq_consistent_hash_exchange/app.bzl b/deps/rabbitmq_consistent_hash_exchange/app.bzl index ba5eb88887cb..e6a43a75079f 100644 --- a/deps/rabbitmq_consistent_hash_exchange/app.bzl +++ b/deps/rabbitmq_consistent_hash_exchange/app.bzl @@ -11,6 +11,7 @@ def all_beam_files(name = "all_beam_files"): srcs = [ "src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl", "src/rabbit_db_ch_exchange.erl", + "src/rabbit_db_ch_exchange_m2k_converter.erl", "src/rabbit_exchange_type_consistent_hash.erl", ], hdrs = [":public_and_private_hdrs"], @@ -21,6 +22,8 @@ def all_beam_files(name = "all_beam_files"): "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_cli:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", ], ) @@ -36,6 +39,7 @@ def all_test_beam_files(name = "all_test_beam_files"): srcs = [ "src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl", "src/rabbit_db_ch_exchange.erl", + "src/rabbit_db_ch_exchange_m2k_converter.erl", "src/rabbit_exchange_type_consistent_hash.erl", ], hdrs = [":public_and_private_hdrs"], @@ -46,6 +50,8 @@ def all_test_beam_files(name = "all_test_beam_files"): "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_cli:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", ], ) @@ -68,6 +74,7 @@ def all_srcs(name = "all_srcs"): srcs = [ "src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl", "src/rabbit_db_ch_exchange.erl", + "src/rabbit_db_ch_exchange_m2k_converter.erl", "src/rabbit_exchange_type_consistent_hash.erl", ], ) diff --git a/deps/rabbitmq_consistent_hash_exchange/src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl b/deps/rabbitmq_consistent_hash_exchange/src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl index 25bdf2b0db04..dbb38a2b56c5 100644 --- a/deps/rabbitmq_consistent_hash_exchange/src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl +++ b/deps/rabbitmq_consistent_hash_exchange/src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module('Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand'). diff --git a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange.erl b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange.erl index 5af7a53a3ae7..5b2daa4819fc 100644 --- a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange.erl +++ b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange.erl @@ -2,10 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_ch_exchange). +-include_lib("rabbit_common/include/rabbit.hrl"). 
+-include_lib("khepri/include/khepri.hrl"). +-include("rabbitmq_consistent_hash_exchange.hrl"). + -export([ setup_schema/0, create/1, @@ -15,13 +19,21 @@ delete_bindings/2 ]). --include_lib("rabbit_common/include/rabbit.hrl"). --include("rabbitmq_consistent_hash_exchange.hrl"). +-export([ + khepri_consistent_hash_path/0, + khepri_consistent_hash_path/1 + ]). -define(HASH_RING_STATE_TABLE, rabbit_exchange_type_consistent_hash_ring_state). +-rabbit_mnesia_tables_to_khepri_db( + [{?HASH_RING_STATE_TABLE, rabbit_db_ch_exchange_m2k_converter}]). + setup_schema() -> - setup_schema_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> setup_schema_in_mnesia() end, + khepri => ok + }). setup_schema_in_mnesia() -> _ = mnesia:create_table(?HASH_RING_STATE_TABLE, [{record_name, chx_hash_ring}, @@ -31,7 +43,10 @@ setup_schema_in_mnesia() -> rabbit_table:wait([?HASH_RING_STATE_TABLE]). create(X) -> - create_in_mnesia(X). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> create_in_mnesia(X) end, + khepri => fun() -> create_in_khepri(X) end + }). create_in_mnesia(X) -> rabbit_mnesia:execute_mnesia_transaction( @@ -49,8 +64,21 @@ create_in_mnesia_tx(X) -> bucket_map = #{}}, write) end. +create_in_khepri(X) -> + Path = khepri_consistent_hash_path(X), + case rabbit_khepri:create(Path, #chx_hash_ring{exchange = X, + next_bucket_number = 0, + bucket_map = #{}}) of + ok -> ok; + {error, {khepri, mismatching_node, _}} -> ok; + Error -> Error + end. + create_binding(Src, Dst, Weight, UpdateFun) -> - create_binding_in_mnesia(Src, Dst, Weight, UpdateFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> create_binding_in_mnesia(Src, Dst, Weight, UpdateFun) end, + khepri => fun() -> create_binding_in_khepri(Src, Dst, Weight, UpdateFun) end + }). create_binding_in_mnesia(Src, Dst, Weight, UpdateFun) -> rabbit_mnesia:execute_mnesia_transaction( @@ -73,8 +101,42 @@ create_binding_in_mnesia_tx(Src, Dst, Weight, UpdateFun) -> create_binding_in_mnesia_tx(Src, Dst, Weight, UpdateFun) end. +create_binding_in_khepri(Src, Dst, Weight, UpdateFun) -> + Path = khepri_consistent_hash_path(Src), + case rabbit_khepri:adv_get(Path) of + {ok, #{data := Chx0, payload_version := DVersion}} -> + case UpdateFun(Chx0, Dst, Weight) of + already_exists -> + already_exists; + Chx -> + Path1 = khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = DVersion}]), + Ret2 = rabbit_khepri:put(Path1, Chx), + case Ret2 of + ok -> + created; + {error, {khepri, mismatching_node, _}} -> + create_binding_in_khepri(Src, Dst, Weight, UpdateFun); + {error, _} = Error -> + Error + end + end; + _ -> + case rabbit_khepri:create(Path, #chx_hash_ring{exchange = Src, + next_bucket_number = 0, + bucket_map = #{}}) of + ok -> ok; + {error, {khepri, mismatching_node, _}} -> + create_binding_in_khepri(Src, Dst, Weight, UpdateFun); + Error -> throw(Error) + end + end. + get(XName) -> - get_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(XName) end, + khepri => fun() -> get_in_khepri(XName) end + }). get_in_mnesia(XName) -> case ets:lookup(?HASH_RING_STATE_TABLE, XName) of @@ -84,8 +146,20 @@ get_in_mnesia(XName) -> Chx end. +get_in_khepri(XName) -> + Path = khepri_consistent_hash_path(XName), + case rabbit_khepri:get(Path) of + {ok, Chx} -> + Chx; + _ -> + undefined + end. + delete(XName) -> - delete_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(XName) end, + khepri => fun() -> delete_in_khepri(XName) end + }). 
delete_in_mnesia(XName) -> rabbit_mnesia:execute_mnesia_transaction( @@ -94,8 +168,14 @@ delete_in_mnesia(XName) -> mnesia:delete({?HASH_RING_STATE_TABLE, XName}) end). +delete_in_khepri(XName) -> + rabbit_khepri:delete(khepri_consistent_hash_path(XName)). + delete_bindings(Bindings, DeleteFun) -> - delete_bindings_in_mnesia(Bindings, DeleteFun). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_bindings_in_mnesia(Bindings, DeleteFun) end, + khepri => fun() -> delete_bindings_in_khepri(Bindings, DeleteFun) end + }). delete_bindings_in_mnesia(Bindings, DeleteFun) -> rabbit_mnesia:execute_mnesia_transaction( @@ -118,3 +198,31 @@ delete_binding_in_mnesia(#binding{source = S, destination = D, key = RK}, Delete [] -> {not_found, S} end. + +delete_bindings_in_khepri(Bindings, DeleteFun) -> + rabbit_khepri:transaction( + fun() -> + [delete_binding_in_khepri(Binding, DeleteFun) || Binding <- Bindings] + end). + +delete_binding_in_khepri(#binding{source = S, destination = D}, DeleteFun) -> + Path = khepri_consistent_hash_path(S), + case khepri_tx:get(Path) of + {ok, Chx0} -> + case DeleteFun(Chx0, D) of + not_found -> + ok; + Chx -> + ok = khepri_tx:put(Path, Chx) + end; + _ -> + {not_found, S} + end. + +khepri_consistent_hash_path(#exchange{name = Name}) -> + khepri_consistent_hash_path(Name); +khepri_consistent_hash_path(#resource{virtual_host = VHost, name = Name}) -> + [?MODULE, exchange_type_consistent_hash_ring_state, VHost, Name]. + +khepri_consistent_hash_path() -> + [?MODULE, exchange_type_consistent_hash_ring_state]. diff --git a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange_m2k_converter.erl b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange_m2k_converter.erl new file mode 100644 index 000000000000..39cc14fc929f --- /dev/null +++ b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange_m2k_converter.erl @@ -0,0 +1,102 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_ch_exchange_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-include("rabbitmq_consistent_hash_exchange.hrl"). + +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3, + clear_data_in_khepri/1]). + +-record(?MODULE, {}). + +-define(HASH_RING_STATE_TABLE, rabbit_exchange_type_consistent_hash_ring_state). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, _Tables) -> + State = #?MODULE{}, + {ok, State}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). 
+%% @private + +copy_to_khepri(?HASH_RING_STATE_TABLE = Table, + #chx_hash_ring{exchange = XName} = Record, + State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, XName], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_ch_exchange:khepri_consistent_hash_path(XName), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Record, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +delete_from_khepri(?HASH_RING_STATE_TABLE = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_ch_exchange:khepri_consistent_hash_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +clear_data_in_khepri(?HASH_RING_STATE_TABLE) -> + Path = rabbit_db_ch_exchange:khepri_consistent_hash_path(), + case rabbit_khepri:delete(Path) of + ok -> + ok; + Error -> + throw(Error) + end. diff --git a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_exchange_type_consistent_hash.erl b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_exchange_type_consistent_hash.erl index fc9f60f2704e..af9a556694c0 100644 --- a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_exchange_type_consistent_hash.erl +++ b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_exchange_type_consistent_hash.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_type_consistent_hash). @@ -260,9 +260,10 @@ jump_consistent_hash_value(_B0, J0, NumberOfBuckets, SeedState0) -> jump_consistent_hash_value(B, J, NumberOfBuckets, SeedState). 
value_to_hash(undefined, Msg) -> - mc:get_annotation(routing_keys, Msg); -value_to_hash({header, Header}, Msg0) -> - maps:get(Header, mc:routing_headers(Msg0, [x_headers])); + mc:routing_keys(Msg); +value_to_hash({header, Header}, Msg) -> + Headers = mc:routing_headers(Msg, [x_headers]), + maps:get(Header, Headers, undefined); value_to_hash({property, Property}, Msg) -> case Property of <<"correlation_id">> -> diff --git a/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl b/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl index cdfe70bc30cb..16f7ccb1fd66 100644 --- a/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl +++ b/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl @@ -2,12 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_type_consistent_hash_SUITE). --compile(export_all). +-compile([export_all, nowarn_export_all]). -include("rabbitmq_consistent_hash_exchange.hrl"). -include_lib("common_test/include/ct.hrl"). @@ -22,35 +22,47 @@ all() -> [ {group, routing_tests}, {group, hash_ring_management_tests}, - {group, clustered} + {group, clustered}, + {group, khepri_migration} ]. groups() -> [ - {routing_tests, [], [ - routing_key_hashing_test, - custom_header_hashing_test, - message_id_hashing_test, - correlation_id_hashing_test, - timestamp_hashing_test, - other_routing_test - ]}, - {hash_ring_management_tests, [], [ - test_durable_exchange_hash_ring_recovery_between_node_restarts, - test_hash_ring_updates_when_queue_is_deleted, - test_hash_ring_updates_when_multiple_queues_are_deleted, - test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure, - test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case2, - test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case3, - test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case4, - test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case5, - test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case6, - test_hash_ring_updates_when_exchange_is_deleted, - test_hash_ring_updates_when_queue_is_unbound, - test_hash_ring_updates_when_duplicate_binding_is_created_and_queue_is_deleted, - test_hash_ring_updates_when_duplicate_binding_is_created_and_binding_is_deleted - ]}, - {clustered, [], [node_restart]} + {routing_tests, [], routing_tests()}, + {hash_ring_management_tests, [], hash_ring_management_tests()}, + {clustered, [], [node_restart]}, + {khepri_migration, [], [ + from_mnesia_to_khepri + ]} + ]. + +routing_tests() -> + [ + routing_key_hashing_test, + custom_header_hashing_test, + custom_header_undefined, + message_id_hashing_test, + correlation_id_hashing_test, + timestamp_hashing_test, + other_routing_test, + amqp_dead_letter + ]. 
+ +hash_ring_management_tests() -> + [ + test_durable_exchange_hash_ring_recovery_between_node_restarts, + test_hash_ring_updates_when_queue_is_deleted, + test_hash_ring_updates_when_multiple_queues_are_deleted, + test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure, + test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case2, + test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case3, + test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case4, + test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case5, + test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case6, + test_hash_ring_updates_when_exchange_is_deleted, + test_hash_ring_updates_when_queue_is_unbound, + test_hash_ring_updates_when_duplicate_binding_is_created_and_queue_is_deleted, + test_hash_ring_updates_when_duplicate_binding_is_created_and_binding_is_deleted ]. %% ------------------------------------------------------------------- @@ -58,22 +70,22 @@ groups() -> %% ------------------------------------------------------------------- init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(rabbitmq_amqp_client), rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config, []). + rabbit_ct_helpers:run_setup_steps(Config). end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(clustered = Group, Config) -> - case rabbit_ct_helpers:is_mixed_versions() of - false -> - init_per_group(Group, Config, 3); - true -> - %% Consistent hash exchange plugin prior to - %% https://github.com/rabbitmq/rabbitmq-server/pull/5121 - %% does not add bindings idempotently which makes test node_restart fail. - {skip, "not mixed versions compatible"} +init_per_group(khepri_migration = Group, Config) -> + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + mnesia -> + init_per_group(Group, Config, 1); + _ -> + {skip, "This group only targets mnesia"} end; +init_per_group(clustered = Group, Config) -> + init_per_group(Group, Config, 3); init_per_group(Group, Config) -> init_per_group(Group, Config, 1). @@ -88,9 +100,9 @@ init_per_group(Group, Config, NodesCount) -> rabbit_ct_client_helpers:setup_steps()). end_per_group(_, Config) -> - rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()). + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). init_per_testcase(Testcase, Config) -> clean_up_test_topology(Config), @@ -110,7 +122,7 @@ end_per_testcase(Testcase, Config) -> %% N.B. lowering this value below 100K increases the probability %% of failing the Chi squared test in some environments --define(DEFAULT_SAMPLE_COUNT, 150000). +-define(DEFAULT_SAMPLE_COUNT, 150_000). routing_key_hashing_test(Config) -> ok = test_with_rk(Config, ?RoutingTestQs). @@ -134,51 +146,232 @@ other_routing_test(Config) -> ok = test_mutually_exclusive_arguments(Config), ok. +%% Test case for +%% https://github.com/rabbitmq/rabbitmq-server/discussions/11671 +%% According to our docs, it's allowed (although not recommended) +%% for the publishing client to omit the header: +%% "If published messages do not contain the header, +%% they will all get routed to the same arbitrarily chosen queue." 
+custom_header_undefined(Config) -> + Exchange = <<"my exchange">>, + Queue = <<"my queue">>, + + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), + #'exchange.declare_ok'{} = amqp_channel:call( + Ch, #'exchange.declare' { + exchange = Exchange, + type = <<"x-consistent-hash">>, + arguments = [{<<"hash-header">>, longstr, <<"hashme">>}] + }), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Queue}), + #'queue.bind_ok'{} = amqp_channel:call( + Ch, #'queue.bind'{queue = Queue, + exchange = Exchange, + routing_key = <<"1">>}), + + amqp_channel:call(Ch, + #'basic.publish'{exchange = Exchange}, + %% We leave the "hashme" header undefined. + #amqp_msg{}), + amqp_channel:wait_for_confirms(Ch, 10), + + ?assertMatch({#'basic.get_ok'{}, #amqp_msg{}}, + amqp_channel:call(Ch, #'basic.get'{queue = Queue})), + + rabbit_ct_client_helpers:close_channel(Ch), + clean_up_test_topology(Config, Exchange, [Queue]), + ok. + +%% Test that messages originally published with AMQP to a quorum queue +%% can be dead lettered via the consistent hash exchange to a stream. +amqp_dead_letter(Config) -> + XName = <<"consistent hash exchange">>, + QQ = <<"quorum queue">>, + Stream1 = <<"stream 1">>, + Stream2 = <<"stream 2">>, + + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + OpnConf = #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, <<"guest">>, <<"guest">>}}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync( + Session, <<"my link pair">>), + + ok = rabbitmq_amqp_client:declare_exchange( + LinkPair, XName, #{type => <<"x-consistent-hash">>, + durable => true, + auto_delete => true, + arguments => #{<<"hash-property">> => {utf8, <<"correlation_id">>}}}), + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, QQ, + #{arguments => #{<<"x-dead-letter-exchange">> => {utf8, XName}, + <<"x-message-ttl">> => {ulong, 0}, + <<"x-queue-type">> => {utf8, <<"quorum">>} + }}), + [begin + {ok, #{type := <<"stream">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, Stream, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, Stream, XName, _Weight = <<"1">>, #{}) + end || Stream <- [Stream1, Stream2]], + + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, <<"receiver 1">>, <<"/queue/", Stream1/binary>>, settled), + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, <<"receiver 2">>, <<"/queue/", Stream2/binary>>, settled), + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"sender">>, <<"/queue/", QQ/binary>>), + receive {amqp10_event, {link, Sender, credited}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + Rands = [integer_to_binary(rand:uniform(1000)) || _ <- lists:seq(1, 30)], + UniqRands = lists:uniq(Rands), + NumMsgs = 128, + [begin + SeqBin = integer_to_binary(Seq), + Msg0 = amqp10_msg:set_properties( + #{correlation_id => lists:nth(rand:uniform(length(UniqRands)), UniqRands), + message_id => <<"some message ID">>}, + amqp10_msg:new(SeqBin, SeqBin)), + %% Set sometimes some other sections just to hit different code paths within the server. + %% These sections are not really relevant for the consistent hash exchange. 
+ Msg1 = case Seq rem 2 of + 0 -> + amqp10_msg:set_message_annotations( + #{<<"k1">> => Seq}, Msg0); + 1 -> + Msg0 + end, + Msg2 = case Seq rem 3 of + 0 -> + amqp10_msg:set_application_properties( + #{<<"k2">> => Seq}, Msg1); + _ -> + Msg1 + end, + Msg = case Seq rem 4 of + 0 -> + amqp10_msg:set_delivery_annotations( + #{<<"k3">> => Seq}, Msg2); + _ -> + Msg2 + end, + ok = amqp10_client:send_msg(Sender, Msg) + end || Seq <- lists:seq(1, NumMsgs)], + ok = wait_for_accepts(NumMsgs), + + ok = amqp10_client:flow_link_credit(Receiver1, NumMsgs, never), + ok = amqp10_client:flow_link_credit(Receiver2, NumMsgs, never), + + {N1, Corrs1} = receive_correlations(Receiver1, 0, sets:new([{version, 2}])), + {N2, Corrs2} = receive_correlations(Receiver2, 0, sets:new([{version, 2}])), + ct:pal("~s: ~b messages, ~b unique correlation IDs", [Stream1, N1, sets:size(Corrs1)]), + ct:pal("~s: ~b messages, ~b unique correlation IDs", [Stream2, N2, sets:size(Corrs2)]), + %% All messages should be routed. + ?assertEqual(NumMsgs, N1 + N2), + %% Each of the 2 streams should have received at least 1 message. + ?assert(sets:size(Corrs1) > 0), + ?assert(sets:size(Corrs2) > 0), + %% Assert that the consistent hash exchange routed the given correlation IDs consistently. + %% The same correlation ID should never be present in both streams. + Intersection = sets:intersection(Corrs1, Corrs2), + ?assert(sets:is_empty(Intersection)), + + ok = amqp10_client:detach_link(Receiver1), + ok = amqp10_client:detach_link(Receiver2), + ok = amqp10_client:detach_link(Sender), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QQ), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, Stream1), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, Stream2), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +receive_correlations(Receiver, N, Set) -> + receive + {amqp10_msg, Receiver, Msg} -> + #{correlation_id := Corr, + message_id := <<"some message ID">>} = amqp10_msg:properties(Msg), + receive_correlations(Receiver, N + 1, sets:add_element(Corr, Set)) + after 500 -> + {N, Set} + end. + +wait_for_accepts(0) -> + ok; +wait_for_accepts(N) -> + receive + {amqp10_disposition, {accepted, _}} -> + wait_for_accepts(N - 1) + after 5000 -> + ct:fail({missing_accepted, N}) + end. %% ------------------------------------------------------------------- %% Implementation %% ------------------------------------------------------------------- test_with_rk(Config, Qs) -> - test0(Config, fun (E) -> + test0(Config, + fun (E) -> #'basic.publish'{exchange = E, routing_key = rnd()} end, fun() -> #amqp_msg{props = #'P_basic'{}, payload = <<>>} - end, [], Qs). + end, + [], + Qs). test_with_header(Config, Qs) -> - test0(Config, fun (E) -> + test0(Config, + fun (E) -> #'basic.publish'{exchange = E} end, fun() -> H = [{<<"hashme">>, longstr, rnd()}], #amqp_msg{props = #'P_basic'{headers = H}, payload = <<>>} - end, [{<<"hash-header">>, longstr, <<"hashme">>}], Qs). + end, + [{<<"hash-header">>, longstr, <<"hashme">>}], + Qs). test_with_correlation_id(Config, Qs) -> - test0(Config, fun(E) -> + test0(Config, + fun(E) -> #'basic.publish'{exchange = E} end, fun() -> #amqp_msg{props = #'P_basic'{correlation_id = rnd()}, payload = <<>>} - end, [{<<"hash-property">>, longstr, <<"correlation_id">>}], Qs). + end, + [{<<"hash-property">>, longstr, <<"correlation_id">>}], + Qs). 
test_with_message_id(Config, Qs) -> - test0(Config, fun(E) -> + test0(Config, + fun(E) -> #'basic.publish'{exchange = E} end, fun() -> #amqp_msg{props = #'P_basic'{message_id = rnd()}, payload = <<>>} - end, [{<<"hash-property">>, longstr, <<"message_id">>}], Qs). + end, + [{<<"hash-property">>, longstr, <<"message_id">>}], + Qs). test_with_timestamp(Config, Qs) -> - test0(Config, fun(E) -> + test0(Config, + fun(E) -> #'basic.publish'{exchange = E} end, fun() -> #amqp_msg{props = #'P_basic'{timestamp = rnd_int()}, payload = <<>>} - end, [{<<"hash-property">>, longstr, <<"timestamp">>}], Qs). + end, + [{<<"hash-property">>, longstr, <<"timestamp">>}], + Qs). test_mutually_exclusive_arguments(Config) -> Chan = rabbit_ct_client_helpers:open_channel(Config, 0), @@ -210,16 +403,16 @@ test_non_supported_property(Config) -> ok. rnd() -> - list_to_binary(integer_to_list(rnd_int())). + integer_to_binary(rnd_int()). rnd_int() -> - rand:uniform(10000000). + rand:uniform(10_000_000). test0(Config, MakeMethod, MakeMsg, DeclareArgs, Queues) -> test0(Config, MakeMethod, MakeMsg, DeclareArgs, Queues, ?DEFAULT_SAMPLE_COUNT). test0(Config, MakeMethod, MakeMsg, DeclareArgs, [Q1, Q2, Q3, Q4] = Queues, IterationCount) -> - Chan = rabbit_ct_client_helpers:open_channel(Config, 0), + Chan = rabbit_ct_client_helpers:open_channel(Config), #'confirm.select_ok'{} = amqp_channel:call(Chan, #'confirm.select'{}), CHX = <<"e">>, @@ -443,14 +636,14 @@ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closu routing_key = <<"3">>}) || Q <- Queues], - ct:pal("all hash ring rows: ~tp", [hash_ring_rows(Config)]), + ct:pal("hash ring state: ~tp", [hash_ring_state(Config, X)]), ?assertEqual(18, count_buckets_of_exchange(Config, X)), assert_ring_consistency(Config, X), ok = amqp_connection:close(Conn), timer:sleep(500), - ct:pal("all hash ring rows after connection closure: ~tp", [hash_ring_rows(Config)]), + ct:pal("hash ring state after connection closure: ~tp", [hash_ring_state(Config, X)]), ?awaitMatch(0, count_buckets_of_exchange(Config, X), ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), clean_up_test_topology(Config, X, []), @@ -499,7 +692,7 @@ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closu routing_key = integer_to_binary(Key)}) || Q <- Queues], - ct:pal("all hash ring rows: ~tp", [hash_ring_rows(Config)]), + ct:pal("hash ring state: ~tp", [hash_ring_state(Config, X)]), %% NumQueues x 'Key' buckets per binding ?assertEqual(NumQueues * Key, count_buckets_of_exchange(Config, X)), @@ -507,7 +700,7 @@ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closu ok = amqp_connection:close(Conn), timer:sleep(1000), - ct:pal("all hash ring rows after connection closure (~tp): ~tp", [XAsList, hash_ring_rows(Config)]), + ct:pal("hash ring state after connection closure (~tp): ~tp", [XAsList, hash_ring_state(Config, X)]), ?awaitMatch(0, count_buckets_of_exchange(Config, X), ?DEFAULT_WAIT, ?DEFAULT_INTERVAL), clean_up_test_topology(Config, X, []), @@ -717,7 +910,7 @@ node_restart(Config) -> rabbit_ct_broker_helpers:restart_node(Config, 1), rabbit_ct_broker_helpers:restart_node(Config, 2), - ?assertEqual(4, count_all_hash_ring_buckets(Config)), + ?assertEqual(4, count_buckets_of_exchange(Config, X)), assert_ring_consistency(Config, X), clean_up_test_topology(Config, X, QsNode1 ++ QsNode2), @@ -729,16 +922,10 @@ node_restart(Config) -> hash_ring_state(Config, X) -> rabbit_ct_broker_helpers:rpc( - Config, 0, ets, lookup, - [rabbit_exchange_type_consistent_hash_ring_state, 
- rabbit_misc:r(<<"/">>, exchange, X)]). - -hash_ring_rows(Config) -> - rabbit_ct_broker_helpers:rpc( - Config, 0, ets, tab2list, [rabbit_exchange_type_consistent_hash_ring_state]). + Config, 0, rabbit_exchange_type_consistent_hash, ring_state, [<<"/">>, X]). assert_ring_consistency(Config, X) -> - [#chx_hash_ring{bucket_map = M}] = hash_ring_state(Config, X), + {ok, #chx_hash_ring{bucket_map = M}} = hash_ring_state(Config, X), Buckets = lists:usort(maps:keys(M)), Hi = lists:last(Buckets), @@ -747,13 +934,78 @@ assert_ring_consistency(Config, X) -> count_buckets_of_exchange(Config, X) -> case hash_ring_state(Config, X) of - [#chx_hash_ring{bucket_map = M}] -> maps:size(M); - [] -> 0 + {ok, #chx_hash_ring{bucket_map = M}} -> + ct:pal("BUCKET MAP ~p", [M]), + maps:size(M); + {error, not_found} -> 0 end. -count_all_hash_ring_buckets(Config) -> - Rows = hash_ring_rows(Config), - lists:foldl(fun(#chx_hash_ring{bucket_map = M}, Acc) -> Acc + maps:size(M) end, 0, Rows). +from_mnesia_to_khepri(Config) -> + Queues = [Q1, Q2, Q3, Q4] = ?RoutingTestQs, + IterationCount = ?DEFAULT_SAMPLE_COUNT, + Chan = rabbit_ct_client_helpers:open_channel(Config, 0), + #'confirm.select_ok'{} = amqp_channel:call(Chan, #'confirm.select'{}), + + CHX = <<"e">>, + + clean_up_test_topology(Config, CHX, Queues), + + #'exchange.declare_ok'{} = + amqp_channel:call(Chan, + #'exchange.declare' { + exchange = CHX, + type = <<"x-consistent-hash">>, + auto_delete = true, + arguments = [] + }), + [#'queue.declare_ok'{} = + amqp_channel:call(Chan, #'queue.declare' { + queue = Q, exclusive = true }) || Q <- Queues], + [#'queue.bind_ok'{} = + amqp_channel:call(Chan, #'queue.bind' {queue = Q, + exchange = CHX, + routing_key = <<"1">>}) + || Q <- [Q1, Q2]], + [#'queue.bind_ok'{} = + amqp_channel:call(Chan, #'queue.bind' {queue = Q, + exchange = CHX, + routing_key = <<"2">>}) + || Q <- [Q3, Q4]], + + case rabbit_ct_broker_helpers:enable_feature_flag(Config, khepri_db) of + ok -> + case rabbit_ct_broker_helpers:enable_feature_flag(Config, rabbit_consistent_hash_exchange_raft_based_metadata_store) of + ok -> + [amqp_channel:call(Chan, + #'basic.publish'{exchange = CHX, routing_key = rnd()}, + #amqp_msg{props = #'P_basic'{}, payload = <<>>}) + || _ <- lists:duplicate(IterationCount, const)], + amqp_channel:wait_for_confirms(Chan, 300), + timer:sleep(500), + Counts = + [begin + #'queue.declare_ok'{message_count = M} = + amqp_channel:call(Chan, #'queue.declare' {queue = Q, + exclusive = true}), + M + end || Q <- Queues], + ?assertEqual(IterationCount, lists:sum(Counts)), %% All messages got routed + %% Chi-square test + %% H0: routing keys are not evenly distributed according to weight + Expected = [IterationCount div 6, IterationCount div 6, (IterationCount div 6) * 2, (IterationCount div 6) * 2], + Obs = lists:zip(Counts, Expected), + Chi = lists:sum([((O - E) * (O - E)) / E || {O, E} <- Obs]), + ct:pal("Chi-square test for 3 degrees of freedom is ~p, p = 0.01 is 11.35, observations (counts, expected): ~p", + [Chi, Obs]), + clean_up_test_topology(Config, CHX, Queues), + rabbit_ct_client_helpers:close_channel(Chan), + ok; + Skip -> + Skip + end; + Skip -> + Skip + end. clean_up_test_topology(Config) -> clean_up_test_topology(Config, none, ?AllQs). diff --git a/deps/rabbitmq_ct_client_helpers/.gitignore b/deps/rabbitmq_ct_client_helpers/.gitignore deleted file mode 100644 index 987a3071d0a7..000000000000 --- a/deps/rabbitmq_ct_client_helpers/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -*~ -.sw? -.*.sw? 
-*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr - -/rabbitmq_ct_client_helpers.d -/.rabbitmq_ct_client_helpers.plt - -/.bazelrc -/bazel-* diff --git a/deps/rabbitmq_ct_client_helpers/BUILD.bazel b/deps/rabbitmq_ct_client_helpers/BUILD.bazel index 2bbee7a93b7f..8fa9dfa34f41 100644 --- a/deps/rabbitmq_ct_client_helpers/BUILD.bazel +++ b/deps/rabbitmq_ct_client_helpers/BUILD.bazel @@ -59,7 +59,7 @@ plt( ], for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", ) diff --git a/deps/rabbitmq_ct_client_helpers/Makefile b/deps/rabbitmq_ct_client_helpers/Makefile index 43fc9e37c2f4..c61e87a82a34 100644 --- a/deps/rabbitmq_ct_client_helpers/Makefile +++ b/deps/rabbitmq_ct_client_helpers/Makefile @@ -6,5 +6,7 @@ DEPS = rabbit_common rabbitmq_ct_helpers amqp_client DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ rabbit_common/mk/rabbitmq-tools.mk +PLT_APPS = common_test + include ../../rabbitmq-components.mk include ../../erlang.mk diff --git a/deps/rabbitmq_ct_client_helpers/src/rabbit_ct_client_helpers.erl b/deps/rabbitmq_ct_client_helpers/src/rabbit_ct_client_helpers.erl index 17075221f426..c8db047b7324 100644 --- a/deps/rabbitmq_ct_client_helpers/src/rabbit_ct_client_helpers.erl +++ b/deps/rabbitmq_ct_client_helpers/src/rabbit_ct_client_helpers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ct_client_helpers). diff --git a/deps/rabbitmq_ct_helpers/.gitignore b/deps/rabbitmq_ct_helpers/.gitignore index 85195da77ad9..558cd9165bb2 100644 --- a/deps/rabbitmq_ct_helpers/.gitignore +++ b/deps/rabbitmq_ct_helpers/.gitignore @@ -1,27 +1,4 @@ -*~ -.sw? -.*.sw? 
-*.beam .terraform/ .terraform-* terraform.tfstate* *terraform.lock* -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr - -/rabbitmq_ct_helpers.d -/.rabbitmq_ct_helpers.plt - -/.bazelrc -/bazel-* diff --git a/deps/rabbitmq_ct_helpers/BUILD.bazel b/deps/rabbitmq_ct_helpers/BUILD.bazel index 2f921ad4f2ea..1002b4289a8a 100644 --- a/deps/rabbitmq_ct_helpers/BUILD.bazel +++ b/deps/rabbitmq_ct_helpers/BUILD.bazel @@ -45,6 +45,7 @@ rabbitmq_app( "//deps/rabbit_common:erlang_app", "@meck//:erlang_app", "@proper//:erlang_app", + "@ra//:erlang_app", ], ) @@ -57,10 +58,10 @@ alias( xref( name = "xref", additional_libs = [ - "//deps/rabbitmq_cli:elixir", # keep + "@rules_elixir//elixir", # keep "//deps/rabbitmq_cli:erlang_app", # keep "//deps/rabbit:erlang_app", # keep - "//deps/rabbit/apps/rabbitmq_prelaunch:erlang_app", # keep + "//deps/rabbitmq_prelaunch:erlang_app", # keep "//deps/rabbitmq_management_agent:erlang_app", # keep "@proper//:erlang_app", # keep ], @@ -77,16 +78,16 @@ plt( for_target = ":erlang_app", ignore_warnings = True, libs = [ - "//deps/rabbitmq_cli:elixir", # keep + "@rules_elixir//elixir", # keep ], plt = "//:base_plt", deps = [ "//deps/rabbit:erlang_app", # keep - "//deps/rabbit/apps/rabbitmq_prelaunch:erlang_app", # keep - "//deps/rabbitmq_cli:elixir", # keep "//deps/rabbitmq_cli:erlang_app", # keep "//deps/rabbitmq_management_agent:erlang_app", # keep + "//deps/rabbitmq_prelaunch:erlang_app", # keep "@proper//:erlang_app", # keep + "@rules_elixir//elixir", # keep ], ) diff --git a/deps/rabbitmq_ct_helpers/LICENSE-APACHE2 b/deps/rabbitmq_ct_helpers/LICENSE-APACHE2 index 7017ed9108c4..8e30d6627fcd 100644 --- a/deps/rabbitmq_ct_helpers/LICENSE-APACHE2 +++ b/deps/rabbitmq_ct_helpers/LICENSE-APACHE2 @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2017-2020 VMware, Inc. or its affiliates. + Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/deps/rabbitmq_ct_helpers/Makefile b/deps/rabbitmq_ct_helpers/Makefile index 5dc6e160bd7b..2e1f19839036 100644 --- a/deps/rabbitmq_ct_helpers/Makefile +++ b/deps/rabbitmq_ct_helpers/Makefile @@ -4,8 +4,10 @@ PROJECT_DESCRIPTION = Common Test helpers for RabbitMQ DEPS = rabbit_common proper inet_tcp_proxy meck TEST_DEPS = rabbit -dep_rabbit_common = git-subfolder https://github.com/rabbitmq/rabbitmq-server main deps/rabbit_common -dep_rabbit = git-subfolder https://github.com/rabbitmq/rabbitmq-server main deps/rabbit +XREF_IGNORE = [ \ + {'Elixir.OptionParser',split,1}, \ + {'Elixir.RabbitMQCtl',exec_command,2}] + dep_inet_tcp_proxy = git https://github.com/rabbitmq/inet_tcp_proxy master DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ diff --git a/deps/rabbitmq_ct_helpers/WORKSPACE.bazel b/deps/rabbitmq_ct_helpers/WORKSPACE.bazel deleted file mode 100644 index b1a77b250fea..000000000000 --- a/deps/rabbitmq_ct_helpers/WORKSPACE.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = "bazel-erlang", - sha256 = "422a9222522216f59a01703a13f578c601d6bddf5617bee8da3c43e3b299fc4e", - strip_prefix = "bazel-erlang-1.1.0", - urls = ["https://github.com/rabbitmq/bazel-erlang/archive/refs/tags/1.1.0.zip"], -) - -http_archive( - name = "rabbitmq-server", - strip_prefix = "rabbitmq-server-main", - urls = ["https://github.com/rabbitmq/rabbitmq-server/archive/main.zip"], -) - -load("@rabbitmq-server//:workspace_helpers.bzl", "rabbitmq_external_deps") - -rabbitmq_external_deps() diff --git a/deps/rabbitmq_ct_helpers/include/rabbit_assert.hrl b/deps/rabbitmq_ct_helpers/include/rabbit_assert.hrl index e1981ac4b354..f9dedabff817 100644 --- a/deps/rabbitmq_ct_helpers/include/rabbit_assert.hrl +++ b/deps/rabbitmq_ct_helpers/include/rabbit_assert.hrl @@ -6,7 +6,7 @@ AwaitMatchResult = Expr, case (AwaitMatchResult) of Guard -> AwaitMatchResult; - __V -> case erlang:system_time(millisecond) of + __V -> case erlang:monotonic_time(millisecond) of AwaitMatchNow when AwaitMatchNow < AwaitMatchHorizon -> timer:sleep( min(PollingInterval, @@ -21,29 +21,8 @@ {value, __V}]}) end end - end)(erlang:system_time(millisecond) + Timeout)) + end)(erlang:monotonic_time(millisecond) + Timeout)) end). -define(awaitMatch(Guard, Expr, Timeout), - begin - ((fun AwaitMatchFilter(AwaitMatchHorizon) -> - AwaitMatchResult = Expr, - case (AwaitMatchResult) of - Guard -> AwaitMatchResult; - __V -> case erlang:system_time(millisecond) of - AwaitMatchNow when AwaitMatchNow < AwaitMatchHorizon -> - timer:sleep( - min(?AWAIT_MATCH_DEFAULT_POLLING_INTERVAL, - AwaitMatchHorizon - AwaitMatchNow)), - AwaitMatchFilter(AwaitMatchHorizon); - _ -> - erlang:error({awaitMatch, - [{module, ?MODULE}, - {line, ?LINE}, - {expression, (??Expr)}, - {pattern, (??Guard)}, - {value, __V}]}) - end - end - end)(erlang:system_time(millisecond) + Timeout)) - end). + ?awaitMatch(Guard, Expr, Timeout, ?AWAIT_MATCH_DEFAULT_POLLING_INTERVAL)). diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl b/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl index eda2a6df1b1b..de51925db73a 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl @@ -2,12 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_control_helper). -export([command/2, command/3, command/4, command_with_output/4, format_command/4]). +-export([async_command/4, wait_for_async_command/1]). command(Command, Node, Args) -> command(Command, Node, Args, []). @@ -22,6 +23,21 @@ command(Command, Node, Args, Opts) -> Error -> Error end. +async_command(Command, Node, Args, Opts) -> + Self = self(), + spawn(fun() -> + Reply = (catch command(Command, Node, Args, Opts)), + Self ! {async_command, Node, Reply} + end). + +wait_for_async_command(Node) -> + receive + {async_command, N, Reply} when N == Node -> + Reply + after 600000 -> + timeout + end. + command_with_output(Command, Node, Args, Opts) -> Formatted = format_command(Command, Node, Args, Opts), CommandResult = 'Elixir.RabbitMQCtl':exec_command( diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl index 31c7ade7eddc..c230b63cf3a5 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ct_broker_helpers). @@ -26,6 +26,7 @@ cluster_nodes/1, cluster_nodes/2, setup_meck/1, + setup_meck/2, get_node_configs/1, get_node_configs/2, get_node_config/2, get_node_config/3, set_node_config/3, @@ -42,6 +43,8 @@ rpc_all/4, rpc_all/5, start_node/2, + async_start_node/2, + wait_for_async_start_node/1, start_broker/2, restart_broker/2, stop_broker/2, @@ -92,11 +95,6 @@ clear_policy/4, set_operator_policy/6, clear_operator_policy/3, - set_ha_policy/4, set_ha_policy/5, - set_ha_policy_all/1, - set_ha_policy_all/2, - set_ha_policy_two_pos/1, - set_ha_policy_two_pos_batch_sync/1, set_parameter/5, set_parameter/6, @@ -158,6 +156,7 @@ clear_permissions/5, set_vhost_limit/5, + clear_vhost_limit/3, set_user_limits/3, set_user_limits/4, @@ -169,7 +168,10 @@ test_channel/0, test_writer/1, - user/1 + user/1, + + configured_metadata_store/1, + await_metadata_store_consistent/2 ]). %% Internal functions exported to be used by rpc:call/4. @@ -214,7 +216,8 @@ setup_steps() -> fun rabbit_ct_helpers:ensure_rabbitmq_plugins_cmd/1, fun set_lager_flood_limit/1, fun start_rabbitmq_nodes/1, - fun share_dist_and_proxy_ports_map/1 + fun share_dist_and_proxy_ports_map/1, + fun configure_metadata_store/1 ]; _ -> [ @@ -223,7 +226,8 @@ setup_steps() -> fun rabbit_ct_helpers:ensure_rabbitmq_plugins_cmd/1, fun set_lager_flood_limit/1, fun start_rabbitmq_nodes/1, - fun share_dist_and_proxy_ports_map/1 + fun share_dist_and_proxy_ports_map/1, + fun configure_metadata_store/1 ] end. @@ -252,6 +256,9 @@ run_make_dist(Config) -> case os:getenv("SKIP_MAKE_TEST_DIST") of false -> SrcDir = ?config(current_srcdir, Config), + %% Some flags should not be propagated to Make when testing. 
+ os:unsetenv("FULL"), + os:unsetenv("MAKEFLAGS"), case rabbit_ct_helpers:make(Config, SrcDir, ["test-dist"]) of {ok, _} -> %% The caller can set $SKIP_MAKE_TEST_DIST to @@ -622,7 +629,14 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> true -> lists:nth(I + 1, WithPlugins0); false -> WithPlugins0 end, - CanUseSecondary = (I + 1) rem 2 =:= 0, + ForceUseSecondary = rabbit_ct_helpers:get_config( + Config, force_secondary_umbrella, undefined), + CanUseSecondary = case ForceUseSecondary of + undefined -> + (I + 1) rem 2 =:= 0; + Override when is_boolean(Override) -> + Override + end, UseSecondaryUmbrella = case ?config(secondary_umbrella, Config) of false -> false; _ -> CanUseSecondary @@ -750,7 +764,7 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> NodeConfig1 = rabbit_ct_helpers:set_config( NodeConfig, [{effective_srcdir, SrcDir}, - {make_vars_for_node_startup, MakeVars}]), + {make_vars_for_node_startup, MakeVars}]), query_node(Config, NodeConfig1); _ -> AbortCmd = ["stop-node" | MakeVars], @@ -760,19 +774,24 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> RunCmd -> UseSecondary = CanUseSecondary andalso rabbit_ct_helpers:get_config(Config, rabbitmq_run_secondary_cmd) =/= undefined, - EnabledPluginsMakeVars = case {UseSecondary, WithPlugins} of - {_, false} -> - ["RABBITMQ_ENABLED_PLUGINS=rabbit"]; - {true, _} -> - [{"RABBITMQ_ENABLED_PLUGINS=~ts", [filename:basename(SrcDir)]}]; - _ -> - [] - end, + PluginsMakeVars = case {UseSecondary, WithPlugins} of + {_, false} -> + ["LEAVE_PLUGINS_DISABLED=1"]; + {true, _} -> + case filename:basename(SrcDir) of + "rabbit" -> + ["LEAVE_PLUGINS_DISABLED=1"]; + Plugin -> + [{"RABBITMQ_ENABLED_PLUGINS=~ts", [Plugin]}] + end; + _ -> + [] + end, RmqRun = case CanUseSecondary of - false -> RunCmd; - _ -> rabbit_ct_helpers:get_config(Config, rabbitmq_run_secondary_cmd, RunCmd) - end, - case rabbit_ct_helpers:exec([RmqRun, "-C", SrcDir] ++ EnabledPluginsMakeVars ++ Cmd) of + false -> RunCmd; + _ -> rabbit_ct_helpers:get_config(Config, rabbitmq_run_secondary_cmd, RunCmd) + end, + case rabbit_ct_helpers:exec([RmqRun, "-C", SrcDir] ++ PluginsMakeVars ++ Cmd) of {ok, _} -> NodeConfig1 = rabbit_ct_helpers:set_config( NodeConfig, @@ -867,21 +886,22 @@ cluster_nodes1(_, _, _, []) -> ok. handle_nodes_in_parallel(NodeConfigs, Fun) -> - T0 = erlang:timestamp(), + T0 = erlang:monotonic_time(), Parent = self(), Procs = [ begin timer:sleep(rand:uniform(1000)), spawn_link(fun() -> - T1 = erlang:timestamp(), + T1 = erlang:monotonic_time(), Ret = Fun(NodeConfig), - T2 = erlang:timestamp(), + T2 = erlang:monotonic_time(), ct:pal( ?LOW_IMPORTANCE, "Time to run ~tp for node ~ts: ~b us", [Fun, ?config(nodename, NodeConfig), - timer:now_diff(T2, T1)]), + erlang:convert_time_unit( + T2 - T1, native, microsecond)]), Parent ! {parallel_handling_ret, self(), NodeConfig, @@ -892,11 +912,11 @@ handle_nodes_in_parallel(NodeConfigs, Fun) -> wait_for_node_handling(Procs, Fun, T0, []). wait_for_node_handling([], Fun, T0, Results) -> - T3 = erlang:timestamp(), + T3 = erlang:monotonic_time(), ct:pal( ?LOW_IMPORTANCE, "Time to run ~tp for all nodes: ~b us", - [Fun, timer:now_diff(T3, T0)]), + [Fun, erlang:convert_time_unit(T3 - T0, native, microsecond)]), Results; wait_for_node_handling(Procs, Fun, T0, Results) -> receive @@ -930,6 +950,78 @@ share_dist_and_proxy_ports_map(Config) -> application, set_env, [kernel, dist_and_proxy_ports_map, Map]), Config. 
+configured_metadata_store(Config) -> + case rabbit_ct_helpers:get_config(Config, metadata_store) of + khepri -> + {khepri, []}; + {khepri, _FFs0} = Khepri -> + Khepri; + mnesia -> + mnesia; + _ -> + case os:getenv("RABBITMQ_METADATA_STORE") of + "khepri" -> + {khepri, []}; + _ -> + mnesia + end + end. + +configure_metadata_store(Config) -> + ct:pal("Configuring metadata store..."), + case configured_metadata_store(Config) of + {khepri, FFs0} -> + case enable_khepri_metadata_store(Config, FFs0) of + {skip, _} = Skip -> + _ = stop_rabbitmq_nodes(Config), + Skip; + Config1 -> + Config1 + end; + mnesia -> + ct:pal("Enabling Mnesia metadata store"), + Config + end. + +enable_khepri_metadata_store(Config, FFs0) -> + ct:pal("Enabling Khepri metadata store"), + FFs = [khepri_db | FFs0], + lists:foldl(fun(_FF, {skip, _Reason} = Skip) -> + Skip; + (FF, C) -> + case enable_feature_flag(C, FF) of + ok -> + C; + Skip -> + ct:pal("Enabling metadata store failed: ~p", [Skip]), + Skip + end + end, Config, FFs). + +%% Waits until the metadata store replica on Node is up to date with the leader. +await_metadata_store_consistent(Config, Node) -> + case configured_metadata_store(Config) of + mnesia -> + ok; + {khepri, _} -> + RaClusterName = rabbit_khepri:get_ra_cluster_name(), + Leader = rpc(Config, Node, ra_leaderboard, lookup_leader, [RaClusterName]), + LastAppliedLeader = ra_last_applied(Leader), + + NodeName = get_node_config(Config, Node, nodename), + ServerId = {RaClusterName, NodeName}, + rabbit_ct_helpers:eventually( + ?_assert( + begin + LastApplied = ra_last_applied(ServerId), + is_integer(LastApplied) andalso LastApplied >= LastAppliedLeader + end)) + end. + +ra_last_applied(ServerId) -> + #{last_applied := LastApplied} = ra:key_metrics(ServerId), + LastApplied. + rewrite_node_config_file(Config, Node) -> NodeConfig = get_node_config(Config, Node), I = if @@ -1030,8 +1122,14 @@ stop_rabbitmq_nodes(Config) -> case FindCrashes of true -> %% TODO: Make the ignore list configurable. 
- IgnoredCrashes = ["** force_vhost_failure"], - find_crashes_in_logs(NodeConfigs, IgnoredCrashes); + IgnoredCrashes0 = ["** force_vhost_failure"], + case rabbit_ct_helpers:get_config(Config, ignored_crashes) of + undefined -> + find_crashes_in_logs(NodeConfigs, IgnoredCrashes0); + IgnoredCrashes1 -> + find_crashes_in_logs( + NodeConfigs, IgnoredCrashes0 ++ IgnoredCrashes1) + end; false -> ok end, @@ -1113,7 +1211,11 @@ capture_gen_server_termination( Ret = re:run(Line, Prefix ++ "( .*|\\*.*|)$", ReOpts), case Ret of {match, [Suffix]} -> - case lists:member(Suffix, IgnoredCrashes) of + Ignore = lists:any( + fun(IgnoredCrash) -> + string:find(Suffix, IgnoredCrash) =/= nomatch + end, IgnoredCrashes), + case Ignore of false -> capture_gen_server_termination( Rest, Prefix, [Line | Acc], Count, IgnoredCrashes); @@ -1200,31 +1302,28 @@ rabbitmqctl(Config, Node, Args, Timeout) -> _ -> CanUseSecondary end, + WithPlugins0 = rabbit_ct_helpers:get_config(Config, + broker_with_plugins), + WithPlugins = case is_list(WithPlugins0) of + true -> lists:nth(I + 1, WithPlugins0); + false -> WithPlugins0 + end, Rabbitmqctl = case UseSecondaryUmbrella of true -> case BazelRunSecCmd of undefined -> - SrcDir = ?config( - secondary_rabbit_srcdir, - Config), - SecDepsDir = ?config( - secondary_erlang_mk_depsdir, - Config), - SecNewScriptsDir = filename:join( - [SecDepsDir, - SrcDir, - "sbin"]), - SecOldScriptsDir = filename:join( - [SecDepsDir, - "rabbit", - "scripts"]), - SecNewScriptsDirExists = filelib:is_dir( - SecNewScriptsDir), - SecScriptsDir = - case SecNewScriptsDirExists of - true -> SecNewScriptsDir; - false -> SecOldScriptsDir - end, + SrcDir = case WithPlugins of + false -> + ?config( + secondary_rabbit_srcdir, + Config); + _ -> + ?config( + secondary_current_srcdir, + Config) + end, + SecScriptsDir = filename:join( + [SrcDir, "sbin"]), rabbit_misc:format( "~ts/rabbitmqctl", [SecScriptsDir]); _ -> @@ -1270,7 +1369,7 @@ rabbitmqctl_list(Config, Node, Args) -> rabbitmq_queues(Config, Node, Args) -> RabbitmqQueues = ?config(rabbitmq_queues_cmd, Config), - NodeConfig = rabbit_ct_broker_helpers:get_node_config(Config, Node), + NodeConfig = get_node_config(Config, Node), Nodename = ?config(nodename, NodeConfig), Env0 = [ {"RABBITMQ_SCRIPTS_DIR", filename:dirname(RabbitmqQueues)}, @@ -1645,6 +1744,11 @@ set_vhost_limit(Config, Node, VHost, Limit0, Value) -> rabbit_vhost_limit, set, [VHost, Limits, <<"ct-tests">>]). +clear_vhost_limit(Config, Node, VHost) -> + rpc(Config, Node, + rabbit_vhost_limit, clear, + [VHost, <<"ct-tests">>]). + set_user_limits(Config, Username, Limits) -> set_user_limits(Config, 0, Username, Limits). @@ -1786,6 +1890,22 @@ start_node(Config, Node) -> _ -> ok end. +async_start_node(Config, Node) -> + Self = self(), + spawn(fun() -> + Reply = (catch start_node(Config, Node)), + Self ! {async_start_node, Node, Reply} + end), + ok. + +wait_for_async_start_node(Node) -> + receive + {async_start_node, N, Reply} when N == Node -> + Reply + after 600000 -> + timeout + end. + start_broker(Config, Node) -> ok = rpc(Config, Node, rabbit, start, []). @@ -1855,45 +1975,60 @@ await_os_pid_death(Pid) -> end. reset_node(Config, Node) -> - Name = rabbit_ct_broker_helpers:get_node_config(Config, Node, nodename), + Name = get_node_config(Config, Node, nodename), rabbit_control_helper:command(reset, Name). 
force_reset_node(Config, Node) -> - Name = rabbit_ct_broker_helpers:get_node_config(Config, Node, nodename), + Name = get_node_config(Config, Node, nodename), rabbit_control_helper:command(force_reset, Name). forget_cluster_node(Config, Node, NodeToForget) -> forget_cluster_node(Config, Node, NodeToForget, []). forget_cluster_node(Config, Node, NodeToForget, Opts) -> - Name = rabbit_ct_broker_helpers:get_node_config(Config, Node, nodename), + Name = get_node_config(Config, Node, nodename), NameToForget = - rabbit_ct_broker_helpers:get_node_config(Config, NodeToForget, nodename), + get_node_config(Config, NodeToForget, nodename), rabbit_control_helper:command(forget_cluster_node, Name, [NameToForget], Opts). is_feature_flag_enabled(Config, FeatureName) -> - Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - rabbit_ct_broker_helpers:rpc( + Node = get_node_config(Config, 0, nodename), + rpc( Config, Node, rabbit_feature_flags, is_enabled, [FeatureName]). is_feature_flag_supported(Config, FeatureName) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Nodes = get_node_configs(Config, nodename), is_feature_flag_supported(Config, Nodes, FeatureName). is_feature_flag_supported(Config, [Node1 | _] = _Nodes, FeatureName) -> - rabbit_ct_broker_helpers:rpc( + rpc( Config, Node1, rabbit_feature_flags, is_supported, [[FeatureName], 60000]). enable_feature_flag(Config, FeatureName) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Nodes = get_node_configs(Config, nodename), enable_feature_flag(Config, Nodes, FeatureName). -enable_feature_flag(Config, [Node1 | _] = Nodes, FeatureName) -> +enable_feature_flag(Config, Nodes, FeatureName) -> case is_feature_flag_supported(Config, Nodes, FeatureName) of true -> - rabbit_ct_broker_helpers:rpc( - Config, Node1, rabbit_feature_flags, enable, [FeatureName]); + %% Nodes might not be clustered for some test suites, so enabling + %% feature flags on the first one of the list is not enough + lists:foldl( + fun(N, ok) -> + case rpc( + Config, N, rabbit_feature_flags, enable, [FeatureName]) of + {error, unsupported} -> + {skip, + lists:flatten( + io_lib:format("'~ts' feature flag is unsupported", + [FeatureName]))}; + Any -> + Any + end; + (_, Other) -> + Other + end, ok, Nodes); false -> {skip, lists:flatten( @@ -1902,24 +2037,24 @@ enable_feature_flag(Config, [Node1 | _] = Nodes, FeatureName) -> end. mark_as_being_drained(Config, Node) -> - rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_maintenance, mark_as_being_drained, []). + rpc(Config, Node, rabbit_maintenance, mark_as_being_drained, []). unmark_as_being_drained(Config, Node) -> - rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_maintenance, unmark_as_being_drained, []). + rpc(Config, Node, rabbit_maintenance, unmark_as_being_drained, []). drain_node(Config, Node) -> - rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_maintenance, drain, []). + rpc(Config, Node, rabbit_maintenance, drain, []). revive_node(Config, Node) -> - rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_maintenance, revive, []). + rpc(Config, Node, rabbit_maintenance, revive, []). is_being_drained_consistent_read(Config, Node) -> - rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_maintenance, is_being_drained_consistent_read, [Node]). + rpc(Config, Node, rabbit_maintenance, is_being_drained_consistent_read, [Node]). is_being_drained_local_read(Config, Node) -> - rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_maintenance, is_being_drained_local_read, [Node]). 
+ rpc(Config, Node, rabbit_maintenance, is_being_drained_local_read, [Node]). is_being_drained_consistent_read(Config, TargetNode, NodeToCheck) -> - rabbit_ct_broker_helpers:rpc(Config, TargetNode, rabbit_maintenance, is_being_drained_consistent_read, [NodeToCheck]). + rpc(Config, TargetNode, rabbit_maintenance, is_being_drained_consistent_read, [NodeToCheck]). is_being_drained_local_read(Config, TargetNode, NodeToCheck) -> - rabbit_ct_broker_helpers:rpc(Config, TargetNode, rabbit_maintenance, is_being_drained_local_read, [NodeToCheck]). + rpc(Config, TargetNode, rabbit_maintenance, is_being_drained_local_read, [NodeToCheck]). %% From a given list of gen_tcp client connections, return the list of %% connection handler PID in RabbitMQ. @@ -1988,50 +2123,6 @@ clear_operator_policy(Config, Node, Name) -> rpc(Config, Node, rabbit_policy, delete_op, [<<"/">>, Name, <<"acting-user">>]). -set_ha_policy(Config, Node, Pattern, Policy) -> - set_ha_policy(Config, Node, Pattern, Policy, []). - -set_ha_policy(Config, Node, Pattern, Policy, Extra) -> - set_policy(Config, Node, Pattern, Pattern, <<"queues">>, - ha_policy(Policy) ++ Extra). - -ha_policy(<<"all">>) -> [{<<"ha-mode">>, <<"all">>}]; -ha_policy({Mode, Params}) -> [{<<"ha-mode">>, Mode}, - {<<"ha-params">>, Params}]. - -set_ha_policy_all(Config) -> - set_ha_policy(Config, 0, <<".*">>, <<"all">>), - Config. - -set_ha_policy_all(Config, Extra) -> - set_ha_policy(Config, 0, <<".*">>, <<"all">>, Extra), - Config. - -set_ha_policy_two_pos(Config) -> - Members = - [atom_to_binary(N) - || N <- get_node_configs(Config, nodename)], - TwoNodes = [M || M <- lists:sublist(Members, 2)], - set_ha_policy(Config, 0, <<"^ha.two.">>, {<<"nodes">>, TwoNodes}, - [{<<"ha-promote-on-shutdown">>, <<"always">>}]), - set_ha_policy(Config, 0, <<"^ha.auto.">>, {<<"nodes">>, TwoNodes}, - [{<<"ha-sync-mode">>, <<"automatic">>}, - {<<"ha-promote-on-shutdown">>, <<"always">>}]), - Config. - -set_ha_policy_two_pos_batch_sync(Config) -> - Members = - [atom_to_binary(N) - || N <- get_node_configs(Config, nodename)], - TwoNodes = [M || M <- lists:sublist(Members, 2)], - set_ha_policy(Config, 0, <<"^ha.two.">>, {<<"nodes">>, TwoNodes}, - [{<<"ha-promote-on-shutdown">>, <<"always">>}]), - set_ha_policy(Config, 0, <<"^ha.auto.">>, {<<"nodes">>, TwoNodes}, - [{<<"ha-sync-mode">>, <<"automatic">>}, - {<<"ha-sync-batch-size">>, 200}, - {<<"ha-promote-on-shutdown">>, <<"always">>}]), - Config. - %% ------------------------------------------------------------------- %% Parameter helpers. %% ------------------------------------------------------------------- @@ -2123,6 +2214,11 @@ cover_add_node(Node) when is_atom(Node) andalso Node =/= undefined -> if_cover( fun() -> + %% Dependency horus starts registered process cover_server on the RabbitMQ + %% node. If we weren't to stop that process first, ct_cover:add_nodes/1 would + %% print `{error, {already_started, _CoverServerPid}}` and return + %% `{ok, []}` resulting in no test coverage. + ok = erpc:call(Node, cover, stop, []), {ok, [Node]} = ct_cover:add_nodes([Node]) end). @@ -2139,15 +2235,24 @@ cover_remove_node(Config, Node) -> cover_remove_node(Nodename). if_cover(F) -> - case os:getenv("COVER") of - false -> - ok; - _ -> - F() + case { + %% make ct COVER=1 + os:getenv("COVER"), + %% bazel coverage + os:getenv("COVERAGE") + } of + {false, false} -> ok; + _ -> F() end. 
setup_meck(Config) -> - {Mod, Bin, File} = code:get_object_code(meck), - [true | _] = rpc_all(Config, code, add_path, [filename:dirname(File)]), - [{module, Mod} | _] = rpc_all(Config, code, load_binary, [Mod, File, Bin]), - ok. + setup_meck(Config, []). + +setup_meck(Config, LoadModules) + when is_list(LoadModules) -> + lists:foreach( + fun(Mod) -> + {Mod, Bin, File} = code:get_object_code(Mod), + [true | _] = rpc_all(Config, code, add_path, [filename:dirname(File)]), + [{module, Mod} | _] = rpc_all(Config, code, load_binary, [Mod, File, Bin]) + end, [meck | LoadModules]). diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl index 02e5adbde784..7baee0264bb8 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2017-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ct_config_schema). @@ -25,10 +25,14 @@ run_snippets(Config) -> {ok, [Snippets]} = file:consult(?config(conf_snippets, Config)), ct:pal("Loaded config schema snippets: ~tp", [Snippets]), lists:map( - fun({N, S, C, P}) -> ok = test_snippet(Config, {snippet_id(N), S, []}, C, P); - ({N, S, A, C, P}) -> ok = test_snippet(Config, {snippet_id(N), S, A}, C, P) - end, - Snippets), + fun({N, S, C, P}) -> + ok = test_snippet(Config, {snippet_id(N), S, []}, C, P, true); + ({N, S, A, C, P}) -> + ok = test_snippet(Config, {snippet_id(N), S, A}, C, P, true); + ({N, S, A, C, P, nosort}) -> + ok = test_snippet(Config, {snippet_id(N), S, A}, C, P, false) + end, + Snippets), ok. snippet_id(N) when is_integer(N) -> @@ -40,7 +44,7 @@ snippet_id(A) when is_atom(A) -> snippet_id(L) when is_list(L) -> L. -test_snippet(Config, Snippet, Expected, _Plugins) -> +test_snippet(Config, Snippet = {SnipID, _, _}, Expected, _Plugins, Sort) -> {ConfFile, AdvancedFile} = write_snippet(Config, Snippet), %% We ignore the rabbit -> log portion of the config on v3.9+, where the lager %% dependency has been dropped @@ -50,13 +54,17 @@ test_snippet(Config, Snippet, Expected, _Plugins) -> _ -> generate_config(ConfFile, AdvancedFile) end, - Gen = deepsort(Generated), - Exp = deepsort(Expected), + {Exp, Gen} = case Sort of + true -> + {deepsort(Expected), deepsort(Generated)}; + false -> + {Expected, Generated} + end, case Exp of Gen -> ok; _ -> - ct:pal("Expected: ~tp~ngenerated: ~tp", [Expected, Generated]), - ct:pal("Expected (sorted): ~tp~ngenerated (sorted): ~tp", [Exp, Gen]), + ct:pal("Snippet ~tp. Expected: ~tp~ngenerated: ~tp", [SnipID, Expected, Generated]), + ct:pal("Snippet ~tp. Expected (sorted): ~tp~ngenerated (sorted): ~tp", [SnipID, Exp, Gen]), error({config_mismatch, Snippet, Exp, Gen}) end. diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl index bac2f7145723..801de565d125 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. 
All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ct_helpers). @@ -66,14 +66,8 @@ log_environment() -> Vars = lists:sort(fun(A, B) -> A =< B end, os:getenv()), - case file:native_name_encoding() of - latin1 -> - ct:pal(?LOW_IMPORTANCE, "Environment variables:~n~ts", - [[io_lib:format(" ~ts~n", [V]) || V <- Vars]]); - utf8 -> - ct:pal(?LOW_IMPORTANCE, "Environment variables:~n~ts", - [[io_lib:format(" ~ts~n", [V]) || V <- Vars]]) - end. + ct:pal(?LOW_IMPORTANCE, "Environment variables:~n~ts", + [[io_lib:format(" ~ts~n", [V]) || V <- Vars]]). run_setup_steps(Config) -> run_setup_steps(Config, []). @@ -423,8 +417,7 @@ ensure_rabbitmq_run_secondary_cmd(Config) -> end. ensure_erl_call_cmd(Config) -> - ErlCallDir = code:lib_dir(erl_interface, bin), - ErlCall = filename:join(ErlCallDir, "erl_call"), + ErlCall = filename:join(code:lib_dir(erl_interface), "bin/erl_call"), Cmd = [ErlCall], case exec(Cmd, [{match_stdout, "Usage: "}]) of {ok, _} -> set_config(Config, {erl_call_cmd, ErlCall}); @@ -618,7 +611,10 @@ ensure_ssl_certs(Config) -> {verify, Verify}, {fail_if_no_peer_cert, FailIfNoPeerCert} ]}]}), - set_config(Config1, {rmq_certsdir, CertsDir}); + set_config( + Config1, + [{rmq_certsdir, CertsDir}, + {rmq_certspwd, CertsPwd}]); _ -> {skip, "Failed to create SSL certificates"} end. @@ -883,7 +879,10 @@ exec([Cmd | Args], Options) when is_list(Cmd) orelse is_binary(Cmd) -> Env1 = [ begin Key1 = format_arg(Key), - Value1 = format_arg(Value), + Value1 = case Value of + false -> false; + _ -> format_arg(Value) + end, Value2 = case is_binary(Value1) of true -> binary_to_list(Value1); false -> Value1 @@ -897,8 +896,10 @@ exec([Cmd | Args], Options) when is_list(Cmd) orelse is_binary(Cmd) -> | proplists:delete(env, PortOptions1)], Log ++ "~n~nEnvironment variables:~n" ++ string:join( - [rabbit_misc:format(" ~ts=~ts", [K, string:replace(V, "~", "~~", all)]) - || {K, V} <- Env1], + [rabbit_misc:format( + " ~ts=~ts", + [K, string:replace(V, "~", "~~", all)]) + || {K, V} <- Env1, is_list(V) ], "~n") } end, diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_proper_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_proper_helpers.erl index f7e2e1f77715..3980e79b7fa9 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_proper_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_proper_helpers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ct_proper_helpers). diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl index 09055d3995ac..b98cb0dd862a 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -module(rabbit_ct_vm_helpers). diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl b/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl index 0e1f327ad58c..6424df081608 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_test_util). @@ -21,10 +21,11 @@ merge_stats_app_env(Config, Interval, SampleInterval) -> Config1 = rabbit_ct_helpers:merge_app_env( Config, {rabbit, [{collect_statistics_interval, Interval}]}), rabbit_ct_helpers:merge_app_env( - Config1, {rabbitmq_management_agent, [{sample_retention_policies, - [{global, [{605, SampleInterval}]}, - {basic, [{605, SampleInterval}]}, - {detailed, [{10, SampleInterval}]}] }]}). + Config1, {rabbitmq_management_agent, + [{sample_retention_policies, + [{global, [{605, SampleInterval}]}, + {basic, [{605, SampleInterval}]}, + {detailed, [{10, SampleInterval}]}] }]}). http_get_from_node(Config, Node, Path) -> {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} = req(Config, Node, get, Path, [auth_header("guest", "guest")]), @@ -175,6 +176,9 @@ http_delete(Config, Path, User, Pass, CodeExp) -> assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody), decode(CodeExp, Headers, ResBody). +http_get_fails(Config, Path) -> + {error, {failed_connect, _}} = req(Config, get, Path, []). + format_for_upload(none) -> <<"">>; format_for_upload(List) -> diff --git a/deps/rabbitmq_ct_helpers/test/terraform_SUITE.erl b/deps/rabbitmq_ct_helpers/test/terraform_SUITE.erl index c5c375b2082c..261685199c08 100644 --- a/deps/rabbitmq_ct_helpers/test/terraform_SUITE.erl +++ b/deps/rabbitmq_ct_helpers/test/terraform_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(terraform_SUITE). diff --git a/deps/rabbitmq_event_exchange/.gitignore b/deps/rabbitmq_event_exchange/.gitignore index 17df3b3932fd..7734a827c1e7 100644 --- a/deps/rabbitmq_event_exchange/.gitignore +++ b/deps/rabbitmq_event_exchange/.gitignore @@ -1,19 +1 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - test/config_schema_SUITE_data/schema/rabbit.schema - -rabbitmq_event_exchange.d diff --git a/deps/rabbitmq_event_exchange/src/rabbit_event_exchange_decorator.erl b/deps/rabbitmq_event_exchange/src/rabbit_event_exchange_decorator.erl index 36876d834147..86de6fb95122 100644 --- a/deps/rabbitmq_event_exchange/src/rabbit_event_exchange_decorator.erl +++ b/deps/rabbitmq_event_exchange/src/rabbit_event_exchange_decorator.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_event_exchange_decorator). diff --git a/deps/rabbitmq_event_exchange/src/rabbit_exchange_type_event.erl b/deps/rabbitmq_event_exchange/src/rabbit_exchange_type_event.erl index d0d64a1d91e4..70251406b20c 100644 --- a/deps/rabbitmq_event_exchange/src/rabbit_exchange_type_event.erl +++ b/deps/rabbitmq_event_exchange/src/rabbit_exchange_type_event.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_type_event). @@ -38,13 +38,22 @@ info(_X) -> []. info(_X, _) -> []. register() -> - _ = rabbit_exchange:declare(exchange(), topic, true, false, true, [], - ?INTERNAL_USER), - gen_event:add_handler(rabbit_event, ?MODULE, []). + case rabbit_exchange:declare(exchange(), topic, true, false, true, [], + ?INTERNAL_USER) of + {ok, _Exchange} -> + gen_event:add_handler(rabbit_event, ?MODULE, []); + {error, timeout} = Err -> + Err + end. unregister() -> - _ = rabbit_exchange:delete(exchange(), false, ?INTERNAL_USER), - gen_event:delete_handler(rabbit_event, ?MODULE, []). + case rabbit_exchange:ensure_deleted(exchange(), false, ?INTERNAL_USER) of + ok -> + gen_event:delete_handler(rabbit_event, ?MODULE, []), + ok; + {error, _} = Err -> + Err + end. exchange() -> exchange(get_vhost()). @@ -87,7 +96,7 @@ handle_event(#event{type = Type, TS, milli_seconds, seconds)}, Content = rabbit_basic:build_content(PBasic, <<>>), XName = exchange(VHost), - Msg = mc_amqpl:message(XName, Key, Content), + {ok, Msg} = mc_amqpl:message(XName, Key, Content), rabbit_queue_type:publish_at_most_once(XName, Msg) end, {ok, State}; @@ -107,10 +116,7 @@ code_change(_OldVsn, State, _Extra) -> {ok, State}. %%---------------------------------------------------------------------------- ensure_vhost_exists(VHost) -> - case rabbit_vhost:exists(VHost) of - false -> rabbit_vhost:add(VHost, ?INTERNAL_USER); - _ -> ok - end. + rabbit_vhost:add(VHost, ?INTERNAL_USER). %% pattern matching is way more efficient that the string operations, %% let's use all the keys we're aware of to speed up the handler. diff --git a/deps/rabbitmq_event_exchange/test/config_schema_SUITE.erl b/deps/rabbitmq_event_exchange/test/config_schema_SUITE.erl index ad54ac209acd..deb56e410742 100644 --- a/deps/rabbitmq_event_exchange/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_event_exchange/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_event_exchange/test/system_SUITE.erl b/deps/rabbitmq_event_exchange/test/system_SUITE.erl index 88b2170f3b45..3cd01a79e852 100644 --- a/deps/rabbitmq_event_exchange/test/system_SUITE.erl +++ b/deps/rabbitmq_event_exchange/test/system_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(system_SUITE). diff --git a/deps/rabbitmq_event_exchange/test/unit_SUITE.erl b/deps/rabbitmq_event_exchange/test/unit_SUITE.erl index af1da497e59f..878cdd8f5e2a 100644 --- a/deps/rabbitmq_event_exchange/test/unit_SUITE.erl +++ b/deps/rabbitmq_event_exchange/test/unit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_SUITE). diff --git a/deps/rabbitmq_federation/.gitignore b/deps/rabbitmq_federation/.gitignore deleted file mode 100644 index f05d110de329..000000000000 --- a/deps/rabbitmq_federation/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr - -rabbitmq_federation.d diff --git a/deps/rabbitmq_federation/BUILD.bazel b/deps/rabbitmq_federation/BUILD.bazel index 1fb2aad791c2..dc29595fef7c 100644 --- a/deps/rabbitmq_federation/BUILD.bazel +++ b/deps/rabbitmq_federation/BUILD.bazel @@ -69,7 +69,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) @@ -89,6 +89,11 @@ eunit( broker_for_integration_suites() +rabbitmq_integration_suite( + name = "definition_import_SUITE", + size = "small", +) + rabbitmq_integration_suite( name = "exchange_SUITE", size = "large", @@ -96,7 +101,7 @@ rabbitmq_integration_suite( "test/rabbit_federation_test_util.beam", ], flaky = True, - shard_count = 2, + shard_count = 3, ) rabbitmq_integration_suite( diff --git a/deps/rabbitmq_federation/Makefile b/deps/rabbitmq_federation/Makefile index 72b356986b9a..1493d8efea5b 100644 --- a/deps/rabbitmq_federation/Makefile +++ b/deps/rabbitmq_federation/Makefile @@ -16,6 +16,8 @@ endef DEPS = rabbit_common rabbit amqp_client TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers +PLT_APPS += rabbitmqctl + DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_federation/README.md b/deps/rabbitmq_federation/README.md index efebf43d3aa3..d96c13a02e57 100644 --- a/deps/rabbitmq_federation/README.md +++ b/deps/rabbitmq_federation/README.md @@ -1,7 +1,5 @@ ## RabbitMQ Federation -[![Build Status](https://travis-ci.org/rabbitmq/rabbitmq-federation.svg?branch=master)](https://travis-ci.org/rabbitmq/rabbitmq-federation) - RabbitMQ federation offers a group of features for loosely coupled and WAN-friendly distributed RabbitMQ setups. Note that this is not an alternative to queue mirroring. @@ -22,4 +20,4 @@ See [RabbitMQ federation plugin](https://www.rabbitmq.com/federation.html) on ra Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html). -2007-2015 (c) 2007-2020 VMware, Inc. 
or its affiliates. +2007-2015 (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. diff --git a/deps/rabbitmq_federation/app.bzl b/deps/rabbitmq_federation/app.bzl index 2f68ed025bc8..92ec0c82f453 100644 --- a/deps/rabbitmq_federation/app.bzl +++ b/deps/rabbitmq_federation/app.bzl @@ -146,6 +146,15 @@ def all_srcs(name = "all_srcs"): ) def test_suite_beam_files(name = "test_suite_beam_files"): + erlang_bytecode( + name = "definition_import_SUITE_beam_files", + testonly = True, + srcs = ["test/definition_import_SUITE.erl"], + outs = ["test/definition_import_SUITE.beam"], + app_name = "rabbitmq_federation", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/rabbitmq_ct_helpers:erlang_app"], + ) erlang_bytecode( name = "exchange_SUITE_beam_files", testonly = True, @@ -201,6 +210,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): hdrs = ["include/rabbit_federation.hrl"], app_name = "rabbitmq_federation", erlc_opts = "//:test_erlc_opts", + visibility = ["//visibility:public"], deps = ["//deps/amqp_client:erlang_app"], ) erlang_bytecode( diff --git a/deps/rabbitmq_federation/include/rabbit_federation.hrl b/deps/rabbitmq_federation/include/rabbit_federation.hrl index 249ec2dd06c0..a66f384cfc67 100644 --- a/deps/rabbitmq_federation/include/rabbit_federation.hrl +++ b/deps/rabbitmq_federation/include/rabbit_federation.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -record(upstream, {uris, @@ -16,7 +16,7 @@ message_ttl, trust_user_id, ack_mode, - ha_policy, + queue_type, name, bind_nowait, resource_cleanup_mode, @@ -43,6 +43,6 @@ -define(DOWNSTREAM_VHOST_ARG, <<"x-downstream-vhost">>). -define(DEF_PREFETCH, 1000). --define(FEDERATION_GUIDE_URL, <<"https://rabbitmq.com/federation.html">>). +-define(FEDERATION_GUIDE_URL, <<"https://rabbitmq.com/docs/federation/">>). --define(FEDERATION_PG_SCOPE, rabbitmq_federation_pg_scope). \ No newline at end of file +-define(FEDERATION_PG_SCOPE, rabbitmq_federation_pg_scope). diff --git a/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand.erl b/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand.erl index c6370e2d0783..560f11486767 100644 --- a/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand.erl +++ b/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module('Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand'). 
diff --git a/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand.erl b/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand.erl index fc6ae93bca52..bbf4a63c8318 100644 --- a/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand.erl +++ b/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module('Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand'). diff --git a/deps/rabbitmq_federation/src/rabbit_federation_app.erl b/deps/rabbitmq_federation/src/rabbit_federation_app.erl index d5c9e500fda6..b09eb88bfdec 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_app.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_app.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_app). --include("rabbit_federation.hrl"). - -behaviour(application). -export([start/2, stop/1]). diff --git a/deps/rabbitmq_federation/src/rabbit_federation_db.erl b/deps/rabbitmq_federation/src/rabbit_federation_db.erl index 83d3d851e00c..3e4192f8f0d6 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_db.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_db.erl @@ -2,14 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_db). -include("rabbit_federation.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - -define(DICT, orddict). -export([get_active_suffix/3, set_active_suffix/3, prune_scratch/2]). diff --git a/deps/rabbitmq_federation/src/rabbit_federation_event.erl b/deps/rabbitmq_federation/src/rabbit_federation_event.erl index 943aa8b5a008..ad0a2400f970 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_event.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_event.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_event). 
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_exchange.erl b/deps/rabbitmq_federation/src/rabbit_federation_exchange.erl index 0ff8db37a2a2..a233048dfb86 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_exchange.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_exchange.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% TODO rename this diff --git a/deps/rabbitmq_federation/src/rabbit_federation_exchange_link.erl b/deps/rabbitmq_federation/src/rabbit_federation_exchange_link.erl index e71c6a95092b..62920712ec38 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_exchange_link.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_exchange_link.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_exchange_link). @@ -504,15 +504,16 @@ consume_from_upstream_queue( #upstream{prefetch_count = Prefetch, expires = Expiry, message_ttl = TTL, - ha_policy = HA} = Upstream, + queue_type = QueueType} = Upstream, #upstream_params{x_or_q = X, params = Params} = UParams, Q = upstream_queue_name(name(X), vhost(Params), DownXName), Args = [A || {_K, _T, V} = A <- [{<<"x-expires">>, long, Expiry}, {<<"x-message-ttl">>, long, TTL}, - {<<"x-ha-policy">>, longstr, HA}, - {<<"x-internal-purpose">>, longstr, <<"federation">>}], + {<<"x-internal-purpose">>, longstr, <<"federation">>}, + {<<"x-queue-type">>, longstr, atom_to_binary(QueueType)} + ], V =/= none], amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true, diff --git a/deps/rabbitmq_federation/src/rabbit_federation_exchange_link_sup_sup.erl b/deps/rabbitmq_federation/src/rabbit_federation_exchange_link_sup_sup.erl index dd4402e467b9..63011ccd4359 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_exchange_link_sup_sup.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_exchange_link_sup_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_exchange_link_sup_sup). @@ -17,6 +17,7 @@ -export([start_link/0, start_child/1, adjust/1, stop_child/1]). -export([init/1]). +-export([id_to_khepri_path/1]). %%---------------------------------------------------------------------------- @@ -75,5 +76,11 @@ init([]) -> {ok, {{one_for_one, 1200, 60}, []}}. %% See comment in rabbit_federation_queue_link_sup_sup:id/1 -id(X = #exchange{policy = Policy}) -> X1 = rabbit_exchange:immutable(X), - X1#exchange{policy = Policy}. +id(X = #exchange{policy = Policy}) -> + X1 = rabbit_exchange:immutable(X), + X2 = X1#exchange{policy = Policy}, + X2. 
+ +id_to_khepri_path( + #exchange{name = #resource{virtual_host = VHost, name = Name}}) -> + [exchange, VHost, Name]. diff --git a/deps/rabbitmq_federation/src/rabbit_federation_link_sup.erl b/deps/rabbitmq_federation/src/rabbit_federation_link_sup.erl index 59af99c8a419..714db65fe81d 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_link_sup.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_link_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_link_sup). diff --git a/deps/rabbitmq_federation/src/rabbit_federation_link_util.erl b/deps/rabbitmq_federation/src/rabbit_federation_link_util.erl index fb84ee6bf7fc..59cdbc8b1017 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_link_util.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_link_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_link_util). @@ -262,6 +262,9 @@ handle_downstream_down(Reason, _Args, State) -> %% If the upstream channel goes down for an intelligible reason, just %% log it and die quietly. +handle_upstream_down(shutdown, {Upstream, UParams, XName}, State) -> + rabbit_federation_link_util:connection_error( + remote, {upstream_channel_down, shutdown}, Upstream, UParams, XName, State); handle_upstream_down({shutdown, Reason}, {Upstream, UParams, XName}, State) -> rabbit_federation_link_util:connection_error( remote, {upstream_channel_down, Reason}, Upstream, UParams, XName, State); diff --git a/deps/rabbitmq_federation/src/rabbit_federation_parameters.erl b/deps/rabbitmq_federation/src/rabbit_federation_parameters.erl index 3ad219c2e441..f2841dee4091 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_parameters.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_parameters.erl @@ -2,15 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_parameters). -behaviour(rabbit_runtime_parameter). -behaviour(rabbit_policy_validator). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([validate/5, notify/5, notify_clear/4]). -export([register/0, unregister/0, validate_policy/1, adjust/1]). 
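[editor's sketch, not part of the patch] The consume_from_upstream_queue change above swaps the old x-ha-policy argument for x-queue-type on the link's internal upstream queue. A minimal sketch of the effective declare when the upstream sets queue-type = quorum and leaves expires/message-ttl unset (those 'none' values are filtered out of the argument list); the module and function names here are hypothetical, only the argument pairs come from the patch:

-module(federation_upstream_queue_sketch).
-include_lib("amqp_client/include/amqp_client.hrl").
-export([declare_upstream_queue/2]).

%% Sketch: declare the federation link's internal upstream queue as a quorum queue.
declare_upstream_queue(Ch, UpstreamQueueName) ->
    #'queue.declare_ok'{} =
        amqp_channel:call(Ch, #'queue.declare'{
                                 queue     = UpstreamQueueName,
                                 durable   = true,
                                 arguments = [{<<"x-internal-purpose">>, longstr, <<"federation">>},
                                              {<<"x-queue-type">>,       longstr, <<"quorum">>}]}),
    ok.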
@@ -89,7 +87,8 @@ shared_validation() -> ['no-ack', 'on-publish', 'on-confirm']), optional}, {<<"resource-cleanup-mode">>, rabbit_parameter_validation:enum( ['default', 'never']), optional}, - {<<"ha-policy">>, fun rabbit_parameter_validation:binary/2, optional}, + {<<"queue-type">>, rabbit_parameter_validation:enum( + ['classic', 'quorum']), optional}, {<<"bind-nowait">>, fun rabbit_parameter_validation:boolean/2, optional}, {<<"channel-use-mode">>, rabbit_parameter_validation:enum( ['multiple', 'single']), optional}]. diff --git a/deps/rabbitmq_federation/src/rabbit_federation_pg.erl b/deps/rabbitmq_federation/src/rabbit_federation_pg.erl index 4a272e07b8ae..946c0c90e34e 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_pg.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_pg.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_pg). @@ -22,4 +22,4 @@ stop_scope() -> exit(Pid, normal); _ -> ok - end. \ No newline at end of file + end. diff --git a/deps/rabbitmq_federation/src/rabbit_federation_queue.erl b/deps/rabbitmq_federation/src/rabbit_federation_queue.erl index d64d677506ab..e77f5dc81b60 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_queue.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_queue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_queue). @@ -17,7 +17,6 @@ {enables, recovery}]}). -include_lib("rabbit/include/amqqueue.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_federation.hrl"). -behaviour(rabbit_queue_decorator). diff --git a/deps/rabbitmq_federation/src/rabbit_federation_queue_link.erl b/deps/rabbitmq_federation/src/rabbit_federation_queue_link.erl index f4b2290ba885..621d08e33923 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_queue_link.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_queue_link.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_queue_link). diff --git a/deps/rabbitmq_federation/src/rabbit_federation_queue_link_sup_sup.erl b/deps/rabbitmq_federation/src/rabbit_federation_queue_link_sup_sup.erl index fc7e8e36d489..0b7f8adba2bc 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_queue_link_sup_sup.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_queue_link_sup_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. 
All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_queue_link_sup_sup). @@ -18,6 +18,7 @@ -export([start_link/0, start_child/1, adjust/1, stop_child/1]). -export([init/1]). +-export([id_to_khepri_path/1]). %%---------------------------------------------------------------------------- @@ -88,4 +89,9 @@ init([]) -> id(Q) when ?is_amqqueue(Q) -> Policy = amqqueue:get_policy(Q), Q1 = amqqueue:set_immutable(Q), - amqqueue:set_policy(Q1, Policy). + Q2 = amqqueue:set_policy(Q1, Policy), + Q2. + +id_to_khepri_path(Id) when ?is_amqqueue(Id) -> + #resource{virtual_host = VHost, name = Name} = amqqueue:get_name(Id), + [queue, VHost, Name]. diff --git a/deps/rabbitmq_federation/src/rabbit_federation_status.erl b/deps/rabbitmq_federation/src/rabbit_federation_status.erl index 67427cf31a3f..3d5dff131ea9 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_status.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_status.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_status). @@ -13,7 +13,7 @@ -export([start_link/0]). --export([report/4, remove_exchange_or_queue/1, remove/2, status/0, lookup/1]). +-export([report/4, remove_exchange_or_queue/1, remove/2, status/0, status/1, lookup/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). @@ -41,7 +41,10 @@ remove(Upstream, XorQName) -> gen_server:call(?SERVER, {remove, Upstream, XorQName}, infinity). status() -> - gen_server:call(?SERVER, status, infinity). + status(infinity). + +status(Timeout) -> + gen_server:call(?SERVER, status, Timeout). lookup(Id) -> gen_server:call(?SERVER, {lookup, Id}, infinity). diff --git a/deps/rabbitmq_federation/src/rabbit_federation_sup.erl b/deps/rabbitmq_federation/src/rabbit_federation_sup.erl index 737640001035..c63fd8511e89 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_sup.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_sup). diff --git a/deps/rabbitmq_federation/src/rabbit_federation_upstream.erl b/deps/rabbitmq_federation/src/rabbit_federation_upstream.erl index 161264c86a1a..7e303a030856 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_upstream.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_upstream.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -module(rabbit_federation_upstream). @@ -136,7 +136,7 @@ from_upstream_or_set(US, Name, U, XorQ) -> message_ttl = bget('message-ttl', US, U, none), trust_user_id = bget('trust-user-id', US, U, false), ack_mode = to_atom(bget('ack-mode', US, U, <<"on-confirm">>)), - ha_policy = bget('ha-policy', US, U, none), + queue_type = to_atom(bget('queue-type', US, U, <<"classic">>)), name = Name, bind_nowait = bget('bind-nowait', US, U, false), resource_cleanup_mode = to_atom(bget('resource-cleanup-mode', US, U, <<"default">>)), diff --git a/deps/rabbitmq_federation/src/rabbit_federation_upstream_exchange.erl b/deps/rabbitmq_federation/src/rabbit_federation_upstream_exchange.erl index 17958c06f613..4d183e9e46b7 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_upstream_exchange.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_upstream_exchange.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_upstream_exchange). @@ -78,11 +78,11 @@ validate(#exchange{arguments = Args}) -> rabbit_federation_util:validate_arg(?MAX_HOPS_ARG, long, Args). validate_binding(_X, _B) -> ok. -create(_Tx, _X) -> ok. -delete(_Tx, _X) -> ok. +create(_Serial, _X) -> ok. +delete(_Serial, _X) -> ok. policy_changed(_X1, _X2) -> ok. -add_binding(_Tx, _X, _B) -> ok. -remove_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Serial, _X, _B) -> ok. +remove_bindings(_Serial, _X, _Bs) -> ok. assert_args_equivalence(X = #exchange{name = Name, arguments = Args}, ReqArgs) -> diff --git a/deps/rabbitmq_federation/src/rabbit_federation_util.erl b/deps/rabbitmq_federation/src/rabbit_federation_util.erl index c50dbbb76467..a04dd929f4e4 100644 --- a/deps/rabbitmq_federation/src/rabbit_federation_util.erl +++ b/deps/rabbitmq_federation/src/rabbit_federation_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_util). diff --git a/deps/rabbitmq_federation/src/rabbit_log_federation.erl b/deps/rabbitmq_federation/src/rabbit_log_federation.erl index 315f386eda9c..968c2c6e9b0c 100644 --- a/deps/rabbitmq_federation/src/rabbit_log_federation.erl +++ b/deps/rabbitmq_federation/src/rabbit_log_federation.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc Compatibility module for the old Lager-based logging API. 
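[editor's sketch, not part of the patch] On the configuration side, the new 'queue-type' upstream key is validated as an enum of 'classic' | 'quorum' (rabbit_federation_parameters above) and defaults to classic (rabbit_federation_upstream above). A minimal, test-helper-style sketch of declaring such an upstream together with an exchange-federation policy, using the same rabbit_ct_broker_helpers calls the test suites in this patch use; the upstream name, exchange name and policy pattern are illustrative only:

%% Sketch: upstream whose internal federation queue is a quorum queue,
%% plus a policy that federates matching exchanges to it.
setup_quorum_upstream(Config) ->
    rabbit_ct_broker_helpers:set_parameter(
      Config, 0, <<"federation-upstream">>, <<"origin">>,
      [{<<"uri">>,        rabbit_ct_broker_helpers:node_uri(Config, 0)},
       {<<"exchange">>,   <<"upstream.x">>},
       {<<"queue-type">>, <<"quorum">>}]),   %% replaces the removed <<"ha-policy">> key
    rabbit_ct_broker_helpers:set_policy(
      Config, 0, <<"fed.x">>, <<"^federated">>, <<"exchanges">>,
      [{<<"federation-upstream">>, <<"origin">>}]),
    ok.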
diff --git a/deps/rabbitmq_federation/test/definition_import_SUITE.erl b/deps/rabbitmq_federation/test/definition_import_SUITE.erl new file mode 100644 index 000000000000..44f4afe3bb65 --- /dev/null +++ b/deps/rabbitmq_federation/test/definition_import_SUITE.erl @@ -0,0 +1,146 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(definition_import_SUITE). + +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-compile(export_all). + +all() -> + [ + {group, roundtrip} + ]. + +groups() -> + [ + {roundtrip, [], [ + export_import_round_trip + ]} + ]. + +%% ------------------------------------------------------------------- +%% Test suite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + inets:start(), + Config. +end_per_suite(Config) -> + Config. + +init_per_group(Group, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Group} + ]), + rabbit_ct_helpers:run_setup_steps(Config1, rabbit_ct_broker_helpers:setup_steps()). + +end_per_group(_, Config) -> + rabbit_ct_helpers:run_teardown_steps(Config, rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% +%% Tests +%% + +export_import_round_trip(Config) -> + case rabbit_ct_helpers:is_mixed_versions() of + false -> + import_file_case(Config, "case1"), + Defs = export(Config), + import_raw(Config, rabbit_json:encode(Defs)); + _ -> + %% skip the test in mixed version mode + {skip, "Should not run in mixed version environments"} + end. + +%% +%% Implementation +%% + +import_file_case(Config, CaseName) -> + CasePath = filename:join([ + ?config(data_dir, Config), + CaseName ++ ".json" + ]), + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_import_case, [CasePath]), + ok. + + +import_raw(Config, Body) -> + case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_definitions, import_raw, [Body]) of + ok -> ok; + {error, E} -> + ct:pal("Import of JSON definitions ~tp failed: ~tp~n", [Body, E]), + ct:fail({expected_failure, Body, E}) + end. + +export(Config) -> + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_export, []). + +run_export() -> + rabbit_definitions:all_definitions(). + +run_directory_import_case(Path, Expected) -> + ct:pal("Will load definitions from files under ~tp~n", [Path]), + Result = rabbit_definitions:maybe_load_definitions_from(true, Path), + case Expected of + ok -> + ok = Result; + error -> + ?assertMatch({error, {failed_to_import_definitions, _, _}}, Result) + end. + +run_import_case(Path) -> + {ok, Body} = file:read_file(Path), + ct:pal("Successfully loaded a definition to import from ~tp~n", [Path]), + case rabbit_definitions:import_raw(Body) of + ok -> ok; + {error, E} -> + ct:pal("Import case ~tp failed: ~tp~n", [Path, E]), + ct:fail({expected_failure, Path, E}) + end. 
+ +run_invalid_import_case(Path) -> + {ok, Body} = file:read_file(Path), + ct:pal("Successfully loaded a definition file at ~tp~n", [Path]), + case rabbit_definitions:import_raw(Body) of + ok -> + ct:pal("Expected import case ~tp to fail~n", [Path]), + ct:fail({expected_failure, Path}); + {error, _E} -> ok + end. + +run_invalid_import_case_if_unchanged(Path) -> + Mod = rabbit_definitions_import_local_filesystem, + ct:pal("Successfully loaded a definition to import from ~tp~n", [Path]), + case rabbit_definitions:maybe_load_definitions_from_local_filesystem_if_unchanged(Mod, false, Path) of + ok -> + ct:pal("Expected import case ~tp to fail~n", [Path]), + ct:fail({expected_failure, Path}); + {error, _E} -> ok + end. + +queue_lookup(Config, VHost, Name) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, lookup, [rabbit_misc:r(VHost, queue, Name)]). + +vhost_lookup(Config, VHost) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, lookup, [VHost]). + +user_lookup(Config, User) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, lookup_user, [User]). + +delete_vhost(Config, VHost) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, delete, [VHost, <<"CT tests">>]). diff --git a/deps/rabbitmq_federation/test/definition_import_SUITE_data/case1.json b/deps/rabbitmq_federation/test/definition_import_SUITE_data/case1.json new file mode 100644 index 000000000000..e549e4fd6c1d --- /dev/null +++ b/deps/rabbitmq_federation/test/definition_import_SUITE_data/case1.json @@ -0,0 +1,52 @@ +{ + "permissions": [ + { + "configure": ".*", + "read": ".*", + "user": "guest", + "vhost": "/", + "write": ".*" + } + ], + "bindings": [], + "queues": [], + "parameters": [ + { + "component": "federation-upstream-set", + "name": "location-1", + "value": [ + { + "upstream":"up-1" + }, + { + "upstream":"up-2" + } + ], + "vhost":"/"}], + "policies": [], + "rabbitmq_version": "3.13.0+376.g1bc0d89.dirty", + "users": [ + { + "hashing_algorithm": "rabbit_password_hashing_sha256", + "limits": {}, + "name": "guest", + "password_hash": "jTcCKuOmGJeeRQ/K1LG5sdZLcdnEnqv8wcrP2n68R7nMuqy2", + "tags": ["administrator"] + } + ], + "rabbit_version": "3.13.0+376.g1bc0d89.dirty", + "exchanges": [], + "topic_permissions": [], + "vhosts": [ + { + "limits": [], + "metadata": + { + "description": "Default virtual host", + "tags": [] + }, + "name":"/" + } + ], + "global_parameters": [] +} diff --git a/deps/rabbitmq_federation/test/exchange_SUITE.erl b/deps/rabbitmq_federation/test/exchange_SUITE.erl index 3857b020c0b0..bd66a2ad71cf 100644 --- a/deps/rabbitmq_federation/test/exchange_SUITE.erl +++ b/deps/rabbitmq_federation/test/exchange_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(exchange_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). @@ -30,14 +29,27 @@ all() -> [ {group, essential}, - {group, cluster_size_3} - %% {group, channel_use_mode_single} + {group, cluster_size_3}, + {group, rolling_upgrade} ]. 
groups() -> - [ - {essential, [], [ + [ + {essential, [], essential()}, + {cluster_size_3, [], [max_hops]}, + {rolling_upgrade, [], [child_id_format]}, + {cycle_protection, [], [ + %% TBD: port from v3.10.x in an Erlang 25-compatible way + ]}, + {channel_use_mod_single, [], [ + %% TBD: port from v3.10.x in an Erlang 25-compatible way + ]} + ]. + +essential() -> + [ single_upstream, + single_upstream_quorum, multiple_upstreams, multiple_upstreams_pattern, single_upstream_multiple_uris, @@ -47,18 +59,10 @@ groups() -> unbind_on_client_unbind, exchange_federation_link_status, lookup_exchange_status - ]}, - {cluster_size_3, [], [ - max_hops - ]}, - {cycle_protection, [], [ - %% TBD: port from v3.10.x in an Erlang 25-compatible way - ]}, - {channel_use_mod_single, [], [ - %% TBD: port from v3.10.x in an Erlang 25-compatible way - ]} - ]. + ]. +suite() -> + [{timetrap, {minutes, 3}}]. %% ------------------------------------------------------------------- %% Setup/teardown. @@ -72,7 +76,7 @@ end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). %% Some of the "regular" tests but in the single channel mode. -init_per_group(channel_use_mode_single, Config) -> +init_per_group(essential, Config) -> SetupFederation = [ fun(Config1) -> rabbit_federation_test_util:setup_federation_with_upstream_params(Config1, [ @@ -83,38 +87,21 @@ init_per_group(channel_use_mode_single, Config) -> Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, Suffix}, - {rmq_nodes_clustered, false} + {rmq_nodes_count, 1} ]), rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps() ++ SetupFederation); -init_per_group(cycle_detection, Config) -> - Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, Suffix}, - {rmq_nodes_clustered, false}, - {rmq_nodes_count, 1} - ]), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()); -init_per_group(without_plugins, Config) -> - rabbit_ct_helpers:set_config(Config, - {broker_with_plugins, [true, false]}); -init_per_group(cluster_size_1 = Group, Config) -> +init_per_group(cluster_size_3 = Group, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 1} + {rmq_nodes_count, 3} ]), init_per_group1(Group, Config1); -init_per_group(cluster_size_2 = Group, Config) -> +init_per_group(rolling_upgrade = Group, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 2} - ]), - init_per_group1(Group, Config1); -init_per_group(cluster_size_3 = Group, Config) -> - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodes_count, 3} + {rmq_nodes_count, 5}, + {rmq_nodes_clustered, false} ]), init_per_group1(Group, Config1); init_per_group(Group, Config) -> @@ -131,8 +118,6 @@ init_per_group1(_Group, Config) -> rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). 
-end_per_group(without_plugins, Config) -> - Config; end_per_group(_, Config) -> rabbit_ct_helpers:run_steps(Config, rabbit_ct_client_helpers:teardown_steps() ++ @@ -178,9 +163,46 @@ single_upstream(Config) -> await_binding(Config, 0, UpX, RK), publish_expect(Ch, UpX, RK, Q, <<"single_upstream payload">>), + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + assert_federation_internal_queue_type(Config, Server, rabbit_classic_queue), + rabbit_ct_client_helpers:close_channel(Ch), clean_up_federation_related_bits(Config). +single_upstream_quorum(Config) -> + FedX = <<"single_upstream_quorum.federated">>, + UpX = <<"single_upstream_quorum.upstream.x">>, + rabbit_ct_broker_helpers:set_parameter( + Config, 0, <<"federation-upstream">>, <<"localhost">>, + [ + {<<"uri">>, rabbit_ct_broker_helpers:node_uri(Config, 0)}, + {<<"exchange">>, UpX}, + {<<"queue-type">>, <<"quorum">>} + ]), + rabbit_ct_broker_helpers:set_policy( + Config, 0, + <<"fed.x">>, <<"^single_upstream_quorum.federated">>, <<"exchanges">>, + [ + {<<"federation-upstream">>, <<"localhost">>} + ]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + + Xs = [ + exchange_declare_method(FedX) + ], + declare_exchanges(Ch, Xs), + + RK = <<"key">>, + Q = declare_and_bind_queue(Ch, FedX, RK), + await_binding(Config, 0, UpX, RK), + publish_expect(Ch, UpX, RK, Q, <<"single_upstream_quorum payload">>), + + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + assert_federation_internal_queue_type(Config, Server, rabbit_quorum_queue), + + rabbit_ct_client_helpers:close_channel(Ch), + clean_up_federation_related_bits(Config). multiple_upstreams(Config) -> FedX = <<"multiple_upstreams.federated">>, @@ -562,6 +584,119 @@ lookup_exchange_status(Config) -> [key, uri, status, timestamp, id, supervisor, upstream]), clean_up_federation_related_bits(Config). + +child_id_format(Config) -> + [UpstreamNode, + OldNodeA, + NewNodeB, + OldNodeC, + NewNodeD] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + + %% Create a cluster with the nodes running the old version of RabbitMQ in + %% mixed-version testing. + %% + %% Note: we build this on the assumption that `rabbit_ct_broker_helpers' + %% starts nodes this way: + %% Node 1: the primary copy of RabbitMQ the test is started from + %% Node 2: the secondary umbrella (if any) + %% Node 3: the primary copy + %% Node 4: the secondary umbrella + %% ... + %% + %% Therefore, `UpstreamNode' will use the primary copy, `OldNodeA' the + %% secondary umbrella, `NewNodeB' the primary copy, and so on. + Config1 = rabbit_ct_broker_helpers:cluster_nodes( + Config, [OldNodeA, OldNodeC]), + + %% Prepare the whole federated exchange on that old cluster. + UpstreamName = <<"fed_on_upgrade">>, + rabbit_ct_broker_helpers:set_parameter( + Config1, OldNodeA, <<"federation-upstream">>, UpstreamName, + [ + {<<"uri">>, rabbit_ct_broker_helpers:node_uri(Config1, UpstreamNode)} + ]), + + rabbit_ct_broker_helpers:set_policy( + Config1, OldNodeA, + <<"fed_on_upgrade_policy">>, <<"^fed_">>, <<"all">>, + [ + {<<"federation-upstream-pattern">>, UpstreamName} + ]), + + XName = <<"fed_ex_on_upgrade_cluster">>, + X = exchange_declare_method(XName, <<"direct">>), + {Conn1, Ch1} = rabbit_ct_client_helpers:open_connection_and_channel( + Config1, OldNodeA), + ?assertEqual({'exchange.declare_ok'}, declare_exchange(Ch1, X)), + rabbit_ct_client_helpers:close_channel(Ch1), + rabbit_ct_client_helpers:close_connection(Conn1), + + %% Verify the format of the child ID. 
In the main branch, the format was + %% temporarily a size-2 tuple with a list as the first element. This was + %% not kept later and the original ID format is used in old and new nodes. + [{Id, _, _, _}] = rabbit_ct_broker_helpers:rpc( + Config1, OldNodeA, + mirrored_supervisor, which_children, + [rabbit_federation_exchange_link_sup_sup]), + case Id of + %% This is the format we expect everywhere. + #exchange{name = #resource{name = XName}} -> + %% Verify that the supervisors exist on all nodes. + lists:foreach( + fun(Node) -> + ?assertMatch( + [{#exchange{name = #resource{name = XName}}, + _, _, _}], + rabbit_ct_broker_helpers:rpc( + Config1, Node, + mirrored_supervisor, which_children, + [rabbit_federation_exchange_link_sup_sup])) + end, [OldNodeA, OldNodeC]), + + %% Simulate a rolling upgrade by: + %% 1. adding new nodes to the old cluster + %% 2. stopping the old nodes + %% + %% After that, the supervisors run on the new code. + Config2 = rabbit_ct_broker_helpers:cluster_nodes( + Config1, [OldNodeA, NewNodeB, NewNodeD]), + ok = rabbit_ct_broker_helpers:stop_broker(Config2, OldNodeA), + ok = rabbit_ct_broker_helpers:reset_node(Config1, OldNodeA), + ok = rabbit_ct_broker_helpers:stop_broker(Config2, OldNodeC), + ok = rabbit_ct_broker_helpers:reset_node(Config2, OldNodeC), + + %% Verify that the supervisors still use the same IDs. + lists:foreach( + fun(Node) -> + ?assertMatch( + [{#exchange{name = #resource{name = XName}}, + _, _, _}], + rabbit_ct_broker_helpers:rpc( + Config2, Node, + mirrored_supervisor, which_children, + [rabbit_federation_exchange_link_sup_sup])) + end, [NewNodeB, NewNodeD]), + + %% Delete the exchange: it should work because the ID format is the + %% one expected. + %% + %% During the transient period where the ID format was changed, + %% this would crash with a badmatch because the running + %% supervisor's ID would not match the content of the database. + {Conn2, Ch2} = rabbit_ct_client_helpers:open_connection_and_channel( + Config2, NewNodeB), + ?assertEqual({'exchange.delete_ok'}, delete_exchange(Ch2, XName)), + rabbit_ct_client_helpers:close_channel(Ch2), + rabbit_ct_client_helpers:close_connection(Conn2); + + %% This is the transient format we are not interested in as it only + %% lived in a development branch. + {List, #exchange{name = #resource{name = XName}}} + when is_list(List) -> + {skip, "Testcase skipped with the transiently changed ID format"} + end. + %% %% Test helpers %% @@ -727,15 +862,15 @@ await_binding(Config, Node, Vhost, X, Key, ExpectedBindingCount) when is_integer await_binding(_Config, _Node, _Vhost, _X, _Key, ExpectedBindingCount, 0) -> {error, rabbit_misc:format("expected ~b bindings but they did not materialize in time", [ExpectedBindingCount])}; await_binding(Config, Node, Vhost, X, Key, ExpectedBindingCount, AttemptsLeft) when is_integer(ExpectedBindingCount) -> - case bound_keys_from(Config, Node, Vhost, X, Key) of - Bs when length(Bs) < ExpectedBindingCount -> - timer:sleep(100), - await_binding(Config, Node, Vhost, X, Key, ExpectedBindingCount, AttemptsLeft - 1); - Bs when length(Bs) =:= ExpectedBindingCount -> - ok; - Bs -> - {error, rabbit_misc:format("expected ~b bindings, got ~b", [ExpectedBindingCount, length(Bs)])} - end. 
+ case bound_keys_from(Config, Node, Vhost, X, Key) of + Bs when length(Bs) < ExpectedBindingCount -> + timer:sleep(1000), + await_binding(Config, Node, Vhost, X, Key, ExpectedBindingCount, AttemptsLeft - 1); + Bs when length(Bs) =:= ExpectedBindingCount -> + ok; + Bs -> + {error, rabbit_misc:format("expected ~b bindings, got ~b", [ExpectedBindingCount, length(Bs)])} + end. await_bindings(Config, Node, X, Keys) -> [await_binding(Config, Node, X, Key) || Key <- Keys]. @@ -771,4 +906,15 @@ await_credentials_obfuscation_seeding_on_two_nodes(Config) -> rabbit_ct_broker_helpers:rpc(Config, 1, credentials_obfuscation, enabled, []) end), - timer:sleep(1000). \ No newline at end of file + timer:sleep(1000). + +assert_federation_internal_queue_type(Config, Server, Expected) -> + Qs = all_queues_on(Config, Server), + FedQs = lists:filter( + fun(Q) -> + lists:member( + {<<"x-internal-purpose">>, longstr, <<"federation">>}, amqqueue:get_arguments(Q)) + end, + Qs), + FedQTypes = lists:map(fun(Q) -> amqqueue:get_type(Q) end, FedQs), + ?assertEqual([Expected], lists:uniq(FedQTypes)). diff --git a/deps/rabbitmq_federation/test/federation_status_command_SUITE.erl b/deps/rabbitmq_federation/test/federation_status_command_SUITE.erl index 990bd159eaf9..229afd494d4d 100644 --- a/deps/rabbitmq_federation/test/federation_status_command_SUITE.erl +++ b/deps/rabbitmq_federation/test/federation_status_command_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(federation_status_command_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). diff --git a/deps/rabbitmq_federation/test/queue_SUITE.erl b/deps/rabbitmq_federation/test/queue_SUITE.erl index 795f25a7b1db..77afe87a1236 100644 --- a/deps/rabbitmq_federation/test/queue_SUITE.erl +++ b/deps/rabbitmq_federation/test/queue_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(queue_SUITE). @@ -32,39 +32,28 @@ all() -> ]. groups() -> - ClusterSize1 = [simple, - multiple_upstreams_pattern, - multiple_downstreams, - message_flow, - dynamic_reconfiguration, - federate_unfederate, - dynamic_plugin_stop_start - ], - ClusterSize2 = [restart_upstream], - [{classic_queue, [], [ - {without_disambiguate, [], [ - {cluster_size_1, [], ClusterSize1} - ]}, - {with_disambiguate, [], [ - {cluster_size_2, [], ClusterSize2} - ]} - ]}, - {quorum_queue, [], [ - {without_disambiguate, [], [ - {cluster_size_1, [], ClusterSize1} - ]}, - {with_disambiguate, [], [ - {cluster_size_2, [], ClusterSize2} - ]} - ]}, - {mixed, [], [ - {without_disambiguate, [], [ - {cluster_size_1, [], ClusterSize1} - ]}, - {with_disambiguate, [], [ - {cluster_size_2, [], ClusterSize2} - ]} - ]} + [ + {classic_queue, [], all_tests()}, + {quorum_queue, [], all_tests()}, + {mixed, [], all_tests()} + ]. 
+ +all_tests() -> + [ + {without_disambiguate, [], [ + {cluster_size_1, [], [ + simple, + multiple_upstreams_pattern, + multiple_downstreams, + message_flow, + dynamic_reconfiguration, + federate_unfederate, + dynamic_plugin_stop_start + ]} + ]}, + {with_disambiguate, [], [ + {cluster_size_2, [], [restart_upstream]} + ]} ]. %% ------------------------------------------------------------------- diff --git a/deps/rabbitmq_federation/test/rabbit_federation_status_SUITE.erl b/deps/rabbitmq_federation/test/rabbit_federation_status_SUITE.erl index bcc842c7c95f..645ebd0fb423 100644 --- a/deps/rabbitmq_federation/test/rabbit_federation_status_SUITE.erl +++ b/deps/rabbitmq_federation/test/rabbit_federation_status_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_status_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_federation.hrl"). diff --git a/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl b/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl index 29f54863b401..209cbb2b3faa 100644 --- a/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl +++ b/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_test_util). @@ -94,13 +94,15 @@ setup_federation_with_upstream_params(Config, ExtraParams) -> ] ]), - rabbit_ct_broker_helpers:set_policy(Config, 0, - <<"fed">>, <<"^fed\.">>, <<"all">>, [ - {<<"federation-upstream-set">>, <<"upstream">>}]), + rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_policy, set, + [<<"/">>, <<"fed">>, <<"^fed\.">>, [{<<"federation-upstream-set">>, <<"upstream">>}], + 0, <<"all">>, <<"acting-user">>]), - rabbit_ct_broker_helpers:set_policy(Config, 0, - <<"fed12">>, <<"^fed12\.">>, <<"all">>, [ - {<<"federation-upstream-set">>, <<"upstream12">>}]), + rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_policy, set, + [<<"/">>, <<"fed12">>, <<"^fed12\.">>, [{<<"federation-upstream-set">>, <<"upstream12">>}], + 2, <<"all">>, <<"acting-user">>]), rabbit_ct_broker_helpers:set_policy(Config, 0, <<"one">>, <<"^two$">>, <<"all">>, [ @@ -191,7 +193,7 @@ expect(Payloads, Timeout) -> false -> ?assert(false, rabbit_misc:format("received an unexpected payload ~tp", [Payload])) end after Timeout -> - ?assert(false, rabbit_misc:format("Did not receive expected payloads ~tp in time", [Payloads])) + ct:fail("Did not receive expected payloads ~tp in time", [Payloads]) end. 
expect_empty(Ch, Q) -> diff --git a/deps/rabbitmq_federation/test/restart_federation_link_command_SUITE.erl b/deps/rabbitmq_federation/test/restart_federation_link_command_SUITE.erl index c91a20602706..f7c1d14a8def 100644 --- a/deps/rabbitmq_federation/test/restart_federation_link_command_SUITE.erl +++ b/deps/rabbitmq_federation/test/restart_federation_link_command_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(restart_federation_link_command_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). diff --git a/deps/rabbitmq_federation/test/unit_SUITE.erl b/deps/rabbitmq_federation/test/unit_SUITE.erl index 4f56d45249a2..bd4d83e11c54 100644 --- a/deps/rabbitmq_federation/test/unit_SUITE.erl +++ b/deps/rabbitmq_federation/test/unit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_SUITE). diff --git a/deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl b/deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl index 50f7c24af44c..dfc3a10086db 100644 --- a/deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl +++ b/deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_inbroker_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -201,10 +200,12 @@ upstream_validation(_Config) -> ok. with_exchanges(Fun) -> - rabbit_exchange:declare(r(?US_NAME), fanout, false, false, false, [], - <<"acting-user">>), - X = rabbit_exchange:declare(r(?DS_NAME), fanout, false, false, false, [], - <<"acting-user">>), + {ok, _} = rabbit_exchange:declare( + r(?US_NAME), fanout, false, false, false, [], + <<"acting-user">>), + {ok, X} = rabbit_exchange:declare( + r(?DS_NAME), fanout, false, false, false, [], + <<"acting-user">>), Fun(X), %% Delete downstream first or it will recreate the upstream rabbit_exchange:delete(r(?DS_NAME), false, <<"acting-user">>), diff --git a/deps/rabbitmq_federation_management/.gitignore b/deps/rabbitmq_federation_management/.gitignore deleted file mode 100644 index 04f4e80c4fdd..000000000000 --- a/deps/rabbitmq_federation_management/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? 
-*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -/rabbitmq_federation_management.d diff --git a/deps/rabbitmq_federation_management/BUILD.bazel b/deps/rabbitmq_federation_management/BUILD.bazel index 54754c11aa94..10d8c0af0e3c 100644 --- a/deps/rabbitmq_federation_management/BUILD.bazel +++ b/deps/rabbitmq_federation_management/BUILD.bazel @@ -67,7 +67,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", ) diff --git a/deps/rabbitmq_federation_management/README.md b/deps/rabbitmq_federation_management/README.md index 182da17c5a6a..25eb14b2a027 100644 --- a/deps/rabbitmq_federation_management/README.md +++ b/deps/rabbitmq_federation_management/README.md @@ -39,6 +39,6 @@ and see under the `./plugins` directory. ## Copyright and License -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. See `LICENSE` for license information. diff --git a/deps/rabbitmq_federation_management/priv/www/js/federation.js b/deps/rabbitmq_federation_management/priv/www/js/federation.js index 4ea78c932dbe..e31fbae6a685 100644 --- a/deps/rabbitmq_federation_management/priv/www/js/federation.js +++ b/deps/rabbitmq_federation_management/priv/www/js/federation.js @@ -75,8 +75,8 @@ HELP['federation-expires'] = HELP['federation-ttl'] = 'Time in milliseconds that undelivered messages should be held upstream when there is a network outage or backlog. Leave this blank to mean "forever".'; -HELP['ha-policy'] = - 'Determines the "x-ha-policy" argument for the upstream queue for a federated exchange. Default is "none", meaning the queue is not HA.'; +HELP['queue-type'] = + 'Defines the queue type for the upstream queue for a federated exchange. Default is "classic". Set to "quorum" for high availability.'; HELP['queue'] = 'The name of the upstream queue. 
Default is to use the same name as the federated queue.'; diff --git a/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstream.ejs b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstream.ejs index 6fad08dc931b..d6918b79fd2e 100644 --- a/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstream.ejs +++ b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstream.ejs @@ -56,8 +56,8 @@ - HA Policy - <%= fmt_string(upstream.value['ha-policy']) %> + Queue Type + <%= fmt_string(upstream.value['queue-type']) %> diff --git a/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstreams.ejs b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstreams.ejs index 838eac1eb3b4..3e5504671ed0 100644 --- a/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstreams.ejs +++ b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstreams.ejs @@ -19,7 +19,7 @@ Max Hops Expiry Message TTL - HA Policy + Queue Type Queue Consumer tag @@ -43,7 +43,7 @@ <%= upstream.value['max-hops'] %> <%= fmt_time(upstream.value.expires, 'ms') %> <%= fmt_time(upstream.value['message-ttl'], 'ms') %> - <%= fmt_string(upstream.value['ha-policy']) %> + <%= fmt_string(upstream.value['queue-type']) %> <%= fmt_string(upstream.value['queue']) %> <%= fmt_string(upstream.value['consumer-tag']) %> @@ -195,11 +195,11 @@ - + diff --git a/deps/rabbitmq_federation_management/src/rabbit_federation_mgmt.erl b/deps/rabbitmq_federation_management/src/rabbit_federation_mgmt.erl index 91ec316d89f4..22e217e33028 100644 --- a/deps/rabbitmq_federation_management/src/rabbit_federation_mgmt.erl +++ b/deps/rabbitmq_federation_management/src/rabbit_federation_mgmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_federation_mgmt). diff --git a/deps/rabbitmq_federation_management/test/federation_mgmt_SUITE.erl b/deps/rabbitmq_federation_management/test/federation_mgmt_SUITE.erl index d829f9ddb4bc..3462b647e55c 100644 --- a/deps/rabbitmq_federation_management/test/federation_mgmt_SUITE.erl +++ b/deps/rabbitmq_federation_management/test/federation_mgmt_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(federation_mgmt_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). 
diff --git a/deps/rabbitmq_federation_prometheus/BUILD.bazel b/deps/rabbitmq_federation_prometheus/BUILD.bazel new file mode 100644 index 000000000000..b6a8c641f149 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/BUILD.bazel @@ -0,0 +1,117 @@ +load("@rules_erlang//:eunit2.bzl", "eunit") +load("@rules_erlang//:xref2.bzl", "xref") +load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") +load("//:rabbitmq_home.bzl", "rabbitmq_home") +load("//:rabbitmq_run.bzl", "rabbitmq_run") +load( + "//:rabbitmq.bzl", + "BROKER_VERSION_REQUIREMENTS_ANY", + "RABBITMQ_DIALYZER_OPTS", + "assert_suites", + "rabbitmq_app", + "rabbitmq_integration_suite", +) +load( + ":app.bzl", + "all_beam_files", + "all_srcs", + "all_test_beam_files", + "test_suite_beam_files", +) + +APP_NAME = "rabbitmq_federation_prometheus" + +APP_DESCRIPTION = "Prometheus extension for the Federation plugin" + +APP_ENV = """[ +]""" + +all_srcs(name = "all_srcs") + +all_beam_files(name = "all_beam_files") + +all_test_beam_files(name = "all_test_beam_files") + +test_suite_beam_files(name = "test_suite_beam_files") + +# gazelle:erlang_app_extra_app crypto + +# gazelle:erlang_app_dep rabbit +# gazelle:erlang_app_dep rabbitmq_prometheus + +# gazelle:erlang_app_dep_exclude prometheus + +rabbitmq_app( + name = "erlang_app", + srcs = [":all_srcs"], + hdrs = [":public_hdrs"], + app_description = APP_DESCRIPTION, + app_env = APP_ENV, + app_extra_keys = BROKER_VERSION_REQUIREMENTS_ANY, + app_module = "rabbit_federation_prometheus_app", + app_name = APP_NAME, + beam_files = [":beam_files"], + extra_apps = [ + "crypto", + ], + license_files = [":license_files"], + priv = [":priv"], + deps = [ + "//deps/rabbit:erlang_app", + "//deps/rabbitmq_federation:erlang_app", + "//deps/rabbitmq_prometheus:erlang_app", + ], +) + +xref( + name = "xref", + target = ":erlang_app", +) + +plt( + name = "deps_plt", + for_target = ":erlang_app", + ignore_warnings = True, + libs = ["@rules_elixir//elixir"], # keep + plt = "//:base_plt", +) + +dialyze( + name = "dialyze", + dialyzer_opts = RABBITMQ_DIALYZER_OPTS, + plt = ":deps_plt", + target = ":erlang_app", +) + +eunit( + name = "eunit", + target = ":test_erlang_app", +) + +rabbitmq_home( + name = "broker-for-tests-home", + plugins = [ + "//deps/rabbit:erlang_app", + ":erlang_app", + ], +) + +rabbitmq_run( + name = "rabbitmq-for-tests-run", + home = ":broker-for-tests-home", +) + +rabbitmq_integration_suite( + name = "prometheus_rabbitmq_federation_collector_SUITE", + size = "small", + additional_beam = [ + ], +) + +assert_suites() + +alias( + name = "rabbitmq_federation_prometheus", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) diff --git a/deps/rabbitmq_federation_prometheus/CODE_OF_CONDUCT.md b/deps/rabbitmq_federation_prometheus/CODE_OF_CONDUCT.md new file mode 120000 index 000000000000..a3613c99f0b0 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +../../CODE_OF_CONDUCT.md \ No newline at end of file diff --git a/deps/rabbitmq_federation_prometheus/CONTRIBUTING.md b/deps/rabbitmq_federation_prometheus/CONTRIBUTING.md new file mode 120000 index 000000000000..f939e75f21a8 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/CONTRIBUTING.md @@ -0,0 +1 @@ +../../CONTRIBUTING.md \ No newline at end of file diff --git a/deps/rabbitmq_federation_prometheus/LICENSE b/deps/rabbitmq_federation_prometheus/LICENSE new file mode 100644 index 000000000000..46e08bb41d0b --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/LICENSE @@ -0,0 +1 @@ +This package 
is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ. \ No newline at end of file diff --git a/deps/rabbitmq_federation_prometheus/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_federation_prometheus/LICENSE-MPL-RabbitMQ new file mode 100644 index 000000000000..14e2f777f6c3 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/LICENSE-MPL-RabbitMQ @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/deps/rabbitmq_federation_prometheus/Makefile b/deps/rabbitmq_federation_prometheus/Makefile new file mode 100644 index 000000000000..3d069be8ed41 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/Makefile @@ -0,0 +1,16 @@ +PROJECT = rabbitmq_federation_prometheus +PROJECT_DESCRIPTION = Exposes rabbitmq_federation metrics to Prometheus +PROJECT_MOD = rabbit_federation_prometheus_app + +define PROJECT_APP_EXTRA_KEYS + {broker_version_requirements, []} +endef + +DEPS = rabbit_common rabbit rabbitmq_federation rabbitmq_prometheus +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers eunit_formatters + +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk + +include ../../rabbitmq-components.mk +include ../../erlang.mk diff --git a/deps/rabbitmq_federation_prometheus/README.md b/deps/rabbitmq_federation_prometheus/README.md new file mode 100644 index 000000000000..2651c440499b --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/README.md @@ -0,0 +1,16 @@ +# RabbitMQ Federation Prometheus + +This plugin adds Federation metrics to prometheus + +## Installation + +This plugin ships with RabbitMQ. Like all other plugins, it must be enabled +before it can be used: + +```bash +[sudo] rabbitmq-plugins enable rabbitmq_federation_prometheus +``` + +## License + +See [LICENSE](./LICENSE). diff --git a/deps/rabbitmq_federation_prometheus/app.bzl b/deps/rabbitmq_federation_prometheus/app.bzl new file mode 100644 index 000000000000..405196d21119 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/app.bzl @@ -0,0 +1,89 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlang_bytecode") +load("@rules_erlang//:filegroup.bzl", "filegroup") + +def all_beam_files(name = "all_beam_files"): + filegroup( + name = "beam_files", + srcs = [":other_beam"], + ) + erlang_bytecode( + name = "other_beam", + srcs = [ + "src/rabbit_federation_prometheus_app.erl", + "src/rabbit_federation_prometheus_collector.erl", + "src/rabbit_federation_prometheus_sup.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_federation_prometheus", + dest = "ebin", + erlc_opts = "//:erlc_opts", + deps = ["@prometheus//:erlang_app"], + ) + +def all_srcs(name = "all_srcs"): + filegroup( + name = "all_srcs", + srcs = [":public_and_private_hdrs", ":srcs"], + ) + filegroup( + name = "public_and_private_hdrs", + srcs = [":private_hdrs", ":public_hdrs"], + ) + + filegroup( + name = "priv", + ) + + filegroup( + name = "srcs", + srcs = [ + "src/rabbit_federation_prometheus_app.erl", + "src/rabbit_federation_prometheus_collector.erl", + "src/rabbit_federation_prometheus_sup.erl", + ], + ) + filegroup( + name = "private_hdrs", + ) + filegroup( + name = "public_hdrs", + ) + filegroup( + name = "license_files", + srcs = [ + "LICENSE", + "LICENSE-MPL-RabbitMQ", + ], + ) + +def all_test_beam_files(name = "all_test_beam_files"): + filegroup( + name = "test_beam_files", + testonly = True, + srcs = [":test_other_beam"], + ) + erlang_bytecode( + name = "test_other_beam", + testonly = True, + srcs = [ + "src/rabbit_federation_prometheus_app.erl", + "src/rabbit_federation_prometheus_collector.erl", + "src/rabbit_federation_prometheus_sup.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_federation_prometheus", + dest = "test", + erlc_opts = "//:test_erlc_opts", + deps = ["@prometheus//:erlang_app"], + ) + +def test_suite_beam_files(name = "test_suite_beam_files"): + erlang_bytecode( + name = 
"prometheus_rabbitmq_federation_collector_SUITE_beam_files", + testonly = True, + srcs = ["test/prometheus_rabbitmq_federation_collector_SUITE.erl"], + outs = ["test/prometheus_rabbitmq_federation_collector_SUITE.beam"], + app_name = "rabbitmq_federation_prometheus", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", "@prometheus//:erlang_app"], + ) diff --git a/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_app.erl b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_app.erl new file mode 100644 index 000000000000..fda59b4620e8 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_app.erl @@ -0,0 +1,27 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_federation_prometheus_app). + +-behavior(application). + +-export([start/0, stop/0, start/2, stop/1]). + +start(normal, []) -> + {ok, _} = application:ensure_all_started(prometheus), + _ = rabbit_federation_prometheus_collector:start(), + rabbit_federation_prometheus_sup:start_link(). + +stop(_State) -> + _ = rabbit_federation_prometheus_collector:stop(), + ok. + + +start() -> + _ = rabbit_federation_prometheus_collector:start(). + +stop() -> ok. + diff --git a/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_collector.erl b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_collector.erl new file mode 100644 index 000000000000..12db4594ddac --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_collector.erl @@ -0,0 +1,51 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_federation_prometheus_collector). + +-behaviour(prometheus_collector). + +-export([start/0, stop/0]). +-export([deregister_cleanup/1, + collect_mf/2]). + +-import(prometheus_model_helpers, [create_mf/4]). + +%%==================================================================== +%% Collector API +%%==================================================================== + +start() -> + {ok, _} = application:ensure_all_started(prometheus), + prometheus_registry:register_collector(?MODULE). + +stop() -> + prometheus_registry:deregister_collector(?MODULE). + +%%==================================================================== +%% Collector API +%%==================================================================== + +deregister_cleanup(_) -> ok. 
+ +collect_mf(_Registry, Callback) -> + Status = rabbit_federation_status:status(500), + StatusGroups = lists:foldl(fun(S, Acc) -> + %% note Init value set to 1 because if status seen first time + %% update with will take Init and put into Acc, wuthout calling fun + maps:update_with(proplists:get_value(status, S), fun(C) -> C + 1 end, 1, Acc) + end, #{}, Status), + Metrics = [{rabbitmq_federation_links, gauge, "Number of federation links", + [{[{status, S}], C} || {S, C} <- maps:to_list(StatusGroups)]}], + _ = [add_metric_family(Metric, Callback) || Metric <- Metrics], + ok. + +add_metric_family({Name, Type, Help, Metrics}, Callback) -> + Callback(create_mf(Name, Help, Type, Metrics)). + +%%==================================================================== +%% Private Parts +%%==================================================================== diff --git a/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_sup.erl b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_sup.erl new file mode 100644 index 000000000000..e9106c29b31f --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_sup.erl @@ -0,0 +1,20 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_federation_prometheus_sup). + +-behaviour(supervisor). + +-export([start_link/0]). +-export([init/1]). + +start_link() -> + supervisor:start_link(?MODULE, []). + +init(_Args) -> + SupFlags = #{strategy => one_for_one, intensity => 1, period => 5}, + ChildSpecs = [], + {ok, {SupFlags, ChildSpecs}}. diff --git a/deps/rabbitmq_federation_prometheus/test/prometheus_rabbitmq_federation_collector_SUITE.erl b/deps/rabbitmq_federation_prometheus/test/prometheus_rabbitmq_federation_collector_SUITE.erl new file mode 100644 index 000000000000..5a15a0ffb4d9 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/test/prometheus_rabbitmq_federation_collector_SUITE.erl @@ -0,0 +1,303 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(prometheus_rabbitmq_federation_collector_SUITE). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("prometheus/include/prometheus_model.hrl"). + +-compile(export_all). + +-define(ONE_RUNNING_METRIC, #'MetricFamily'{name = <<"rabbitmq_federation_links">>, + help = "Number of federation links", + type = 'GAUGE', + metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, + value = <<"running">>}], + gauge = #'Gauge'{value = 1}}]}). + +-define(TWO_RUNNING_METRIC, #'MetricFamily'{name = <<"rabbitmq_federation_links">>, + help = "Number of federation links", + type = 'GAUGE', + metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, + value = <<"running">>}], + gauge = #'Gauge'{value = 2}}]}). 
+ +-define(ONE_RUNNING_ONE_STARTING_METRIC, #'MetricFamily'{name = <<"rabbitmq_federation_links">>, + help = "Number of federation links", + type = 'GAUGE', + metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, + value = <<"running">>}], + gauge = #'Gauge'{value = 1}}, + #'Metric'{label = [#'LabelPair'{name = <<"status">>, + value = <<"starting">>}], + gauge = #'Gauge'{value = 1}}]}). + + +all() -> + [ + {group, non_parallel_tests} + ]. + +groups() -> + [ + {non_parallel_tests, [], [ + single_link_then_second_added, + two_links_from_the_start + ]} + ]. + +suite() -> + [{timetrap, {minutes, 5}}]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, ?MODULE} + ]), + rabbit_ct_helpers:run_setup_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps() ++ + [fun setup_federation/1]). +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Test cases +%% ------------------------------------------------------------------- + +single_link_then_second_added(Config) -> + with_ch( + Config, + fun (Ch) -> + timer:sleep(3000), + [_L1] = rabbit_ct_broker_helpers:rpc(Config, 0, + rabbit_federation_status, status, []), + MFs = get_metrics(Config), + [?ONE_RUNNING_METRIC] = MFs, + maybe_declare_queue(Config, Ch, q(<<"fed.downstream2">>, [{<<"x-queue-type">>, longstr, <<"classic">>}])), + %% here we race against queue.declare... most of the times there is going to be + %% new status=starting metric. In this case we wait a bit more for running=2. + %% But running=2 is also possible first time if rpc for some reason is slow. + %% And of course simple running=1 possible too if queue.declare is really slow + MFs1 = get_metrics(Config), + case MFs1 of + [?TWO_RUNNING_METRIC] -> ok; + [?ONE_RUNNING_METRIC] -> + rabbit_ct_helpers:eventually(?_assertEqual([?TWO_RUNNING_METRIC], + get_metrics(Config)), + 500, + 5); + [?ONE_RUNNING_ONE_STARTING_METRIC] -> + rabbit_ct_helpers:eventually(?_assertEqual([?TWO_RUNNING_METRIC], + get_metrics(Config)), + 500, + 5) + + end, + + delete_all(Ch, [q(<<"fed.downstream2">>, [{<<"x-queue-type">>, longstr, <<"classic">>}])]) + end, upstream_downstream()). + +two_links_from_the_start(Config) -> + with_ch( + Config, + fun (_Ch) -> + timer:sleep(3000), + [_L1 | _L2] = rabbit_ct_broker_helpers:rpc(Config, 0, + rabbit_federation_status, status, []), + MFs = get_metrics(Config), + [?TWO_RUNNING_METRIC] = MFs + + end, upstream_downstream() ++ [q(<<"fed.downstream2">>, [{<<"x-queue-type">>, longstr, <<"classic">>}])]). + +%% ------------------------------------------------------------------- +%% +%% ------------------------------------------------------------------- + +upstream_downstream() -> + [q(<<"upstream">>, undefined), q(<<"fed.downstream">>, undefined)]. 
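+
+%% Descriptive note (editor addition): get_metrics/1 below runs the collector
+%% inside the broker node over RPC, reusing the collect_mf/2 helper defined at
+%% the bottom of this suite to accumulate the metric families produced by
+%% rabbit_federation_prometheus_collector.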
+ +get_metrics(Config) -> + rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, collect_mf, + [default, rabbit_federation_prometheus_collector]). + + + + +setup_federation(Config) -> + setup_federation_with_upstream_params(Config, []). + +setup_federation_with_upstream_params(Config, ExtraParams) -> + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream">>, <<"localhost">>, [ + {<<"uri">>, rabbit_ct_broker_helpers:node_uri(Config, 0)}, + {<<"consumer-tag">>, <<"fed.tag">>} + ] ++ ExtraParams), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream">>, <<"local5673">>, [ + {<<"uri">>, <<"amqp://localhost:1">>} + ] ++ ExtraParams), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"upstream">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"upstream">>}, + {<<"queue">>, <<"upstream">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"upstream2">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"upstream2">>}, + {<<"queue">>, <<"upstream2">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"localhost">>, [ + [{<<"upstream">>, <<"localhost">>}] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"upstream12">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"upstream">>}, + {<<"queue">>, <<"upstream">>} + ], [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"upstream2">>}, + {<<"queue">>, <<"upstream2">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"one">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"one">>}, + {<<"queue">>, <<"one">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"two">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"two">>}, + {<<"queue">>, <<"two">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"upstream5673">>, [ + [ + {<<"upstream">>, <<"local5673">>}, + {<<"exchange">>, <<"upstream">>} + ] + ]), + + rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_policy, set, + [<<"/">>, <<"fed">>, <<"^fed\.">>, [{<<"federation-upstream-set">>, <<"upstream">>}], + 0, <<"all">>, <<"acting-user">>]), + + rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_policy, set, + [<<"/">>, <<"fed12">>, <<"^fed12\.">>, [{<<"federation-upstream-set">>, <<"upstream12">>}], + 2, <<"all">>, <<"acting-user">>]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"one">>, <<"^two$">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"one">>}]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"two">>, <<"^one$">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"two">>}]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"hare">>, <<"^hare\.">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"upstream5673">>}]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"all">>, <<"^all\.">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"all">>}]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"new">>, <<"^new\.">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"new-set">>}]), + Config. + +with_ch(Config, Fun, Methods) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + declare_all(Config, Ch, Methods), + %% Clean up queues even after test failure. 
+ try + Fun(Ch) + after + delete_all(Ch, Methods), + rabbit_ct_client_helpers:close_channel(Ch) + end, + ok. + +declare_all(Config, Ch, Methods) -> [maybe_declare_queue(Config, Ch, Op) || Op <- Methods]. +delete_all(Ch, Methods) -> + [delete_queue(Ch, Q) || #'queue.declare'{queue = Q} <- Methods]. + +maybe_declare_queue(Config, Ch, Method) -> + OneOffCh = rabbit_ct_client_helpers:open_channel(Config), + try + amqp_channel:call(OneOffCh, Method#'queue.declare'{passive = true}) + catch exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _Message}}, _} -> + amqp_channel:call(Ch, Method) + after + catch rabbit_ct_client_helpers:close_channel(OneOffCh) + end. + +delete_queue(Ch, Q) -> + amqp_channel:call(Ch, #'queue.delete'{queue = Q}). + +q(Name) -> + q(Name, []). + +q(Name, undefined) -> + q(Name, []); +q(Name, Args) -> + #'queue.declare'{queue = Name, + durable = true, + arguments = Args}. + +-define(PD_KEY, metric_families). +collect_mf(Registry, Collector) -> + put(?PD_KEY, []), + Collector:collect_mf(Registry, fun(MF) -> put(?PD_KEY, [MF | get(?PD_KEY)]) end), + MFs = lists:reverse(get(?PD_KEY)), + erase(?PD_KEY), + MFs. diff --git a/deps/rabbitmq_jms_topic_exchange/.gitignore b/deps/rabbitmq_jms_topic_exchange/.gitignore deleted file mode 100644 index 6870ec18a4f8..000000000000 --- a/deps/rabbitmq_jms_topic_exchange/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -rabbitmq_jms_topic_exchange.d diff --git a/deps/rabbitmq_jms_topic_exchange/BUILD.bazel b/deps/rabbitmq_jms_topic_exchange/BUILD.bazel index 39e3ff2eca48..e3e49612b060 100644 --- a/deps/rabbitmq_jms_topic_exchange/BUILD.bazel +++ b/deps/rabbitmq_jms_topic_exchange/BUILD.bazel @@ -26,8 +26,6 @@ APP_MODULE = "rabbit_federation_app" all_beam_files(name = "all_beam_files") -all_test_beam_files(name = "all_test_beam_files") - all_srcs(name = "all_srcs") test_suite_beam_files(name = "test_suite_beam_files") @@ -48,6 +46,8 @@ rabbitmq_app( deps = [ "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", ], ) @@ -102,3 +102,5 @@ alias( actual = ":erlang_app", visibility = ["//visibility:public"], ) + +all_test_beam_files(name = "all_test_beam_files") diff --git a/deps/rabbitmq_jms_topic_exchange/Makefile b/deps/rabbitmq_jms_topic_exchange/Makefile index 199c42238f98..d30c8823199c 100644 --- a/deps/rabbitmq_jms_topic_exchange/Makefile +++ b/deps/rabbitmq_jms_topic_exchange/Makefile @@ -1,9 +1,9 @@ PROJECT = rabbitmq_jms_topic_exchange PROJECT_DESCRIPTION = RabbitMQ JMS topic selector exchange plugin -LOCAL_DEPS = mnesia -DEPS = rabbit_common rabbit +DEPS = rabbit_common rabbit khepri khepri_mnesia_migration TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client +LOCAL_DEPS = mnesia DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_jms_topic_exchange/README.md b/deps/rabbitmq_jms_topic_exchange/README.md index 5cb2ff9305d9..33b49bef5f65 100644 --- a/deps/rabbitmq_jms_topic_exchange/README.md +++ b/deps/rabbitmq_jms_topic_exchange/README.md @@ -49,6 +49,6 @@ and filtering implied by the topic name. ## Copyright and License -(c) 2007-2023 VMware, Inc. or its affiliates. +(c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
See [LICENSE](./LICENSE) for license information. diff --git a/deps/rabbitmq_jms_topic_exchange/app.bzl b/deps/rabbitmq_jms_topic_exchange/app.bzl index 3fe5ff18a359..5c73214ef386 100644 --- a/deps/rabbitmq_jms_topic_exchange/app.bzl +++ b/deps/rabbitmq_jms_topic_exchange/app.bzl @@ -10,6 +10,7 @@ def all_beam_files(name = "all_beam_files"): name = "other_beam", srcs = [ "src/rabbit_db_jms_exchange.erl", + "src/rabbit_db_jms_exchange_m2k_converter.erl", "src/rabbit_jms_topic_exchange.erl", "src/sjx_evaluator.erl", ], @@ -20,6 +21,8 @@ def all_beam_files(name = "all_beam_files"): deps = [ "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", ], ) @@ -34,6 +37,7 @@ def all_test_beam_files(name = "all_test_beam_files"): testonly = True, srcs = [ "src/rabbit_db_jms_exchange.erl", + "src/rabbit_db_jms_exchange_m2k_converter.erl", "src/rabbit_jms_topic_exchange.erl", "src/sjx_evaluator.erl", ], @@ -44,6 +48,8 @@ def all_test_beam_files(name = "all_test_beam_files"): deps = [ "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", ], ) @@ -65,6 +71,7 @@ def all_srcs(name = "all_srcs"): name = "srcs", srcs = [ "src/rabbit_db_jms_exchange.erl", + "src/rabbit_db_jms_exchange_m2k_converter.erl", "src/rabbit_jms_topic_exchange.erl", "src/sjx_evaluator.erl", ], diff --git a/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl b/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl index 2157a84389bc..86e4687e4ade 100644 --- a/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl +++ b/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% ----------------------------------------------------------------------------- %% JMS on Rabbit Topic Selector Exchange plugin definitions diff --git a/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange.erl b/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange.erl index b772093336b6..999003be7285 100644 --- a/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange.erl +++ b/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange.erl @@ -2,11 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% ----------------------------------------------------------------------------- -module(rabbit_db_jms_exchange). -include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include("rabbit_jms_topic_exchange.hrl"). -export([ @@ -18,22 +19,70 @@ delete/3 ]). +-export([ + khepri_jms_topic_exchange_path/0, + khepri_jms_topic_exchange_path/1 + ]). + +-rabbit_mnesia_tables_to_khepri_db( + [{?JMS_TOPIC_TABLE, rabbit_db_jms_exchange_m2k_converter}]). 
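+
+%% Descriptive note (editor addition): the attribute above registers
+%% rabbit_db_jms_exchange_m2k_converter (added later in this patch) as the
+%% module that copies ?JMS_TOPIC_TABLE records into Khepri when the metadata
+%% store is migrated from Mnesia.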
+ %% ------------------------------------------------------------------- %% setup_schema() %% ------------------------------------------------------------------- setup_schema() -> - setup_schema_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> setup_schema_in_mnesia() end, + khepri => ok + }). setup_schema_in_mnesia() -> - case mnesia:create_table( ?JMS_TOPIC_TABLE - , [ {attributes, record_info(fields, ?JMS_TOPIC_RECORD)} - , {record_name, ?JMS_TOPIC_RECORD} - , {type, set} ] - ) of - {atomic, ok} -> ok; - {aborted, {already_exists, ?JMS_TOPIC_TABLE}} -> ok - end, + TableName = ?JMS_TOPIC_TABLE, + rabbit_log:info( + "Creating table ~ts for JMS topic exchange", + [TableName]), + _ = try + rabbit_table:create( + TableName, + [{attributes, record_info(fields, ?JMS_TOPIC_RECORD)}, + {record_name, ?JMS_TOPIC_RECORD}, + {type, set}]), + %% The JMS topic exchange table must be available on all nodes. + %% If it existed on only one node, messages could not be published + %% to JMS topic exchanges and routed to topic subscribers if the node + %% was unavailable. + %% The call below makes sure this node has a copy of the table. + case rabbit_table:ensure_table_copy(TableName, node(), ram_copies) of + ok -> + %% Next, we try to fix other nodes in the cluster if they are + %% running a version of RabbitMQ which does not replicate the + %% table. All nodes must have a replica for Mnesia operations + %% to work properly. Therefore the code below is to make older + %% compatible with newer nodes. + Replicas = mnesia:table_info(TableName, all_nodes), + Members = rabbit_nodes:list_running(), + MissingOn = Members -- Replicas, + lists:foreach( + fun(Node) -> + %% Errors from adding a replica on those older nodes + %% are ignored however. They should not be fatal. The + %% problem will solve by itself once all nodes are + %% upgraded. + _ = rpc:call( + Node, + rabbit_table, ensure_table_copy, + [TableName, Node, ram_copies]) + end, MissingOn), + ok; + Error -> + Error + end + catch throw:Reason -> + rabbit_log:error( + "Failed to create JMS topic exchange table: ~tp", + [Reason]) + end, ok. %% ------------------------------------------------------------------- @@ -41,7 +90,12 @@ setup_schema_in_mnesia() -> %% ------------------------------------------------------------------- create_or_update(XName, BindingKeyAndFun, ErrorFun) -> - create_or_update_in_mnesia(XName, BindingKeyAndFun, ErrorFun). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> create_or_update_in_mnesia(XName, BindingKeyAndFun, ErrorFun) end, + khepri => + fun() -> update_in_khepri(XName, BindingKeyAndFun, fun put_item/2, ErrorFun) end + }). create_or_update_in_mnesia(XName, BindingKeyAndFun, ErrorFun) -> rabbit_mnesia:execute_mnesia_transaction( @@ -51,12 +105,33 @@ create_or_update_in_mnesia(XName, BindingKeyAndFun, ErrorFun) -> write_state_fun_in_mnesia(XName, put_item(BindingFuns, BindingKeyAndFun)) end). +update_in_khepri(XName, BindingKeyAndFun, UpdateFun, ErrorFun) -> + Path = khepri_jms_topic_exchange_path(XName), + case rabbit_khepri:adv_get(Path) of + {ok, #{data := BindingFuns, payload_version := DVersion}} -> + Path1 = khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = DVersion}]), + Ret = rabbit_khepri:put(Path1, UpdateFun(BindingFuns, BindingKeyAndFun)), + case Ret of + ok -> ok; + {error, {khepri, mismatching_node, _}} -> + update_in_khepri(XName, BindingKeyAndFun, UpdateFun, ErrorFun); + {error, _} -> + ErrorFun(XName) + end; + _Err -> + ErrorFun(XName) + end. 
+ %% ------------------------------------------------------------------- %% insert(). %% ------------------------------------------------------------------- insert(XName, BFuns) -> - insert_in_mnesia(XName, BFuns). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> insert_in_mnesia(XName, BFuns) end, + khepri => fun() -> insert_in_khepri(XName, BFuns) end + }). insert_in_mnesia(XName, BFuns) -> rabbit_mnesia:execute_mnesia_transaction( @@ -64,12 +139,18 @@ insert_in_mnesia(XName, BFuns) -> write_state_fun_in_mnesia(XName, BFuns) end). +insert_in_khepri(XName, BFuns) -> + ok = rabbit_khepri:put(khepri_jms_topic_exchange_path(XName), BFuns). + %% ------------------------------------------------------------------- %% get(). %% ------------------------------------------------------------------- get(XName) -> - get_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(XName) end, + khepri => fun() -> get_in_khepri(XName) end + }). get_in_mnesia(XName) -> mnesia:async_dirty( @@ -84,19 +165,38 @@ get_in_mnesia(XName) -> [] ). +get_in_khepri(XName) -> + case rabbit_khepri:get(khepri_jms_topic_exchange_path(XName)) of + {ok, BindingFuns} -> + BindingFuns; + _ -> + not_found + end. + %% ------------------------------------------------------------------- %% delete(). %% ------------------------------------------------------------------- delete(XName) -> - delete_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(XName) end, + khepri => fun() -> delete_in_khepri(XName) end + }). delete_in_mnesia(XName) -> rabbit_mnesia:execute_mnesia_transaction( fun() -> mnesia:delete(?JMS_TOPIC_TABLE, XName, write) end). +delete_in_khepri(XName) -> + rabbit_khepri:delete(khepri_jms_topic_exchange_path(XName)). + delete(XName, BindingKeys, ErrorFun) -> - delete_in_mnesia(XName, BindingKeys, ErrorFun). + rabbit_khepri:handle_fallback( + #{mnesia => + fun() -> delete_in_mnesia(XName, BindingKeys, ErrorFun) end, + khepri => + fun() -> update_in_khepri(XName, BindingKeys, fun remove_items/2, ErrorFun) end + }). delete_in_mnesia(XName, BindingKeys, ErrorFun) -> rabbit_mnesia:execute_mnesia_transaction( @@ -127,3 +227,13 @@ put_item(Dict, {Key, Item}) -> dict:store(Key, Item, Dict). % remove a list of keyed items from the dictionary, by key remove_items(Dict, []) -> Dict; remove_items(Dict, [Key | Keys]) -> remove_items(dict:erase(Key, Dict), Keys). + +%% ------------------------------------------------------------------- +%% paths +%% ------------------------------------------------------------------- + +khepri_jms_topic_exchange_path(#resource{virtual_host = VHost, name = Name}) -> + [?MODULE, jms_topic_exchange, VHost, Name]. + +khepri_jms_topic_exchange_path() -> + [?MODULE, jms_topic_exchange]. diff --git a/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange_m2k_converter.erl b/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange_m2k_converter.erl new file mode 100644 index 000000000000..13b28f791951 --- /dev/null +++ b/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange_m2k_converter.erl @@ -0,0 +1,100 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_jms_exchange_m2k_converter). 
+ +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-include("rabbit_jms_topic_exchange.hrl"). + +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3, + clear_data_in_khepri/1]). + +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, _Tables) -> + State = #?MODULE{}, + {ok, State}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +copy_to_khepri( + ?JMS_TOPIC_TABLE = Table, #?JMS_TOPIC_RECORD{x_name = XName, x_selector_funs = BFuns}, + State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, XName], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_jms_exchange:khepri_jms_topic_exchange_path(XName), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, BFuns, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +delete_from_khepri(?JMS_TOPIC_TABLE = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_jms_exchange:khepri_jms_topic_exchange_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +clear_data_in_khepri(?JMS_TOPIC_TABLE) -> + Path = rabbit_db_jms_exchange:khepri_jms_topic_exchange_path(), + case rabbit_khepri:delete(Path) of + ok -> + ok; + Error -> + throw(Error) + end. diff --git a/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl b/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl index 64d45dec79d6..a07c03589eb9 100644 --- a/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl +++ b/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2012-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% ----------------------------------------------------------------------------- %% JMS on Rabbit Selector Exchange plugin @@ -90,13 +90,12 @@ serialise_events() -> false. % Route messages route(#exchange{name = XName}, Msg, _Opts) -> - RKs = mc:get_annotation(routing_keys, Msg), Content = mc:protocol_state(mc:convert(mc_amqpl, Msg)), case get_binding_funs_x(XName) of not_found -> []; BindingFuns -> - match_bindings(XName, RKs, Content, BindingFuns) + match_bindings(XName, Content, BindingFuns) end. @@ -104,18 +103,19 @@ route(#exchange{name = XName}, Msg, _Opts) -> validate(_X) -> ok. % After exchange declaration and recovery -create(none, #exchange{name = XName}) -> +create(_Tx, #exchange{name = XName}) -> add_initial_record(XName). % Delete an exchange -delete(none, #exchange{name = XName}) -> - delete_state(XName). +delete(_Tx, #exchange{name = XName}) -> + delete_state(XName), + ok. % Before add binding validate_binding(_X, _B) -> ok. % A new binding has ben added or recovered -add_binding( none +add_binding( _Tx , #exchange{name = XName} , #binding{key = BindingKey, destination = Dest, args = Args} ) -> @@ -130,7 +130,7 @@ add_binding( none ok. % Binding removal -remove_bindings( none +remove_bindings( _Tx , #exchange{name = XName} , Bindings ) -> @@ -162,7 +162,7 @@ get_string_arg(Args, ArgName, Default) -> end. % Match bindings for the current message -match_bindings( XName, _RoutingKeys, MessageContent, BindingFuns) -> +match_bindings(XName, MessageContent, BindingFuns) -> MessageHeaders = get_headers(MessageContent), rabbit_router:match_bindings( XName , fun(#binding{key = Key, destination = Dest}) -> diff --git a/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl b/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl index fe3f754bdae2..6316b232b7e3 100644 --- a/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl +++ b/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2012-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% ----------------------------------------------------------------------------- %% Derived from works which were: %% Copyright (c) 2002, 2012 Tim Watson (watson.timothy@gmail.com) diff --git a/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl b/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl index 5d90249efb9a..6b61491046a9 100644 --- a/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl +++ b/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2013-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rjms_topic_selector_SUITE). -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_jms_topic_exchange.hrl"). @@ -22,14 +21,22 @@ all() -> [ - {group, parallel_tests} + {group, mnesia_store}, + {group, khepri_store}, + {group, khepri_migration} ]. 
groups() -> [ - {parallel_tests, [parallel], [ - test_topic_selection - ]} + {mnesia_store, [], [ + test_topic_selection, + restart_with_auto_delete_topic_exchange + ]}, + {khepri_store, [], [ + test_topic_selection, + restart_with_auto_delete_topic_exchange + ]}, + {khepri_migration, [], [from_mnesia_to_khepri]} ]. %% ------------------------------------------------------------------- @@ -38,23 +45,35 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, ?MODULE} - ]), - rabbit_ct_helpers:run_setup_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). + rabbit_ct_helpers:run_setup_steps(Config). end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()). - -init_per_group(_, Config) -> - Config. + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(mnesia_store = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), + init_per_group_common(Group, Config); +init_per_group(khepri_store = Group, Config0) -> + Config = rabbit_ct_helpers:set_config( + Config0, + [{metadata_store, {khepri, [khepri_db]}}]), + init_per_group_common(Group, Config); +init_per_group(khepri_migration = Group, Config0) -> + Config = rabbit_ct_helpers:set_config(Config0, [{metadata_store, mnesia}]), + init_per_group_common(Group, Config). + +init_per_group_common(Group, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Group} + ]), + rabbit_ct_helpers:run_setup_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). end_per_group(_, Config) -> - Config. + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). @@ -70,7 +89,7 @@ test_topic_selection(Config) -> {Connection, Channel} = open_connection_and_channel(Config), #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}), - Exchange = declare_rjms_exchange(Channel, "rjms_test_topic_selector_exchange", []), + Exchange = declare_rjms_exchange(Channel, "rjms_test_topic_selector_exchange", false, false, []), %% Declare a queue and bind it Q = declare_queue(Channel), @@ -81,19 +100,61 @@ test_topic_selection(Config) -> get_and_check(Channel, Q, 0, <<"true">>), + amqp_channel:call(Channel, #'exchange.delete'{exchange = Exchange}), close_connection_and_channel(Connection, Channel), ok. +restart_with_auto_delete_topic_exchange(Config) -> + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + {_Connection, Channel} = open_connection_and_channel(Config), + #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}), + + Exchange = declare_rjms_exchange(Channel, "restart_with_auto_delete_topic_exchange", true, true, []), + %% Declare a queue and bind it + %% Q = declare_queue(Channel), + #'queue.declare_ok'{queue = Q} = amqp_channel:call(Channel, #'queue.declare'{durable = true}), + bind_queue(Channel, Q, Exchange, <<"select-key">>, [?BSELECTARG(<<"{ident, <<\"boolVal\">>}.">>)]), + ok = rabbit_control_helper:command(stop_app, Server), + ok = rabbit_control_helper:command(start_app, Server). 
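+
+%% Descriptive note (editor addition): from_mnesia_to_khepri/1 below declares
+%% the exchange and binding while the node is still on Mnesia, then enables
+%% the khepri_db feature flag (plus the exchange-specific metadata store flag)
+%% so the table is migrated through rabbit_db_jms_exchange_m2k_converter, and
+%% finally checks that messages are still routed via the migrated binding.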
+ +from_mnesia_to_khepri(Config) -> + {Connection, Channel} = open_connection_and_channel(Config), + #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}), + + Exchange = declare_rjms_exchange(Channel, "rjms_test_topic_selector_exchange", false, false, []), + + %% Declare a queue and bind it + Q = declare_queue(Channel), + bind_queue(Channel, Q, Exchange, <<"select-key">>, [?BSELECTARG(<<"{ident, <<\"boolVal\">>}.">>)]), + + case rabbit_ct_broker_helpers:enable_feature_flag(Config, khepri_db) of + ok -> + case rabbit_ct_broker_helpers:enable_feature_flag(Config, rabbit_jms_topic_exchange_raft_based_metadata_store) of + ok -> + publish_two_messages(Channel, Exchange, <<"select-key">>), + amqp_channel:wait_for_confirms(Channel, 5), + get_and_check(Channel, Q, 0, <<"true">>), + close_connection_and_channel(Connection, Channel), + ok; + Skip -> + Skip + end; + Skip -> + Skip + end. %% ------------------------------------------------------------------- %% Helpers. %% ------------------------------------------------------------------- %% Declare a rjms_topic_selector exchange, with args -declare_rjms_exchange(Ch, XNameStr, XArgs) -> +declare_rjms_exchange(Ch, XNameStr, Durable, AutoDelete, XArgs) -> Exchange = list_to_binary(XNameStr), Decl = #'exchange.declare'{ exchange = Exchange , type = <<"x-jms-topic">> + , durable = Durable + , auto_delete = AutoDelete , arguments = XArgs }, #'exchange.declare_ok'{} = amqp_channel:call(Ch, Decl), Exchange. diff --git a/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl b/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl index 303c8253fe37..f0df24020973 100644 --- a/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl +++ b/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2012-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% ----------------------------------------------------------------------------- %% Unit test file for RJMS Topic Selector plugin @@ -13,7 +13,6 @@ -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include("rabbit_jms_topic_exchange.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). diff --git a/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl b/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl index 99e15a0f3d10..0c6ea7616c4e 100644 --- a/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl +++ b/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl @@ -12,7 +12,6 @@ -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -import(sjx_evaluator, [evaluate/2]). diff --git a/deps/rabbitmq_management/.gitignore b/deps/rabbitmq_management/.gitignore index b015464d1887..96463fa9b670 100644 --- a/deps/rabbitmq_management/.gitignore +++ b/deps/rabbitmq_management/.gitignore @@ -1,31 +1,6 @@ -.sw? -.*.sw? 
-*.beam -*.pem -erl_crash.dump -MnesiaCore.* -/.erlang.mk/ -/cover/ /debug/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr - -rabbitmq_management.d -.rabbitmq_management.plt - -*.coverdata test/config_schema_SUITE_data/schema/ -.vscode/* selenium/node_modules selenium/package-lock.json diff --git a/deps/rabbitmq_management/BUILD.bazel b/deps/rabbitmq_management/BUILD.bazel index 2de82451af80..6b560bb7059e 100644 --- a/deps/rabbitmq_management/BUILD.bazel +++ b/deps/rabbitmq_management/BUILD.bazel @@ -35,7 +35,8 @@ APP_ENV = """[ {cors_allow_origins, []}, {cors_max_age, 1800}, - {content_security_policy, "script-src 'self' 'unsafe-eval' 'unsafe-inline'; object-src 'self'"} + {content_security_policy, "script-src 'self' 'unsafe-eval' 'unsafe-inline'; object-src 'self'"}, + {max_http_body_size, 10000000} ]""" genrule( @@ -56,7 +57,6 @@ all_srcs(name = "all_srcs") test_suite_beam_files(name = "test_suite_beam_files") -# gazelle:erlang_app_extra_app mnesia # gazelle:erlang_app_extra_app ssl # gazelle:erlang_app_extra_app crypto # gazelle:erlang_app_extra_app public_key @@ -75,7 +75,6 @@ rabbitmq_app( beam_files = [":beam_files"], extra_apps = [ "crypto", - "mnesia", "public_key", "ssl", ], @@ -83,6 +82,7 @@ rabbitmq_app( priv = [":priv"], deps = [ "//deps/amqp_client:erlang_app", + "//deps/oauth2_client:erlang_app", "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management_agent:erlang_app", @@ -102,7 +102,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", ) @@ -167,7 +167,10 @@ rabbitmq_integration_suite( additional_beam = [ "test/rabbit_mgmt_runtime_parameters_util.beam", ], - shard_count = 2, + shard_count = 6, + runtime_deps = [ + "//deps/amqp10_client:erlang_app", + ], ) rabbitmq_integration_suite( @@ -208,6 +211,11 @@ rabbitmq_suite( size = "small", ) +rabbitmq_suite( + name = "rabbit_mgmt_wm_auth_SUITE", + size = "small", +) + rabbitmq_suite( name = "stats_SUITE", size = "small", diff --git a/deps/rabbitmq_management/Makefile b/deps/rabbitmq_management/Makefile index c092c90f5e77..98998bfcdb48 100644 --- a/deps/rabbitmq_management/Makefile +++ b/deps/rabbitmq_management/Makefile @@ -12,7 +12,8 @@ define PROJECT_ENV {cors_allow_origins, []}, {cors_max_age, 1800}, - {content_security_policy, "script-src 'self' 'unsafe-eval' 'unsafe-inline'; object-src 'self'"} + {content_security_policy, "script-src 'self' 'unsafe-eval' 'unsafe-inline'; object-src 'self'"}, + {max_http_body_size, 10000000} ] endef @@ -20,9 +21,9 @@ define PROJECT_APP_EXTRA_KEYS {broker_version_requirements, []} endef -DEPS = rabbit_common rabbit amqp_client cowboy cowlib rabbitmq_web_dispatch rabbitmq_management_agent -TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers proper -LOCAL_DEPS += mnesia ranch ssl crypto public_key +DEPS = rabbit_common rabbit amqp_client cowboy cowlib rabbitmq_web_dispatch rabbitmq_management_agent oauth2_client +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers proper amqp10_client +LOCAL_DEPS += ranch ssl crypto public_key # FIXME: Add Ranch as a BUILD_DEPS to be sure the correct version is picked. # See rabbitmq-components.mk. 
diff --git a/deps/rabbitmq_management/README.md b/deps/rabbitmq_management/README.md index 189b9a2b5c9d..b4627f01723a 100644 --- a/deps/rabbitmq_management/README.md +++ b/deps/rabbitmq_management/README.md @@ -18,4 +18,4 @@ it has to be [enabled](https://www.rabbitmq.com/plugins.html#basics) before it c ## Copyright -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. diff --git a/deps/rabbitmq_management/app.bzl b/deps/rabbitmq_management/app.bzl index e85bf838eeb6..1f8429e9f7e4 100644 --- a/deps/rabbitmq_management/app.bzl +++ b/deps/rabbitmq_management/app.bzl @@ -29,6 +29,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_mgmt_hsts.erl", "src/rabbit_mgmt_load_definitions.erl", "src/rabbit_mgmt_login.erl", + "src/rabbit_mgmt_nodes.erl", "src/rabbit_mgmt_oauth_bootstrap.erl", "src/rabbit_mgmt_reset_handler.erl", "src/rabbit_mgmt_stats.erl", @@ -51,6 +52,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_mgmt_wm_connections_vhost.erl", "src/rabbit_mgmt_wm_consumers.erl", "src/rabbit_mgmt_wm_definitions.erl", + "src/rabbit_mgmt_wm_deprecated_features.erl", "src/rabbit_mgmt_wm_environment.erl", "src/rabbit_mgmt_wm_exchange.erl", "src/rabbit_mgmt_wm_exchange_publish.erl", @@ -64,7 +66,6 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_mgmt_wm_health_check_alarms.erl", "src/rabbit_mgmt_wm_health_check_certificate_expiration.erl", "src/rabbit_mgmt_wm_health_check_local_alarms.erl", - "src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl", "src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl", "src/rabbit_mgmt_wm_health_check_port_listener.erl", "src/rabbit_mgmt_wm_health_check_protocol_listener.erl", @@ -122,6 +123,7 @@ def all_beam_files(name = "all_beam_files"): erlc_opts = "//:erlc_opts", deps = [ "//deps/amqp_client:erlang_app", + "//deps/oauth2_client:erlang_app", "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management_agent:erlang_app", @@ -159,6 +161,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_mgmt_hsts.erl", "src/rabbit_mgmt_load_definitions.erl", "src/rabbit_mgmt_login.erl", + "src/rabbit_mgmt_nodes.erl", "src/rabbit_mgmt_oauth_bootstrap.erl", "src/rabbit_mgmt_reset_handler.erl", "src/rabbit_mgmt_stats.erl", @@ -181,6 +184,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_mgmt_wm_connections_vhost.erl", "src/rabbit_mgmt_wm_consumers.erl", "src/rabbit_mgmt_wm_definitions.erl", + "src/rabbit_mgmt_wm_deprecated_features.erl", "src/rabbit_mgmt_wm_environment.erl", "src/rabbit_mgmt_wm_exchange.erl", "src/rabbit_mgmt_wm_exchange_publish.erl", @@ -194,7 +198,6 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_mgmt_wm_health_check_alarms.erl", "src/rabbit_mgmt_wm_health_check_certificate_expiration.erl", "src/rabbit_mgmt_wm_health_check_local_alarms.erl", - "src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl", "src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl", "src/rabbit_mgmt_wm_health_check_port_listener.erl", "src/rabbit_mgmt_wm_health_check_protocol_listener.erl", @@ -252,6 +255,7 @@ def all_test_beam_files(name = "all_test_beam_files"): erlc_opts = "//:test_erlc_opts", deps = [ "//deps/amqp_client:erlang_app", + "//deps/oauth2_client:erlang_app", "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management_agent:erlang_app", @@ -308,8 +312,9 @@ def all_srcs(name = 
"all_srcs"): "priv/www/js/oidc-oauth/helper.js", "priv/www/js/oidc-oauth/login-callback.html", "priv/www/js/oidc-oauth/logout-callback.html", + "priv/www/js/oidc-oauth/oidc-client-ts.3.0.1.min.js", + "priv/www/js/oidc-oauth/oidc-client-ts.3.0.1.min.js.map", "priv/www/js/oidc-oauth/oidc-client-ts.js", - "priv/www/js/oidc-oauth/oidc-client-ts.js.map", "priv/www/js/prefs.js", "priv/www/js/sammy-0.7.6.js", "priv/www/js/sammy-0.7.6.min.js", @@ -325,6 +330,7 @@ def all_srcs(name = "all_srcs"): "priv/www/js/tmpl/connection.ejs", "priv/www/js/tmpl/connections.ejs", "priv/www/js/tmpl/consumers.ejs", + "priv/www/js/tmpl/deprecated-features.ejs", "priv/www/js/tmpl/exchange.ejs", "priv/www/js/tmpl/exchanges.ejs", "priv/www/js/tmpl/feature-flags.ejs", @@ -378,6 +384,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_mgmt_hsts.erl", "src/rabbit_mgmt_load_definitions.erl", "src/rabbit_mgmt_login.erl", + "src/rabbit_mgmt_nodes.erl", "src/rabbit_mgmt_oauth_bootstrap.erl", "src/rabbit_mgmt_reset_handler.erl", "src/rabbit_mgmt_stats.erl", @@ -400,6 +407,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_mgmt_wm_connections_vhost.erl", "src/rabbit_mgmt_wm_consumers.erl", "src/rabbit_mgmt_wm_definitions.erl", + "src/rabbit_mgmt_wm_deprecated_features.erl", "src/rabbit_mgmt_wm_environment.erl", "src/rabbit_mgmt_wm_exchange.erl", "src/rabbit_mgmt_wm_exchange_publish.erl", @@ -413,7 +421,6 @@ def all_srcs(name = "all_srcs"): "src/rabbit_mgmt_wm_health_check_alarms.erl", "src/rabbit_mgmt_wm_health_check_certificate_expiration.erl", "src/rabbit_mgmt_wm_health_check_local_alarms.erl", - "src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl", "src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl", "src/rabbit_mgmt_wm_health_check_port_listener.erl", "src/rabbit_mgmt_wm_health_check_protocol_listener.erl", @@ -501,7 +508,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/clustering_SUITE.beam"], app_name = "rabbitmq_management", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app", "//deps/rabbitmq_management_agent:erlang_app"], + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) erlang_bytecode( name = "clustering_prop_SUITE_beam_files", @@ -510,7 +517,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/clustering_prop_SUITE.beam"], app_name = "rabbitmq_management", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app", "//deps/rabbitmq_management_agent:erlang_app", "@proper//:erlang_app"], + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app", "@proper//:erlang_app"], ) erlang_bytecode( name = "config_schema_SUITE_beam_files", @@ -578,7 +585,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): testonly = True, srcs = ["test/rabbit_mgmt_test_db_SUITE.erl"], outs = ["test/rabbit_mgmt_test_db_SUITE.beam"], - hdrs = ["include/rabbit_mgmt.hrl"], app_name = "rabbitmq_management", erlc_opts = "//:test_erlc_opts", deps = [ @@ -595,6 +601,14 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbitmq_management", erlc_opts = "//:test_erlc_opts", ) + erlang_bytecode( + name = "rabbit_mgmt_wm_auth_SUITE_beam_files", + testonly = True, + srcs = ["test/rabbit_mgmt_wm_auth_SUITE.erl"], + outs = 
["test/rabbit_mgmt_wm_auth_SUITE.beam"], + app_name = "rabbitmq_management", + erlc_opts = "//:test_erlc_opts", + ) erlang_bytecode( name = "stats_SUITE_beam_files", testonly = True, diff --git a/deps/rabbitmq_management/bin/rabbitmqadmin b/deps/rabbitmq_management/bin/rabbitmqadmin index cf9e44a5c70b..a5977ed36e94 100755 --- a/deps/rabbitmq_management/bin/rabbitmqadmin +++ b/deps/rabbitmq_management/bin/rabbitmqadmin @@ -4,7 +4,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. # -# Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +# Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. from __future__ import print_function diff --git a/deps/rabbitmq_management/include/rabbit_mgmt.hrl b/deps/rabbitmq_management/include/rabbit_mgmt.hrl index a9a7a4421780..eaf47272be93 100644 --- a/deps/rabbitmq_management/include/rabbit_mgmt.hrl +++ b/deps/rabbitmq_management/include/rabbit_mgmt.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(AUTH_REALM, "Basic realm=\"RabbitMQ Management\""). @@ -11,3 +11,5 @@ -define(MANAGEMENT_PG_SCOPE, rabbitmq_management). -define(MANAGEMENT_PG_GROUP, management_db). + +-define(MANAGEMENT_DEFAULT_HTTP_MAX_BODY_SIZE, 20000000). diff --git a/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema b/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema index 30a8bc84c06a..83c32b3022ac 100644 --- a/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema +++ b/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema @@ -20,6 +20,23 @@ {mapping, "management.http_log_dir", "rabbitmq_management.http_log_dir", [{datatype, string}]}. +%% Max HTTP body limit + +{mapping, "management.http.max_body_size", "rabbitmq_management.max_http_body_size", + [{datatype, integer}, {validators, ["non_negative_integer"]}]}. + +{translation, "rabbitmq_management.max_http_body_size", +fun(Conf) -> + case cuttlefish:conf_get("management.http.max_body_size", Conf, undefined) of + %% 10 MiB allows for about 100K queues with short names across a small (single digit) number of virtual hosts with + %% an equally small number of users. MK. + undefined -> 10000000; + Val when is_integer(Val) -> Val; + Other -> cuttlefish:invalid("management.http.max_body_size must be set to a positive integer") + end +end}. + + %% HTTP (TCP) listener options ======================================================== %% HTTP listener consistent with Web STOMP and Web MQTT. @@ -70,7 +87,7 @@ {mapping, "management.ssl.cacertfile", "rabbitmq_management.ssl_config.cacertfile", [{datatype, string}, {validators, ["file_accessible"]}]}. {mapping, "management.ssl.password", "rabbitmq_management.ssl_config.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {mapping, "management.ssl.verify", "rabbitmq_management.ssl_config.verify", [ {datatype, {enum, [verify_peer, verify_none]}}]}. @@ -278,7 +295,7 @@ end}. [{datatype, {enum, [true, false]}}]}. 
{mapping, "management.listener.ssl_opts.password", "rabbitmq_management.listener.ssl_opts.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {mapping, "management.listener.ssl_opts.psk_identity", "rabbitmq_management.listener.ssl_opts.psk_identity", [{datatype, string}]}. @@ -415,7 +432,6 @@ fun(Conf) -> end}. -%% OAuth 2/SSO access only {mapping, "management.disable_basic_auth", "rabbitmq_management.disable_basic_auth", [{datatype, {enum, [true, false]}}]}. @@ -431,15 +447,21 @@ end}. %% =========================================================================== %% Authorization +%% OAuth 2/SSO access only %% Enable OAuth2 in the management ui {mapping, "management.oauth_enabled", "rabbitmq_management.oauth_enabled", [{datatype, {enum, [true, false]}}]}. +%% Enable Basic Auth in the management ui along with OAuth2 (it requires an additional auth_backend) +{mapping, "management.oauth_disable_basic_auth", "rabbitmq_management.oauth_disable_basic_auth", + [{datatype, {enum, [true, false]}}]}. + %% The URL of the OIDC/OAuth2 provider {mapping, "management.oauth_provider_url", "rabbitmq_management.oauth_provider_url", [{datatype, string}]}. + %% Your client application's identifier as registered with the OIDC/OAuth2 {mapping, "management.oauth_client_id", "rabbitmq_management.oauth_client_id", [{datatype, string}]}. @@ -466,6 +488,102 @@ end}. {mapping, "management.oauth_initiated_logon_type", "rabbitmq_management.oauth_initiated_logon_type", [{datatype, {enum, [sp_initiated, idp_initiated]}}]}. + +{mapping, + "management.oauth_resource_servers.$name.id", + "rabbitmq_management.oauth_resource_servers", + [{datatype, string}] +}. + +{mapping, + "management.oauth_resource_servers.$name.disabled", + "rabbitmq_management.oauth_resource_servers", [ + {datatype, {enum, [true, false]}}, + {include_default, false} +]}. + +{mapping, + "management.oauth_resource_servers.$name.label", + "rabbitmq_management.oauth_resource_servers", + [{datatype, string}] +}. + +{mapping, + "management.oauth_resource_servers.$name.oauth_provider_url", + "rabbitmq_management.oauth_resource_servers", + [{datatype, string}] +}. + + + +{mapping, + "management.oauth_resource_servers.$name.oauth_client_id", + "rabbitmq_management.oauth_resource_servers", + [{datatype, string}] +}. + +{mapping, + "management.oauth_resource_servers.$name.oauth_client_secret", + "rabbitmq_management.oauth_resource_servers", + [{datatype, string}] +}. + +{mapping, + "management.oauth_resource_servers.$name.oauth_response_type", + "rabbitmq_management.oauth_resource_servers", + [{datatype, string}] +}. + + +{mapping, + "management.oauth_resource_servers.$name.oauth_scopes", + "rabbitmq_management.oauth_resource_servers", + [{datatype, string}] +}. + +{mapping, + "management.oauth_resource_servers.$name.oauth_metadata_url", + "rabbitmq_management.oauth_resource_servers", + [{datatype, string}] +}. + +{mapping, + "management.oauth_resource_servers.$name.oauth_initiated_logon_type", + "rabbitmq_management.oauth_resource_servers", + [{datatype, {enum, [sp_initiated, idp_initiated]}}]}. 
+ +{translation, "rabbitmq_management.oauth_resource_servers", + fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("management.oauth_resource_servers", Conf), + ResourceServers = [{Name, {list_to_atom(Key), V}} || {["management","oauth_resource_servers", Name, Key], V} <- Settings ], + KeyFun = fun({Name,_}) -> list_to_binary(Name) end, + ValueFun = fun({_,V}) -> V end, + NewGroup = maps:groups_from_list(KeyFun, ValueFun, ResourceServers), + ListOrSingleFun = fun(K, List) -> + case K of + key_config -> proplists:get_all_values(K, List); + _ -> + case proplists:lookup_all(K, List) of + [One] -> proplists:get_value(K, List); + [One|_] = V -> V + end + end + end, + GroupKeyConfigFun = fun(K, List) -> + ListKeys = proplists:get_keys(List), + [ {K,ListOrSingleFun(K,List)} || K <- ListKeys ] + end, + NewGroupTwo = maps:map(GroupKeyConfigFun, NewGroup), + IndexByIdOrElseNameFun = fun(K, V, NewMap) -> + case proplists:get_value(id, V) of + undefined -> maps:put(K, V, NewMap); + ID when is_binary(ID) -> maps:put(ID, V, NewMap); + ID -> maps:put(list_to_binary(ID), V, NewMap) + end + end, + maps:fold(IndexByIdOrElseNameFun,#{}, NewGroupTwo) + end}. + %% =========================================================================== diff --git a/deps/rabbitmq_management/priv/www/api/index.html b/deps/rabbitmq_management/priv/www/api/index.html index 2ed26936e61f..14bcaeb36a22 100644 --- a/deps/rabbitmq_management/priv/www/api/index.html +++ b/deps/rabbitmq_management/priv/www/api/index.html @@ -56,6 +56,13 @@

      Introduction

      with the columns parameter. This is a comma-separated list of subfields separated by dots. See the example below.

      +

      It is possible to disable the statistics in the GET requests + and obtain just the basic information of every object. This reduces + considerably the amount of data returned and the memory and resource + consumption of each query in the system. For some monitoring and operation + purposes, these queries are more appropriate. The query string parameter + disable_stats set to true will achieve this.

      +
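      As a hedged illustration (the host, port and `guest` credentials below are assumed defaults, not part of the original page), the parameter is passed as a regular query string argument:

```
# Illustration only: fetch queues without per-object statistics.
curl -s -u guest:guest 'http://localhost:15672/api/queues?disable_stats=true'
```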

      Most of the GET queries return many fields per object. The second part of this guide covers those.

      @@ -209,10 +216,17 @@

      Reference

      /api/nodes/name - An individual node in the RabbitMQ cluster. Add - "?memory=true" to get memory statistics, and "?binary=true" - to get a breakdown of binary memory use (may be expensive if - there are many small binaries in the system). + Returns information about an individual node in the RabbitMQ cluster. + + + + X + + + + /api/nodes/name/memory + + Returns a memory usage breakdown of an individual node in the RabbitMQ cluster. @@ -478,7 +492,21 @@
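      As a sketch (the node name `rabbit@hostname`, host and credentials are assumptions), the dedicated memory endpoint replaces the former `?memory=true` parameter:

```
# Illustration only: substitute a real node name as returned by /api/nodes.
curl -s -u guest:guest 'http://localhost:15672/api/nodes/rabbit@hostname/memory'
```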

      Reference

      /api/queues - A list of all queues. Use pagination parameters to filter queues. + A list of all queues returning a reduced set of fields. Use pagination parameters to filter queues. + The parameter enable_queue_totals=true can be used in combination with the + disable_stats=true parameter to return a reduced set of fields and significantly + reduce the amount of data returned by this endpoint. That in turn reduces + the CPU and bandwidth footprint of such requests. + + + + X + + + + /api/queues/detailed + A list of all queues containing all available information about the queues. Use pagination parameters to filter queues. + X @@ -531,9 +559,7 @@

      Reference

      X /api/queues/vhost/name/actions - Actions that can be taken on a queue. POST a body like: -
      {"action":"sync"}
      Currently the actions which are - supported are sync and cancel_sync. + Actions that can be taken on a queue. Currently no actions are supported. @@ -739,7 +765,7 @@

      Reference

      An individual user. To PUT a user, you will need a body looking something like this:
      {"password":"secret","tags":"administrator"}
      or: -
      {"password_hash":"2lmoth8l4H0DViLaK9Fxi6l9ds8=", "tags":"administrator"}
      +
      {"password_hash":"2lmoth8l4H0DViLaK9Fxi6l9ds8=", "tags":["administrator"]}
      The tags key is mandatory. Either password or password_hash can be set. If neither is set, the user will not be able to log in with a password, @@ -1043,19 +1069,6 @@
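      A hedged example of such a PUT (the user name, host and credentials are assumptions; the body mirrors the documented `password_hash`/`tags` form):

```
# Illustration only: create or update a user from a pre-computed password hash.
curl -s -u guest:guest -X PUT -H 'content-type: application/json' \
  -d '{"password_hash":"2lmoth8l4H0DViLaK9Fxi6l9ds8=","tags":["administrator"]}' \
  'http://localhost:15672/api/users/someuser'
```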

      Reference

      otherwise responds with a 503 Service Unavailable. - - X - - - - /api/health/checks/node-is-mirror-sync-critical - - Checks if there are classic mirrored queues without synchronised mirrors online - (queues that would potentially lose data if the target node is shut down). - Responds a 200 OK if there are no such classic mirrored queues, - otherwise responds with a 503 Service Unavailable. - - X @@ -1290,6 +1303,36 @@

      Reference

      Requires the rabbitmq_stream_management plugin to be enabled. + + X + + + + /api/feature-flags + + The list of feature flags. + + + + X + + + + /api/deprecated-features + + The list of deprecated features. + + + + X + + + + /api/deprecated-features/used + + The list of deprecated features currently being used. + + @@ -1949,12 +1992,6 @@
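      For example (assumed default host and credentials), the new endpoints are plain read-only resources:

```
# Illustration only: list deprecated features that are currently in use.
curl -s -u guest:guest 'http://localhost:15672/api/deprecated-features/used'
```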

      /api/nodes

      Number of Erlang processes in use. - - processors - - Number of cores detected and usable by Erlang. - - queue_index_journal_write_count @@ -2001,18 +2038,6 @@

      /api/nodes

      Location of sasl log file. - - sockets_total - - File descriptors available for use as sockets. - - - - sockets_used - - File descriptors used as sockets. - - type @@ -2025,6 +2050,12 @@

      /api/nodes

      Time since the Erlang VM started, in milliseconds. + + processors + + Number of logical CPU cores used by RabbitMQ. + +

      /api/nodes/(name)

      @@ -2158,6 +2189,85 @@

      /api/exchanges/(vhost)/(name)

      /api/queues

      + + When using the query parameters combination of disable_stats and + enable_queue_totals this query returns the following fields: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      name + The name of the queue. +
      vhost + The name of the virtual host. +
      type + The type of the queue. +
      node + Depending on the type of the queue, this is the node which holds the queue or hosts the leader. +
      state + The status of the queue. +
      arguments + The arguments of the queue. +
      auto_delete + The value of the auto_delete argument. +
      durable + The value of the durable argument. +
      exclusive + The value of the exclusive argument. +
      messages + The total number of messages in the queue. +
      messages_ready + The number of messages ready to be delivered in the queue. +
      messages_unacknowledged + The number of messages waiting for acknowledgement in the queue. +
      +
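      A sketch of a request combining the two parameters so that only the reduced field set listed above is returned (host and credentials are assumed defaults):

```
# Illustration only: reduced per-queue payload with message totals included.
curl -s -u guest:guest \
  'http://localhost:15672/api/queues?disable_stats=true&enable_queue_totals=true'
```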

      /api/queues/(vhost)

      diff --git a/deps/rabbitmq_management/priv/www/css/main.css b/deps/rabbitmq_management/priv/www/css/main.css index 2a0e98cd7842..a3bcaae5d5f5 100644 --- a/deps/rabbitmq_management/priv/www/css/main.css +++ b/deps/rabbitmq_management/priv/www/css/main.css @@ -28,6 +28,8 @@ a:hover { color: #F60; } #topnav input[type=submit] { padding: 3px 7px; display: inline; } #topnav li { text-align: right; padding: 2px 0; } +#warnings p { text-align: center; padding: 1; margin: 1;} + #menu ul { padding: 0; margin: 0; overflow: auto; } #menu li { float: left; list-style-type: none; padding: 0 0.1em 0 0; } #menu li a { display: block; padding: 0.7em 1.3em; margin-right: 5px; } @@ -37,7 +39,9 @@ a:hover { color: #F60; } #main { padding-top: 10em; } #main.with-rhs { margin-right: 210px; } +#main.with-warnings { padding-top: 18em; } #rhs { float: right; width: 200px; background-color: white; position: relative; padding-top: 10em; } +#rhs.with-warnings { padding-top: 18em; } #rhs ul { padding: 0; margin: 10px 0 0 0; } #rhs li { list-style-type: none; padding: 0; margin-bottom: 5px; } #rhs a { display: block; padding: 0.7em; font-weight: bold; text-decoration: none; } diff --git a/deps/rabbitmq_management/priv/www/index.html b/deps/rabbitmq_management/priv/www/index.html index d6f7438dbf5f..b72e0cda9c80 100644 --- a/deps/rabbitmq_management/priv/www/index.html +++ b/deps/rabbitmq_management/priv/www/index.html @@ -16,48 +16,16 @@ - - - - + + - - - + +

      + <% if (Array.isArray(warnings)) { %> + <% for (var i = 0; i < warnings.length; i++) { %> +

      <%=warnings[i]%>

      + <% } %> + <% } %> + <% if (notAuthorized) { %> + + <% } %> +
      +<% if (!notAuthorized) { %> + <% if ((typeof resource_servers == 'object' && resource_servers.length == 1) && oauth_disable_basic_auth) { %> + + <% } else if (typeof resource_servers == 'object' && resource_servers.length >= 1) { %> -
      - + Login with : +

      + +

      +

      OAuth 2.0

      +
      +
      + <% if (resource_servers.length == 1 && declared_resource_servers_count == 1) { %> + + <% } else { %> +
      + + +

      + +

      + <% } %> +
      +
      +
      + +<% } %> + + + <% if (!oauth_disable_basic_auth) { %> +
      +

      Basic Authentication

      +
      +
      +
      + + + + + + + + + + + + + +
      *
      *
       
      +
      +
      +
      +
      + <% } %> + +<% } %> + + diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs index df7b33eb070f..03c442329983 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs @@ -6,8 +6,7 @@

      <% } else { %> <% - var sections = {'queue_procs' : ['classic', 'Classic queues (masters)'], - 'queue_slave_procs' : ['classic', 'Classic queues (mirrors)'], + var sections = {'queue_procs' : ['classic', 'Classic queues'], 'quorum_queue_procs' : ['quorum', 'Quorum queues'], 'quorum_queue_dlx_procs' : ['quorum', 'Dead letter workers'], 'stream_queue_procs' : ['stream', 'Stream queues'], @@ -36,8 +35,7 @@
      <% var key = [[{name: 'Classic Queues', colour: 'classic', - keys: [['queue_procs', 'queues'], - ['queue_slave_procs', 'mirrors']]}, + keys: [['queue_procs', 'queues']]}, {name: 'Quorum Queues', colour: 'quorum', keys: [['quorum_queue_procs','quorum'], ['quorum_queue_dlx_procs', 'dead letter workers']]}, @@ -63,6 +61,7 @@ var key = [[{name: 'Classic Queues', colour: 'classic', [{name: 'Processes', colour: 'proc', keys: [['plugins', 'plugins'], + ['metadata_store', 'metadata store'], ['other_proc', 'other']]}, {name: 'System', colour: 'system', keys: [['code', 'code'], diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/node.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/node.ejs index 726a25949973..e1739b9415fb 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/node.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/node.ejs @@ -23,22 +23,16 @@ Uptime <%= fmt_uptime(node.uptime) %> + + Cores + <%= fmt_string(node.processors) %> + <% if (rabbit_versions_interesting) { %> RabbitMQ Version <%= fmt_rabbit_version(node.applications) %> <% } %> - - Type - - <% if (node.type == 'disc') { %> - Disc - <% } else { %> - RAM - <% } %> - - Config file @@ -98,14 +92,6 @@ <% } %> - - - Socket descriptors - - - <%= node_stat_count('sockets_used', 'sockets_total', node, FD_THRESHOLDS) %> - - Erlang processes diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/overview.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/overview.ejs index 769c6d77c831..fdbbe1b8e025 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/overview.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/overview.ejs @@ -84,9 +84,6 @@ <% if (show_column('overview', 'file_descriptors')) { %> File descriptors <% } %> - <% if (show_column('overview', 'socket_descriptors')) { %> - Socket descriptors - <% } %> <% if (show_column('overview', 'erlang_processes')) { %> Erlang processes <% } %> @@ -99,6 +96,9 @@ <% if (show_column('overview', 'uptime')) { %> Uptime <% } %> + <% if (show_column('overview', 'cores')) { %> + Cores + <% } %> <% if (show_column('overview', 'info')) { %> Info <% } %> @@ -150,11 +150,6 @@ <% } %> <% } %> - <% if (show_column('overview', 'socket_descriptors')) { %> - - <%= node_stat_count_bar('sockets_used', 'sockets_total', node, FD_THRESHOLDS) %> - - <% } %> <% if (show_column('overview', 'erlang_processes')) { %> @@ -188,17 +183,15 @@ <% if (show_column('overview', 'uptime')) { %> <%= fmt_uptime(node.uptime) %> <% } %> + <% if (show_column('overview', 'cores')) { %> + <%= fmt_string(node.processors) %> + <% } %> <% if (show_column('overview', 'info')) { %> <% if (node.being_drained) { %> maintenance mode <% } %> <%= fmt_string(node.rates_mode) %> - <% if (node.type == 'disc') { %> - disc - <% } else { %> - RAM - <% } %> <%= fmt_plugins_small(node) %> <%= fmt_string(node.mem_calculation_strategy) %> diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs index 1f046ca5e2f8..cf191f97ee10 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs @@ -107,20 +107,14 @@ Auto expire
      Dead letter exchange | Dead letter routing key
      - Message TTL
      - Consumer Timeout
      + Message TTL | + Consumer Timeout | + Leader locator
      Queues [Classic] - HA mode | - HA params | - HA sync mode
      - HA mirror promotion on shutdown | - HA mirror promotion on failure -
      Version | - Master locator
      @@ -130,8 +124,6 @@ | Dead letter strategy | - Leader locator - @@ -139,12 +131,8 @@ Max age | - Max segment size in bytes - | Filter size in bytes. Valid range: 16-255 | - Leader locator - @@ -280,30 +268,37 @@
      - + - + - + diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs index c9d7319bb4ff..7f2c9e131a55 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs @@ -71,53 +71,6 @@ <% } %> - <% } else { %> - <% if (!queue.exclusive) { %> - - Mirrors - - <% - var has_unsynced_node = false; - for (var i in queue.slave_nodes) { - var node = queue.slave_nodes[i]; - %> - <% - if (jQuery.inArray(node, queue.synchronised_slave_nodes) == -1) { - has_unsynced_node = true; - %> - <%= fmt_node(node) %> (unsynchronised) - <% } else { %> - <%= fmt_node(node) %> - <% } %> -
      - <% } %> - <% if (queue.state == 'syncing') { %> - - - - - -
      - <%= fmt_sync_state(queue) %> - -
      - - - - -
      -
      - <% } else if (has_unsynced_node) { %> -
      - - - - -
      - <% } %> - - - <% } %> <% } %> <% } %> @@ -139,23 +92,39 @@ <%= fmt_string(queue.consumer_details.length) %> <% } %> - <% if (!is_stream(queue)) { %> + <% if (is_classic(queue)) { %> Consumer capacity <%= fmt_percent(queue.consumer_capacity) %> <% } %> + <% if(queue.hasOwnProperty('publishers')) { %> + + Publishers + <%= fmt_string(queue.publishers) %> + + <% } %> <% if (is_quorum(queue)) { %> Open files <%= fmt_table_short(queue.open_files) %> + <% if (queue.hasOwnProperty('delivery_limit')) { %> + + Delivery limit + <%= fmt_string(queue.delivery_limit) %> + + <% } %> <% } %> <% if (is_stream(queue)) { %> Readers <%= fmt_table_short(queue.readers) %> + + Segments + <%= fmt_string(queue.segments) %> + <% } %> @@ -163,10 +132,14 @@ Total + <% if (!is_stream(queue)) { %> Ready Unacked + <% } %> <% if (is_quorum(queue)) { %> - In memory ready + High priority + Normal priority + Returned Dead-lettered @@ -180,20 +153,32 @@ Messages + <% if (is_stream(queue)) { %> + + <% } else { %> + <% } %> <%= fmt_num_thousands(queue.messages) %> + <% if (!is_stream(queue)) { %> <%= fmt_num_thousands(queue.messages_ready) %> <%= fmt_num_thousands(queue.messages_unacknowledged) %> + <% } %> <% if (is_quorum(queue)) { %> - <%= fmt_num_thousands(queue.messages_ram) %> + <%= fmt_num_thousands(queue.messages_ready_high) %> + + + <%= fmt_num_thousands(queue.messages_ready_normal) %> + + + <%= fmt_num_thousands(queue.messages_ready_returned) %> <%= fmt_num_thousands(queue.messages_dlx) %> @@ -226,16 +211,22 @@ <%= fmt_bytes(queue.message_bytes_unacknowledged) %> - - <%= fmt_bytes(queue.message_bytes_ram) %> - <% } %> <% if (is_quorum(queue)) { %> + + + + + + <%= fmt_bytes(queue.message_bytes_dlx) %> <% } %> <% if (is_classic(queue)) { %> + + <%= fmt_bytes(queue.message_bytes_ram) %> + <%= fmt_bytes(queue.message_bytes_persistent) %> diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs index 5205aabf3192..caba0efe3092 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs @@ -129,11 +129,6 @@ <% } %> <% if (queue.hasOwnProperty('members')) { %> <%= fmt_members(queue) %> - <% } else { %> - <%= fmt_mirrors(queue) %> - <% if (queue.state == 'syncing') { %> - <%= fmt_sync_state(queue) %> - <% } %> <% } %> <% } %> @@ -331,7 +326,6 @@ <% if (queue_type == "classic") { %> Maximum priority | Version - | Master locator <% } %> <% if (queue_type == "quorum") { %> Delivery limit @@ -345,9 +339,7 @@ | Filter size (per chunk) in bytes | Initial cluster size <% } %> - <% if (queue_type != "classic") { %> - | Leader locator - <% } %> + Leader locator diff --git a/deps/rabbitmq_management/selenium/README.md b/deps/rabbitmq_management/selenium/README.md index 8e4a823686d4..0f9fcee379be 100644 --- a/deps/rabbitmq_management/selenium/README.md +++ b/deps/rabbitmq_management/selenium/README.md @@ -36,14 +36,21 @@ not see any browser interaction, everything happens in the background, i.e. rabb To run just one suite, you proceed as follows: ``` -suites/oauth-with-uaa.sh +suites/authnz-mgt/oauth-with-uaa.sh ``` -And to is run all suites, like the CI does, you run: +And to a group of suites, like the CI does, you run the command below which runs all +the management ui suites. If you do not pass `full-suite-management-ui`, `run-suites.sh` +defaults to `full-suite-management-ui`. 
``` -./run-suites.sh +./run-suites.sh full-suite-management-ui ``` +Other suites files available are: + +- `short-suite-management-ui` which only runs a short set of suites +- `full-suite-authnz` which runs all the suites related to testing auth backends vs protocols + If you want to test your local changes, you can still build an image with these 2 commands from the root folder of the `rabbitmq-server` repo: ``` @@ -51,17 +58,30 @@ cd ../../../../ make package-generic-unix make docker-image ``` +>> Equivalent bazel command: `bazelisk run packaging/docker-image:rabbitmq` The last command prints something like this: ``` => => naming to docker.io/pivotalrabbitmq/rabbitmq:3.11.0-rc.2.51.g4f3e539.dirty 0.0s ``` +Or if you prefer to use bazel run instead: +``` +bazelisk run packaging/docker-image:rabbitmq +``` + + To run a suite with a particular docker image you do it like this: ``` cd deps/rabbitmq_management/selenium -RABBITMQ_DOCKER_IMAGE=pivotalrabbitmq/rabbitmq:3.11.0-rc.2.51.g4f3e539.dirty suites/oauth-with-uaa-with-mgt-prefix.sh +RABBITMQ_DOCKER_IMAGE=pivotalrabbitmq/rabbitmq:3.11.0-rc.2.51.g4f3e539.dirty suites/authnz-mgt/oauth-with-uaa-with-mgt-prefix.sh +``` +or like this if you built the docker image using bazel: ``` +cd deps/rabbitmq_management/selenium +RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq suites/authnz-mgt/oauth-with-uaa-with-mgt-prefix.sh +``` + ## Run tests interactively using your local chrome browser @@ -78,22 +98,22 @@ For instance, say you want to run the test cases for the suite `suites/oauth-wit First, open a terminal and launch RabbitMQ in the foreground: ``` -suites/oauth-with-uaa.sh start-rabbitmq +suites/authnz-mgt/oauth-with-uaa.sh start-rabbitmq ``` Then, launch all the components, the suite depends on, in the background: ``` -suites/oauth-with-uaa.sh start-others +suites/authnz-mgt/oauth-with-uaa.sh start-others ``` And finally, run all the test cases for the suite: ``` -suites/oauth-with-uaa.sh test +suites/authnz-mgt/oauth-with-uaa.sh test ``` Or just one test case: ``` -suites/oauth-with-uaa.sh test happy-login.js +suites/authnz-mgt/oauth-with-uaa.sh test happy-login.js ``` **NOTE**: Nowadays, it is not possible to run all test in interactive mode. It is doable but it has not @@ -131,7 +151,7 @@ automatically activated when running in interactive mode The rest of the components the test cases depends on will typically run in docker such as uaa, keycloak, and the rest. -Besides these two profiles, mutually exclusive, you can have as many profiles as needed. It is just a matter of naming the appropriate file (.env, or rabbitmq.conf, etc) with the profile and activating the profile in the test suite script. For instance `suites/oauth-with-uaa.sh` activates two profiles by declaring them in `PROFILES` environment variable as shown below: +Besides these two profiles, mutually exclusive, you can have as many profiles as needed. It is just a matter of naming the appropriate file (.env, or rabbitmq.conf, etc) with the profile and activating the profile in the test suite script. 
For instance `suites/authnz-mgt/oauth-with-uaa.sh` activates two profiles by declaring them in `PROFILES` environment variable as shown below: ``` PROFILES="uaa uaa-oauth-provider" ``` diff --git a/deps/rabbitmq_management/selenium/amqp10-roundtriptest/src/main/java/com/rabbitmq/amqp1_0/RoundTripTest.java b/deps/rabbitmq_management/selenium/amqp10-roundtriptest/src/main/java/com/rabbitmq/amqp1_0/RoundTripTest.java index ca6b80d82966..d683e23d8bce 100644 --- a/deps/rabbitmq_management/selenium/amqp10-roundtriptest/src/main/java/com/rabbitmq/amqp1_0/RoundTripTest.java +++ b/deps/rabbitmq_management/selenium/amqp10-roundtriptest/src/main/java/com/rabbitmq/amqp1_0/RoundTripTest.java @@ -19,10 +19,12 @@ public static void main(String args[]) throws Exception { String hostname = getEnv("RABBITMQ_HOSTNAME", "localhost"); String port = getEnv("RABBITMQ_AMQP_PORT", "5672"); String scheme = getEnv("RABBITMQ_AMQP_SCHEME", "amqp"); - String username = getEnv("RABBITMQ_AMQP_USERNAME", "guest"); - String password = getEnv("RABBITMQ_AMQP_PASSWORD", "guest"); + String username = args.length > 0 ? args[0] : getEnv("RABBITMQ_AMQP_USERNAME", "guest"); + String password = args.length > 1 ? args[1] : getEnv("RABBITMQ_AMQP_PASSWORD", "guest"); String uri = scheme + "://" + hostname + ":" + port; + System.out.println("AMQPS Roundrip using uri " + uri); + Hashtable env = new Hashtable<>(); env.put(Context.INITIAL_CONTEXT_FACTORY, "org.apache.qpid.jms.jndi.JmsInitialContextFactory"); env.put("connectionfactory.myFactoryLookup", uri); diff --git a/deps/rabbitmq_management/selenium/bin/components/README.md b/deps/rabbitmq_management/selenium/bin/components/README.md new file mode 100644 index 000000000000..8b08212a068f --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/README.md @@ -0,0 +1,13 @@ +These shell scripts are not meant to be executed directly. Instead they are +imported by bin/suite_template script. + +Each component required to run a test, for instance, uaa or keycloak, has +its own script with its corresponding function: + start_() + +Although there is a convention to have two functions, the entrypoint `start_()`, +and `init_()`. The latter is called by the former to initialize +environment variables. +There is a third entry point for third party components (i.e. all except rabbitmq), the `ensure_()`. +This function starts the component if it is not running. Whereas `start_()` kills the +component's container if it is running and start it again. diff --git a/deps/rabbitmq_management/selenium/bin/components/devkeycloak b/deps/rabbitmq_management/selenium/bin/components/devkeycloak new file mode 100644 index 000000000000..352544372c4a --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/devkeycloak @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +KEYCLOAK_DOCKER_IMAGE=quay.io/keycloak/keycloak:20.0 + +init_devkeycloak() { + DEVKEYCLOAK_CONFIG_PATH=${DEVKEYCLOAK_CONFIG_PATH:-multi-oauth/devkeycloak} + DEVKEYCLOAK_CONFIG_DIR=$(realpath ${TEST_DIR}/${DEVKEYCLOAK_CONFIG_PATH}) + + print "> DEVKEYCLOAK_CONFIG_DIR: ${DEVKEYCLOAK_CONFIG_DIR}" + print "> DEVKEYCLOAK_URL: ${DEVKEYCLOAK_URL}" + print "> DEVKEYCLOAK_DOCKER_IMAGE: ${KEYCLOAK_DOCKER_IMAGE}" +} +ensure_devkeycloak() { + if docker ps | grep devkeycloak &> /dev/null; then + print "devkeycloak already running ..." + else + start_devkeycloak + fi +} + +start_devkeycloak() { + begin "Starting devkeycloak ..." 
+ + init_devkeycloak + kill_container_if_exist devkeycloak + + MOUNT_DEVKEYCLOAK_CONF_DIR=$CONF_DIR/devkeycloak + + mkdir -p $MOUNT_DEVKEYCLOAK_CONF_DIR + ${BIN_DIR}/gen-keycloak-json ${DEVKEYCLOAK_CONFIG_DIR} "dev-realm" $ENV_FILE $MOUNT_DEVKEYCLOAK_CONF_DIR/dev-realm.json + print "> EFFECTIVE DEVKEYCLOAK_CONFIG_FILE: $MOUNT_DEVKEYCLOAK_CONF_DIR/dev-realm.json" + cp ${DEVKEYCLOAK_CONFIG_DIR}/*.pem $MOUNT_DEVKEYCLOAK_CONF_DIR + + docker run \ + --detach \ + --name devkeycloak \ + --net ${DOCKER_NETWORK} \ + --publish 8082:8080 \ + --publish 8442:8442 \ + --env KEYCLOAK_ADMIN=admin \ + --env KEYCLOAK_ADMIN_PASSWORD=admin \ + --mount type=bind,source=${MOUNT_DEVKEYCLOAK_CONF_DIR},target=/opt/keycloak/data/import/ \ + ${KEYCLOAK_DOCKER_IMAGE} start-dev --import-realm \ + --https-certificate-file=/opt/keycloak/data/import/server_devkeycloak_certificate.pem \ + --https-certificate-key-file=/opt/keycloak/data/import/server_devkeycloak_key.pem \ + --hostname=devkeycloak --hostname-admin=devkeycloak --https-port=8442 + + wait_for_oidc_endpoint devkeycloak $DEVKEYCLOAK_URL $MOUNT_DEVKEYCLOAK_CONF_DIR/ca_certificate.pem + end "devkeycloak is ready" + print " Note: If you modify devkeycloak configuration, make sure to run the following command to export the configuration." + print " docker exec -it devkeycloak /opt/keycloak/bin/kc.sh export --users realm_file --realm test --dir /opt/keycloak/data/import/" + +} diff --git a/deps/rabbitmq_management/selenium/bin/components/fakeportal b/deps/rabbitmq_management/selenium/bin/components/fakeportal new file mode 100644 index 000000000000..aadbda50327b --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/fakeportal @@ -0,0 +1,52 @@ + +ensure_fakeportal() { + if docker ps | grep fakeportal &> /dev/null; then + print "fakeportal already running ..." + else + start_fakeportal + fi +} + +init_fakeportal() { + FAKEPORTAL_URL=${FAKEPORTAL_URL:-http://fakeportal:3000} + FAKEPORTAL_DIR=${SCRIPT}/../fakeportal + CLIENT_ID="${CLIENT_ID:-rabbit_idp_user}" + CLIENT_SECRET="${CLIENT_SECRET:-rabbit_idp_user}" + RABBITMQ_HOST=${RABBITMQ_HOST:-proxy:9090} + RABBITMQ_HOST_FOR_FAKEPORTAL=${RABBITMQ_HOST_FOR_FAKEPORTAL:-rabbitmq:15672} + + RABBITMQ_URL=$(calculate_rabbitmq_url $RABBITMQ_HOST) + RABBITMQ_URL_FOR_FAKEPORTAL=$(calculate_rabbitmq_url $RABBITMQ_HOST_FOR_FAKEPORTAL) + + print "> FAKEPORTAL_URL: ${FAKEPORTAL_URL}" + print "> UAA_URL_FOR_FAKEPORTAL: ${UAA_URL_FOR_FAKEPORTAL}" + print "> RABBITMQ_HOST_FOR_FAKEPORTAL: ${RABBITMQ_HOST_FOR_FAKEPORTAL}" + print "> RABBITMQ_HOST: ${RABBITMQ_HOST}" + print "> CLIENT_ID: ${CLIENT_ID}" + print "> CLIENT_SECRET: ${CLIENT_SECRET}" + print "> RABBITMQ_URL: ${RABBITMQ_URL}" +} +start_fakeportal() { + begin "Starting fakeportal ..." 
+ + init_fakeportal + kill_container_if_exist fakeportal + mocha_test_tag=($(md5sum $SELENIUM_ROOT_FOLDER/package.json)) + + docker run \ + --detach \ + --name fakeportal \ + --net ${DOCKER_NETWORK} \ + --publish 3000:3000 \ + --env PORT=3000 \ + --env RABBITMQ_URL="${RABBITMQ_URL_FOR_FAKEPORTAL}" \ + --env PROXIED_RABBITMQ_URL="${RABBITMQ_URL}" \ + --env UAA_URL="${UAA_URL_FOR_FAKEPORTAL}" \ + --env CLIENT_ID="${CLIENT_ID}" \ + --env CLIENT_SECRET="${CLIENT_SECRET}" \ + -v ${FAKEPORTAL_DIR}:/code/fakeportal \ + mocha-test:${mocha_test_tag} run fakeportal + + wait_for_url $FAKEPORTAL_URL + end "Fakeportal is ready" +} diff --git a/deps/rabbitmq_management/selenium/bin/components/fakeproxy b/deps/rabbitmq_management/selenium/bin/components/fakeproxy new file mode 100644 index 000000000000..2705ee80427e --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/fakeproxy @@ -0,0 +1,52 @@ + + +ensure_fakeproxy() { + if docker ps | grep fakeproxy &> /dev/null; then + print "fakeproxy already running ..." + else + start_fakeproxy + fi +} + +init_fakeproxy() { + FAKEPROXY_URL=${FAKEPROXY_URL:-http://fakeproxy:9090} + FAKEPROXY_DIR=${SCRIPT}/../fakeportal + CLIENT_ID="${CLIENT_ID:-rabbit_idp_user}" + CLIENT_SECRET="${CLIENT_SECRET:-rabbit_idp_user}" + RABBITMQ_HOST_FOR_FAKEPROXY=${RABBITMQ_HOST_FOR_FAKEPROXY:-rabbitmq:15672} + UAA_URL_FOR_FAKEPROXY=${UAA_URL_FOR_FAKEPROXY:-http://uaa:8080} + + RABBITMQ_URL_FOR_FAKEPROXY=$(calculate_rabbitmq_url $RABBITMQ_HOST_FOR_FAKEPROXY) + + print "> FAKEPROXY_URL: ${FAKEPROXY_URL}" + print "> UAA_URL: ${UAA_URL_FOR_FAKEPROXY}" + print "> RABBITMQ_HOST_FOR_FAKEPROXY: ${RABBITMQ_HOST_FOR_FAKEPROXY}" + print "> CLIENT_ID: ${CLIENT_ID}" + print "> CLIENT_SECRET: ${CLIENT_SECRET}" + print "> RABBITMQ_URL_FOR_FAKEPROXY: ${RABBITMQ_URL_FOR_FAKEPROXY}" + +} +start_fakeproxy() { + begin "Starting fakeproxy ..." + + init_fakeproxy + kill_container_if_exist fakeproxy + mocha_test_tag=($(md5sum $SELENIUM_ROOT_FOLDER/package.json)) + + docker run \ + --detach \ + --name fakeproxy \ + --net ${DOCKER_NETWORK} \ + --publish 9090:9090 \ + --env PORT=9090 \ + --env RABBITMQ_URL="${RABBITMQ_URL_FOR_FAKEPROXY}" \ + --env UAA_URL="${UAA_URL_FOR_FAKEPROXY}" \ + --env CLIENT_ID="${CLIENT_ID}" \ + --env CLIENT_SECRET="${CLIENT_SECRET}" \ + -v ${FAKEPROXY_DIR}:/code/fakeportal \ + mocha-test:${mocha_test_tag} run fakeproxy + + wait_for_url $FAKEPROXY_URL + end "fakeproxy is ready" + +} diff --git a/deps/rabbitmq_management/selenium/bin/components/keycloak b/deps/rabbitmq_management/selenium/bin/components/keycloak new file mode 100644 index 000000000000..d6470262f194 --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/keycloak @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +KEYCLOAK_DOCKER_IMAGE=quay.io/keycloak/keycloak:20.0 + +ensure_keycloak() { + if docker ps | grep keycloak &> /dev/null; then + print "keycloak already running ..." + else + start_keycloak + fi +} +init_keycloak() { + KEYCLOAK_CONFIG_PATH=${KEYCLOAK_CONFIG_PATH:-oauth/keycloak} + KEYCLOAK_CONFIG_DIR=$(realpath ${TEST_DIR}/${KEYCLOAK_CONFIG_PATH}) + KEYCLOAK_URL=${OAUTH_PROVIDER_URL} + + print "> KEYCLOAK_CONFIG_DIR: ${KEYCLOAK_CONFIG_DIR}" + print "> KEYCLOAK_URL: ${KEYCLOAK_URL}" + print "> KEYCLOAK_DOCKER_IMAGE: ${KEYCLOAK_DOCKER_IMAGE}" +} +start_keycloak() { + begin "Starting keycloak ..." 
+ + init_keycloak + kill_container_if_exist keycloak + + MOUNT_KEYCLOAK_CONF_DIR=$CONF_DIR/keycloak + + mkdir -p $MOUNT_KEYCLOAK_CONF_DIR + ${BIN_DIR}/gen-keycloak-json ${KEYCLOAK_CONFIG_DIR} "test-realm" $ENV_FILE $MOUNT_KEYCLOAK_CONF_DIR/test-realm.json + print "> EFFECTIVE KEYCLOAK_CONFIG_FILE: $MOUNT_KEYCLOAK_CONF_DIR/test-realm.json" + cp ${KEYCLOAK_CONFIG_DIR}/*.pem $MOUNT_KEYCLOAK_CONF_DIR + + docker run \ + --detach \ + --name keycloak \ + --net ${DOCKER_NETWORK} \ + --publish 8081:8080 \ + --publish 8443:8443 \ + --env KEYCLOAK_ADMIN=admin \ + --env KEYCLOAK_ADMIN_PASSWORD=admin \ + --mount type=bind,source=${MOUNT_KEYCLOAK_CONF_DIR},target=/opt/keycloak/data/import/ \ + ${KEYCLOAK_DOCKER_IMAGE} start-dev --import-realm \ + --https-certificate-file=/opt/keycloak/data/import/server_keycloak_certificate.pem \ + --https-certificate-key-file=/opt/keycloak/data/import/server_keycloak_key.pem + + wait_for_oidc_endpoint keycloak $KEYCLOAK_URL $MOUNT_KEYCLOAK_CONF_DIR/ca_certificate.pem + end "Keycloak is ready" + + print " Note: If you modify keycloak configuration. Make sure to run the following command to export the configuration." + print " docker exec -it keycloak /opt/keycloak/bin/kc.sh export --users realm_file --realm test --dir /opt/keycloak/data/import/" +} diff --git a/deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-http b/deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-http new file mode 100644 index 000000000000..8662b80c4a3e --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-http @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +ensure_mock-auth-backend-http() { + if docker ps | grep mock-auth-backend-http &> /dev/null; then + print "mock-auth-backend-http already running ..." + else + start_mock-auth-backend-http + fi +} +init_mock-auth-backend-http() { + AUTH_BACKEND_HTTP_BASEURL=${AUTH_BACKEND_HTTP_BASEURL:-http://localhost:8888} + AUTH_BACKEND_HTTP_DIR=${TEST_CASES_DIR}/mock-auth-backend-http + + print "> AUTH_BACKEND_HTTP_BASEURL: ${AUTH_BACKEND_HTTP_BASEURL}" + print "> AUTH_BACKEND_HTTP_DIR: ${AUTH_BACKEND_HTTP_DIR}" + +} +start_mock-auth-backend-http() { + begin "Starting mock-auth-backend-http ..." + + init_mock-auth-backend-http + kill_container_if_exist mock-auth-backend-http + + docker run \ + --detach \ + --name mock-auth-backend-http \ + --net ${DOCKER_NETWORK} \ + --publish 8888:1080 \ + --env MOCKSERVER_INITIALIZATION_JSON_PATH="/config/defaultExpectations.json" \ + -v ${AUTH_BACKEND_HTTP_DIR}:/config \ + mockserver/mockserver + + wait_for_url $AUTH_BACKEND_HTTP_BASEURL/ready + end "mock-auth-backend-http is ready" +} diff --git a/deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-ldap b/deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-ldap new file mode 100644 index 000000000000..f380b3bbc1c7 --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-ldap @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +ensure_mock-auth-backend-ldap() { + if docker ps | grep mock-auth-backend-ldap &> /dev/null; then + print "mock-auth-backend-ldap already running ..." + else + start_mock-auth-backend-ldap + fi +} +init_mock-auth-backend-ldap() { + AUTH_BACKEND_LDAP_DIR=${TEST_CONFIG_DIR}/mock-auth-backend-ldap + + print "> AUTH_BACKEND_LDAP_DIR: ${AUTH_BACKEND_LDAP_DIR}" +} +start_mock-auth-backend-ldap() { + begin "Starting mock-auth-backend-ldap ..." 
+ + init_mock-auth-backend-ldap + kill_container_if_exist mock-auth-backend-ldap + + docker run \ + --detach \ + --name mock-auth-backend-ldap \ + --net ${DOCKER_NETWORK} \ + --env LDAP_ORGANISATION="Authentication and Tags" \ + --env LDAP_DOMAIN="example.com" \ + --env LDAP_ADMIN_PASSWORD="admin" \ + --publish 389:389 \ + --publish 636:636 \ + -v ${AUTH_BACKEND_LDAP_DIR}:/config \ + osixia/openldap:1.2.1 + + wait_for_message mock-auth-backend-ldap "starting" + docker exec mock-auth-backend-ldap ldapadd \ + -x -w "admin" \ + -H ldap:// \ + -D "cn=admin,dc=example,dc=com" \ + -f /config/import.ldif + + end "mock-auth-backend-ldap is ready" +} diff --git a/deps/rabbitmq_management/selenium/bin/components/prodkeycloak b/deps/rabbitmq_management/selenium/bin/components/prodkeycloak new file mode 100644 index 000000000000..c0e3ee16192e --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/prodkeycloak @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +KEYCLOAK_DOCKER_IMAGE=quay.io/keycloak/keycloak:20.0 + +ensure_prodkeycloak() { + if docker ps | grep prodkeycloak &> /dev/null; then + print "prodkeycloak already running ..." + else + start_prodkeycloak + fi +} +init_prodkeycloak() { + PRODKEYCLOAK_CONFIG_PATH=${PRODKEYCLOAK_CONFIG_PATH:-multi-oauth/prodkeycloak} + PRODKEYCLOAK_CONFIG_DIR=$(realpath ${TEST_DIR}/${PRODKEYCLOAK_CONFIG_PATH}) + + print "> PRODKEYCLOAK_CONFIG_DIR: ${PRODKEYCLOAK_CONFIG_DIR}" + print "> PRODKEYCLOAK_URL: ${PRODKEYCLOAK_URL}" + print "> KEYCLOAK_DOCKER_IMAGE: ${KEYCLOAK_DOCKER_IMAGE}" +} +start_prodkeycloak() { + begin "Starting prodkeycloak ..." + + init_prodkeycloak + kill_container_if_exist prodkeycloak + + MOUNT_PRODKEYCLOAK_CONF_DIR=$CONF_DIR/prodkeycloak + + mkdir -p $MOUNT_PRODKEYCLOAK_CONF_DIR + ${BIN_DIR}/gen-keycloak-json ${PRODKEYCLOAK_CONFIG_DIR} "prod-realm" $ENV_FILE $MOUNT_PRODKEYCLOAK_CONF_DIR/prod-realm.json + print "> EFFECTIVE PRODKEYCLOAK_CONFIG_FILE: $MOUNT_PRODKEYCLOAK_CONF_DIR/prod-realm.json" + cp ${PRODKEYCLOAK_CONFIG_DIR}/*.pem $MOUNT_PRODKEYCLOAK_CONF_DIR + + docker run \ + --detach \ + --name prodkeycloak \ + --net ${DOCKER_NETWORK} \ + --publish 8081:8080 \ + --publish 8443:8443 \ + --env KEYCLOAK_ADMIN=admin \ + --env KEYCLOAK_ADMIN_PASSWORD=admin \ + --mount type=bind,source=${MOUNT_PRODKEYCLOAK_CONF_DIR},target=/opt/keycloak/data/import/ \ + ${KEYCLOAK_DOCKER_IMAGE} start-dev --import-realm \ + --https-certificate-file=/opt/keycloak/data/import/server_prodkeycloak_certificate.pem \ + --https-certificate-key-file=/opt/keycloak/data/import/server_prodkeycloak_key.pem \ + --hostname=prodkeycloak --hostname-admin=prodkeycloak --https-port=8443 + + wait_for_oidc_endpoint prodkeycloak $PRODKEYCLOAK_URL $MOUNT_PRODKEYCLOAK_CONF_DIR/ca_certificate.pem + end "prodkeycloak is ready" + print " Note: If you modify prodkeycloak configuration, make sure to run the following command to export the configuration." + print " docker exec -it prodkeycloak /opt/keycloak/bin/kc.sh export --users realm_file --realm test --dir /opt/keycloak/data/import/" + +} diff --git a/deps/rabbitmq_management/selenium/bin/components/proxy b/deps/rabbitmq_management/selenium/bin/components/proxy new file mode 100644 index 000000000000..911ddd70d46b --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/proxy @@ -0,0 +1,44 @@ + +HTTPD_DOCKER_IMAGE=httpd:latest + +ensure_proxy() { + if docker ps | grep proxy &> /dev/null; then + print "proxy already running ..." 
+ else + start_proxy + fi +} +init_proxy() { + HTTPD_CONFIG_DIR=${TEST_CONFIG_DIR}/httpd-proxy + PUBLIC_RABBITMQ_HOST=${PUBLIC_RABBITMQ_HOST:-proxy:9090} + PROXIED_RABBITMQ_URL=$(calculate_rabbitmq_url $PUBLIC_RABBITMQ_HOST) + + print "> HTTPD_CONFIG: ${HTTPD_CONFIG_DIR}" + print "> PUBLIC_RABBITMQ_HOST: ${PUBLIC_RABBITMQ_HOST}" + print "> PROXIED_RABBITMQ_URL: ${PROXIED_RABBITMQ_URL}" + print "> RABBITMQ_HOST_FOR_PROXY: ${RABBITMQ_HOST_FOR_PROXY}" + print "> HTTPD_DOCKER_IMAGE: ${HTTPD_DOCKER_IMAGE}" +} +start_proxy() { + begin "Starting proxy ..." + + init_proxy + kill_container_if_exist proxy + + MOUNT_HTTPD_CONFIG_DIR=$CONF_DIR/httpd + + mkdir -p $MOUNT_HTTPD_CONFIG_DIR + ${BIN_DIR}/gen-httpd-conf ${HTTPD_CONFIG_DIR} $ENV_FILE $MOUNT_HTTPD_CONFIG_DIR/httpd.conf + print "> EFFECTIVE HTTPD_CONFIG_FILE: $MOUNT_HTTPD_CONFIG_DIR/httpd.conf" + + docker run \ + --detach \ + --name proxy \ + --net ${DOCKER_NETWORK} \ + --publish 9090:9090 \ + --mount "type=bind,source=${MOUNT_HTTPD_CONFIG_DIR},target=/usr/local/apache2/conf" \ + ${HTTPD_DOCKER_IMAGE} + + wait_for_url $PROXIED_RABBITMQ_URL + end "Proxy is ready" +} diff --git a/deps/rabbitmq_management/selenium/bin/components/rabbitmq b/deps/rabbitmq_management/selenium/bin/components/rabbitmq new file mode 100644 index 000000000000..1d36b6567fe8 --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/rabbitmq @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +init_rabbitmq() { + RABBITMQ_CONFIG_DIR=${TEST_CONFIG_DIR} + RABBITMQ_DOCKER_IMAGE=${RABBITMQ_DOCKER_IMAGE:-rabbitmq} + + print "> RABBITMQ_CONFIG_DIR: ${RABBITMQ_CONFIG_DIR}" + print "> RABBITMQ_DOCKER_IMAGE: ${RABBITMQ_DOCKER_IMAGE}" + [[ -z "${OAUTH_SERVER_CONFIG_BASEDIR}" ]] || print "> OAUTH_SERVER_CONFIG_BASEDIR: ${OAUTH_SERVER_CONFIG_BASEDIR}" + [[ -z "${OAUTH_SERVER_CONFIG_DIR}" ]] || print "> OAUTH_SERVER_CONFIG_DIR: ${OAUTH_SERVER_CONFIG_DIR}" + +} +start_rabbitmq() { + if [[ "$PROFILES" == *"docker"* ]]; then + start_docker_rabbitmq + else + start_local_rabbitmq + fi +} + +start_local_rabbitmq() { + begin "Starting rabbitmq ..." + + init_rabbitmq + + RABBITMQ_SERVER_ROOT=$(realpath $TEST_DIR/../../../../) + MOUNT_RABBITMQ_CONF="/etc/rabbitmq/rabbitmq.conf" + MOUNT_ADVANCED_CONFIG="/etc/rabbitmq/advanced.config" + + RABBITMQ_TEST_DIR="${RABBITMQ_CONFIG_DIR}" ${BIN_DIR}/gen-rabbitmq-conf ${RABBITMQ_CONFIG_DIR} $ENV_FILE /tmp$MOUNT_RABBITMQ_CONF + + print "> EFFECTIVE RABBITMQ_CONFIG_FILE: /tmp$MOUNT_RABBITMQ_CONF" + ${BIN_DIR}/gen-advanced-config ${RABBITMQ_CONFIG_DIR} $ENV_FILE /tmp$MOUNT_ADVANCED_CONFIG + RESULT=$? + if [ $RESULT -eq 0 ]; then + print "> EFFECTIVE RABBITMQ_CONFIG_FILE: /tmp$MOUNT_ADVANCED_CONFIG" + gmake --directory=${RABBITMQ_SERVER_ROOT} run-broker \ + RABBITMQ_ENABLED_PLUGINS_FILE=${RABBITMQ_CONFIG_DIR}/enabled_plugins \ + RABBITMQ_CONFIG_FILE=/tmp$MOUNT_RABBITMQ_CONF \ + RABBITMQ_ADVANCED_CONFIG_FILE=/tmp$MOUNT_ADVANCED_CONFIG + else + gmake --directory=${RABBITMQ_SERVER_ROOT} run-broker \ + RABBITMQ_ENABLED_PLUGINS_FILE=${RABBITMQ_CONFIG_DIR}/enabled_plugins \ + RABBITMQ_CONFIG_FILE=/tmp$MOUNT_RABBITMQ_CONF + fi + print "> RABBITMQ_TEST_DIR: ${RABBITMQ_CONFIG_DIR}" + + +} +start_docker_rabbitmq() { + begin "Starting rabbitmq in docker ..." 
+ + init_rabbitmq + kill_container_if_exist rabbitmq + + mkdir -p $CONF_DIR/rabbitmq + MOUNT_RABBITMQ_CONF="/etc/rabbitmq/rabbitmq.conf" + MOUNT_ADVANCED_CONFIG="/etc/rabbitmq/advanced.config" + + RABBITMQ_TEST_DIR="/var/rabbitmq" ${BIN_DIR}/gen-rabbitmq-conf ${RABBITMQ_CONFIG_DIR} $ENV_FILE $CONF_DIR/rabbitmq/rabbitmq.conf + print "> EFFECTIVE RABBITMQ_CONFIG_FILE: $CONF_DIR/rabbitmq/rabbitmq.conf" + ${BIN_DIR}/gen-advanced-config ${RABBITMQ_CONFIG_DIR} $ENV_FILE /$CONF_DIR/rabbitmq/advanced.config + RESULT=$? + if [ $RESULT -eq 0 ]; then + print "> EFFECTIVE ADVANCED_CONFIG_FILE: $CONF_DIR/rabbitmq/advanced.config" + EXTRA_MOUNTS="-v $CONF_DIR/rabbitmq/advanced.config:${MOUNT_ADVANCED_CONFIG}:ro " + fi + if [ -f ${RABBITMQ_CONFIG_DIR}/enabled_plugins ]; then + EXTRA_MOUNTS="$EXTRA_MOUNTS -v ${RABBITMQ_CONFIG_DIR}/enabled_plugins:/etc/rabbitmq/enabled_plugins " + fi + if [ -d ${RABBITMQ_CONFIG_DIR}/certs ]; then + EXTRA_MOUNTS=" $EXTRA_MOUNTS -v ${RABBITMQ_CONFIG_DIR}/certs:/var/rabbitmq/certs " + fi + if [ -d ${RABBITMQ_CONFIG_DIR}/imports ]; then + EXTRA_MOUNTS="$EXTRA_MOUNTS -v ${RABBITMQ_CONFIG_DIR}/imports:/var/rabbitmq/imports " + fi + + print "> RABBITMQ_TEST_DIR: /var/rabbitmq" + + docker run \ + --detach \ + --name rabbitmq \ + --net ${DOCKER_NETWORK} \ + -p 5672:5672 \ + -p 5671:5671 \ + -p 15672:15672 \ + -p 15671:15671 \ + -v ${RABBITMQ_CONFIG_DIR}/logging.conf:/etc/rabbitmq/conf.d/logging.conf:ro \ + -v $CONF_DIR/rabbitmq/rabbitmq.conf:${MOUNT_RABBITMQ_CONF}:ro \ + -v ${TEST_DIR}:/config \ + ${EXTRA_MOUNTS} \ + ${RABBITMQ_DOCKER_IMAGE} + + wait_for_message rabbitmq "Server startup complete" + end "RabbitMQ ready" +} diff --git a/deps/rabbitmq_management/selenium/bin/components/selenium b/deps/rabbitmq_management/selenium/bin/components/selenium new file mode 100644 index 000000000000..3ebf955053e1 --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/selenium @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +arch=$(uname -i) +if [[ $arch == arm* ]]; then + SELENIUM_DOCKER_IMAGE=selenium/standalone-chrome:123.0 +else + SELENIUM_DOCKER_IMAGE=seleniarm/standalone-chromium:123.0 +fi + +start_selenium() { + begin "Starting selenium ..." + + print "> SELENIUM_DOCKER_IMAGE: ${SELENIUM_DOCKER_IMAGE}" + kill_container_if_exist selenium + + docker run \ + --detach \ + --name selenium \ + --net ${DOCKER_NETWORK} \ + -p 4444:4444 \ + --shm-size=2g \ + ${SELENIUM_DOCKER_IMAGE} + + wait_for_message selenium "Started Selenium Standalone" + end "Selenium ready" +} diff --git a/deps/rabbitmq_management/selenium/bin/components/uaa b/deps/rabbitmq_management/selenium/bin/components/uaa new file mode 100644 index 000000000000..f07b535176f8 --- /dev/null +++ b/deps/rabbitmq_management/selenium/bin/components/uaa @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +UAA_DOCKER_IMAGE=cloudfoundry/uaa:75.21.0 + +ensure_uaa() { + if docker ps | grep uaa &> /dev/null; then + print "uaa already running ..." + else + start_uaa + fi +} +init_uaa() { + UAA_CONFIG_PATH=${UAA_CONFIG_PATH:-oauth/uaa} + UAA_CONFIG_DIR=$(realpath ${TEST_DIR}/${UAA_CONFIG_PATH}) + + print "> UAA_CONFIG_DIR: ${UAA_CONFIG_DIR}" + print "> UAA_URL: ${UAA_URL}" + print "> UAA_DOCKER_IMAGE: ${UAA_DOCKER_IMAGE}" +} +start_uaa() { + begin "Starting UAA ..." 
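start_docker_rabbitmq only bind-mounts what the suite actually provides: enabled_plugins, certs/ and imports/ are appended to EXTRA_MOUNTS when they exist under the suite's config directory. The same pattern extends to other optional inputs; a sketch with a hypothetical definitions/ directory (not part of this patch):

    # Hypothetical extra optional mount, following the pattern used above
    if [ -d ${RABBITMQ_CONFIG_DIR}/definitions ]; then
      EXTRA_MOUNTS="$EXTRA_MOUNTS -v ${RABBITMQ_CONFIG_DIR}/definitions:/var/rabbitmq/definitions "
    fi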
+ + init_uaa + kill_container_if_exist uaa + + MOUNT_UAA_CONF_DIR=$CONF_DIR/uaa + + mkdir -p $MOUNT_UAA_CONF_DIR + cp ${UAA_CONFIG_DIR}/* $MOUNT_UAA_CONF_DIR + ${BIN_DIR}/gen-uaa-yml ${UAA_CONFIG_DIR} $ENV_FILE $MOUNT_UAA_CONF_DIR/uaa.yml + print "> EFFECTIVE UAA_CONFIG_FILE: $MOUNT_UAA_CONF_DIR/uaa.yml" + + docker run \ + --detach \ + --name uaa \ + --net ${DOCKER_NETWORK} \ + --publish 8080:8080 \ + --mount "type=bind,source=$MOUNT_UAA_CONF_DIR,target=/uaa" \ + --env UAA_CONFIG_PATH="/uaa" \ + --env JAVA_OPTS="-Djava.security.egd=file:/dev/./urandom" \ + ${UAA_DOCKER_IMAGE} + + wait_for_oidc_endpoint uaa $UAA_URL + end "UAA is ready" +} diff --git a/deps/rabbitmq_management/selenium/bin/gen-env-file b/deps/rabbitmq_management/selenium/bin/gen-env-file index 4bece07d49e2..60c4b4bfc50d 100755 --- a/deps/rabbitmq_management/selenium/bin/gen-env-file +++ b/deps/rabbitmq_management/selenium/bin/gen-env-file @@ -1,6 +1,8 @@ #!/usr/bin/env bash SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +#set -x + ENV_FILE="/tmp/rabbitmq/.env" FIND_PATH=$1 ENV_FILE=$2 @@ -9,12 +11,40 @@ FIND_PARENT_PATH="$(dirname "$FIND_PATH")" generate_env_file() { parentdir="$(dirname "$ENV_FILE")" mkdir -p $parentdir - echo "" > $ENV_FILE + echo "#!/usr/bin/env bash" > $ENV_FILE + echo "set -u" >> $ENV_FILE + + declare -a FILE_ARRAY + for f in $($SCRIPT/find-template-files $FIND_PATH "env") + do + FILE_ARRAY+=($f) + done + + TMP_ENV_FILE="/tmp/env-tmp" + FILE_ARRAY_LENGTH=${#FILE_ARRAY[@]} - for f in $($SCRIPT/find-template-files $FIND_PATH ".env") + ## Append each .env file one by one while all variables can be resolved + ## if one variable cannot be resolve the temporary .env file fails + ## and we add the last env file to end of the list and carry one with the next one + while [ $FILE_ARRAY_LENGTH -gt 0 ] do - cat $f >> $ENV_FILE + f="${FILE_ARRAY[0]}" + cp $ENV_FILE $TMP_ENV_FILE + cat $f >> $TMP_ENV_FILE + chmod u+x $TMP_ENV_FILE + $TMP_ENV_FILE 2> /dev/null + + if [ $? -eq 0 ] + then + cat $f >> $ENV_FILE + else + FILE_ARRAY+=($f) # insert it to the end + fi + FILE_ARRAY=("${FILE_ARRAY[@]:1}") # remove the first element + FILE_ARRAY_LENGTH=${#FILE_ARRAY[@]} done + rm -r $TMP_ENV_FILE + tail +3 $ENV_FILE > "$ENV_FILE.tmp" && mv "$ENV_FILE.tmp" $ENV_FILE } generate_env_file diff --git a/deps/rabbitmq_management/selenium/bin/gen-keycloak-json b/deps/rabbitmq_management/selenium/bin/gen-keycloak-json index eaf205f11963..bd38efa994ec 100755 --- a/deps/rabbitmq_management/selenium/bin/gen-keycloak-json +++ b/deps/rabbitmq_management/selenium/bin/gen-keycloak-json @@ -5,8 +5,9 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" #set -x KEYCLOAK_PATH=${1:?First parameter is the directory env and config files are relative to} -ENV_FILE=${2:?Second parameter is a comma-separated list of .env file which has exported template variables} -FINAL_CONFIG_FILE=${3:?Forth parameter is the name of the final config file. It is relative to where this script is run from} +KEYCLOAK_FILENAME=${2:?Second parameter is the keycloak filename of the realm without extension} +ENV_FILE=${3:?Second parameter is a comma-separated list of .env file which has exported template variables} +FINAL_CONFIG_FILE=${4:?Forth parameter is the name of the final config file. 
It is relative to where this script is run from} source $ENV_FILE @@ -15,7 +16,7 @@ mkdir -p $parentdir echo "" > $FINAL_CONFIG_FILE -for f in $($SCRIPT/find-template-files $KEYCLOAK_PATH "test-realm" "json") +for f in $($SCRIPT/find-template-files $KEYCLOAK_PATH $KEYCLOAK_FILENAME "json") do envsubst < $f >> $FINAL_CONFIG_FILE done diff --git a/deps/rabbitmq_management/selenium/bin/suite_template b/deps/rabbitmq_management/selenium/bin/suite_template index 9a1f4e70dc28..3c608016ade0 100644 --- a/deps/rabbitmq_management/selenium/bin/suite_template +++ b/deps/rabbitmq_management/selenium/bin/suite_template @@ -1,14 +1,14 @@ #!/usr/bin/env bash +#set -x + SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SUITE=$(caller) SUITE=$(basename "${SUITE}" .sh ) -SELENIUM_DOCKER_IMAGE=selenium/standalone-chrome:103.0 -UAA_DOCKER_IMAGE=cloudfoundry/uaa:75.21.0 -KEYCLOAK_DOCKER_IMAGE=quay.io/keycloak/keycloak:20.0 -HTTPD_DOCKER_IMAGE=httpd:latest -PADDING="" +tabs 1 +declare -i PADDING_LEVEL=0 +declare -i STEP=1 declare -a REQUIRED_COMPONENTS find_selenium_dir() { @@ -32,6 +32,13 @@ SCREENS=${SELENIUM_ROOT_FOLDER}/screens/${SUITE} CONF_DIR=/tmp/selenium/${SUITE} ENV_FILE=$CONF_DIR/.env +for f in $SCRIPT/components/*; do + if [[ ! "$f" == *README.md ]] + then + source $f; + fi +done + parse_arguments() { if [[ "$#" -gt 0 ]] then @@ -41,6 +48,9 @@ parse_arguments() { elif [[ "$1" == "start-others" ]] then echo "start-others" + elif [[ "$1" == "ensure-others" ]] + then + echo "ensure-others" elif [[ "$1" == "stop-others" ]] then echo "stop-others" @@ -55,18 +65,26 @@ parse_arguments() { COMMAND=$(parse_arguments $@) -tabs 4 + + print() { - echo -e "${PADDING}$1" + tabbing="" + if [[ $PADDING_LEVEL -gt 0 ]]; then + for i in $(seq $PADDING_LEVEL); do + tabbing="$tabbing\t" + done + fi + echo -e "$tabbing$1" } begin() { - print "\n$@" - PADDING="${PADDING}\t" + print "\n[$STEP] $@" + PADDING_LEVEL=$(($PADDING_LEVEL + 1)) + STEP=$(($STEP + 1)) } end() { - PADDING=`echo $PADDING | rev | cut -c 4- | rev` + PADDING_LEVEL=$(($PADDING_LEVEL - 1)) print "$@" } ensure_docker_network() { @@ -92,39 +110,27 @@ init_suite() { print "> PROFILES: ${PROFILES} " print "> ENV_FILE: ${ENV_FILE} " print "> COMMAND: ${COMMAND}" - end "Initialized suite ..." + end "Initialized suite" + + mkdir -p ${LOGS}/${SUITE} + mkdir -p ${SCREENS}/${SUITE} } build_mocha_image() { begin "Ensuring mocha-test image ..." tag=($(md5sum $SELENIUM_ROOT_FOLDER/package.json)) - if [[ "$(docker images -q mocha-test:$tag 2> /dev/null)" == "" ]]; then + print "> tag : $tag" + if [[ $(docker images -q mocha-test:$tag 2> /dev/null) == "" ]]; then docker build -t mocha-test:$tag --target test $SCRIPT/.. print "> Built docker image mocha-test:$tag" fi end "mocha-test image exists" } -start_selenium() { - begin "Starting selenium ..." 
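The gen-env-file loop above orders the env templates by dependency through retries: the generated file starts with set -u, so appending a template that references a still-undefined variable makes the temporary file fail to execute, and that template is re-queued until the variables it needs have been exported by another template. A standalone illustration of the rule, using two hypothetical templates:

    # env.a depends on a variable exported by env.b
    echo 'export RABBITMQ_URL=${RABBITMQ_SCHEME}://rabbitmq:15672' > /tmp/env.a
    echo 'export RABBITMQ_SCHEME=http'                             > /tmp/env.b
    # Pass 1: env.a is deferred because RABBITMQ_SCHEME is unbound under set -u
    bash -c 'set -u; source /tmp/env.a' 2>/dev/null || echo "env.a deferred"
    # Pass 2: once env.b has been applied, env.a resolves and is kept
    bash -c 'set -u; source /tmp/env.b; source /tmp/env.a' && echo "env.a resolved"

The final tail +3 then strips the shebang and the set -u header again, so the resulting .env can still be sourced or fed to eval as before.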
- - print "> SELENIUM_DOCKER_IMAGE: ${SELENIUM_DOCKER_IMAGE}" - kill_container_if_exist selenium - - docker run \ - --detach \ - --name selenium \ - --net ${DOCKER_NETWORK} \ - -p 4444:4444 \ - --shm-size=2g \ - ${SELENIUM_DOCKER_IMAGE} - - wait_for_message selenium "Started Selenium Standalone" - end "Selenium ready" -} - kill_container_if_exist() { - docker stop $1 &> /dev/null || true && docker rm $1 &> /dev/null || true + if docker stop $1 &> /dev/null; then + docker rm $1 &> /dev/null + fi } wait_for_message() { attemps_left=10 @@ -135,185 +141,36 @@ wait_for_message() { ((attemps_left--)) if [[ "$attemps_left" -lt 1 ]]; then print "Timed out waiting" + save_container_log $1 exit 1 fi done } -init_rabbitmq() { - RABBITMQ_CONFIG_DIR=${TEST_CONFIG_DIR} - RABBITMQ_DOCKER_IMAGE=${RABBITMQ_DOCKER_IMAGE:-rabbitmq} - - print "> RABBITMQ_CONFIG_DIR: ${RABBITMQ_CONFIG_DIR}" - print "> RABBITMQ_DOCKER_IMAGE: ${RABBITMQ_DOCKER_IMAGE}" - [[ -z "${OAUTH_SIGNING_KEY_PATH}" ]] || print "> OAUTH_SIGNING_KEY_PATH: ${OAUTH_SIGNING_KEY_PATH}" - -} -start_rabbitmq() { - if [[ "$PROFILES" == *"docker"* ]]; then - start_docker_rabbitmq - else - start_local_rabbitmq - fi -} - -start_local_rabbitmq() { - begin "Starting rabbitmq ..." - - init_rabbitmq - - - RABBITMQ_SERVER_ROOT=$(realpath $TEST_DIR/../../../../) - MOUNT_RABBITMQ_CONF="/etc/rabbitmq/rabbitmq.conf" - MOUNT_ADVANCED_CONFIG="/etc/rabbitmq/advanced.config" - - RABBITMQ_TEST_DIR="${RABBITMQ_CONFIG_DIR}" - ${BIN_DIR}/gen-rabbitmq-conf ${RABBITMQ_CONFIG_DIR} $ENV_FILE /tmp$MOUNT_RABBITMQ_CONF - print "> EFFECTIVE RABBITMQ_CONFIG_FILE: /tmp$MOUNT_RABBITMQ_CONF" - ${BIN_DIR}/gen-advanced-config ${RABBITMQ_CONFIG_DIR} $ENV_FILE /tmp$MOUNT_ADVANCED_CONFIG - RESULT=$? - if [ $RESULT -eq 0 ]; then - print "> EFFECTIVE RABBITMQ_CONFIG_FILE: /tmp$MOUNT_ADVANCED_CONFIG" - gmake --directory=${RABBITMQ_SERVER_ROOT} run-broker \ - RABBITMQ_ENABLED_PLUGINS_FILE=${RABBITMQ_CONFIG_DIR}/enabled_plugins \ - RABBITMQ_CONFIG_FILE=/tmp$MOUNT_RABBITMQ_CONF \ - RABBITMQ_ADVANCED_CONFIG_FILE=/tmp$MOUNT_ADVANCED_CONFIG - else - gmake --directory=${RABBITMQ_SERVER_ROOT} run-broker \ - RABBITMQ_ENABLED_PLUGINS_FILE=${RABBITMQ_CONFIG_DIR}/enabled_plugins \ - RABBITMQ_CONFIG_FILE=/tmp$MOUNT_RABBITMQ_CONF - fi - print "> RABBITMQ_TEST_DIR: ${RABBITMQ_CONFIG_DIR}" - - -} -start_docker_rabbitmq() { - begin "Starting rabbitmq in docker ..." - - init_rabbitmq - kill_container_if_exist rabbitmq - - mkdir -p $CONF_DIR/rabbitmq - MOUNT_RABBITMQ_CONF="/etc/rabbitmq/rabbitmq.conf" - MOUNT_ADVANCED_CONFIG="/etc/rabbitmq/advanced.config" - - RABBITMQ_TEST_DIR="/var/rabbitmq" ${BIN_DIR}/gen-rabbitmq-conf ${RABBITMQ_CONFIG_DIR} $ENV_FILE $CONF_DIR/rabbitmq/rabbitmq.conf - print "> EFFECTIVE RABBITMQ_CONFIG_FILE: $CONF_DIR/rabbitmq/rabbitmq.conf" - ${BIN_DIR}/gen-advanced-config ${RABBITMQ_CONFIG_DIR} $ENV_FILE /$CONF_DIR/rabbitmq/advanced.config - RESULT=$? 
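When wait_for_message gives up, the container's output is now captured before the suite exits, which makes startup failures easier to diagnose. The capture is the same thing save_container_log does, and unlike save_container_logs it does not require the container to still show up in docker container ls:

    # Equivalent of what save_container_log does on timeout (here for rabbitmq)
    docker logs rabbitmq &> "$LOGS/rabbitmq.log"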
- if [ $RESULT -eq 0 ]; then - print "> EFFECTIVE ADVANCED_CONFIG_FILE: $CONF_DIR/rabbitmq/advanced.config" - EXTRA_MOUNTS="-v $CONF_DIR/rabbitmq/advanced.config:${MOUNT_ADVANCED_CONFIG}:ro" - fi - print "> RABBITMQ_TEST_DIR: /var/rabbitmq" - - docker run \ - --detach \ - --name rabbitmq \ - --net ${DOCKER_NETWORK} \ - -p 15672:15672 -p 5672:5672 \ - -v ${RABBITMQ_CONFIG_DIR}/logging.conf:/etc/rabbitmq/conf.d/logging.conf:ro \ - -v $CONF_DIR/rabbitmq/rabbitmq.conf:${MOUNT_RABBITMQ_CONF}:ro \ - -v ${RABBITMQ_CONFIG_DIR}/enabled_plugins:/etc/rabbitmq/enabled_plugins \ - -v ${TEST_DIR}/${OAUTH_SIGNING_KEY_PATH}:/config \ - -v ${RABBITMQ_CONFIG_DIR}/imports:/var/rabbitmq/imports \ - ${EXTRA_MOUNTS} \ - ${RABBITMQ_DOCKER_IMAGE} - - wait_for_message rabbitmq "Server startup complete" - end "RabbitMQ ready" -} -init_uaa() { - UAA_CONFIG_PATH=${UAA_CONFIG_PATH:-oauth/uaa} - UAA_CONFIG_DIR=$(realpath ${TEST_DIR}/${UAA_CONFIG_PATH}) - - print "> UAA_CONFIG_DIR: ${UAA_CONFIG_DIR}" - print "> UAA_URL: ${UAA_URL}" - print "> UAA_DOCKER_IMAGE: ${UAA_DOCKER_IMAGE}" -} -start_uaa() { - begin "Starting UAA ..." - - init_uaa - kill_container_if_exist uaa - - MOUNT_UAA_CONF_DIR=$CONF_DIR/uaa - - mkdir -p $MOUNT_UAA_CONF_DIR - cp ${UAA_CONFIG_DIR}/* $MOUNT_UAA_CONF_DIR - ${BIN_DIR}/gen-uaa-yml ${UAA_CONFIG_DIR} $ENV_FILE $MOUNT_UAA_CONF_DIR/uaa.yml - print "> EFFECTIVE UAA_CONFIG_FILE: $MOUNT_UAA_CONF_DIR/uaa.yml" - - docker run \ - --detach \ - --name uaa \ - --net ${DOCKER_NETWORK} \ - --publish 8080:8080 \ - --mount "type=bind,source=$MOUNT_UAA_CONF_DIR,target=/uaa" \ - --env UAA_CONFIG_PATH="/uaa" \ - --env JAVA_OPTS="-Djava.security.egd=file:/dev/./urandom" \ - ${UAA_DOCKER_IMAGE} - - wait_for_oidc_endpoint uaa $UAA_URL - end "UAA is ready" -} -init_keycloak() { - KEYCLOAK_CONFIG_PATH=${KEYCLOAK_CONFIG_PATH:-oauth/keycloak} - KEYCLOAK_CONFIG_DIR=$(realpath ${TEST_DIR}/${KEYCLOAK_CONFIG_PATH}) - KEYCLOAK_URL=${OAUTH_PROVIDER_URL} - - print "> KEYCLOAK_CONFIG_DIR: ${KEYCLOAK_CONFIG_DIR}" - print "> KEYCLOAK_URL: ${KEYCLOAK_URL}" - print "> KEYCLOAK_DOCKER_IMAGE: ${KEYCLOAK_DOCKER_IMAGE}" -} -start_keycloak() { - begin "Starting keycloak ..." 
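gen-keycloak-json now takes the realm file name (without extension) as its second argument instead of hard-coding test-realm, which is what lets several keycloak components render different realms from the same script. The prodkeycloak component shown earlier calls it like this; presumably the plain keycloak component does the same with "test-realm":

    ${BIN_DIR}/gen-keycloak-json ${PRODKEYCLOAK_CONFIG_DIR} "prod-realm" \
        $ENV_FILE $MOUNT_PRODKEYCLOAK_CONF_DIR/prod-realm.json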
- - init_keycloak - kill_container_if_exist keycloak - - MOUNT_KEYCLOAK_CONF_DIR=$CONF_DIR/keycloak - - mkdir -p $MOUNT_KEYCLOAK_CONF_DIR - ${BIN_DIR}/gen-keycloak-json ${KEYCLOAK_CONFIG_DIR} $ENV_FILE $MOUNT_KEYCLOAK_CONF_DIR/test-realm.json - print "> EFFECTIVE KEYCLOAK_CONFIG_FILE: $MOUNT_KEYCLOAK_CONF_DIR/test-realm.json" - cp ${KEYCLOAK_CONFIG_DIR}/*.pem $MOUNT_KEYCLOAK_CONF_DIR - - docker run \ - --detach \ - --name keycloak \ - --net ${DOCKER_NETWORK} \ - --publish 8080:8080 \ - --publish 8443:8443 \ - --env KEYCLOAK_ADMIN=admin \ - --env KEYCLOAK_ADMIN_PASSWORD=admin \ - --mount type=bind,source=${MOUNT_KEYCLOAK_CONF_DIR},target=/opt/keycloak/data/import/ \ - ${KEYCLOAK_DOCKER_IMAGE} start-dev --import-realm \ - --https-certificate-file=/opt/keycloak/data/import/server_localhost_certificate.pem \ - --https-certificate-key-file=/opt/keycloak/data/import/server_localhost_key.pem - - wait_for_oidc_endpoint keycloak $KEYCLOAK_URL - end "Keycloak is ready" -} wait_for_oidc_endpoint() { NAME=$1 BASE_URL=$2 - if [[ $BASE_URL == *"localhost"** ]]; then - wait_for_oidc_endpoint_local $NAME $BASE_URL + if [[ $BASE_URL == *"localhost"** || $BASE_URL == *"0.0.0.0"** ]]; then + wait_for_oidc_endpoint_local $@ else - wait_for_oidc_endpoint_docker $NAME $BASE_URL + wait_for_oidc_endpoint_docker $@ fi } wait_for_oidc_endpoint_local() { NAME=$1 BASE_URL=$2 - + CURL_ARGS="-L --fail " + DELAY_BETWEEN_ATTEMPTS=5 + if [[ $# -eq 3 ]]; then + CURL_ARGS="$CURL_ARGS --cacert $3" + DELAY_BETWEEN_ATTEMPTS=10 + fi max_retry=10 counter=0 print "Waiting for OIDC discovery endpoint $NAME ... (BASE_URL: $BASE_URL)" - until (curl -L --fail ${BASE_URL}/.well-known/openid-configuration >/dev/null 2>&1) + until (curl $CURL_ARGS ${BASE_URL}/.well-known/openid-configuration >/dev/null 2>&1) do - sleep 5 + sleep $DELAY_BETWEEN_ATTEMPTS [[ counter -eq $max_retry ]] && print "Failed!" && exit 1 print "Trying again. Try #$counter" ((counter++)) @@ -323,13 +180,20 @@ wait_for_oidc_endpoint_local() { wait_for_oidc_endpoint_docker() { NAME=$1 BASE_URL=$2 - + CURL_ARGS="-L --fail " + DOCKER_ARGS="--rm --net ${DOCKER_NETWORK} " + DELAY_BETWEEN_ATTEMPTS=5 + if [[ $# -gt 2 ]]; then + DOCKER_ARGS="$DOCKER_ARGS -v $3:/tmp/ca_certificate.pem" + CURL_ARGS="$CURL_ARGS --cacert /tmp/ca_certificate.pem" + DELAY_BETWEEN_ATTEMPTS=10 + fi max_retry=10 counter=0 print "Waiting for OIDC discovery endpoint $NAME ... (BASE_URL: $BASE_URL)" - until (docker run --net ${DOCKER_NETWORK} --rm curlimages/curl:7.85.0 -L --fail ${BASE_URL}/.well-known/openid-configuration >/dev/null 2>&1) + until (docker run $DOCKER_ARGS curlimages/curl:7.85.0 $CURL_ARGS ${BASE_URL}/.well-known/openid-configuration >/dev/null 2>&1) do - sleep 5 + sleep $DELAY_BETWEEN_ATTEMPTS [[ counter -eq $max_retry ]] && print "Failed!" && exit 1 print "Trying again. 
Try #$counter" ((counter++)) @@ -339,152 +203,6 @@ wait_for_oidc_endpoint_docker() { calculate_rabbitmq_url() { echo "${RABBITMQ_SCHEME:-http}://$1${PUBLIC_RABBITMQ_PATH:-$RABBITMQ_PATH}" } -init_fakeportal() { - FAKEPORTAL_URL=${FAKEPORTAL_URL:-http://fakeportal:3000} - FAKEPORTAL_DIR=${SCRIPT}/../fakeportal - CLIENT_ID="${CLIENT_ID:-rabbit_idp_user}" - CLIENT_SECRET="${CLIENT_SECRET:-rabbit_idp_user}" - RABBITMQ_HOST=${RABBITMQ_HOST:-proxy:9090} - RABBITMQ_HOST_FOR_FAKEPORTAL=${RABBITMQ_HOST_FOR_FAKEPORTAL:-rabbitmq:15672} - - RABBITMQ_URL=$(calculate_rabbitmq_url $RABBITMQ_HOST) - RABBITMQ_URL_FOR_FAKEPORTAL=$(calculate_rabbitmq_url $RABBITMQ_HOST_FOR_FAKEPORTAL) - - print "> FAKEPORTAL_URL: ${FAKEPORTAL_URL}" - print "> UAA_URL_FOR_FAKEPORTAL: ${UAA_URL_FOR_FAKEPORTAL}" - print "> RABBITMQ_HOST_FOR_FAKEPORTAL: ${RABBITMQ_HOST_FOR_FAKEPORTAL}" - print "> RABBITMQ_HOST: ${RABBITMQ_HOST}" - print "> CLIENT_ID: ${CLIENT_ID}" - print "> CLIENT_SECRET: ${CLIENT_SECRET}" - print "> RABBITMQ_URL: ${RABBITMQ_URL}" -} -start_fakeportal() { - begin "Starting fakeportal ..." - - init_fakeportal - kill_container_if_exist fakeportal - mocha_test_tag=($(md5sum $SELENIUM_ROOT_FOLDER/package.json)) - - docker run \ - --detach \ - --name fakeportal \ - --net ${DOCKER_NETWORK} \ - --publish 3000:3000 \ - --env PORT=3000 \ - --env RABBITMQ_URL="${RABBITMQ_URL_FOR_FAKEPORTAL}" \ - --env PROXIED_RABBITMQ_URL="${RABBITMQ_URL}" \ - --env UAA_URL="${UAA_URL_FOR_FAKEPORTAL}" \ - --env CLIENT_ID="${CLIENT_ID}" \ - --env CLIENT_SECRET="${CLIENT_SECRET}" \ - -v ${FAKEPORTAL_DIR}:/code/fakeportal \ - mocha-test:${mocha_test_tag} run fakeportal - - wait_for_url $FAKEPORTAL_URL - end "Fakeportal is ready" -} - -init_fakeproxy() { - FAKEPROXY_URL=${FAKEPROXY_URL:-http://fakeproxy:9090} - FAKEPROXY_DIR=${SCRIPT}/../fakeportal - CLIENT_ID="${CLIENT_ID:-rabbit_idp_user}" - CLIENT_SECRET="${CLIENT_SECRET:-rabbit_idp_user}" - RABBITMQ_HOST_FOR_FAKEPROXY=${RABBITMQ_HOST_FOR_FAKEPROXY:-rabbitmq:15672} - UAA_URL_FOR_FAKEPROXY=${UAA_URL_FOR_FAKEPROXY:-http://uaa:8080} - - RABBITMQ_URL_FOR_FAKEPROXY=$(calculate_rabbitmq_url $RABBITMQ_HOST_FOR_FAKEPROXY) - - print "> FAKEPROXY_URL: ${FAKEPROXY_URL}" - print "> UAA_URL: ${UAA_URL_FOR_FAKEPROXY}" - print "> RABBITMQ_HOST_FOR_FAKEPROXY: ${RABBITMQ_HOST_FOR_FAKEPROXY}" - print "> CLIENT_ID: ${CLIENT_ID}" - print "> CLIENT_SECRET: ${CLIENT_SECRET}" - print "> RABBITMQ_URL_FOR_FAKEPROXY: ${RABBITMQ_URL_FOR_FAKEPROXY}" - -} -start_fakeproxy() { - begin "Starting fakeproxy ..." - - init_fakeproxy - kill_container_if_exist fakeproxy - mocha_test_tag=($(md5sum $SELENIUM_ROOT_FOLDER/package.json)) - - docker run \ - --detach \ - --name fakeproxy \ - --net ${DOCKER_NETWORK} \ - --publish 9090:9090 \ - --env PORT=9090 \ - --env RABBITMQ_URL="${RABBITMQ_URL_FOR_FAKEPROXY}" \ - --env UAA_URL="${UAA_URL_FOR_FAKEPROXY}" \ - --env CLIENT_ID="${CLIENT_ID}" \ - --env CLIENT_SECRET="${CLIENT_SECRET}" \ - -v ${FAKEPROXY_DIR}:/code/fakeportal \ - mocha-test:${mocha_test_tag} run fakeproxy - - wait_for_url $FAKEPROXY_URL - end "fakeproxy is ready" - -} - -init_mock-auth-backend-http() { - AUTH_BACKEND_HTTP_BASEURL=${AUTH_BACKEND_HTTP_BASEURL:-http://localhost:8888} - AUTH_BACKEND_HTTP_DIR=${TEST_CASES_DIR}/mock-auth-backend-http - - print "> AUTH_BACKEND_HTTP_BASEURL: ${AUTH_BACKEND_HTTP_BASEURL}" - print "> AUTH_BACKEND_HTTP_DIR: ${AUTH_BACKEND_HTTP_DIR}" - -} -start_mock-auth-backend-http() { - begin "Starting mock-auth-backend-http ..." 
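wait_for_oidc_endpoint now accepts an optional third argument carrying a CA certificate, which both the local and the docker variants pass to curl via --cacert (and which also lengthens the delay between attempts). With a TLS-enabled Keycloak this is the call used by the prodkeycloak component, which for a docker-resolved URL boils down to the curl shown below:

    wait_for_oidc_endpoint prodkeycloak "$PRODKEYCLOAK_URL" \
        "$MOUNT_PRODKEYCLOAK_CONF_DIR/ca_certificate.pem"
    # ... which polls the discovery endpoint roughly like this:
    docker run --rm --net "$DOCKER_NETWORK" \
        -v "$MOUNT_PRODKEYCLOAK_CONF_DIR/ca_certificate.pem:/tmp/ca_certificate.pem" \
        curlimages/curl:7.85.0 -L --fail --cacert /tmp/ca_certificate.pem \
        "$PRODKEYCLOAK_URL/.well-known/openid-configuration"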
- - init_mock-auth-backend-http - kill_container_if_exist mock-auth-backend-http - - docker run \ - --detach \ - --name mock-auth-backend-http \ - --net ${DOCKER_NETWORK} \ - --publish 8888:1080 \ - --env MOCKSERVER_INITIALIZATION_JSON_PATH="/config/expectationInitialiser.json" \ - -v ${AUTH_BACKEND_HTTP_DIR}:/config \ - mockserver/mockserver - - wait_for_url $AUTH_BACKEND_HTTP_BASEURL/ready - end "mock-auth-backend-http is ready" -} - -init_mock-auth-backend-ldap() { - AUTH_BACKEND_LDAP_DIR=${TEST_CONFIG_DIR}/mock-auth-backend-ldap - - print "> AUTH_BACKEND_LDAP_DIR: ${AUTH_BACKEND_LDAP_DIR}" -} -start_mock-auth-backend-ldap() { - begin "Starting mock-auth-backend-ldap ..." - - init_mock-auth-backend-ldap - kill_container_if_exist mock-auth-backend-ldap - - docker run \ - --detach \ - --name mock-auth-backend-ldap \ - --net ${DOCKER_NETWORK} \ - --env LDAP_ORGANISATION="Authentication and Tags" \ - --env LDAP_DOMAIN="example.com" \ - --env LDAP_ADMIN_PASSWORD="admin" \ - --publish 389:389 \ - --publish 636:636 \ - -v ${AUTH_BACKEND_LDAP_DIR}:/config \ - osixia/openldap:1.2.1 - - wait_for_message mock-auth-backend-ldap "starting" - docker exec mock-auth-backend-ldap ldapadd \ - -x -w "admin" \ - -H ldap:// \ - -D "cn=admin,dc=example,dc=com" \ - -f /config/import.ldif - - end "mock-auth-backend-ldap is ready" -} wait_for_url() { BASE_URL=$1 @@ -500,7 +218,7 @@ wait_for_url_local() { counter=0 until (curl -L -f -v $url >/dev/null 2>&1) do - print "Waiting for $url to start" + print "Waiting for $url to start (local)" sleep 5 [[ counter -eq $max_retry ]] && print "Failed!" && exit 1 print "Trying again. Try #$counter" @@ -513,7 +231,7 @@ wait_for_url_docker() { counter=0 until (docker run --net ${DOCKER_NETWORK} --rm curlimages/curl:7.85.0 -L -f -v $url >/dev/null 2>&1) do - print "Waiting for $url to start" + print "Waiting for $url to start (docker)" sleep 5 [[ counter -eq $max_retry ]] && print "Failed!" && exit 1 print "Trying again. Try #$counter" @@ -521,41 +239,6 @@ wait_for_url_docker() { done } -init_proxy() { - HTTPD_CONFIG_DIR=${TEST_CONFIG_DIR}/httpd-proxy - PUBLIC_RABBITMQ_HOST=${PUBLIC_RABBITMQ_HOST:-proxy:9090} - PROXIED_RABBITMQ_URL=$(calculate_rabbitmq_url $PUBLIC_RABBITMQ_HOST) - - print "> HTTPD_CONFIG: ${HTTPD_CONFIG_DIR}" - print "> PUBLIC_RABBITMQ_HOST: ${PUBLIC_RABBITMQ_HOST}" - print "> PROXIED_RABBITMQ_URL: ${PROXIED_RABBITMQ_URL}" - print "> RABBITMQ_HOST_FOR_PROXY: ${RABBITMQ_HOST_FOR_PROXY}" - print "> HTTPD_DOCKER_IMAGE: ${HTTPD_DOCKER_IMAGE}" -} -start_proxy() { - begin "Starting proxy ..." 
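The mock HTTP auth backend (mockserver) advertises readiness through its /ready expectation, which in this patch moves from the removed expectationInitialiser.json to defaultExpectations.json further down. Assuming the same 8888:1080 port mapping as the starter shown here, the readiness probe amounts to:

    curl -L -f http://localhost:8888/ready     # answers "ok" once mockserver is up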
- - init_proxy - kill_container_if_exist proxy - - MOUNT_HTTPD_CONFIG_DIR=$CONF_DIR/httpd - - mkdir -p $MOUNT_HTTPD_CONFIG_DIR - ${BIN_DIR}/gen-httpd-conf ${HTTPD_CONFIG_DIR} $ENV_FILE $MOUNT_HTTPD_CONFIG_DIR/httpd.conf - print "> EFFECTIVE HTTPD_CONFIG_FILE: $MOUNT_HTTPD_CONFIG_DIR/httpd.conf" - - docker run \ - --detach \ - --name proxy \ - --net ${DOCKER_NETWORK} \ - --publish 9090:9090 \ - --mount "type=bind,source=${MOUNT_HTTPD_CONFIG_DIR},target=/usr/local/apache2/conf" \ - ${HTTPD_DOCKER_IMAGE} - - wait_for_url $PROXIED_RABBITMQ_URL - end "Proxy is ready" -} - test() { kill_container_if_exist mocha @@ -565,7 +248,11 @@ test() { PUBLIC_RABBITMQ_HOST=${PUBLIC_RABBITMQ_HOST:-$RABBITMQ_HOST} RABBITMQ_URL=$(calculate_rabbitmq_url $PUBLIC_RABBITMQ_HOST) RABBITMQ_HOSTNAME=${RABBITMQ_HOSTNAME:-rabbitmq} + SELENIUM_TIMEOUT=${SELENIUM_TIMEOUT:-20000} + SELENIUM_POLLING=${SELENIUM_POLLING:-500} + print "> SELENIUM_TIMEOUT: ${SELENIUM_TIMEOUT}" + print "> SELENIUM_POLLING: ${SELENIUM_POLLING}" print "> RABBITMQ_HOST: ${RABBITMQ_HOST}" print "> RABBITMQ_HOSTNAME: ${RABBITMQ_HOSTNAME}" print "> PUBLIC_RABBITMQ_HOST: ${PUBLIC_RABBITMQ_HOST}" @@ -575,6 +262,10 @@ test() { print "> FAKEPORTAL_URL: ${FAKEPORTAL_URL}" mocha_test_tag=($(md5sum $SELENIUM_ROOT_FOLDER/package.json)) + print "> OAUTH_NODE_EXTRA_CA_CERTS: ${OAUTH_NODE_EXTRA_CA_CERTS}" + MOUNT_NODE_EXTRA_CA_CERTS=${TEST_DIR}/${OAUTH_NODE_EXTRA_CA_CERTS} + print "> MOUNT_NODE_EXTRA_CA_CERTS: ${MOUNT_NODE_EXTRA_CA_CERTS}" + docker run \ --rm \ --name mocha \ @@ -584,8 +275,12 @@ test() { --env UAA_URL=${UAA_URL} \ --env FAKE_PORTAL_URL=${FAKEPORTAL_URL} \ --env RUN_LOCAL=false \ + --env SELENIUM_TIMEOUT=${SELENIUM_TIMEOUT} \ + --env SELENIUM_POLLING=${SELENIUM_POLLING} \ --env PROFILES="${PROFILES}" \ --env ENV_FILE="/code/.env" \ + --env NODE_EXTRA_CA_CERTS=/nodejs/ca.pem \ + -v ${MOUNT_NODE_EXTRA_CA_CERTS}:/nodejs/ca.pem \ -v ${TEST_DIR}:/code/test \ -v ${SCREENS}:/screens \ -v ${ENV_FILE}:/code/.env \ @@ -601,9 +296,17 @@ save_logs() { save_container_logs selenium } save_container_logs() { - docker container ls | grep $1 >/dev/null 2>&1 && docker logs $1 &> $LOGS/$1.log || echo "$1 not running" + echo "Saving logs for $1" + if docker container ls | grep $1 >/dev/null 2>&1; then + docker logs $1 &> $LOGS/$1.log + else + echo "$1 not running" + fi +} +save_container_log() { + echo "Saving container $1 logs to $LOGS/$1.log ..." + docker logs $1 &> $LOGS/$1.log } - profiles_with_local_or_docker() { if [[ "$PROFILES" != *"local"* && "$PROFILES" != *"docker"* ]]; then echo "$PROFILES docker" @@ -612,9 +315,11 @@ profiles_with_local_or_docker() { fi } generate_env_file() { + begin "Generating env file ..." mkdir -p $CONF_DIR ${BIN_DIR}/gen-env-file $TEST_CONFIG_DIR $ENV_FILE source $ENV_FILE + end "Finished generating env file." 
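Both test() and test_local() now honour SELENIUM_TIMEOUT and SELENIUM_POLLING (defaulting to 20000 ms and 500 ms), so slower environments can be accommodated without editing individual suites. For example, run from the selenium directory:

    # Give a slow CI runner more headroom before launching a suite
    SELENIUM_TIMEOUT=60000 SELENIUM_POLLING=1000 \
      ./suites/authnz-mgt/oauth-with-keycloak.sh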
} run() { runWith rabbitmq @@ -636,12 +341,16 @@ run_local_with() { generate_env_file build_mocha_image + if [[ "$COMMAND" == "start-rabbitmq" ]] then start_local_rabbitmq elif [[ "$COMMAND" == "start-others" ]] then start_local_others + elif [[ "$COMMAND" == "ensure-others" ]] + then + ensure_local_others elif [[ "$COMMAND" == "stop-others" ]] then teardown_local_others @@ -695,6 +404,13 @@ start_local_others() { start_components fi } +ensure_local_others() { + if [[ $REQUIRED_COMPONENTS == "" ]]; then + print "There are no other components" + else + ensure_components + fi +} teardown_local_others() { if [[ $REQUIRED_COMPONENTS == "" ]]; then print "There are no other components" @@ -703,7 +419,7 @@ teardown_local_others() { fi } test_local() { - begin "Running local test $1" + begin "Running local test ${1:-}" RABBITMQ_HOST=${RABBITMQ_HOST:-rabbitmq:15672} PUBLIC_RABBITMQ_HOST=${PUBLIC_RABBITMQ_HOST:-$RABBITMQ_HOST} @@ -711,7 +427,11 @@ test_local() { export RABBITMQ_HOSTNAME=${RABBITMQ_HOSTNAME:-rabbitmq} export RABBITMQ_AMQP_USERNAME=${RABBITMQ_AMQP_USERNAME} export RABBITMQ_AMQP_PASSWORD=${RABBITMQ_AMQP_PASSWORD} + export SELENIUM_TIMEOUT=${SELENIUM_TIMEOUT:-20000} + export SELENIUM_POLLING=${SELENIUM_POLLING:-500} + print "> SELENIUM_TIMEOUT: ${SELENIUM_TIMEOUT}" + print "> SELENIUM_POLLING: ${SELENIUM_POLLING}" print "> RABBITMQ_HOST: ${RABBITMQ_HOST}" print "> RABBITMQ_HOSTNAME: ${RABBITMQ_HOSTNAME}" print "> PUBLIC_RABBITMQ_HOST: ${PUBLIC_RABBITMQ_HOST}" @@ -719,15 +439,26 @@ test_local() { print "> RABBITMQ_URL: ${RABBITMQ_URL}" print "> UAA_URL: ${UAA_URL}" print "> FAKE_PORTAL_URL: ${FAKE_PORTAL_URL}" + print "> OAUTH_NODE_EXTRA_CA_CERTS: ${OAUTH_NODE_EXTRA_CA_CERTS}" + MOUNT_NODE_EXTRA_CA_CERTS=${TEST_DIR}/${OAUTH_NODE_EXTRA_CA_CERTS} + print "> MOUNT_NODE_EXTRA_CA_CERTS: ${MOUNT_NODE_EXTRA_CA_CERTS}" export RUN_LOCAL=true export SCREENSHOTS_DIR=${SCREENS} + export PROFILES export ENV_FILE + export NODE_EXTRA_CA_CERTS=$MOUNT_NODE_EXTRA_CA_CERTS npm test $TEST_CASES_DIR/$1 } - +ensure_components() { + for i in "${REQUIRED_COMPONENTS[@]}" + do + start="ensure_$i" + $start + done +} start_components() { for i in "${REQUIRED_COMPONENTS[@]}" do @@ -739,17 +470,19 @@ teardown_components() { begin "Tear down ..." for i in "${REQUIRED_COMPONENTS[@]}" do - print "Tear down $i" - $(kill_container_if_exist $i) + local component="$i" + print "Tear down $component" + kill_container_if_exist "$component" done end "Finished teardown" } save_components_logs() { - begin "Saving Logs to $LOGS ..." + begin "Saving Logs to $LOGS for ${REQUIRED_COMPONENTS[@]} ..." 
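ensure_components, start_components and teardown_components all dispatch purely by name: every entry passed to runWith needs matching ensure_<name> and start_<name> functions sourced from bin/components, and teardown simply stops the container of the same name. Roughly, for a suite declaring runWith devkeycloak prodkeycloak:

    REQUIRED_COMPONENTS=(devkeycloak prodkeycloak)
    ensure_components     # runs ensure_devkeycloak, then ensure_prodkeycloak
    teardown_components   # kill_container_if_exist devkeycloak, then prodkeycloak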
for i in "${REQUIRED_COMPONENTS[@]}" do - print "Saving logs for $i" - $(save_container_logs $i) + local component="$i" + print "Saving logs for component $component" + save_container_logs "$component" done end "Finished saving logs" } diff --git a/deps/rabbitmq_management/selenium/full-suite-authnz b/deps/rabbitmq_management/selenium/full-suite-authnz deleted file mode 100644 index d8c7e1dce3e2..000000000000 --- a/deps/rabbitmq_management/selenium/full-suite-authnz +++ /dev/null @@ -1,8 +0,0 @@ -authnz/auth-cache-http-backends.sh -authnz/auth-cache-ldap-backends.sh -authnz/auth-http-backend.sh -authnz/auth-http-internal-backends-with-internal.sh -authnz/auth-http-internal-backends.sh -authnz/auth-internal-backend.sh -authnz/auth-internal-http-backends.sh -authnz/auth-ldap-backend.sh diff --git a/deps/rabbitmq_management/selenium/full-suite-authnz-messaging b/deps/rabbitmq_management/selenium/full-suite-authnz-messaging new file mode 100644 index 000000000000..5eec8081fa62 --- /dev/null +++ b/deps/rabbitmq_management/selenium/full-suite-authnz-messaging @@ -0,0 +1,9 @@ +authnz-messaging/auth-cache-http-backends.sh +authnz-messaging/auth-cache-ldap-backends.sh +authnz-messaging/auth-http-backend.sh +authnz-messaging/auth-http-internal-backends-with-internal.sh +authnz-messaging/auth-http-internal-backends.sh +authnz-messaging/auth-internal-backend.sh +authnz-messaging/auth-internal-http-backends.sh +authnz-messaging/auth-ldap-backend.sh +authnz-messaging/auth-http-backend.sh diff --git a/deps/rabbitmq_management/selenium/full-suite-management-ui b/deps/rabbitmq_management/selenium/full-suite-management-ui index 9b4a2609aa97..16ae3233eb31 100644 --- a/deps/rabbitmq_management/selenium/full-suite-management-ui +++ b/deps/rabbitmq_management/selenium/full-suite-management-ui @@ -1,13 +1,19 @@ authnz-mgt/basic-auth-behind-proxy.sh authnz-mgt/basic-auth.sh +authnz-mgt/basic-auth-with-mgt-prefix.sh +authnz-mgt/multi-oauth-with-basic-auth-when-idps-down.sh +authnz-mgt/multi-oauth-with-basic-auth.sh +authnz-mgt/multi-oauth-without-basic-auth-and-resource-label-and-scopes.sh +authnz-mgt/multi-oauth-without-basic-auth.sh +authnz-mgt/oauth-and-basic-auth.sh authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix-via-proxy.sh authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix.sh authnz-mgt/oauth-idp-initiated-with-uaa-via-proxy.sh authnz-mgt/oauth-idp-initiated-with-uaa.sh authnz-mgt/oauth-with-keycloak.sh -authnz-mgt/oauth-with-uaa-and-mgt-prefix.sh +authnz-mgt/oauth-with-keycloak-with-verify-none.sh +authnz-mgt/oauth-with-uaa-down-but-with-basic-auth.sh authnz-mgt/oauth-with-uaa-down.sh -authnz-mgt/oauth-with-uaa.sh mgt/vhosts.sh mgt/definitions.sh mgt/exchanges.sh diff --git a/deps/rabbitmq_management/selenium/package.json b/deps/rabbitmq_management/selenium/package.json index dcc9321460a0..465febe009f7 100644 --- a/deps/rabbitmq_management/selenium/package.json +++ b/deps/rabbitmq_management/selenium/package.json @@ -4,28 +4,29 @@ "description": "", "main": "index.js", "scripts": { - "test": "mocha --recursive --trace-warnings --timeout 40000", "fakeportal": "node fakeportal/app.js", "fakeproxy": "node fakeportal/proxy.js", - "amqp10_roundtriptest" : "export $(cat $ENV_FILE | xargs)&& ./run-amqp10-roundtriptest" + "amqp10_roundtriptest": "eval $(cat $ENV_FILE ) &&./run-amqp10-roundtriptest", + "test": " eval $(cat $ENV_FILE ) && mocha --recursive --trace-warnings --timeout 40000" }, "keywords": [], "author": "", "license": "ISC", "dependencies": { - "chromedriver": "^115.0.0", + "chromedriver": 
"^125.0.0", "ejs": "^3.1.8", - "express": "^4.18.2", + "express": "^4.18.2", "geckodriver": "^3.0.2", "http-proxy": "^1.18.1", + "mqtt": "^5.3.3", "path": "^0.12.7", "proxy": "^1.0.2", - "selenium-webdriver": "^4.4.0", + "selenium-webdriver": "^4.19.0", "xmlhttprequest": "^1.8.0" }, "devDependencies": { "chai": "^4.3.6", - "mocha": "^10.0.0", + "mocha": "^10.4.0", "request": "^2.88.2", "standard": "^17.0.0" } diff --git a/deps/rabbitmq_management/selenium/run-amqp10-roundtriptest b/deps/rabbitmq_management/selenium/run-amqp10-roundtriptest index 96a49b0c669c..4f76fbf41603 100755 --- a/deps/rabbitmq_management/selenium/run-amqp10-roundtriptest +++ b/deps/rabbitmq_management/selenium/run-amqp10-roundtriptest @@ -5,12 +5,12 @@ env | grep RABBITMQ if [[ -f "/code/amqp10-roundtriptest" ]]; then echo "Running amqp10-roundtriptest inside mocha-test docker image ..." - java -jar /code/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar + java -jar /code/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar $@ else if [[ ! -f "amqp10-roundtriptest/target/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar" ]]; then echo "Building amqp10-roundtriptest jar ..." - mvn -f amqp10-roundtriptest package + mvn -f amqp10-roundtriptest package $@ fi echo "Running amqp10-roundtriptest jar ..." - java -jar amqp10-roundtriptest/target/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar + java -jar amqp10-roundtriptest/target/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar $@ fi diff --git a/deps/rabbitmq_management/selenium/run-suites.sh b/deps/rabbitmq_management/selenium/run-suites.sh index abc7f7b67425..b1d16a519578 100755 --- a/deps/rabbitmq_management/selenium/run-suites.sh +++ b/deps/rabbitmq_management/selenium/run-suites.sh @@ -11,9 +11,11 @@ NC='\033[0m' SUCCESSFUL_SUITES=() FAILED_SUITES=() -cat $SCRIPT/$SUITE_FILE | sort | while read SUITE +TOTAL_SUITES=$(wc -l $SCRIPT/$SUITE_FILE | awk '{print $1}') + +while read SUITE do - echo "=== Running suite $SUITE ============================================" + echo -e "=== Running suite (${TOTAL_SUITES}/${GREEN}${#SUCCESSFUL_SUITES[@]}/${RED}${#FAILED_SUITES[@]}${NC}) $SUITE ============================================" echo " " ENV_MODES="docker" $SCRIPT/suites/$SUITE TEST_RESULT="$?" 
@@ -28,9 +30,9 @@ do fi echo -e "=== $TEST_STATUS $SUITE ===========================================" echo " " -done +done <<< "$(cat $SCRIPT/$SUITE_FILE | sort)" -echo "=== Summary ============================================" +echo -e "=== Summary (${TOTAL_SUITES}/${GREEN}${#SUCCESSFUL_SUITES[@]}/${RED}${#FAILED_SUITES[@]}${NC}) ============================================" if [ ${#SUCCESSFUL_SUITES[@]} -gt 0 ]; then echo -e " > ${GREEN}Successful suites ${NC}"; fi for f in ${SUCCESSFUL_SUITES[@]} do @@ -43,4 +45,5 @@ do echo " - $f" done +echo "Terminating with $OVERALL_TEST_RESULT" exit $OVERALL_TEST_RESULT diff --git a/deps/rabbitmq_management/selenium/short-suite-management-ui b/deps/rabbitmq_management/selenium/short-suite-management-ui index f80b22d15b9c..dd0c79f0f889 100644 --- a/deps/rabbitmq_management/selenium/short-suite-management-ui +++ b/deps/rabbitmq_management/selenium/short-suite-management-ui @@ -1,5 +1,5 @@ authnz-mgt/basic-auth.sh -authnz-mgt/oauth-with-uaa.sh +authnz-mgt/oauth-with-keycloak.sh mgt/vhosts.sh mgt/exchanges.sh mgt/limits.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz/auth-cache-http-backends.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-cache-http-backends.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz/auth-cache-http-backends.sh rename to deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-cache-http-backends.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz/auth-cache-ldap-backends.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-cache-ldap-backends.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz/auth-cache-ldap-backends.sh rename to deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-cache-ldap-backends.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz/auth-http-backend.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-backend.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz/auth-http-backend.sh rename to deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-backend.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz/auth-http-internal-backends-with-internal.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-internal-backends-with-internal.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz/auth-http-internal-backends-with-internal.sh rename to deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-internal-backends-with-internal.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz/auth-http-internal-backends.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-internal-backends.sh similarity index 76% rename from deps/rabbitmq_management/selenium/suites/authnz/auth-http-internal-backends.sh rename to deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-internal-backends.sh index 9f9df51b7aff..105926e117dc 100755 --- a/deps/rabbitmq_management/selenium/suites/authnz/auth-http-internal-backends.sh +++ b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-internal-backends.sh @@ -3,7 +3,7 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TEST_CASES_PATH=/authnz-msg-protocols -PROFILES="http-user auth-http auth_backends-internal-http " +PROFILES="http-user auth-http auth_backends-http-internal " source $SCRIPT/../../bin/suite_template runWith 
mock-auth-backend-http diff --git a/deps/rabbitmq_management/selenium/suites/authnz/auth-internal-backend.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-internal-backend.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz/auth-internal-backend.sh rename to deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-internal-backend.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz/auth-internal-http-backends.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-internal-http-backends.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz/auth-internal-http-backends.sh rename to deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-internal-http-backends.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz/auth-ldap-backend.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-ldap-backend.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz/auth-ldap-backend.sh rename to deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-ldap-backend.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-devproducer.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-devproducer.sh new file mode 100755 index 000000000000..3a29fb752f1b --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-devproducer.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +TEST_CASES_PATH=/authnz-msg-protocols +PROFILES="devkeycloak prodkeycloak oauth-devproducer auth-oauth-dev auth_backends-oauth" + +source $SCRIPT/../../bin/suite_template +runWith devkeycloak diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-prodproducer.sh b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-prodproducer.sh new file mode 100755 index 000000000000..01a67e88ba50 --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-prodproducer.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +TEST_CASES_PATH=/authnz-msg-protocols +PROFILES="devkeycloak prodkeycloak oauth-prodproducer auth-oauth-prod auth_backends-oauth" + +source $SCRIPT/../../bin/suite_template +runWith prodkeycloak diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/basic-auth-with-mgt-prefix.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/basic-auth-with-mgt-prefix.sh new file mode 100755 index 000000000000..04a1dd36f35e --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/basic-auth-with-mgt-prefix.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +TEST_CASES_PATH=/basic-auth +PROFILES="mgt-prefix" + +source $SCRIPT/../../bin/suite_template $@ +run diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth-when-idps-down.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth-when-idps-down.sh new file mode 100755 index 000000000000..1bea7e906036 --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth-when-idps-down.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + 
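Each of these suite scripts is a thin wrapper: TEST_CASES_PATH picks the mocha spec directory, TEST_CONFIG_PATH the config templates, PROFILES the env and config fragments to merge (the tokens appear to match the env.<profile> files later in this patch), and run/runWith names the components to start. A hypothetical suite exercising both realms would differ from the ones above only in its PROFILES and runWith line:

    TEST_CASES_PATH=/authnz-msg-protocols
    PROFILES="devkeycloak prodkeycloak oauth-devproducer auth-oauth-dev auth_backends-oauth"
    source $SCRIPT/../../bin/suite_template
    runWith devkeycloak prodkeycloak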
+TEST_CASES_PATH=/multi-oauth/with-basic-auth-idps-down +TEST_CONFIG_PATH=/multi-oauth +PROFILES="devkeycloak prodkeycloak enable-basic-auth with-resource-label with-resource-scopes tls" + +source $SCRIPT/../../bin/suite_template $@ +run diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth.sh new file mode 100755 index 000000000000..9ccf75203bba --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +TEST_CASES_PATH=/multi-oauth/with-basic-auth +TEST_CONFIG_PATH=/multi-oauth +PROFILES="devkeycloak prodkeycloak enable-basic-auth with-resource-label with-resource-scopes tls" + +source $SCRIPT/../../bin/suite_template $@ +runWith devkeycloak prodkeycloak diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth-and-resource-label-and-scopes.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth-and-resource-label-and-scopes.sh new file mode 100755 index 000000000000..36b64a5da259 --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth-and-resource-label-and-scopes.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +TEST_CASES_PATH=/multi-oauth/without-basic-auth +TEST_CONFIG_PATH=/multi-oauth +PROFILES="devkeycloak prodkeycloak tls" + +source $SCRIPT/../../bin/suite_template $@ +runWith devkeycloak prodkeycloak diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth.sh new file mode 100755 index 000000000000..a9ce62b53550 --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +TEST_CASES_PATH=/multi-oauth/without-basic-auth +TEST_CONFIG_PATH=/multi-oauth +PROFILES="devkeycloak prodkeycloak with-resource-label with-resource-scopes tls" + +source $SCRIPT/../../bin/suite_template $@ +runWith devkeycloak prodkeycloak diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-and-basic-auth.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-and-basic-auth.sh new file mode 100755 index 000000000000..07aa42fb0443 --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-and-basic-auth.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +TEST_CASES_PATH=/oauth/with-basic-auth +TEST_CONFIG_PATH=/oauth +PROFILES="keycloak jwks keycloak-oauth-provider enable-basic-auth tls" + +source $SCRIPT/../../bin/suite_template $@ +runWith keycloak diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix-via-proxy.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix-via-proxy.sh index 3547f4653236..efbc223badc1 100755 --- a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix-via-proxy.sh +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix-via-proxy.sh @@ -4,7 +4,7 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 
TEST_CASES_PATH=/oauth/with-idp-initiated-via-proxy TEST_CONFIG_PATH=/oauth -PROFILES="uaa fakeportal fakeproxy fakeportal-oauth-provider idp-initiated mgt-prefix" +PROFILES="uaa fakeportal fakeproxy fakeportal-mgt-oauth-provider idp-initiated mgt-prefix uaa-oauth-provider" source $SCRIPT/../../bin/suite_template $@ runWith uaa fakeportal fakeproxy diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix.sh index ead7254df620..0b3e9b8685c5 100755 --- a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix.sh +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix.sh @@ -4,7 +4,7 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TEST_CASES_PATH=/oauth/with-idp-initiated TEST_CONFIG_PATH=/oauth -PROFILES="uaa fakeportal-oauth-provider idp-initiated mgt-prefix" +PROFILES="uaa fakeportal-mgt-oauth-provider idp-initiated mgt-prefix uaa-oauth-provider" source $SCRIPT/../../bin/suite_template $@ runWith uaa fakeportal diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-via-proxy.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-via-proxy.sh index cda013b5063c..1de40086af1d 100755 --- a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-via-proxy.sh +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-via-proxy.sh @@ -4,7 +4,7 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TEST_CASES_PATH=/oauth/with-idp-initiated-via-proxy TEST_CONFIG_PATH=/oauth -PROFILES="uaa fakeportal fakeproxy fakeportal-oauth-provider idp-initiated" +PROFILES="uaa fakeportal fakeproxy fakeportal-mgt-oauth-provider idp-initiated uaa-oauth-provider" source $SCRIPT/../../bin/suite_template $@ runWith uaa fakeportal fakeproxy diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa.sh index efd1f5dcfe75..21dfa922ca0f 100755 --- a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa.sh +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa.sh @@ -4,7 +4,7 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TEST_CASES_PATH=/oauth/with-idp-initiated TEST_CONFIG_PATH=/oauth -PROFILES="uaa fakeportal-oauth-provider idp-initiated" +PROFILES="uaa idp-initiated uaa-oauth-provider fakeportal-mgt-oauth-provider" source $SCRIPT/../../bin/suite_template $@ runWith uaa fakeportal diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak-with-verify-none.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak-with-verify-none.sh new file mode 100755 index 000000000000..65f662fcfe61 --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak-with-verify-none.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +TEST_CASES_PATH=/oauth/with-sp-initiated +TEST_CONFIG_PATH=/oauth +PROFILES="keycloak keycloak-verify-none-oauth-provider keycloak-mgt-oauth-provider tls" + +source $SCRIPT/../../bin/suite_template $@ +runWith keycloak diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak.sh 
b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak.sh index f66f8412c07a..d650cc79a836 100755 --- a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak.sh +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak.sh @@ -4,7 +4,7 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TEST_CASES_PATH=/oauth/with-sp-initiated TEST_CONFIG_PATH=/oauth -PROFILES="keycloak jwks keycloak-oauth-provider" +PROFILES="keycloak keycloak-oauth-provider keycloak-mgt-oauth-provider tls" source $SCRIPT/../../bin/suite_template $@ runWith keycloak diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-and-mgt-prefix.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-and-mgt-prefix.sh index 06832455cf70..8ad99b9c3b76 100755 --- a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-and-mgt-prefix.sh +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-and-mgt-prefix.sh @@ -4,7 +4,7 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TEST_CASES_PATH=/oauth/with-sp-initiated TEST_CONFIG_PATH=/oauth -PROFILES="uaa uaa-oauth-provider mgt-prefix" +PROFILES="uaa uaa-oauth-provider mgt-prefix uaa-mgt-oauth-provider" source $SCRIPT/../../bin/suite_template $@ runWith uaa diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down-but-with-basic-auth.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down-but-with-basic-auth.sh new file mode 100755 index 000000000000..e3b8b010ad67 --- /dev/null +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down-but-with-basic-auth.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +TEST_CASES_PATH=/oauth/with-basic-auth-idp-down +TEST_CONFIG_PATH=/oauth +PROFILES="uaa uaa-oauth-provider uaa-mgt-oauth-provider enable-basic-auth" + +source $SCRIPT/../../bin/suite_template $@ +run diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down.sh index 1eaf57a30f7f..12b4695eb09c 100755 --- a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down.sh +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down.sh @@ -4,7 +4,7 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TEST_CASES_PATH=/oauth/with-idp-down TEST_CONFIG_PATH=/oauth -PROFILES="uaa uaa-oauth-provider" +PROFILES="uaa uaa-oauth-provider uaa-mgt-oauth-provider" source $SCRIPT/../../bin/suite_template $@ run diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa.sh b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa.sh index 3743b2322f6e..2e382ab2c5f2 100755 --- a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa.sh +++ b/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa.sh @@ -4,7 +4,7 @@ SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TEST_CASES_PATH=/oauth/with-sp-initiated TEST_CONFIG_PATH=/oauth -PROFILES="uaa uaa-oauth-provider" +PROFILES="uaa uaa-oauth-provider uaa-mgt-oauth-provider" source $SCRIPT/../../bin/suite_template $@ runWith uaa diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.internal-user b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.internal-user deleted file mode 100644 index 296907622ea7..000000000000 --- 
a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.internal-user +++ /dev/null @@ -1,2 +0,0 @@ -export RABBITMQ_AMQP_USERNAME=management -export RABBITMQ_AMQP_PASSWORD=management diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/amqp10.js b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/amqp10.js index 11aac6dbf3b4..3a679bb21587 100644 --- a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/amqp10.js +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/amqp10.js @@ -1,5 +1,6 @@ const assert = require('assert') -const { getURLForProtocol } = require('../utils') +const { getURLForProtocol, tokenFor, openIdConfiguration } = require('../utils') +const { reset, expectUser, expectVhost, expectResource, allow, verifyAll } = require('../mock_http_backend') const {execSync} = require('child_process') const profiles = process.env.PROFILES || "" @@ -10,17 +11,39 @@ for (const element of profiles.split(" ")) { } } -describe('Having the following auth_backends enabled: ' + backends, function () { +describe('Having AMQP 1.0 protocol enabled and the following auth_backends: ' + backends, function () { + let expectations = [] + let username = process.env.RABBITMQ_AMQP_USERNAME + let password = process.env.RABBITMQ_AMQP_PASSWORD before(function () { - + if (backends.includes("http") && username.includes("http")) { + reset() + expectations.push(expectUser({ "username": username, "password": password}, "allow")) + expectations.push(expectVhost({ "username": username, "vhost": "/"}, "allow")) + expectations.push(expectResource({ "username": username, "vhost": "/", "resource": "queue", "name": "my-queue", "permission":"configure", "tags":""}, "allow")) + expectations.push(expectResource({ "username": username, "vhost": "/", "resource": "queue", "name": "my-queue", "permission":"read", "tags":""}, "allow")) + expectations.push(expectResource({ "username": username, "vhost": "/", "resource": "exchange", "name": "amq.default", "permission":"write", "tags":""}, "allow")) + }else if (backends.includes("oauth") && username.includes("oauth")) { + let oauthProviderUrl = process.env.OAUTH_PROVIDER_URL + let oauthClientId = process.env.OAUTH_CLIENT_ID + let oauthClientSecret = process.env.OAUTH_CLIENT_SECRET + console.log("oauthProviderUrl : " + oauthProviderUrl) + let openIdConfig = openIdConfiguration(oauthProviderUrl) + console.log("Obtained token_endpoint : " + openIdConfig.token_endpoint) + password = tokenFor(oauthClientId, oauthClientSecret, openIdConfig.token_endpoint) + console.log("Obtained access token : " + password) + } }) it('can open an AMQP 1.0 connection', function () { - execSync("npm run amqp10_roundtriptest") + execSync("npm run amqp10_roundtriptest -- " + username + " " + password) + }) after(function () { - + if ( backends.includes("http") ) { + verifyAll(expectations) + } }) }) diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/enabled_plugins b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/enabled_plugins index feaf7912d783..59b57cb3828f 100644 --- a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/enabled_plugins +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/enabled_plugins @@ -1,5 +1,5 @@ [accept,amqp10_client,amqp_client,base64url,cowboy,cowlib,eetcd,gun,jose, - prometheus,rabbitmq_amqp1_0,rabbitmq_auth_backend_cache, + oauth2_client,prometheus,rabbitmq_auth_backend_cache, rabbitmq_auth_backend_http,rabbitmq_auth_backend_ldap, 
rabbitmq_auth_backend_oauth2,rabbitmq_auth_mechanism_ssl,rabbitmq_aws, rabbitmq_consistent_hash_exchange,rabbitmq_event_exchange, diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.auth-http.docker b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-http.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.auth-http.docker rename to deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-http.docker diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.auth-http.local b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-http.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.auth-http.local rename to deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-http.local diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.auth-ldap.docker b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-ldap.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.auth-ldap.docker rename to deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-ldap.docker diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.auth-ldap.local b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-ldap.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.auth-ldap.local rename to deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-ldap.local diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.docker b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.docker new file mode 100644 index 000000000000..b1e4f12b6b53 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.docker @@ -0,0 +1,2 @@ +export OAUTH_PROVIDER_URL=https://devkeycloak:8442/realms/dev +export OAUTH_NODE_EXTRA_CA_CERTS=multi-oauth/devkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.local b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.local new file mode 100644 index 000000000000..b1e4f12b6b53 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.local @@ -0,0 +1,2 @@ +export OAUTH_PROVIDER_URL=https://devkeycloak:8442/realms/dev +export OAUTH_NODE_EXTRA_CA_CERTS=multi-oauth/devkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.docker b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.docker new file mode 100644 index 000000000000..840561447fd2 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.docker @@ -0,0 +1,2 @@ +export OAUTH_PROVIDER_URL=https://prodkeycloak:8442/realms/prod +export OAUTH_NODE_EXTRA_CA_CERTS=multi-oauth/prodkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.local b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.local new file mode 100644 index 000000000000..840561447fd2 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.local @@ -0,0 +1,2 @@ +export 
OAUTH_PROVIDER_URL=https://prodkeycloak:8442/realms/prod +export OAUTH_NODE_EXTRA_CA_CERTS=multi-oauth/prodkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.devkeycloak b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.devkeycloak new file mode 100644 index 000000000000..2b359b653a94 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.devkeycloak @@ -0,0 +1,2 @@ +export DEVKEYCLOAK_URL=https://devkeycloak:8442/realms/dev +export DEVKEYCLOAK_CA_CERT=/config/oauth/keycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.prodkeycloak b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.prodkeycloak new file mode 100644 index 000000000000..e929f0246e9f --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.prodkeycloak @@ -0,0 +1,2 @@ +export PRODKEYCLOAK_URL=https://prodkeycloak:8443/realms/prod +export PRODKEYCLOAK_CA_CERT=/config/oauth/keycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.http-user b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.http-user similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.http-user rename to deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.http-user diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.internal-user b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.internal-user new file mode 100644 index 000000000000..b35a68de7b3d --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.internal-user @@ -0,0 +1,2 @@ +export RABBITMQ_AMQP_USERNAME=internaluser +export RABBITMQ_AMQP_PASSWORD=management diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.ldap-user b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.ldap-user similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.ldap-user rename to deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.ldap-user diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.local b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/.env.local rename to deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.devkeycloak b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.devkeycloak new file mode 100644 index 000000000000..a1e2d5d596c2 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.devkeycloak @@ -0,0 +1,2 @@ +export DEVKEYCLOAK_URL=https://localhost:8442/realms/dev +export DEVKEYCLOAK_CA_CERT=deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.prodkeycloak b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.prodkeycloak new file mode 100644 index 000000000000..e267b558cd49 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.prodkeycloak @@ -0,0 +1,2 @@ +export PRODKEYCLOAK_URL=https://localhost:8443/realms/prod +export 
PRODKEYCLOAK_CA_CERT=deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-devproducer b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-devproducer new file mode 100644 index 000000000000..ccc08ac6b023 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-devproducer @@ -0,0 +1,3 @@ +export RABBITMQ_AMQP_USERNAME=oauth +export OAUTH_CLIENT_ID=dev_producer +export OAUTH_CLIENT_SECRET=z1PNm47wfWyulTnAaDOf1AggTy3MxX2H diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-prodproducer b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-prodproducer new file mode 100644 index 000000000000..8710eea7f1bf --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-prodproducer @@ -0,0 +1,3 @@ +export RABBITMQ_AMQP_USERNAME=oauth +export OAUTH_CLIENT_ID=prod_producer +export OAUTH_CLIENT_SECRET=prod_producer diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/imports/users.json b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/imports/users.json index 4c3815679bc2..fc91b2949586 100644 --- a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/imports/users.json +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/imports/users.json @@ -36,6 +36,15 @@ "monitoring" ], "limits": {} + }, + { + "name": "internaluser", + "password_hash": "wefAAoRipS2ytWb7U2+BLhReT8oO+VU8ztUi3dv+rawi9rB1", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "management" + ], + "limits": {} } ], "vhosts": [ @@ -51,6 +60,13 @@ "configure": ".*", "write": ".*", "read": ".*" + }, + { + "user": "internaluser", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" } ] diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-http/defaultExpectations.json b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-http/defaultExpectations.json new file mode 100644 index 000000000000..b4b40eec451a --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-http/defaultExpectations.json @@ -0,0 +1,10 @@ +[ + { + "httpRequest": { + "path": "/ready" + }, + "httpResponse": { + "body": "ok" + } + } +] diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-http/expectationInitialiser.json b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-http/expectationInitialiser.json deleted file mode 100644 index 4415f8219f7e..000000000000 --- a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-http/expectationInitialiser.json +++ /dev/null @@ -1,46 +0,0 @@ -[ - { - "httpRequest": { - "path": "/ready" - }, - "httpResponse": { - "body": "ok" - } - }, - { - "httpRequest": { - "path": "/auth/user", - "queryStringParameters": { - "username": "httpuser", - "password": "httppassword" - } - }, - "httpResponse": { - "body": "allow" - } - }, - { - "httpRequest": { - "path": "/auth/vhost" - }, - "httpResponse": { - "body": "allow" - } - }, - { - "httpRequest": { - "path": "/auth/resource" - }, - "httpResponse": { - "body": "allow" - } - }, - { - "httpRequest": { - "path": "/auth/topic" - }, - "httpResponse": { - "body": "allow" - } - } -] diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mqtt.js 
b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mqtt.js new file mode 100644 index 000000000000..e71916003ef9 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mqtt.js @@ -0,0 +1,64 @@ +const assert = require('assert') +const { getURLForProtocol, tokenFor, openIdConfiguration } = require('../utils') +const { reset, expectUser, expectVhost, expectResource, allow, verifyAll } = require('../mock_http_backend') +const mqtt = require('mqtt'); + +const profiles = process.env.PROFILES || "" +var backends = "" +for (const element of profiles.split(" ")) { + if ( element.startsWith("auth_backends-") ) { + backends = element.substring(element.indexOf("-")+1) + } +} + +describe('Having MQTT protocol enabled and the following auth_backends: ' + backends, function () { + let mqttOptions + let expectations = [] + let client_id = 'selenium-client' + let rabbit = process.env.RABBITMQ_HOSTNAME || 'localhost' + let username = process.env.RABBITMQ_AMQP_USERNAME + let password = process.env.RABBITMQ_AMQP_PASSWORD + + before(function () { + if (backends.includes("http") && username.includes("http")) { + reset() + expectations.push(expectUser({ "username": username, "password": password, "client_id": client_id, "vhost": "/" }, "allow")) + expectations.push(expectVhost({ "username": username, "vhost": "/"}, "allow")) + } else if (backends.includes("oauth") && username.includes("oauth")) { + let oauthProviderUrl = process.env.OAUTH_PROVIDER_URL + let oauthClientId = process.env.OAUTH_CLIENT_ID + let oauthClientSecret = process.env.OAUTH_CLIENT_SECRET + let openIdConfig = openIdConfiguration(oauthProviderUrl) + console.log("Obtained token_endpoint : " + openIdConfig.token_endpoint) + password = tokenFor(oauthClientId, oauthClientSecret, openIdConfig.token_endpoint) + console.log("Obtained access token : " + password) + } + mqttOptions = { + clientId: client_id, + protocolId: 'MQTT', + protocolVersion: 4, + keepalive: 10000, + clean: false, + reconnectPeriod: '1000', + username: username, + password: password, + } + }) + + it('can open an MQTT connection', function () { + var client = mqtt.connect("mqtt://" + rabbit + ":1883", mqttOptions) + client.on('error', function(err) { + assert.fail("MQTT connection failed due to " + err) + client.end() + }) + client.on('connect', function(err) { + client.end() + }) + }) + + after(function () { + if ( backends.includes("http") ) { + verifyAll(expectations) + } + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.backends-oauth.conf b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.backends-oauth.conf new file mode 100644 index 000000000000..d1105c6bc864 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.backends-oauth.conf @@ -0,0 +1,33 @@ +## RabbitMQ configuration with 2 oauth2 resources, rabbit_prod and rabbit_dev, +## rather than a single resource_server_id +## Also, each resource is owned by its own oauth provider, i.e. RabbitMQ is +## accessed by users and clients from two different providers using their dedicated +## resource_server_id. +log.console.level = debug + +auth_backends.1 = rabbit_auth_backend_oauth2 + +# Common auth_oauth2 settings for all resources +auth_oauth2.preferred_username_claims.1 = preferred_username +auth_oauth2.preferred_username_claims.2 = user_name +auth_oauth2.preferred_username_claims.3 = email +auth_oauth2.scope_prefix = rabbitmq.
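+ +# Illustrative note (sketch, not part of the test setup above): with this scope_prefix, a token +# scope such as "rabbitmq.read:*/*" is interpreted, once the prefix is stripped, as the read +# permission on every vhost and resource of whichever resource server (rabbit_prod or rabbit_dev) +# the token's audience targets, and "rabbitmq.tag:management" grants the management UI tag.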
+ +## Resource servers hosted by this rabbitmq instance +auth_oauth2.resource_servers.1.id = rabbit_prod +auth_oauth2.resource_servers.1.oauth_provider_id = prodkeycloak +auth_oauth2.resource_servers.2.id = rabbit_dev +auth_oauth2.resource_servers.2.oauth_provider_id = devkeycloak +auth_oauth2.resource_servers.3.id = rabbit_internal +auth_oauth2.resource_servers.3.oauth_provider_id = devkeycloak + +## Oauth providers +auth_oauth2.oauth_providers.devkeycloak.issuer = ${DEVKEYCLOAK_URL} +auth_oauth2.oauth_providers.devkeycloak.https.cacertfile = ${DEVKEYCLOAK_CA_CERT} +auth_oauth2.oauth_providers.devkeycloak.https.verify = verify_peer +auth_oauth2.oauth_providers.devkeycloak.https.hostname_verification = wildcard + +auth_oauth2.oauth_providers.prodkeycloak.issuer = ${PRODKEYCLOAK_URL} +auth_oauth2.oauth_providers.prodkeycloak.https.cacertfile = ${PRODKEYCLOAK_CA_CERT} +auth_oauth2.oauth_providers.prodkeycloak.https.verify = verify_peer +auth_oauth2.oauth_providers.prodkeycloak.https.hostname_verification = wildcard diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/ac-management.js b/deps/rabbitmq_management/selenium/test/basic-auth/ac-management.js index 0b5549f9f99a..d5282d386b82 100644 --- a/deps/rabbitmq_management/selenium/test/basic-auth/ac-management.js +++ b/deps/rabbitmq_management/selenium/test/basic-auth/ac-management.js @@ -24,6 +24,37 @@ describe('management user with vhosts permissions', function () { await overview.isLoaded() }) + it('can access overview tab', async function () { + await overview.clickOnOverviewTab() + await overview.waitForOverviewTab() + assert.ok(!await overview.isPopupWarningDisplayed()) + }) + it('can access connections tab', async function () { + await overview.clickOnConnectionsTab() + await overview.waitForConnectionsTab() + assert.ok(!await overview.isPopupWarningDisplayed()) + }) + it('can access channels tab', async function () { + await overview.clickOnChannelsTab() + await overview.waitForChannelsTab() + assert.ok(!await overview.isPopupWarningDisplayed()) + }) + it('can access exchanges tab', async function () { + await overview.clickOnExchangesTab() + await overview.waitForExchangesTab() + assert.ok(!await overview.isPopupWarningDisplayed()) + }) + it('can access queues and streams tab', async function () { + await overview.clickOnQueuesTab() + await overview.waitForQueuesTab() + assert.ok(!await overview.isPopupWarningDisplayed()) + }) + it('can access limited options in admin tab', async function () { + await overview.clickOnAdminTab() + await overview.waitForAdminTab() + assert.ok(!await overview.isPopupWarningDisplayed()) + }) + it('cannot add/update user limits', async function () { await overview.clickOnAdminTab() await admin.clickOnLimits() diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/enabled_plugins b/deps/rabbitmq_management/selenium/test/basic-auth/enabled_plugins index feaf7912d783..c91f7ba880c3 100644 --- a/deps/rabbitmq_management/selenium/test/basic-auth/enabled_plugins +++ b/deps/rabbitmq_management/selenium/test/basic-auth/enabled_plugins @@ -1,5 +1,5 @@ [accept,amqp10_client,amqp_client,base64url,cowboy,cowlib,eetcd,gun,jose, - prometheus,rabbitmq_amqp1_0,rabbitmq_auth_backend_cache, + oauth2_client,prometheus,rabbitmq_amqp1_0,rabbitmq_auth_backend_cache, rabbitmq_auth_backend_http,rabbitmq_auth_backend_ldap, rabbitmq_auth_backend_oauth2,rabbitmq_auth_mechanism_ssl,rabbitmq_aws, rabbitmq_consistent_hash_exchange,rabbitmq_event_exchange, diff --git 
a/deps/rabbitmq_management/selenium/test/basic-auth/.env.docker.proxy b/deps/rabbitmq_management/selenium/test/basic-auth/env.docker.proxy similarity index 69% rename from deps/rabbitmq_management/selenium/test/basic-auth/.env.docker.proxy rename to deps/rabbitmq_management/selenium/test/basic-auth/env.docker.proxy index bc2b13861c11..91a1d12cdc21 100644 --- a/deps/rabbitmq_management/selenium/test/basic-auth/.env.docker.proxy +++ b/deps/rabbitmq_management/selenium/test/basic-auth/env.docker.proxy @@ -1,2 +1,3 @@ export PUBLIC_RABBITMQ_HOST=proxy:9090 export RABBITMQ_HOST_FOR_PROXY=rabbitmq:15672 +export SELENIUM_INTERACTION_DELAY=250 \ No newline at end of file diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/.env.local b/deps/rabbitmq_management/selenium/test/basic-auth/env.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/.env.local rename to deps/rabbitmq_management/selenium/test/basic-auth/env.local diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/.env.local.proxy b/deps/rabbitmq_management/selenium/test/basic-auth/env.local.proxy similarity index 73% rename from deps/rabbitmq_management/selenium/test/basic-auth/.env.local.proxy rename to deps/rabbitmq_management/selenium/test/basic-auth/env.local.proxy index f4cba30edfe3..91875b4c5611 100644 --- a/deps/rabbitmq_management/selenium/test/basic-auth/.env.local.proxy +++ b/deps/rabbitmq_management/selenium/test/basic-auth/env.local.proxy @@ -1,2 +1,3 @@ export PUBLIC_RABBITMQ_HOST=localhost:9090 export RABBITMQ_HOST_FOR_PROXY=host.docker.internal:15672 +export SELENIUM_INTERACTION_DELAY=250 \ No newline at end of file diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/imports/users.json b/deps/rabbitmq_management/selenium/test/basic-auth/imports/users.json index b17df9ea0c68..e6b99e3b2b4d 100644 --- a/deps/rabbitmq_management/selenium/test/basic-auth/imports/users.json +++ b/deps/rabbitmq_management/selenium/test/basic-auth/imports/users.json @@ -44,6 +44,13 @@ "monitoring" ], "limits": {} + }, + { + "name": "rabbit_no_management", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ ], + "limits": {} } ], "vhosts": [ @@ -65,6 +72,13 @@ "configure": ".*", "write": ".*", "read": ".*" + }, + { + "user": "rabbit_no_management", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" } ] diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/logout.js b/deps/rabbitmq_management/selenium/test/basic-auth/logout.js index 1a3ab8881e06..6493c7059654 100644 --- a/deps/rabbitmq_management/selenium/test/basic-auth/logout.js +++ b/deps/rabbitmq_management/selenium/test/basic-auth/logout.js @@ -22,6 +22,7 @@ describe('When a logged in user', function () { it('logs out', async function () { await loginPage.login('guest', 'guest') await overview.isLoaded() + await overview.selectRefreshOption("Do not refresh") await overview.logout() await loginPage.isLoaded() }) diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.conf b/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.conf index a5bb75a3d43d..f5e2add9f1af 100644 --- a/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.conf +++ b/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.conf @@ -1,4 +1,4 @@ auth_backends.1 = rabbit_auth_backend_internal -management.login_session_timeout = 150 +management.login_session_timeout = 1 load_definitions = ${IMPORT_DIR}/users.json diff 
--git a/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.mgt-prefix.conf b/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.mgt-prefix.conf new file mode 100644 index 000000000000..d8ed31ae7c6a --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.mgt-prefix.conf @@ -0,0 +1 @@ +management.path_prefix = /my-prefix/another-prefix diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/session-expired.js b/deps/rabbitmq_management/selenium/test/basic-auth/session-expired.js new file mode 100644 index 000000000000..262497bdbeeb --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/basic-auth/session-expired.js @@ -0,0 +1,39 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, delay } = require('../utils') + +const LoginPage = require('../pageobjects/LoginPage') +const OverviewPage = require('../pageobjects/OverviewPage') + +describe('Once user is logged in', function () { + let homePage + let idpLogin + let overview + let captureScreen + this.timeout(65000) // hard-coded to 65 secs because this test waits 60 secs for the 1-minute login session to expire + + before(async function () { + driver = buildDriver() + await goToHome(driver) + login = new LoginPage(driver) + overview = new OverviewPage(driver) + captureScreen = captureScreensFor(driver, __filename) + await login.login('guest', 'guest') + await overview.isLoaded() + + }) + + it('has to log in again after the session expires', async function () { + + await delay(60000) + await login.isLoaded() + await login.login('guest', 'guest') + await overview.isLoaded() + await overview.clickOnConnectionsTab() // and we can still interact with the ui + }) + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/unauthorized.js b/deps/rabbitmq_management/selenium/test/basic-auth/unauthorized.js new file mode 100644 index 000000000000..2399817f5733 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/basic-auth/unauthorized.js @@ -0,0 +1,59 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, delay } = require('../utils') + +const LoginPage = require('../pageobjects/LoginPage') +const OverviewPage = require('../pageobjects/OverviewPage') + +describe('A user without the management tag', function () { + let homePage + let idpLogin + let overview + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + login = new LoginPage(driver) + overview = new OverviewPage(driver) + captureScreen = captureScreensFor(driver, __filename) + + assert.ok(!await login.isPopupWarningDisplayed()) + await login.login('rabbit_no_management', 'rabbit_no_management') + await !overview.isLoaded() + }) + + it('cannot log into the management ui', async function () { + const visible = await login.isWarningVisible() + assert.ok(visible) + }) + + it('should get "Login failed" warning message', async function(){ + assert.equal('Login failed', await login.getWarning()) + }) + + it('should get popup warning dialog', async function(){ + assert.ok(login.isPopupWarningDisplayed()) + assert.equal('Not_Authorized', await login.getPopupWarning()) + }) + + describe("After clicking on popup warning dialog button", function() { + + before(async function () { + await
login.closePopupWarning() + }) + + it('should close popup warning', async function(){ + await delay(1000) + const visible = await login.isPopupWarningDisplayed() + assert.ok(!visible) + }) + + }) + + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/.env.docker b/deps/rabbitmq_management/selenium/test/env.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/.env.docker rename to deps/rabbitmq_management/selenium/test/env.docker diff --git a/deps/rabbitmq_management/selenium/test/.env.local b/deps/rabbitmq_management/selenium/test/env.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/.env.local rename to deps/rabbitmq_management/selenium/test/env.local diff --git a/deps/rabbitmq_management/selenium/test/env.tls.docker b/deps/rabbitmq_management/selenium/test/env.tls.docker new file mode 100644 index 000000000000..e598d14b7439 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/env.tls.docker @@ -0,0 +1,3 @@ +export RABBITMQ_SCHEME=https +export RABBITMQ_HOSTNAME=rabbitmq +export RABBITMQ_HOST=rabbitmq:15671 diff --git a/deps/rabbitmq_management/selenium/test/env.tls.local b/deps/rabbitmq_management/selenium/test/env.tls.local new file mode 100644 index 000000000000..e39b7b520c8a --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/env.tls.local @@ -0,0 +1,3 @@ +export RABBITMQ_SCHEME=https +export RABBITMQ_HOSTNAME=localhost +export RABBITMQ_HOST=localhost:15671 diff --git a/deps/rabbitmq_management/selenium/test/mgt-only/enabled_plugins b/deps/rabbitmq_management/selenium/test/mgt-only/enabled_plugins index feaf7912d783..ea2a6a29ba53 100644 --- a/deps/rabbitmq_management/selenium/test/mgt-only/enabled_plugins +++ b/deps/rabbitmq_management/selenium/test/mgt-only/enabled_plugins @@ -1,5 +1,5 @@ [accept,amqp10_client,amqp_client,base64url,cowboy,cowlib,eetcd,gun,jose, - prometheus,rabbitmq_amqp1_0,rabbitmq_auth_backend_cache, + prometheus,rabbitmq_auth_backend_cache, rabbitmq_auth_backend_http,rabbitmq_auth_backend_ldap, rabbitmq_auth_backend_oauth2,rabbitmq_auth_mechanism_ssl,rabbitmq_aws, rabbitmq_consistent_hash_exchange,rabbitmq_event_exchange, diff --git a/deps/rabbitmq_management/selenium/test/mock_http_backend.js b/deps/rabbitmq_management/selenium/test/mock_http_backend.js new file mode 100644 index 000000000000..869e2fbc431d --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/mock_http_backend.js @@ -0,0 +1,140 @@ +const XMLHttpRequest = require('xmlhttprequest').XMLHttpRequest + +const baseURL = process.env.AUTH_BACKEND_HTTP_BASEURL || 'http://localhost:8888' + +function putReset() { + const req = new XMLHttpRequest() + const url = baseURL + '/mockserver/reset' + req.open('PUT', url, false) + req.send() + if (!wasSuccessful(req)) { + console.error(req.responseText) + throw new Error(req.responseText) + } +} +function putExpectation(expectation) { + const req = new XMLHttpRequest() + const url = baseURL + '/mockserver/expectation' + req.open('PUT', url, false) + req.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded') + req.setRequestHeader('Accept', 'application/json') + req.send(JSON.stringify(expectation)) + if (!wasSuccessful(req)) { + console.error(req.responseText) + throw new Error(req.responseText) + } +} +function wasSuccessful(req) { + return Math.floor(req.status / 100) == 2 +} +function putVerify(expectation) { + const req = new XMLHttpRequest() + const url = baseURL + '/mockserver/verify' + + 
req.open('PUT', url, false) + req.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded') + req.setRequestHeader('Accept', 'application/json') + + req.send(JSON.stringify(expectation)) + if (!wasSuccessful(req)) { + console.error(req.responseText) + throw new Error(req.responseText) + } + +} + +module.exports = { + + deny: () => { + return "deny" + }, + allow: () => { + return "allow" + }, + + reset: () => { + putReset() + }, + + // let parameters = { username: 'something', password: 'somethingelse', param3: 'another' } + // let response = "deny" + // let allow = "allow" + // let allow = "allow [administrator]" + expectUser: (parameters, response, opts = { method: 'GET', path: '/auth/user'}) => { + + putExpectation({ + "httpRequest": { + "method" : opts.method, + "path": opts.path, + "queryStringParameters": parameters + }, + "httpResponse": { + "body": response + } + }) + return { + "httpRequest": { + "method" : opts.method, + "path": opts.path, + "queryStringParameters": parameters + }, + "times": { + "atLeast": 1 + } + } + }, + expectVhost: (parameters, response, opts = { method: 'GET', path: '/auth/vhost'}) => { + putExpectation({ + "httpRequest": { + "method" : opts.method, + "path": opts.path, + "queryStringParameters": parameters + }, + "httpResponse": { + "body": response + } + }) + return { + "httpRequest": { + "method" : opts.method, + "path": opts.path, + "queryStringParameters": parameters + }, + "times": { + "atLeast": 1 + } + } + }, + + expectResource: (parameters, response, opts = { method: 'GET', path: '/auth/resource'}) => { + putExpectation({ + "httpRequest": { + "method" : opts.method, + "path": opts.path, + "queryStringParameters": parameters + }, + "httpResponse": { + "body": response + } + }) + return { + "httpRequest": { + "method" : opts.method, + "path": opts.path, + "queryStringParameters": parameters + }, + "times": { + "atLeast": 1 + } + } + }, + verify: (expectation) => { + putVerify(expectation) + }, + verifyAll : (expectations) => { + for (i in expectations) { + putVerify(expectations[i]) + } + } + +} diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/certs/ca_certificate.pem b/deps/rabbitmq_management/selenium/test/multi-oauth/certs/ca_certificate.pem new file mode 100644 index 000000000000..cd37bea304f5 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/certs/ca_certificate.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIJAOA06nrAwraBMA0GCSqGSIb3DQEBCwUAMEwxOzA5BgNV +BAMMMlRMU0dlblNlbGZTaWduZWR0Um9vdENBIDIwMjMtMTEtMTZUMTI6MjQ6NDcu +Mjg5MDkzMQ0wCwYDVQQHDAQkJCQkMB4XDTIzMTExNjExMjQ0N1oXDTMzMTExMzEx +MjQ0N1owTDE7MDkGA1UEAwwyVExTR2VuU2VsZlNpZ25lZHRSb290Q0EgMjAyMy0x +MS0xNlQxMjoyNDo0Ny4yODkwOTMxDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDWJrvvUvpkiAhvIiciuTbFHRMC7VdOXdIM3y3I +Vt56Voj3dkCVitFcvTc+pkuqoQUaWRTc5M+875CaQSRIDfVyFTIGTyVXv6cZRcoz +0gcmYvopIJ4Wi5/xG9Qp8uJMtr+UBJ57ez6Urau/L3zETAVZA+y1bTylAlh4tjMH +I24bvyy4yNQbPtG4y5F9x484fn3H4x7lf6O/Xulcvy8vL1kyc/EgrF4fpjogwj58 +eQ5HLwbAlMRRxXxXX2U5tXlrv475WItp/1mhZ+j2yCMKB4tJ8tXbtpgou0JDtlN0 +8Jwm3+d5a6PxqynmgRAXStZ4Fda93Pa3FJfw1u63JrmOprG9AgMBAAGjYDBeMA8G +A1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBS2Icxjr1ucGCIx +ikeSG9igJf558jAfBgNVHSMEGDAWgBS2Icxjr1ucGCIxikeSG9igJf558jANBgkq +hkiG9w0BAQsFAAOCAQEAR0iG00uE2GnoWtaXEHYJTdvBBcStBB8qnRk19Qu/b8qd +HAhRGb31IiuYzNJxLxhOtXWQMKvsKPAKpPXP3c5XVAf2O156GoXEPkKQktF738Pp +rRlrQPqU9Qpm84rMC54EB7coxEs7HMx4do/kNaVPdqq++JIEAcWOEVKfudN+8TMR 
+XyUJT54jBacsTpAZNfY6boJmuQ+G6tkpQvlHOU6388IFuLPkYRO7h7CHVbDsMEXD +Ptg3PCK97nCVgs4xfQGR7nT2pawfEUQVMon/XShtXY0RIKpynwrgICHDdvMXRXlG +a4haA7sz8Wyroy6Ub5+X3s4YRumSQrhiwRzqU+f75A== +-----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_certificate.pem b/deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_certificate.pem new file mode 100644 index 000000000000..ef57ff61a411 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_certificate.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDxDCCAqygAwIBAgIBDTANBgkqhkiG9w0BAQsFADBMMTswOQYDVQQDDDJUTFNH +ZW5TZWxmU2lnbmVkdFJvb3RDQSAyMDIzLTExLTE2VDEyOjI0OjQ3LjI4OTA5MzEN +MAsGA1UEBwwEJCQkJDAeFw0yNDAyMDkwODE3MDFaFw0zNDAyMDYwODE3MDFaMCQx +ETAPBgNVBAMMCHJhYmJpdG1xMQ8wDQYDVQQKDAZzZXJ2ZXIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCjxmYRJeYfOnQ91ZSIZsjznnPiy0yukFnapF7Y +iIXxFCygEnw/hwqSG7ddkvDjNlc6P+K4rEEBmER87mEl0YqvAZ9/C6K4OANJFuD7 +kQYH3Uyt+aXJfeyByAjr8HM/jSHDZm5DpysVlSBMkJGg4sV9h38i0aT27+J0a4xm +Yb9pH+bbWKn4QflvOQi7IcyZ+PcB54/vCDZRtlypkT/6EuqTXqRHH9wGlYaos+Jo +XMQDWykYtN2160E1gUwW1OhdRlDHj21Tej9fYObRjb326au4e3ivTPqKYLYsSz0Y +dcRoM6SjvwGiAC131n2XeHyKTQrMeKOb+TTVHzJZG7iUM5iBAgMBAAGjgdgwgdUw +CQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG +AQUFBwMCMCkGA1UdEQQiMCCCCHJhYmJpdG1xgglsb2NhbGhvc3SCCWxvY2FsaG9z +dDAdBgNVHQ4EFgQUs9vJtNmoNWybsVgMmeRqcPGXRckwHwYDVR0jBBgwFoAUtiHM +Y69bnBgiMYpHkhvYoCX+efIwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovL2NybC1z +ZXJ2ZXI6ODAwMC9iYXNpYy5jcmwwDQYJKoZIhvcNAQELBQADggEBAHxsmfxpoGZg +AlLu+Y62TQxqp2i+PqLJHuGBdB/93NV3S3P3tlDaqHwYt0mveS7ej+JXhw9wvSZz +jmejWePL08FXD9KPggRP4/SsG6Adf/5+vcofYR23I7D4y9hsrDqZezCurWZ4LY4X +dYmIQcI6IwgcjffWhsyt3CEbU+yVg6jrjVWv5sVPi3xZUu/dwpTdrdNzeUIFM8vf +H3BS8EcLwtaNR4snLJlFIhuDfDv7Ewi1FsmM4zkSe/aHboUNDduI2poRW/EPtbdM +zD1pVXNh1Q9hkqFCD7l4Vua+JVsA7PWD7yr73pm2ak6GfgjA7Enj0a6KbAfAXLMr +otRknmbKCUU= +-----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_key.pem b/deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_key.pem new file mode 100644 index 000000000000..f5df03f73df8 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCjxmYRJeYfOnQ9 +1ZSIZsjznnPiy0yukFnapF7YiIXxFCygEnw/hwqSG7ddkvDjNlc6P+K4rEEBmER8 +7mEl0YqvAZ9/C6K4OANJFuD7kQYH3Uyt+aXJfeyByAjr8HM/jSHDZm5DpysVlSBM +kJGg4sV9h38i0aT27+J0a4xmYb9pH+bbWKn4QflvOQi7IcyZ+PcB54/vCDZRtlyp +kT/6EuqTXqRHH9wGlYaos+JoXMQDWykYtN2160E1gUwW1OhdRlDHj21Tej9fYObR +jb326au4e3ivTPqKYLYsSz0YdcRoM6SjvwGiAC131n2XeHyKTQrMeKOb+TTVHzJZ +G7iUM5iBAgMBAAECggEAOdYOpW+k3NJfypZqZeEmhiIm+qig4+TGVphFhmJwKrrd +J4pfpm+iJAb1sm3588N0+nUlM+Jg8pc7WIM2e4yMVVFVaiBJzpS5VE5oFW8Zmh1k +vuuyyH1X0F08CVZY3NCSY9cAiZO3e1+2kFNdmlt7MuFu3HT8tNfyOPriEiXi2tSA +qmgUmMql305wYwjIp+mTP8X7YKKdIdCXwPC2E1Kj5SseEc9NYvHdmeJ3nZCVATbS +h8aP7HB5GpsDMHbnnFzOqPfxIPxYkJ4JqE0iGpw+SMYbIGLVkMEGodpWjBwZiaaI +EMeJJk3Qs/QvVLDxhSsFXsaLGLgYN0rItYX9dUyroQKBgQDOOLKJ9OPcm3sAWo9e +byRYegDPPM06Es5s0hF0Pr0u6X8F7fDnpS74XVMlWxZzvXWgZQNwC2nYaGfNpK5t +E2FxIC0S69W4m1L6sp2sTRLSJo5NiZc4kNVjGvnmgIrNqMhJK8pLOh5xx6/kAbpo +/lydhtXWP0omw5imFkh3bGQuZwKBgQDLTsCu01OCNuQs0Y9hgW/iHzRpX1aHvp8X +u8v/AtOS3z5a3WptrLah/HHM5B/4Hh9dW4uljuR0zTsk8dFD8lQ/mdxbXjPGEcN6 +QNe1Md2nV0xAZsW1Xp1iFDomS5xSn+qWDmR0EAXvs0hHMQnX1k7+dp2mK1whRwdM +z4mv0cZg1wKBgDnuzaFZ7aVs/GoGBt7FpFVCuPV/JDxbSihh/0tD0MvcBrY4uQOq 
+cP6O4SvOYglTwTa1CfkxC6Qi+H5Z9DJqTmaEXoVBQYIiCHarNQZRhKcK89EuhQ/8 +CCZWTrwFgnjyIIoFxkfJ5QGb0nrgTWjvhD8wwOP2VbN8IWcPPX5nMeGjAoGBAL7b +y59T3E2d4k8A3C2ZKcOJr9ZMHhuJJClPr45SxPRYh10eB0+2mC0xpFPIxQpUnPUz +f8GIh4fvMtrX+LBkyhp7ApbztH75Jh2ayeXcTk1OctLyqCBAFleAzaYtzS7z2XHN +SRh8AlaoY+4RZ0AsfDP+frkEc5T57Sx6mLNpp2Y5AoGAXG5BGedrCMa44Ugpux41 +saTIlaXUOObxdsGTLMOy1Ppb9LW5yk4kS8ObP3SksjUUZrRUO/BagLukgcaS038/ +AbNDU5lMCmMfwxPN2lulERhaIA1BeVgmOwJYY7nqXkL5Yibu0OXnvvbCkt0eLnp2 +ATZBECwIxNuB9pixRmDhXsM= +-----END PRIVATE KEY----- diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/ca_certificate.pem b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/ca_certificate.pem new file mode 100644 index 000000000000..cd37bea304f5 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/ca_certificate.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIJAOA06nrAwraBMA0GCSqGSIb3DQEBCwUAMEwxOzA5BgNV +BAMMMlRMU0dlblNlbGZTaWduZWR0Um9vdENBIDIwMjMtMTEtMTZUMTI6MjQ6NDcu +Mjg5MDkzMQ0wCwYDVQQHDAQkJCQkMB4XDTIzMTExNjExMjQ0N1oXDTMzMTExMzEx +MjQ0N1owTDE7MDkGA1UEAwwyVExTR2VuU2VsZlNpZ25lZHRSb290Q0EgMjAyMy0x +MS0xNlQxMjoyNDo0Ny4yODkwOTMxDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDWJrvvUvpkiAhvIiciuTbFHRMC7VdOXdIM3y3I +Vt56Voj3dkCVitFcvTc+pkuqoQUaWRTc5M+875CaQSRIDfVyFTIGTyVXv6cZRcoz +0gcmYvopIJ4Wi5/xG9Qp8uJMtr+UBJ57ez6Urau/L3zETAVZA+y1bTylAlh4tjMH +I24bvyy4yNQbPtG4y5F9x484fn3H4x7lf6O/Xulcvy8vL1kyc/EgrF4fpjogwj58 +eQ5HLwbAlMRRxXxXX2U5tXlrv475WItp/1mhZ+j2yCMKB4tJ8tXbtpgou0JDtlN0 +8Jwm3+d5a6PxqynmgRAXStZ4Fda93Pa3FJfw1u63JrmOprG9AgMBAAGjYDBeMA8G +A1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBS2Icxjr1ucGCIx +ikeSG9igJf558jAfBgNVHSMEGDAWgBS2Icxjr1ucGCIxikeSG9igJf558jANBgkq +hkiG9w0BAQsFAAOCAQEAR0iG00uE2GnoWtaXEHYJTdvBBcStBB8qnRk19Qu/b8qd +HAhRGb31IiuYzNJxLxhOtXWQMKvsKPAKpPXP3c5XVAf2O156GoXEPkKQktF738Pp +rRlrQPqU9Qpm84rMC54EB7coxEs7HMx4do/kNaVPdqq++JIEAcWOEVKfudN+8TMR +XyUJT54jBacsTpAZNfY6boJmuQ+G6tkpQvlHOU6388IFuLPkYRO7h7CHVbDsMEXD +Ptg3PCK97nCVgs4xfQGR7nT2pawfEUQVMon/XShtXY0RIKpynwrgICHDdvMXRXlG +a4haA7sz8Wyroy6Ub5+X3s4YRumSQrhiwRzqU+f75A== +-----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/dev-realm.json b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/dev-realm.json new file mode 100644 index 000000000000..ab468356a8e3 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/dev-realm.json @@ -0,0 +1,2790 @@ +{ + "id" : "dev", + "realm" : "dev", + "notBefore" : 0, + "defaultSignatureAlgorithm" : "RS256", + "revokeRefreshToken" : false, + "refreshTokenMaxReuse" : 0, + "accessTokenLifespan" : 300, + "accessTokenLifespanForImplicitFlow" : 900, + "ssoSessionIdleTimeout" : 1800, + "ssoSessionMaxLifespan" : 36000, + "ssoSessionIdleTimeoutRememberMe" : 0, + "ssoSessionMaxLifespanRememberMe" : 0, + "offlineSessionIdleTimeout" : 2592000, + "offlineSessionMaxLifespanEnabled" : false, + "offlineSessionMaxLifespan" : 5184000, + "clientSessionIdleTimeout" : 0, + "clientSessionMaxLifespan" : 0, + "clientOfflineSessionIdleTimeout" : 0, + "clientOfflineSessionMaxLifespan" : 0, + "accessCodeLifespan" : 60, + "accessCodeLifespanUserAction" : 300, + "accessCodeLifespanLogin" : 1800, + "actionTokenGeneratedByAdminLifespan" : 43200, + "actionTokenGeneratedByUserLifespan" : 300, + "oauth2DeviceCodeLifespan" : 600, + "oauth2DevicePollingInterval" : 5, + "enabled" : true, + "sslRequired" : "external", + "registrationAllowed" : false, + "registrationEmailAsUsername" : 
false, + "rememberMe" : false, + "verifyEmail" : false, + "loginWithEmailAllowed" : true, + "duplicateEmailsAllowed" : false, + "resetPasswordAllowed" : false, + "editUsernameAllowed" : false, + "bruteForceProtected" : false, + "permanentLockout" : false, + "maxFailureWaitSeconds" : 900, + "minimumQuickLoginWaitSeconds" : 60, + "waitIncrementSeconds" : 60, + "quickLoginCheckMilliSeconds" : 1000, + "maxDeltaTimeSeconds" : 43200, + "failureFactor" : 30, + "roles" : { + "realm" : [ { + "id" : "2b61bc53-60cc-48fc-b89b-ee3e80204895", + "name" : "rabbitmq.tag:management", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "c28bf7ca-9fb7-485c-a68b-d5fb4bd844fb", + "name" : "rabbitmq.tag:administrator", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "d2b776e4-8c4d-4168-9d52-76aaa115ee70", + "name" : "uma_authorization", + "description" : "${role_uma_authorization}", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "6faef857-1c9b-4474-ba01-ad1946d243d6", + "name" : "rabbitmq-proxy-client-role", + "description" : "", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "0a838a26-4908-4750-a1d0-7cc322c698ae", + "name" : "producer", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "dd893988-6661-4849-a0f1-1cd1a63b51a5", + "name" : "rabbitmq.read:*/*", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "6feb7afe-2fa8-4569-8fb8-e50c2a4302d2", + "name" : "offline_access", + "description" : "${role_offline-access}", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "af1bc955-6d4d-42e9-b0d4-343e7eb075d0", + "name" : "rabbitmq-role", + "description" : "", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "77e9131f-1eb3-45a3-9f3b-f74991a99def", + "name" : "rabbitmq.configure:*/*", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "97bb2b6b-33ff-404e-b754-351604d9f34c", + "name" : "rabbitmq", + "description" : "", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "b84ae322-7112-41d1-8a3f-0009447ded47", + "name" : "default-roles-test", + "description" : "${role_default-roles}", + "composite" : true, + "composites" : { + "realm" : [ "offline_access", "uma_authorization" ], + "client" : { + "account" : [ "view-profile", "manage-account" ] + } + }, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "5516969b-be85-490c-9715-9c1186075d60", + "name" : "rabbitmq-management", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + }, { + "id" : "216cfa85-9b8a-4fc0-bee1-814e2978d82b", + "name" : "rabbitmq.write:*/*", + "composite" : false, + "clientRole" : false, + "containerId" : "dev", + "attributes" : { } + } ], + "client" : { + "realm-management" : [ { + "id" : "6721a146-c9e3-4a24-9d26-6dbc7e3aae1f", + "name" : "manage-authorization", + "description" : "${role_manage-authorization}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "147b7e9a-d884-42b7-a970-245c2b5590b0", + 
"name" : "view-clients", + "description" : "${role_view-clients}", + "composite" : true, + "composites" : { + "client" : { + "realm-management" : [ "query-clients" ] + } + }, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "c25f4711-ee9b-4457-9636-7dacffceb676", + "name" : "view-events", + "description" : "${role_view-events}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "1ce1e692-4dae-498f-8ac6-ca119eb329ef", + "name" : "query-realms", + "description" : "${role_query-realms}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "78cd990d-68bd-4e71-9561-5e4412bcbfb7", + "name" : "create-client", + "description" : "${role_create-client}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "c5bc4413-71cb-43f1-b48b-c9428aed47cd", + "name" : "query-clients", + "description" : "${role_query-clients}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "21b62a0b-62fd-4a39-8b97-8ce8b89ad9d8", + "name" : "view-identity-providers", + "description" : "${role_view-identity-providers}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "9df26a88-36e9-4670-8b63-dc4e57ebcce8", + "name" : "manage-clients", + "description" : "${role_manage-clients}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "02bc109a-d318-4196-aa15-171651685b50", + "name" : "view-users", + "description" : "${role_view-users}", + "composite" : true, + "composites" : { + "client" : { + "realm-management" : [ "query-users", "query-groups" ] + } + }, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "97499d4c-fb81-4ee6-bd5e-6eb198424654", + "name" : "manage-events", + "description" : "${role_manage-events}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "bf64efcd-f8a4-47e6-bc5e-0ff29635b885", + "name" : "realm-admin", + "description" : "${role_realm-admin}", + "composite" : true, + "composites" : { + "client" : { + "realm-management" : [ "manage-authorization", "view-clients", "view-events", "query-realms", "create-client", "view-identity-providers", "query-clients", "view-users", "manage-clients", "manage-events", "view-realm", "manage-realm", "manage-users", "query-users", "query-groups", "manage-identity-providers", "view-authorization", "impersonation" ] + } + }, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "fb251ef8-0f7e-4e85-a423-e3bf515dbe5c", + "name" : "manage-realm", + "description" : "${role_manage-realm}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "09df7745-f99e-4961-add6-eca3e2ab9b44", + "name" : "view-realm", + "description" : "${role_view-realm}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" 
: "45a27cee-8828-4427-90b4-9394b080db18", + "name" : "manage-users", + "description" : "${role_manage-users}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "7454f27b-cabc-4160-9835-1747659f6f00", + "name" : "query-users", + "description" : "${role_query-users}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "42d9a084-b4e0-42f5-8c29-9623fb265f79", + "name" : "query-groups", + "description" : "${role_query-groups}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "893a1a00-5e1f-4dc2-983d-640a3cce58fa", + "name" : "manage-identity-providers", + "description" : "${role_manage-identity-providers}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "8c9b5f3e-2819-4dbe-81d0-fa8721ff9f1d", + "name" : "view-authorization", + "description" : "${role_view-authorization}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "1e3044c3-fb93-48fa-9b27-eb4d8c6ccad7", + "name" : "impersonation", + "description" : "${role_impersonation}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + } ], + "security-admin-console" : [ ], + "account-console" : [ ], + "broker" : [ { + "id" : "147bafb6-45a8-45ba-b214-7826b1fc4856", + "name" : "read-token", + "description" : "${role_read-token}", + "composite" : false, + "clientRole" : true, + "containerId" : "f32cd0e1-5b78-412a-ba07-6ad2a9aeb007", + "attributes" : { } + } ], + "rabbitmq" : [ { + "id" : "f5caa7a5-0770-41d8-a3a3-8691470b6d82", + "name" : "rabbitmq-role", + "description" : "", + "composite" : false, + "clientRole" : true, + "containerId" : "a57c9f6a-8b64-47dc-af53-d6ccc2d4aa60", + "attributes" : { } + } ], + "rabbitmq-proxy-client" : [ { + "id" : "ba66d339-cbca-41c1-87fe-38e7b50efd52", + "name" : "rabbitmq-proxy-client-role", + "composite" : true, + "composites" : { + "realm" : [ "rabbitmq-role", "rabbitmq-proxy-client-role", "rabbitmq" ] + }, + "clientRole" : true, + "containerId" : "c265f3db-ed3a-4898-8800-af044b3c30f5", + "attributes" : { } + } ], + "mgt_api_client" : [ ], + "dev_producer" : [ ], + "rabbit_dev_mgt_api" : [ ], + "admin-cli" : [ ], + "producer" : [ ], + "rabbitmq-client-code" : [ ], + "account" : [ { + "id" : "957f712c-e735-402d-9f41-ad9832749f51", + "name" : "delete-account", + "description" : "${role_delete-account}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "9145b32b-f8ef-4ff0-b50a-be1af192a65a", + "name" : "manage-account-links", + "description" : "${role_manage-account-links}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "59cac6df-51cd-4a3c-bf77-03bc2b34fe69", + "name" : "manage-consent", + "description" : "${role_manage-consent}", + "composite" : true, + "composites" : { + "client" : { + "account" : [ "view-consent" ] + } + }, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "508ce853-78d5-428c-9589-0e310fa7fe40", + "name" : "view-profile", + 
"description" : "${role_view-profile}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "061542b2-67d9-4388-aadb-9c936f19d607", + "name" : "view-groups", + "description" : "${role_view-groups}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "6d326537-afdd-4f72-8973-14b164361a7e", + "name" : "view-applications", + "description" : "${role_view-applications}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "1fc2ea3e-395a-45bd-ae2f-a9eb674ed4b2", + "name" : "manage-account", + "description" : "${role_manage-account}", + "composite" : true, + "composites" : { + "client" : { + "account" : [ "manage-account-links" ] + } + }, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "95ad82e5-1859-496e-ba95-6b38f8043efd", + "name" : "view-consent", + "description" : "${role_view-consent}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + } ], + "rabbit_dev_mgt_ui" : [ ] + } + }, + "groups" : [ { + "id" : "6746dbec-7e2b-4540-ae00-73aa2a93a04e", + "name" : "rabbitmq", + "path" : "/rabbitmq", + "attributes" : { }, + "realmRoles" : [ "rabbitmq" ], + "clientRoles" : { }, + "subGroups" : [ ] + } ], + "defaultRole" : { + "id" : "b84ae322-7112-41d1-8a3f-0009447ded47", + "name" : "default-roles-test", + "description" : "${role_default-roles}", + "composite" : true, + "clientRole" : false, + "containerId" : "dev" + }, + "requiredCredentials" : [ "password" ], + "otpPolicyType" : "totp", + "otpPolicyAlgorithm" : "HmacSHA1", + "otpPolicyInitialCounter" : 0, + "otpPolicyDigits" : 6, + "otpPolicyLookAheadWindow" : 1, + "otpPolicyPeriod" : 30, + "otpPolicyCodeReusable" : false, + "otpSupportedApplications" : [ "totpAppGoogleName", "totpAppFreeOTPName" ], + "webAuthnPolicyRpEntityName" : "keycloak", + "webAuthnPolicySignatureAlgorithms" : [ "ES256" ], + "webAuthnPolicyRpId" : "", + "webAuthnPolicyAttestationConveyancePreference" : "not specified", + "webAuthnPolicyAuthenticatorAttachment" : "not specified", + "webAuthnPolicyRequireResidentKey" : "not specified", + "webAuthnPolicyUserVerificationRequirement" : "not specified", + "webAuthnPolicyCreateTimeout" : 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister" : false, + "webAuthnPolicyAcceptableAaguids" : [ ], + "webAuthnPolicyPasswordlessRpEntityName" : "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms" : [ "ES256" ], + "webAuthnPolicyPasswordlessRpId" : "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference" : "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment" : "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey" : "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement" : "not specified", + "webAuthnPolicyPasswordlessCreateTimeout" : 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister" : false, + "webAuthnPolicyPasswordlessAcceptableAaguids" : [ ], + "users" : [ { + "id" : "88063139-59de-4027-a421-d613e3bdba1f", + "createdTimestamp" : 1690974911722, + "username" : "dev_user", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "", + "lastName" : "", + "credentials" : [ { + "id" : "0b2591b9-871e-490d-9319-6314fb5dc42b", + "type" : 
"password", + "userLabel" : "My password", + "createdDate" : 1690974921254, + "secretData" : "{\"value\":\"txSoE1qlIryIJsd8EKHp0aE7I5bzLkEEWKGxPrcH1lVmKXeAftKnB6Rqxnh2pX4IFem/FMTF/rcmttU+FFmsUA==\",\"salt\":\"qFN5DsIvc/F4yKrXke5K5Q==\",\"additionalParameters\":{}}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\",\"additionalParameters\":{}}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:management", "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "451df525-a468-43c1-97f3-656d5d31ba68", + "createdTimestamp" : 1690974863360, + "username" : "prod_user", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "", + "lastName" : "", + "credentials" : [ { + "id" : "508707a9-08e9-4e5e-8257-b6d6466c98df", + "type" : "password", + "userLabel" : "My password", + "createdDate" : 1690974873162, + "secretData" : "{\"value\":\"iEG974FQB66ACMIKSB6WpgC+CTKL6+JU5qIyjwM4Z1TeQz89pPOeXxjrmtaqourwV5adMVurURO2oO/qL8yHRg==\",\"salt\":\"+axOgEN33yDcNdrXvT+V8Q==\",\"additionalParameters\":{}}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\",\"additionalParameters\":{}}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:management", "rabbitmq.tag:administrator", "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "4cf4d6b5-09e5-453f-bf22-c8efdc2dd1dc", + "createdTimestamp" : 1651841525973, + "username" : "rabbit_admin", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "", + "lastName" : "", + "email" : "rabbit_admin@rabbit.com", + "credentials" : [ { + "id" : "deca2be2-28ad-4f98-981f-3ec68bf12ae2", + "type" : "password", + "createdDate" : 1651841816533, + "secretData" : "{\"value\":\"bRuz2IKP4+kG3IKo258mVNqW8Nts6CkZavF3tf4M+/dlJFNPJIallxephOKUiVPtMOdO9Huq9K0uwTBYSZY3fg==\",\"salt\":\"v2qUXLV0n8402Ef8brQg1Q==\",\"additionalParameters\":{}}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\",\"additionalParameters\":{}}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:administrator", "rabbitmq.configure:*/*", "rabbitmq", "rabbitmq.write:*/*", "rabbitmq.read:*/*" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "c15b9b9a-1e20-45b1-8d0c-15d1e805615b", + "createdTimestamp" : 1690973977084, + "username" : "service-account-dev_producer", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "dev_producer", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test", "producer" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "15f03347-e2fc-4f8c-9743-f4dfd59f67fe", + "createdTimestamp" : 1652084304711, + "username" : "service-account-mgt_api_client", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "mgt_api_client", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:management", "default-roles-test", "rabbitmq-management" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "63ec2047-6689-45c0-981d-f9b127a6bb7f", + "createdTimestamp" : 1652084012762, + "username" : "service-account-producer", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "producer", + "credentials" : [ ], + 
"disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test", "producer" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "cdf5fa4f-f0de-49b6-b7b1-10bfb1cd793e", + "createdTimestamp" : 1705167146686, + "username" : "service-account-rabbit_dev_mgt_api", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "rabbit_dev_mgt_api", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:management", "default-roles-test", "rabbitmq-management" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "7a51406b-d6d8-4c77-9b8a-135a2f07d8d5", + "createdTimestamp" : 1677053286393, + "username" : "service-account-rabbitmq-proxy-client", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "rabbitmq-proxy-client", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] + } ], + "scopeMappings" : [ { + "client" : "producer", + "roles" : [ "producer" ] + }, { + "clientScope" : "rabbitmq.read:*/*", + "roles" : [ "producer" ] + }, { + "clientScope" : "offline_access", + "roles" : [ "offline_access" ] + }, { + "clientScope" : "rabbitmq.configure:*/*", + "roles" : [ "producer" ] + }, { + "clientScope" : "rabbitmq.tag:management", + "roles" : [ "rabbitmq.tag:management" ] + }, { + "clientScope" : "rabbitmq.write:*/*", + "roles" : [ "producer" ] + }, { + "clientScope" : "rabbitmq.tag:administrator", + "roles" : [ "rabbitmq.tag:administrator" ] + } ], + "clientScopeMappings" : { + "account" : [ { + "client" : "account-console", + "roles" : [ "manage-account", "view-groups" ] + } ] + }, + "clients" : [ { + "id" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "clientId" : "account", + "name" : "${client_account}", + "rootUrl" : "${authBaseUrl}", + "baseUrl" : "/realms/test/account/", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "/realms/test/account/*" ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "80023652-2709-4646-9367-b6114aa73bae", + "clientId" : "account-console", + "name" : "${client_account-console}", + "rootUrl" : "${authBaseUrl}", + "baseUrl" : "/realms/test/account/", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "/realms/test/account/*" ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + 
"attributes" : { + "post.logout.redirect.uris" : "+", + "pkce.code.challenge.method" : "S256" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "protocolMappers" : [ { + "id" : "ebcf72c5-f58a-48cb-a6fb-db44e8735d7e", + "name" : "audience resolve", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-resolve-mapper", + "consentRequired" : false, + "config" : { } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "e5484264-82ff-46df-b38e-d5456439f413", + "clientId" : "admin-cli", + "name" : "${client_admin-cli}", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : true, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "f32cd0e1-5b78-412a-ba07-6ad2a9aeb007", + "clientId" : "broker", + "name" : "${client_broker}", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : true, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : false, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "d8dddaf0-31a6-4b0c-a6e1-d28cd2eb6256", + "clientId" : "dev_producer", + "name" : "", + "description" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "z1PNm47wfWyulTnAaDOf1AggTy3MxX2H", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "client.secret.creation.time" : "1690973977", + "backchannel.logout.session.required" : "true", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + 
"nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "8ce01162-04dc-4e31-9103-5fa7d1be2fb2", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + }, { + "id" : "693021a3-6a1a-434b-8e7c-9358dfbfad61", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbit_dev", + "userinfo.token.claim" : "false" + } + }, { + "id" : "a2c871ac-e9fe-4082-99bf-78ddcf118661", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + }, { + "id" : "414f346b-4c6f-4e41-a810-827f60470ba4", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "rabbitmq.read:*/*", "web-origins", "acr", "rabbitmq.write:*/*", "profile", "roles", "email", "rabbitmq.configure:*/*" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "c5be3c24-0c88-4672-a77a-79002fcc9a9d", + "clientId" : "mgt_api_client", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "LWOuYqJ8gjKg3D2U8CJZDuID3KiRZVDa", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "saml.force.post.binding" : "false", + "saml.multivalued.roles" : "false", + "frontchannel.logout.session.required" : "false", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false", + "saml.server.signature.keyinfo.ext" : "false", + "use.refresh.tokens" : "true", + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "client_credentials.use_refresh_token" : "false", + "require.pushed.authorization.requests" : "false", + "saml.client.signature" : "false", + "saml.allow.ecp.flow" : "false", + "id.token.as.detached.signature" : "false", + "saml.assertion.signature" : "false", + "client.secret.creation.time" : "1652084304", + "saml.encrypt" : "false", + "saml.server.signature" : "false", + "exclude.session.state.from.auth.response" : "false", + "saml.artifact.binding" : "false", + "saml_force_name_id_format" : "false", + "acr.loa.map" : "{}", + 
"tls.client.certificate.bound.access.tokens" : "false", + "saml.authnstatement" : "false", + "display.on.consent.screen" : "false", + "token.response.type.bearer.lower-case" : "false", + "saml.onetimeuse.condition" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "33fd8faf-3ea6-4669-beea-45b9655cf6ab", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + }, { + "id" : "aae7e2aa-72e7-4d29-ae68-a66b846d62ab", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbitmq", + "userinfo.token.claim" : "false" + } + }, { + "id" : "f7e826de-e651-4080-8e97-feba46b8a0a2", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + }, { + "id" : "545a1d71-5dc8-491c-bf7b-1c672d50e606", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "rabbitmq.tag:administrator", "rabbitmq.tag:management", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "3e96bddd-95f9-4277-b3ad-f8f6f5d5bb59", + "clientId" : "producer", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "kbOFBXI9tANgKUq8vXHLhT6YhbivgXxn", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "saml.force.post.binding" : "false", + "saml.multivalued.roles" : "false", + "frontchannel.logout.session.required" : "false", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false", + "saml.server.signature.keyinfo.ext" : "false", + "use.refresh.tokens" : "true", + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "false", + "client_credentials.use_refresh_token" : "true", + "require.pushed.authorization.requests" : "false", + "saml.client.signature" : "false", + "saml.allow.ecp.flow" : "false", + "id.token.as.detached.signature" : "false", + "saml.assertion.signature" : "false", + "client.secret.creation.time" : "1652081901", + 
"saml.encrypt" : "false", + "saml.server.signature" : "false", + "exclude.session.state.from.auth.response" : "false", + "saml.artifact.binding" : "false", + "saml_force_name_id_format" : "false", + "acr.loa.map" : "{}", + "tls.client.certificate.bound.access.tokens" : "false", + "saml.authnstatement" : "false", + "display.on.consent.screen" : "false", + "token.response.type.bearer.lower-case" : "false", + "saml.onetimeuse.condition" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "72928dd9-10c9-4049-bfa7-4cc05e650f46", + "name" : "realm roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-realm-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "realm_access.roles", + "jsonType.label" : "String", + "multivalued" : "true" + } + }, { + "id" : "4c3b3c28-795f-4056-a854-5cf119b36266", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + }, { + "id" : "56b7571c-3226-4c92-8615-c99b265a42fc", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbitmq", + "userinfo.token.claim" : "false" + } + }, { + "id" : "4ca73107-b26b-46ee-985b-d2dcc099f21c", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + }, { + "id" : "26e5243a-3127-4528-9a54-8af324ac2392", + "name" : "client roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-client-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "resource_access.${client_id}.roles", + "jsonType.label" : "String", + "multivalued" : "true" + } + }, { + "id" : "d52cc6cb-08a1-4c2b-bf06-61f234a419d1", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "rabbitmq.read:*/*", "rabbitmq.write:*/*", "roles", "rabbitmq.configure:*/*" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "8616d2bd-0eec-47b9-8b04-04b291cc9147", + "clientId" : "rabbit_dev_mgt_api", + "name" : "", + "description" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "p7v6DksWkcb6TUYK6payswovC0LqhU6A", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : 
false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "oauth2.device.authorization.grant.enabled" : "false", + "client.secret.creation.time" : "1705167146", + "backchannel.logout.session.required" : "true", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "5e97d60b-dabd-4856-ac1b-fcc81f1c96ef", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + }, { + "id" : "1dfa3529-be4b-43d5-aa52-df09fa96bbd7", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + }, { + "id" : "53abbc75-c9de-4859-a958-d6a2fdc51876", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbit_dev" + } + }, { + "id" : "d2bd267d-37a7-4ee4-bf73-68af80cde9c9", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "rabbitmq.tag:management", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "9e59fee6-c772-4244-a807-58d157cde3ea", + "clientId" : "rabbit_dev_mgt_ui", + "name" : "", + "description" : "", + "rootUrl" : "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}", + "adminUrl" : "", + "baseUrl" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}/*" ], + "webOrigins" : [ "+" ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "display.on.consent.screen" : "false", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { 
+ "id" : "65461f51-e45e-4de0-9981-974402d599e6", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbit_dev", + "userinfo.token.claim" : "false" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "rabbitmq.tag:management", "rabbitmq.tag:administrator", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "a57c9f6a-8b64-47dc-af53-d6ccc2d4aa60", + "clientId" : "rabbitmq", + "name" : "", + "description" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : true, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "e64b05d1-0d1c-4294-85f9-52ae098ecf1f", + "clientId" : "rabbitmq-client-code", + "name" : "", + "description" : "", + "rootUrl" : "http://localhost:15672/", + "adminUrl" : "", + "baseUrl" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "http://localhost:15672/*" ], + "webOrigins" : [ "+" ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : true, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "saml.force.post.binding" : "false", + "saml.multivalued.roles" : "false", + "frontchannel.logout.session.required" : "false", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false", + "saml.server.signature.keyinfo.ext" : "false", + "use.refresh.tokens" : "true", + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "false", + "client_credentials.use_refresh_token" : "false", + "require.pushed.authorization.requests" : "false", + "saml.client.signature" : "false", + "saml.allow.ecp.flow" : "false", + "id.token.as.detached.signature" : "false", + "saml.assertion.signature" : "false", + "client.secret.creation.time" : "1652171962", + "saml.encrypt" : "false", + "saml.server.signature" : "false", + "exclude.session.state.from.auth.response" : "false", + "tls-client-certificate-bound-access-tokens" : "false", + "saml.artifact.binding" : "false", + "saml_force_name_id_format" : "false", + "acr.loa.map" : "{}", + "tls.client.certificate.bound.access.tokens" : 
"false", + "saml.authnstatement" : "false", + "display.on.consent.screen" : "false", + "token.response.type.bearer.lower-case" : "false", + "saml.onetimeuse.condition" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "e6905c3e-7ace-4b4f-9244-0f20a86da8ef", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbitmq", + "userinfo.token.claim" : "false" + } + }, { + "id" : "548a2e70-5a2b-4959-8c72-97f6455ce478", + "name" : "realm roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-realm-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "extra_scope", + "jsonType.label" : "String", + "multivalued" : "true" + } + }, { + "id" : "923edb6d-2188-4f23-a547-7e372d9cb5eb", + "name" : "username", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "username", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "user_name", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "rabbitmq.tag:administrator", "profile", "roles", "rabbitmq.tag:management", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "c265f3db-ed3a-4898-8800-af044b3c30f5", + "clientId" : "rabbitmq-proxy-client", + "name" : "", + "description" : "", + "rootUrl" : "", + "adminUrl" : "", + "baseUrl" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "nt6pmZMeyrgzYgkg2MLgZQZxLveRMW5M", + "redirectUris" : [ "http://0.0.0.0:4180/*" ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : true, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "client.secret.creation.time" : "1677053168", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false", + "use.refresh.tokens" : "true", + "tls-client-certificate-bound-access-tokens" : "false", + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "client_credentials.use_refresh_token" : "false", + "acr.loa.map" : "{}", + "require.pushed.authorization.requests" : "false", + "display.on.consent.screen" : "false", + "token.response.type.bearer.lower-case" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "e1c2389a-c5ca-4a81-a5c2-67f919f2368d", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + 
"jsonType.label" : "String" + } + }, { + "id" : "54b12841-4524-4b8a-8dc0-bb6f9044e11d", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + }, { + "id" : "a5c803da-af15-4fc8-ad7f-a4a900f0703b", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + }, { + "id" : "958d4a83-d5b3-4cca-af3e-fde9f9328eec", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "included.client.audience" : "rabbitmq-proxy-client", + "id.token.claim" : "true", + "access.token.claim" : "true", + "included.custom.audience" : "rabbitmq", + "userinfo.token.claim" : "true" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "rabbitmq.tag:administrator", "profile", "offline_access", "microprofile-jwt" ] + }, { + "id" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "clientId" : "realm-management", + "name" : "${client_realm-management}", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : true, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : false, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "16f67c5c-f86b-4334-93f4-fd26356cbb24", + "clientId" : "security-admin-console", + "name" : "${client_security-admin-console}", + "rootUrl" : "${authAdminUrl}", + "baseUrl" : "/admin/test/console/", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "/admin/test/console/*" ], + "webOrigins" : [ "+" ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+", + "pkce.code.challenge.method" : "S256" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "protocolMappers" : [ { + "id" : "26e7deed-9c26-4a19-88fa-845bec2e5909", + "name" : "locale", + "protocol" : "openid-connect", + "protocolMapper" : 
"oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "locale", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "locale", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + } ], + "clientScopes" : [ { + "id" : "ec4e76a3-8597-41d4-aa7c-e4e1fee6a01a", + "name" : "profile", + "description" : "OpenID Connect built-in scope: profile", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true", + "consent.screen.text" : "${profileScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "db04690a-de25-4627-8e0c-78a018e86ce8", + "name" : "family name", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "lastName", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "family_name", + "jsonType.label" : "String" + } + }, { + "id" : "e5c72df5-7fa9-43c6-8f13-c7c2d73fe89a", + "name" : "profile", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "profile", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "profile", + "jsonType.label" : "String" + } + }, { + "id" : "5856da2a-7aa0-446c-be48-22112783e322", + "name" : "nickname", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "nickname", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "nickname", + "jsonType.label" : "String" + } + }, { + "id" : "5a7a208b-70eb-4f8f-b8ff-2115a615d696", + "name" : "gender", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "gender", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "gender", + "jsonType.label" : "String" + } + }, { + "id" : "b1cad309-90cd-4fed-8e62-c05dc4649b99", + "name" : "birthdate", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "birthdate", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "birthdate", + "jsonType.label" : "String" + } + }, { + "id" : "b3a08e61-6e08-4aa3-aa71-212bc13bff5d", + "name" : "picture", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "picture", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "picture", + "jsonType.label" : "String" + } + }, { + "id" : "bc59bb88-2cae-4c60-b09f-3c18fced603f", + "name" : "website", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "website", + "id.token.claim" : "true", + 
"access.token.claim" : "true", + "claim.name" : "website", + "jsonType.label" : "String" + } + }, { + "id" : "da32f964-8b0a-4cef-babc-8b90f31b20a7", + "name" : "given name", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "firstName", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "given_name", + "jsonType.label" : "String" + } + }, { + "id" : "eb6b8e8c-1e03-497a-80b4-3e9c26a86d9a", + "name" : "username", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "username", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "preferred_username", + "jsonType.label" : "String" + } + }, { + "id" : "334e47b2-5f74-4668-b04e-9ab55513c146", + "name" : "updated at", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "updatedAt", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "updated_at", + "jsonType.label" : "long" + } + }, { + "id" : "7dad1c25-6b18-4571-8d92-bfd698c5b94b", + "name" : "full name", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-full-name-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "true", + "access.token.claim" : "true", + "userinfo.token.claim" : "true" + } + }, { + "id" : "bf52c928-4d33-4c14-8e61-969b17bed2a5", + "name" : "zoneinfo", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "zoneinfo", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "zoneinfo", + "jsonType.label" : "String" + } + }, { + "id" : "3a03eb21-7c20-4150-87f2-ca94c9df601c", + "name" : "middle name", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "middleName", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "middle_name", + "jsonType.label" : "String" + } + }, { + "id" : "431c9682-e4ba-4348-9d07-f8d5415ca98b", + "name" : "locale", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "locale", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "locale", + "jsonType.label" : "String" + } + } ] + }, { + "id" : "b2ced9e2-289f-44b0-8567-5218a2eee3e6", + "name" : "rabbitmq.read:*/*", + "description" : "read all", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + }, { + "id" : "46f5e514-9283-4f03-b2af-a7da506f0cbc", + "name" : "offline_access", + "description" : "OpenID Connect built-in scope: offline_access", + "protocol" : "openid-connect", + "attributes" : { + "consent.screen.text" : "${offlineAccessScopeConsentText}", + "display.on.consent.screen" : "true" + } + }, { + "id" : "a4ffacea-34a8-4eb2-961f-af78e50b1140", + "name" : "address", + "description" : "OpenID Connect built-in scope: address", + 
"protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true", + "consent.screen.text" : "${addressScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "791b8544-4659-4d61-8fb8-8a18a687648d", + "name" : "address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-address-mapper", + "consentRequired" : false, + "config" : { + "user.attribute.formatted" : "formatted", + "user.attribute.country" : "country", + "user.attribute.postal_code" : "postal_code", + "userinfo.token.claim" : "true", + "user.attribute.street" : "street", + "id.token.claim" : "true", + "user.attribute.region" : "region", + "access.token.claim" : "true", + "user.attribute.locality" : "locality" + } + } ] + }, { + "id" : "7a2981c1-d606-43f6-acbf-76a8124e59b7", + "name" : "microprofile-jwt", + "description" : "Microprofile - JWT built-in scope", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "false" + }, + "protocolMappers" : [ { + "id" : "63a285df-f6d0-4e06-9f16-d4a578fce8bf", + "name" : "upn", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "username", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "upn", + "jsonType.label" : "String" + } + }, { + "id" : "1665df09-6855-420b-a649-0f0afe054b51", + "name" : "groups", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-realm-role-mapper", + "consentRequired" : false, + "config" : { + "multivalued" : "true", + "userinfo.token.claim" : "true", + "user.attribute" : "foo", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "groups", + "jsonType.label" : "String" + } + } ] + }, { + "id" : "b9c5af5d-59f3-445b-b899-c6574bc6191b", + "name" : "phone", + "description" : "OpenID Connect built-in scope: phone", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true", + "consent.screen.text" : "${phoneScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "e945c389-e953-431b-b3b4-882a50a8054e", + "name" : "phone number", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "phoneNumber", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "phone_number", + "jsonType.label" : "String" + } + }, { + "id" : "52b07524-1521-48d6-be23-779f8e1f8a67", + "name" : "phone number verified", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "phoneNumberVerified", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "phone_number_verified", + "jsonType.label" : "boolean" + } + } ] + }, { + "id" : "4dfc0d4d-654d-4e1c-8b58-64a0e1126a19", + "name" : "rabbitmq.configure:*/*", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + }, { + "id" : "93d154c5-e9fe-49ff-bca8-bc55a141a31e", + "name" : "rabbitmq.tag:management", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + }, { + "id" : 
"cc214fa3-0a7f-4390-9c4a-8ae14512e4a4", + "name" : "web-origins", + "description" : "OpenID Connect scope for add allowed web origins to the access token", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "false", + "display.on.consent.screen" : "false", + "consent.screen.text" : "" + }, + "protocolMappers" : [ { + "id" : "e2ac8ddb-9c19-4088-bc72-c4176e0fac3f", + "name" : "allowed web origins", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-allowed-origins-mapper", + "consentRequired" : false, + "config" : { } + } ] + }, { + "id" : "53b1a2b2-085e-4e36-bb81-c88e8d846439", + "name" : "email", + "description" : "OpenID Connect built-in scope: email", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true", + "consent.screen.text" : "${emailScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "e1a850d9-6372-4521-8d0d-acee25245c90", + "name" : "email", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "email", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "email", + "jsonType.label" : "String" + } + }, { + "id" : "43a7220e-d94d-43e6-a5e7-1a12dbbb4460", + "name" : "email verified", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "emailVerified", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "email_verified", + "jsonType.label" : "boolean" + } + } ] + }, { + "id" : "2b745afc-cb92-4ac3-b314-8ef1d638b4b1", + "name" : "roles", + "description" : "OpenID Connect scope for add user roles to the access token", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "false", + "display.on.consent.screen" : "true", + "consent.screen.text" : "${rolesScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "2cac7c2c-7c9f-44e6-a76e-b8d3fad627ea", + "name" : "realm roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-realm-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "realm_access.roles", + "jsonType.label" : "String", + "multivalued" : "true" + } + }, { + "id" : "91c4a9bd-a9b9-402b-9eb6-762362d18c6b", + "name" : "client roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-client-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "resource_access.${client_id}.roles", + "jsonType.label" : "String", + "multivalued" : "true" + } + } ] + }, { + "id" : "8b53c714-89cf-4cfd-ac76-1b45bd841b58", + "name" : "acr", + "description" : "OpenID Connect scope for add acr (authentication context class reference) to the token", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "false", + "display.on.consent.screen" : "false" + }, + "protocolMappers" : [ { + "id" : "6381b445-4f37-434e-b982-c34a6048913b", + "name" : "acr loa level", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-acr-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "true", + "access.token.claim" : "true", + "userinfo.token.claim" : "true" + } + } ] + }, { + "id" : 
"3331893c-c67c-4146-b00a-b4862200628c", + "name" : "role_list", + "description" : "SAML role list", + "protocol" : "saml", + "attributes" : { + "consent.screen.text" : "${samlRoleListScopeConsentText}", + "display.on.consent.screen" : "true" + }, + "protocolMappers" : [ { + "id" : "863078ec-d37c-46fc-a70e-3fe6340fbeec", + "name" : "role list", + "protocol" : "saml", + "protocolMapper" : "saml-role-list-mapper", + "consentRequired" : false, + "config" : { + "single" : "false", + "attribute.nameformat" : "Basic", + "attribute.name" : "Role" + } + } ] + }, { + "id" : "2010b133-4bfe-4f5f-8d1a-33b2a7ad2e60", + "name" : "rabbitmq.write:*/*", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + }, { + "id" : "f6e6dd62-22bf-4421-910e-e6070908764c", + "name" : "rabbitmq.tag:administrator", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + } ], + "defaultDefaultClientScopes" : [ "role_list", "profile", "email", "roles", "web-origins", "acr" ], + "defaultOptionalClientScopes" : [ "offline_access", "address", "phone", "microprofile-jwt" ], + "browserSecurityHeaders" : { + "contentSecurityPolicyReportOnly" : "", + "xContentTypeOptions" : "nosniff", + "xRobotsTag" : "none", + "xFrameOptions" : "SAMEORIGIN", + "contentSecurityPolicy" : "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection" : "1; mode=block", + "strictTransportSecurity" : "max-age=31536000; includeSubDomains" + }, + "smtpServer" : { }, + "eventsEnabled" : false, + "eventsListeners" : [ "jboss-logging" ], + "enabledEventTypes" : [ ], + "adminEventsEnabled" : false, + "adminEventsDetailsEnabled" : false, + "identityProviders" : [ ], + "identityProviderMappers" : [ ], + "components" : { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy" : [ { + "id" : "72a90aea-e732-467d-ade9-34e73c993209", + "name" : "Allowed Client Scopes", + "providerId" : "allowed-client-templates", + "subType" : "authenticated", + "subComponents" : { }, + "config" : { + "allow-default-scopes" : [ "true" ] + } + }, { + "id" : "5ed9083b-fdb5-4fc5-97b5-0b31189f8ad2", + "name" : "Max Clients Limit", + "providerId" : "max-clients", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { + "max-clients" : [ "200" ] + } + }, { + "id" : "4f81b0b9-6a42-4128-b7a1-4a814acf5875", + "name" : "Full Scope Disabled", + "providerId" : "scope", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { } + }, { + "id" : "2c06a7de-50eb-439f-b592-6bbda10d6af3", + "name" : "Allowed Protocol Mapper Types", + "providerId" : "allowed-protocol-mappers", + "subType" : "authenticated", + "subComponents" : { }, + "config" : { + "allowed-protocol-mapper-types" : [ "saml-role-list-mapper", "oidc-usermodel-property-mapper", "saml-user-property-mapper", "oidc-usermodel-attribute-mapper", "saml-user-attribute-mapper", "oidc-sha256-pairwise-sub-mapper", "oidc-address-mapper", "oidc-full-name-mapper" ] + } + }, { + "id" : "693f0625-c453-40c0-b38e-80b7b7deaefa", + "name" : "Consent Required", + "providerId" : "consent-required", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { } + }, { + "id" : "7c971805-14f2-4eb0-b2af-90c2db4c2e41", + "name" : "Allowed Client Scopes", + "providerId" : "allowed-client-templates", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { + "allow-default-scopes" : [ "true" ] + } + }, { + "id" : 
"e4376815-05e5-4675-9b11-6e18d5712849", + "name" : "Allowed Protocol Mapper Types", + "providerId" : "allowed-protocol-mappers", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { + "allowed-protocol-mapper-types" : [ "saml-user-attribute-mapper", "oidc-usermodel-attribute-mapper", "saml-role-list-mapper", "oidc-full-name-mapper", "oidc-usermodel-property-mapper", "oidc-sha256-pairwise-sub-mapper", "saml-user-property-mapper", "oidc-address-mapper" ] + } + }, { + "id" : "bbadf932-a286-4841-be1b-ed845e2131cb", + "name" : "Trusted Hosts", + "providerId" : "trusted-hosts", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { + "host-sending-registration-request-must-match" : [ "true" ], + "client-uris-must-match" : [ "true" ] + } + } ], + "org.keycloak.keys.KeyProvider" : [ { + "id" : "307ede55-7647-498c-b1ba-4be80fb609cc", + "name" : "rsa-generated", + "providerId" : "rsa-generated", + "subComponents" : { }, + "config" : { + "privateKey" : [ "MIIEogIBAAKCAQEAx5RCXJqU+e2646hYCduHBANJvpYN3Uv1gq15LjTlIXYqBCfm4SLUX1y1lOKDoOVl5j/flAgdgF4I9P3G4drc36NuaaocQguu3xWOsG9UZheiDD4wJANC7F6FMdWqSiBySA+EXyQ2zkoUBkNxKecqWhqVmaY9IVyxbQhdXsKQH0hBnvd3NQaem0RXuUeadUK5TGI9VqTe96sN7lLdE+T94n5cubqtBbc14kZ6YZsi1Pa+2xe/ZsDlXgMGAqTxgSy5l6cnxzxE+ndAp+pIR83BxfRFijo0LNKxyiZQ0X+QZ4Y3sTGadLCZzr6R/N/1QQnhOTXmTsVyNdmhQMr6A7lBFwIDAQABAoIBAAP7tYdbnnWOhRheF8O6mes+lY40OHqeNXwyMiT18UzFqvkCQt1jcJGmrAkYrD/d1DbQN4ogz1Xsiok5N2ryj033WRDK0F2RFiBlsb9buXeAKT/NTfCqD//fsxDXjtqD40QE60Nq2Z0sZVHqrquDbZj2xt2WL8omq3Pdot9tSqsVIQMbRIfH+I9+9kQ8Ob7t423I06AFiXJg5h3qjLx1jP6qQWsC4ippY6QmUve/d3PWqSd4GQ4sb2KQKvfT1VU4HPvIQdf+OurXaF/lPR+6XDU3RmA1qY61JS0O0ul+jTUGUHRxtgI51IU16+jcKiAzjWZ53HI9jLODP6gzyn+KC2ECgYEA/2SN4sNHM8aL/i7hdcbq+c+UFKK5AyWjewbMWGC2ZCzEzW/zqn2lmQHZK7hDAuAhvnEh6rvxxFKQw2PXb9N9IGIaAEJmGrJYg5QaLspNs5/4a3+GZh8lfZgwaEBetazrIFSOrVFhYb/pRz6m2x7oKNIpVXXUXdNr0DD4mDAwLqUCgYEAyA272k5rlTXj2IX/MvAtAuD2l7R6DfDirolzNdLVEXKLWpZwqdAuLWhDNGvNtmeAQcIuGFUvHhv5ZdnAwOKjbJsVDmr8vUCegCZJPuEGuOXjqZq+a1a84lhSSyWWiiz/yuIVh2Bnu8TD2Xb3igNFa0ipWga9nfZm7usifVumQAsCgYA/z+kfyrkkt6xM83vECNK3XmVajpn5rlLdr4IpZujLuN/nkNxqMgDJbUvM/7pGoqfrxKq70wAClLq1B2JR/57ZE4n5nJ2UeBFjtwKfxE6L3iGdAn0bURb1/avCsKq5bB5Hsrj/l0DkwqXP9liMkXlikbhgMRPB6cybdVD3/bpcPQKBgFxZIqeC3dzSNKymJokwH8cdbBfZwyIeWbXyU1G8UoksVDHsEnQBWt+xKpzPSvXxz14LslWfNRH0HeurWnRv6rre2BiAwMzoQIKtqdAx9nVyAecwPMi2EJl35f00i8qbPTU9qmyEzz35deM0LM7z9Z6xuyOIyw1ZSmjt+Ezf+t3DAoGAB7rT8E7lc036+L1RzKF+T8XwXReHcDkXhKt5V/RQBzRL5GY2mqQQ1rb037KlGXRHAwPkQqmYpMhY9ccRF5UqA05IT/KApvc36m7DAXPaNy1CwZHrr0l3rR6fhpUvtgrt1uyCXvaLJxUAd/5MTw2ffqOsMSxiuRCrpUC+dxCXtG8=" ], + "keyUse" : [ "SIG" ], + "certificate" : [ "MIIClzCCAX8CBgGAmWyoEzANBgkqhkiG9w0BAQsFADAPMQ0wCwYDVQQDDAR0ZXN0MB4XDTIyMDUwNjEyNDkzNloXDTMyMDUwNjEyNTExNlowDzENMAsGA1UEAwwEdGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMeUQlyalPntuuOoWAnbhwQDSb6WDd1L9YKteS405SF2KgQn5uEi1F9ctZTig6DlZeY/35QIHYBeCPT9xuHa3N+jbmmqHEILrt8VjrBvVGYXogw+MCQDQuxehTHVqkogckgPhF8kNs5KFAZDcSnnKloalZmmPSFcsW0IXV7CkB9IQZ73dzUGnptEV7lHmnVCuUxiPVak3verDe5S3RPk/eJ+XLm6rQW3NeJGemGbItT2vtsXv2bA5V4DBgKk8YEsuZenJ8c8RPp3QKfqSEfNwcX0RYo6NCzSscomUNF/kGeGN7ExmnSwmc6+kfzf9UEJ4Tk15k7FcjXZoUDK+gO5QRcCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAp2oy7aEJWJ1/BBUlFArzHqXZ/aYzuRUdyCTQn6+tjRsB9IrPwRI4p9sN4mCl0EUDvureNxCiY9XBHWmEteb5n952XWcSWi6tzAj5BQL4LHIFFzMLHr1+HYpmMwYHFgmR9MhoKFINEEGpOvCRokurEN2uU/tNcBX5HGnEWRc5ZNPRwQcJggnHHWxAmuNKIf73pPAECcffrTJO6cePt++TaU2j2qA6S5A/p/0Za10EtKcNeL1EIvwuFxewBjJQjXEqvmN4VlcVadj+pQ7AwtzujdtCuHvO/9zxRo5xq9KFl/VVMk4fUvwKA+vjcMwjDn2qXZwKmslX6YkV18gbmnSCOg==" ], + "priority" : [ "100" ] + } + }, 
{ + "id" : "1ae80491-123b-4d2d-8018-7ceb6971d07d", + "name" : "aes-generated", + "providerId" : "aes-generated", + "subComponents" : { }, + "config" : { + "kid" : [ "e43bf5a9-a6eb-46e6-a529-2174e96536fd" ], + "secret" : [ "2cQajcd-396pH2TSx6TC-Q" ], + "priority" : [ "100" ] + } + }, { + "id" : "b7b404ce-f1db-4fba-9037-d43bbd5fa584", + "name" : "hmac-generated", + "providerId" : "hmac-generated", + "subComponents" : { }, + "config" : { + "kid" : [ "2a9eb5b7-3df3-4fe8-980b-93808456c392" ], + "secret" : [ "ghLjXQyrSvgpx1wi2-YYgGSOUH2-FteA8BN_FujK8wywdQ-LiMd-WDfnw-F6GexWHqNrv95VRjHzvlojRbDluQ" ], + "priority" : [ "100" ], + "algorithm" : [ "HS256" ] + } + }, { + "id" : "f7ed445f-b2bd-4cbd-9985-eba8b36ed733", + "name" : "rsa-enc-generated", + "providerId" : "rsa-enc-generated", + "subComponents" : { }, + "config" : { + "privateKey" : [ "MIIEowIBAAKCAQEAjn/EV027lKv/NvYuCxCqUrAsVKTHpKLyFZCeJfrMcGn9XDTjMMYVx5/2wguPJFpX+nUzFSCjGuejRWamVFOoHiaXB+mrdPoAZbBNrgdeqwb2+7+G0iqsmU+Lfi/pxsubTTLZIZB1PBfV/4DmWz3vRU+uapiHo5pn+h0mbeOCLltuZmygiKlkzpTLfZxdmSXMqCwZv9J6Zdvrpio88Ca2F1dD4w3bPWvcbLmftAkpByucyHlo0v3jLPbq9LQF7fRD3WqHDGgQjLFJJKvN16tTnVkLWzFE4tumsQirCp8MoAuC3fKa5X5AKjZRarcrml9bmnFId+VGs764FY8THVuqtQIDAQABAoIBAC2FBOw6iYxRcSJWe5jPRwlI+7BCPwAJiTl4S/yn4/yY7vUwc86VyElPgRy1YpFjRq1cGOhL1651hkNaKhHP82lGPIKrkzi3z76sFfRcqFIL1IPQx7mFJkOHFHFHzu7RBZgggRnmsnxyxMpsm6lT3IYQkQ++D74kScis3STHQ0OZOVGkx9Sbznx1+i2rc7QUkWy8b7G8/ByVJsAu2SBLfbQ4wFAhJKtr+uDP6Tt2Cyn4GpzE68B5cA7htJI49uVvbGTBLInH5PXejXKyLfEJjbH2brx9hVStmaaV/sD1hroZ0sjhM54MamctPq2o0H33S2WFZJaUXwDWHzOSyUmY5SECgYEAxTJmhuCYz/P2AZP4DGDNwFHLar1K719m+RxUu0ndIDKeGhBtX3oSQXaKQS+JM+dmbF3M7ub18OgCHF+raPXAE6en1tdh5ZQJbRQEX9dnaXVpjCPFciZMgP0eJ1CpMuKo7qb3IXv7YtcN6pFjMNchz3MyVqbNBFN8ocQWoe4VJu0CgYEAuP3Z45ce6XI+7274yTXMEHqmCZU8krKanmppgIj39iJGbkEBo9QSB32XhG7dU0tWJhj4QtQcMKFCMcqckw9Jetyb68J2vreCVti9CqSrSLL4VFhMzVdoRPflzXB29gdJTe4TxBAiSryVICVblW3giM4UWhzNAuJOC8f5r1xrkekCgYBmKyXJreYeoBSOXr6+kw0nHnnZFLgVa4Vrfc08uBlUTEVz1Z0FQbbhqewZt+pLNRHxBWxfPtSf+2TUlJC3sdPRmySvgCodi2SS2jMmAPF4RzfnPsVWzhcHIZ2U2wq+7YZ/F4ylEZp+bFOue6M7s8q1s8aZ9JP2MNc67OCZB0R4RQKBgQC3YfZIVfuvwbA/3ItFs03KnDrSTx2P8vuxxJ03bRAZ8BpPm6OLi2QgBtFX2CsRMiKBe8lHPkt/rawX/dk/My1NXTo3+TuLjhDoFM05qsmdNMVVn37rJBXaIMCu6ikTdV+moDb56mCEI/PUvRPPyu+FznyAZAKbNEnYBfIvc3ezWQKBgEIw8dPvzmmbGvcb1K56VlyKTcejY0tPk8spRd+BKpi+VXlhV+1xLVmyB6lzveFm+0Da2arFGxFKgi/PfT5mXfheGyX2dxMbK5cMkYg7R+pHBbm/0XXAdQnaGZ4wz0O1R/DC8t0ZDp4wSlb3fVJpQoEKkqMY9bK2H4DvToOvLFck" ], + "keyUse" : [ "ENC" ], + "certificate" : [ "MIIClzCCAX8CBgGAmWypGDANBgkqhkiG9w0BAQsFADAPMQ0wCwYDVQQDDAR0ZXN0MB4XDTIyMDUwNjEyNDkzNloXDTMyMDUwNjEyNTExNlowDzENMAsGA1UEAwwEdGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAI5/xFdNu5Sr/zb2LgsQqlKwLFSkx6Si8hWQniX6zHBp/Vw04zDGFcef9sILjyRaV/p1MxUgoxrno0VmplRTqB4mlwfpq3T6AGWwTa4HXqsG9vu/htIqrJlPi34v6cbLm00y2SGQdTwX1f+A5ls970VPrmqYh6OaZ/odJm3jgi5bbmZsoIipZM6Uy32cXZklzKgsGb/SemXb66YqPPAmthdXQ+MN2z1r3Gy5n7QJKQcrnMh5aNL94yz26vS0Be30Q91qhwxoEIyxSSSrzderU51ZC1sxROLbprEIqwqfDKALgt3ymuV+QCo2UWq3K5pfW5pxSHflRrO+uBWPEx1bqrUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAThu6AxY4bqRbl4RtPH5Xcm033xwLssXmVXh01NuQUfowtkHi77lGUWXBmvwY6UIKC4D4eYrGVYVRTjHUEknmLrxSzUKi4g38kax4pBfwWWstWyVyo89dl7hA5ZlzdZ+SFB4HasGcXdhVFG2dwVvx6lnfxBIWZkgy5GAtIOpK4oIJOIutTiR1yuku4a9zkk6yumsxTKivAs1UMsvQXzcFSUDIrdj0vfdCAB6SvjpYvf8d0wO31bb+t3vWblv29RNV4qbuA/CZkrvWZZXZ+bal0qZd06Z+Hbc4iBgPSHs/HjyAJ8xac3ljg0IWolvZxRkFBR4VSB3pgCUKxf3V4mgbPw==" ], + "priority" : [ "100" ], + "algorithm" : [ "RSA-OAEP" ] + } + }, { + "id" : "66a592ec-8657-4f53-8870-1e1693ff266c", + "name" : "rsa", + "providerId" : "rsa", + 
"subComponents" : { }, + "config" : { + "privateKey" : [ "MIIEpAIBAAKCAQEA2dP+vRn+Kj+S/oGd49kq6+CKNAduCC1raLfTH7B3qjmZYm45 yDl+XmgK9CNmHXkho9qvmhdksdzDVsdeDlhKIdcIWadhqDzdtn1hj/22iUwrhH0b d475hlKcsiZ+oy/sdgGgAzvmmTQmdMqEXqV2B9q9KFBmo4Ahh/6+d4wM1rH9kxl0 RvMAKLe+daoIHIjok8hCO4cKQQEw/ErBe4SF2cr3wQwCfF1qVu4eAVNVfxfy/uEv G3Q7x005P3TcK+QcYgJxav3lictSi5dyWLgGQAvkknWitpRK8KVLypEj5WKej6CF 8nq30utn15FQg0JkHoqzwiCqqeen8GIPteI7VwIDAQABAoIBAFsB5FszYepa11o3 4zSPxgv4qyUjuYf3GfoNW0rRGp3nJLtoHAIYa0CcLX9kzsQfmLtxoY46mdppxr8Z 2qUZpBdRVO7ILNfyXhthdQKI2NuyFDhtYK1p8bx6BXe095HMcvm2ohjXzPdTP4Hq HrXAYXjUndUbClbjMJ82AnPF8pM70kBq7g733UqkdfrMuv6/d95Jiyw4cC7dGsI3 Ruz9DGhiAyCBtQ0tUB+6Kqn5DChSB+ccfMJjr6GnCVYmERxEQ5DJCTIX8am8C6KX mAxUwHMTsEGBU6GzhcUgAwUFEK3I9RptdlRFp7F8E/P0LxmPkFdgaBNUhrdnB7Y4 01n1R1kCgYEA/huFJgwVWSBSK/XIouFuQrxZOI9JbBbdmpFT7SBGCdFg26Or9y7j +N5HE7yuoZ9PkBh17zzosZdsJhGocRYvO0LSq8cXvKXKCwn2fTMM7uJ/oQe68sxG cF/fC0M/8LvRESWShH920rrERu0s161RuasdOPre0aXu7ZQzkQ68O6MCgYEA23NO DHKNblBOdFEWsvotLqV8DrIbQ4le7sSgQr56/bdn9GScZk2JU0f+pqzpiGUy9bIt 6uujvt5ar0IvpIQVdjf3dbp6Fy+Dwhd4yTR4dMdDECest7jL++/21x8Y0ywFhBIK yEd+QxpOLXP6qaSKTGxL2rnTXRjl8/g629xQPL0CgYEAkNNOh+jLIgjxzGxA9dRV 62M91qaTyi8eDkJV+wgx4taaxZP7Jt5qwCSvjegz/5m01wOZ88hbNxx+XxQhVJK4 SKZFO/I07Sfwh2oeOi0maeBdrYGiY09ZtiJuFRU3FBV3irZHU4zyRBh+VY5HyITX 12JXPWp+JC7WhkG5QiuLzNECgYEA15OBzICLpx6Es4clAVT6JaSzJcyZM9MyyuOl e2ubbrpJCK/9ZBIvIPzMj/e0wiSH1wzeRrSM+ud7tkcSfk6ytptsIN67KSOoD3b3 VNCStEU7ABe5eBG1cRzeI52MyYWpNYBzzyNMSacBvWz9hMD6ivCn44pAtGfNHclw KKNYvxECgYBOamf25md9Jy6rtQsJVEJWw+8sB4lBlKEEadc5qekR7ZQ0hwj8CnTm WOo856ynI28Sog62iw8F/do/z0B29RuGuxw+prkBkn3lg/VQXEitzqcYvota6osa 8XSfaPiTyQwWpzbFNZzzemlTsIDiF3UqwkHvWaMYPDf4Ng3cokPPxw==" ], + "certificate" : [ "MIICmDCCAYACCQC7YJWOo6LVaDANBgkqhkiG9w0BAQsFADAOMQwwCgYDVQQDDANq d3QwHhcNMjIwNTA2MTQzNjQ5WhcNMjIwNjA1MTQzNjQ5WjAOMQwwCgYDVQQDDANq d3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ0/69Gf4qP5L+gZ3j 2Srr4Io0B24ILWtot9MfsHeqOZlibjnIOX5eaAr0I2YdeSGj2q+aF2Sx3MNWx14O WEoh1whZp2GoPN22fWGP/baJTCuEfRt3jvmGUpyyJn6jL+x2AaADO+aZNCZ0yoRe pXYH2r0oUGajgCGH/r53jAzWsf2TGXRG8wAot751qggciOiTyEI7hwpBATD8SsF7 hIXZyvfBDAJ8XWpW7h4BU1V/F/L+4S8bdDvHTTk/dNwr5BxiAnFq/eWJy1KLl3JY uAZAC+SSdaK2lErwpUvKkSPlYp6PoIXyerfS62fXkVCDQmQeirPCIKqp56fwYg+1 4jtXAgMBAAEwDQYJKoZIhvcNAQELBQADggEBACJlWtWnQqepYiFCijVgy/eM5KL0 rFZOZ6HNefoJTrYY1QYZrWxRz3M4u9JpUy4fBvGHxElBElcr3fXLXDytH9EwMJm1 E5x3o3qkQyWdXYGW6ZF58dklcJTdejOxEO373qpywVwbCFGiuIt7s5v4v+r2HOg3 D4elb2bqxmRim04xIkVZufKo+h6a8dBb5JEU3UaxyGDBR0IdyjhyBo1+HhH+RqZs xQhQ7DhlIGWUYZNCu13fb1GNSMiNqspKnMpFdQ4Bfpsb7vOeEK+aqJjCKcYbuGa6 BiwBjbKYyEF5r01Tob50dcVPfIGOqO0lQ3IsV31n9LSoAAtaVqioPK1rvDo=" ], + "active" : [ "true" ], + "priority" : [ "101" ], + "enabled" : [ "true" ], + "algorithm" : [ "RS256" ] + } + } ] + }, + "internationalizationEnabled" : false, + "supportedLocales" : [ ], + "authenticationFlows" : [ { + "id" : "d3892413-9ee0-444f-ad3c-1bdbc2c69f2d", + "alias" : "Account verification options", + "description" : "Method with which to verity the existing account", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "idp-email-verification", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "ALTERNATIVE", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Verify Existing Account by Re-authentication", + "userSetupAllowed" : false + } ] + }, { + "id" : 
"9bfd53e9-fdce-4fdc-8f94-285f2c494ffb", + "alias" : "Authentication Options", + "description" : "Authentication options.", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "basic-auth", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "basic-auth-otp", + "authenticatorFlow" : false, + "requirement" : "DISABLED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "auth-spnego", + "authenticatorFlow" : false, + "requirement" : "DISABLED", + "priority" : 30, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "6910e2ac-cbef-4ed9-a2dd-9faa7c13943e", + "alias" : "Browser - Conditional OTP", + "description" : "Flow to determine if the OTP is required for the authentication", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "conditional-user-configured", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "auth-otp-form", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "d9923e95-1c00-491f-b088-aad288614752", + "alias" : "Direct Grant - Conditional OTP", + "description" : "Flow to determine if the OTP is required for the authentication", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "conditional-user-configured", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "direct-grant-validate-otp", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "9f9fb926-3d32-42c7-863d-77c7b81b5728", + "alias" : "First broker login - Conditional OTP", + "description" : "Flow to determine if the OTP is required for the authentication", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "conditional-user-configured", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "auth-otp-form", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "ae2a6d0b-505c-4981-837a-10a4eba16598", + "alias" : "Handle Existing Account", + "description" : "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "idp-confirm-link", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Account verification options", + "userSetupAllowed" : false + } ] + }, { + "id" : 
"98987611-236b-4bd8-abfc-d6784b2a2d4e", + "alias" : "Reset - Conditional OTP", + "description" : "Flow to determine if the OTP should be reset or not. Set to REQUIRED to force.", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "conditional-user-configured", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "reset-otp", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "5dd18602-1cb5-4198-807f-551e22f1e36a", + "alias" : "User creation or linking", + "description" : "Flow for the existing/non-existing user alternatives", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticatorConfig" : "create unique user config", + "authenticator" : "idp-create-user-if-unique", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "ALTERNATIVE", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Handle Existing Account", + "userSetupAllowed" : false + } ] + }, { + "id" : "278611dd-0338-4a56-97c0-da69fa64bc72", + "alias" : "Verify Existing Account by Re-authentication", + "description" : "Reauthentication of existing account", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "idp-username-password-form", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "CONDITIONAL", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "First broker login - Conditional OTP", + "userSetupAllowed" : false + } ] + }, { + "id" : "af4c356b-f2bb-4712-bfe1-b4127a2c378c", + "alias" : "browser", + "description" : "browser based authentication", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "auth-cookie", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "auth-spnego", + "authenticatorFlow" : false, + "requirement" : "DISABLED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "identity-provider-redirector", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 25, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "ALTERNATIVE", + "priority" : 30, + "autheticatorFlow" : true, + "flowAlias" : "forms", + "userSetupAllowed" : false + } ] + }, { + "id" : "2cc7bcd2-9549-44ed-9f44-a4f7a7710b81", + "alias" : "clients", + "description" : "Base authentication for clients", + "providerId" : "client-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "client-secret", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "client-jwt", + "authenticatorFlow" : false, + "requirement" : 
"ALTERNATIVE", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "client-secret-jwt", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 30, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "client-x509", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 40, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "a3634dc8-81e8-4834-941e-e61cb8131c13", + "alias" : "direct grant", + "description" : "OpenID Connect Resource Owner Grant", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "direct-grant-validate-username", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "direct-grant-validate-password", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "CONDITIONAL", + "priority" : 30, + "autheticatorFlow" : true, + "flowAlias" : "Direct Grant - Conditional OTP", + "userSetupAllowed" : false + } ] + }, { + "id" : "ea8179dc-e31b-49b1-81da-025ad9c71f47", + "alias" : "docker auth", + "description" : "Used by Docker clients to authenticate against the IDP", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "docker-http-basic-authenticator", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "65280dd9-8119-4880-ab88-75afdd4cd509", + "alias" : "first broker login", + "description" : "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticatorConfig" : "review profile config", + "authenticator" : "idp-review-profile", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "User creation or linking", + "userSetupAllowed" : false + } ] + }, { + "id" : "415ae0cc-833a-4292-8dd6-aef970c78d35", + "alias" : "forms", + "description" : "Username, password, otp and other auth forms.", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "auth-username-password-form", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "CONDITIONAL", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Browser - Conditional OTP", + "userSetupAllowed" : false + } ] + }, { + "id" : "1b0ed524-0415-4a6c-a381-99b8fb5e63f9", + "alias" : "http challenge", + "description" : "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "no-cookie-redirect", + "authenticatorFlow" : false, + 
"requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Authentication Options", + "userSetupAllowed" : false + } ] + }, { + "id" : "7d36ca32-300c-4fe2-ba56-01f1d617cba9", + "alias" : "registration", + "description" : "registration flow", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "registration-page-form", + "authenticatorFlow" : true, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : true, + "flowAlias" : "registration form", + "userSetupAllowed" : false + } ] + }, { + "id" : "ec9f9e5c-f4dd-4e0a-bbcd-b9f13f10a5fc", + "alias" : "registration form", + "description" : "registration form", + "providerId" : "form-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "registration-user-creation", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "registration-profile-action", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 40, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "registration-password-action", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 50, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "registration-recaptcha-action", + "authenticatorFlow" : false, + "requirement" : "DISABLED", + "priority" : 60, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "18d8b8ff-e773-4035-a0b9-5875e9e1bd1b", + "alias" : "reset credentials", + "description" : "Reset credentials for a user if they forgot their password or something", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "reset-credentials-choose-user", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "reset-credential-email", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "reset-password", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 30, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "CONDITIONAL", + "priority" : 40, + "autheticatorFlow" : true, + "flowAlias" : "Reset - Conditional OTP", + "userSetupAllowed" : false + } ] + }, { + "id" : "1197b3a4-eee8-42c1-a824-db7c960dcc3f", + "alias" : "saml ecp", + "description" : "SAML ECP Profile Authentication Flow", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "http-basic-authenticator", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + } ], + "authenticatorConfig" : [ { + "id" : "be159ad0-1779-4672-a4da-e92562f48263", + "alias" : "create unique user config", + "config" : { + "require.password.update.after.registration" : "false" + } + }, { + "id" : "8bf31c93-626e-4c3f-9f2d-2767ec16f08c", + "alias" : "review 
profile config", + "config" : { + "update.profile.on.first.login" : "missing" + } + } ], + "requiredActions" : [ { + "alias" : "CONFIGURE_TOTP", + "name" : "Configure OTP", + "providerId" : "CONFIGURE_TOTP", + "enabled" : true, + "defaultAction" : false, + "priority" : 10, + "config" : { } + }, { + "alias" : "terms_and_conditions", + "name" : "Terms and Conditions", + "providerId" : "terms_and_conditions", + "enabled" : false, + "defaultAction" : false, + "priority" : 20, + "config" : { } + }, { + "alias" : "UPDATE_PASSWORD", + "name" : "Update Password", + "providerId" : "UPDATE_PASSWORD", + "enabled" : true, + "defaultAction" : false, + "priority" : 30, + "config" : { } + }, { + "alias" : "UPDATE_PROFILE", + "name" : "Update Profile", + "providerId" : "UPDATE_PROFILE", + "enabled" : true, + "defaultAction" : false, + "priority" : 40, + "config" : { } + }, { + "alias" : "VERIFY_EMAIL", + "name" : "Verify Email", + "providerId" : "VERIFY_EMAIL", + "enabled" : true, + "defaultAction" : false, + "priority" : 50, + "config" : { } + }, { + "alias" : "delete_account", + "name" : "Delete Account", + "providerId" : "delete_account", + "enabled" : false, + "defaultAction" : false, + "priority" : 60, + "config" : { } + }, { + "alias" : "update_user_locale", + "name" : "Update User Locale", + "providerId" : "update_user_locale", + "enabled" : true, + "defaultAction" : false, + "priority" : 1000, + "config" : { } + } ], + "browserFlow" : "browser", + "registrationFlow" : "registration", + "directGrantFlow" : "direct grant", + "resetCredentialsFlow" : "reset credentials", + "clientAuthenticationFlow" : "clients", + "dockerAuthenticationFlow" : "docker auth", + "attributes" : { + "cibaBackchannelTokenDeliveryMode" : "poll", + "cibaExpiresIn" : "120", + "cibaAuthRequestedUserHint" : "login_hint", + "oauth2DeviceCodeLifespan" : "600", + "clientOfflineSessionMaxLifespan" : "0", + "oauth2DevicePollingInterval" : "5", + "clientSessionIdleTimeout" : "0", + "parRequestUriLifespan" : "60", + "clientSessionMaxLifespan" : "0", + "clientOfflineSessionIdleTimeout" : "0", + "cibaInterval" : "5", + "realmReusableOtpCode" : "false" + }, + "keycloakVersion" : "20.0.5", + "userManagedAccessAllowed" : false, + "clientProfiles" : { + "profiles" : [ ] + }, + "clientPolicies" : { + "policies" : [ ] + } +} diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak.p12 b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak.p12 new file mode 100644 index 000000000000..015ebc99a18b Binary files /dev/null and b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak.p12 differ diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_certificate.pem b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_certificate.pem new file mode 100644 index 000000000000..7c11ae263701 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_certificate.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDzDCCArSgAwIBAgIBCzANBgkqhkiG9w0BAQsFADBMMTswOQYDVQQDDDJUTFNH +ZW5TZWxmU2lnbmVkdFJvb3RDQSAyMDIzLTExLTE2VDEyOjI0OjQ3LjI4OTA5MzEN +MAsGA1UEBwwEJCQkJDAeFw0yNDAxMTMxMTU4MzRaFw0zNDAxMTAxMTU4MzRaMCcx +FDASBgNVBAMMC2RldmtleWNsb2FrMQ8wDQYDVQQKDAZzZXJ2ZXIwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCt5/wvIdHr5FJvIWNiwoaict0G/pkHnUYs +VIHjLjeR59q5qAojrKRqs32D9eeKqikHv/xTp9dpOa8qwpho11bSs/HgnXZKie1F 
+fQG/8arfipHkODn10VoNtZ0Revu5on9h67kkgyjCk4WKG34o7ye4qacJhOAGV8LU +HJXyA1kPFCtZzvcobYwbpPENpV7MWfxqa9gIV6IZln7EttzcraDfsYIr44uLbfeC +2BcvJZP+JFXmTBZz0fbMfLsv6z5KF0pH4XKInINan9ajGLnE5SbvqMLHQ4KANwMP +Q8OLtU4is6wNAgAQmCYphmdd5k3m2fwXN+YeMohBGjdKz/cxnx7LAgMBAAGjgd0w +gdowCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG +CCsGAQUFBwMCMC4GA1UdEQQnMCWCC2RldmtleWNsb2FrggtkZXZrZXljbG9ha4IJ +bG9jYWxob3N0MB0GA1UdDgQWBBTaLvUjHU7ggs6aF7YUycglgDeQCjAfBgNVHSME +GDAWgBS2Icxjr1ucGCIxikeSG9igJf558jAxBgNVHR8EKjAoMCagJKAihiBodHRw +Oi8vY3JsLXNlcnZlcjo4MDAwL2Jhc2ljLmNybDANBgkqhkiG9w0BAQsFAAOCAQEA +i1wni0pi4RY1Txu3Lef5vd9FzUqKYBi2bcrMVliAmmCjKriwSP0/zd9LgoyC57/3 +WUZ3cLMSdmMc8go1QPEBmkwjtkw0HACN+XXOmocRimewmBhCQ5Lh90xuFJlk7snN +FbwQmohE6w+DvQAy8vseHS6WKeVsMCSPtQk2ID9/DEhSndQDJeYDpjrwUOn2B+Kf +WbHLryT//sk6xMq4++ljQEld1NU1z8bo1a5D2juH9724KlzZcE70nJOaCGLPdamt +e+p0kw7xlQH67+R1IaYgDNand62P7b+KZZML0B88QUC166ZeablncyFca5SMCYlS +z6HFKTiVN19ZgqC084RQOw== +-----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_key.pem b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_key.pem new file mode 100644 index 000000000000..9c2f3b11fda8 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCt5/wvIdHr5FJv +IWNiwoaict0G/pkHnUYsVIHjLjeR59q5qAojrKRqs32D9eeKqikHv/xTp9dpOa8q +wpho11bSs/HgnXZKie1FfQG/8arfipHkODn10VoNtZ0Revu5on9h67kkgyjCk4WK +G34o7ye4qacJhOAGV8LUHJXyA1kPFCtZzvcobYwbpPENpV7MWfxqa9gIV6IZln7E +ttzcraDfsYIr44uLbfeC2BcvJZP+JFXmTBZz0fbMfLsv6z5KF0pH4XKInINan9aj +GLnE5SbvqMLHQ4KANwMPQ8OLtU4is6wNAgAQmCYphmdd5k3m2fwXN+YeMohBGjdK +z/cxnx7LAgMBAAECggEAUJtrx8jsTWOqVfpq37b8wsVs77YI5n+erCOLhOrCNS37 +svxRntGB8Dc8IXNUnHBczkvNwFTWwt5K9A34qPfPNaDUp2E1GD2wLfpQWlDA/BZ1 +owvwyaD9FcetgxG3JgdM9e1WBreIqA/K4QX3Ry/7AFwaoY3mbOo20yxg0Cl/GT5j +DtI+RpNvA480yQWTjJBE3bk9S/9k38bAtRT9C6ArQqhkasXxBbuZiyb5GOvBZ3iN +hhMvgXrj2g038jbElWlo/uSLcsRadPnLAvXXROKrqLMnKTkv3BnqmPXQXdP0eNet +XoXAxbb8FbYtTxHceiyfwTOqtvUv+07wh8tiZDiUKQKBgQDg0QPFm2hqpMWPhsal +fwtVa7QMsF6MVaLB2eb3onZoQxZpWOXyeA7hfJz4iGiObM12EF98k+9G3Mg88gEI +RV+ENE7XI/YZcfQQsT4JNtR9CS0xDihSrmr80ndFburMT3oucpw/76AOs38Oof1q +cs3VLCYAIMHm6SHm1ylTN3bQvQKBgQDGBzPerVMnwntLPie1gAIYDIvlSim81nhm +Aa2jCIAgtogTXJ/WCtznvAxhDUsBG5Mn4oaS38oaBXSlnp0Vuz9ibZCMPxc2hADU +1GSpXl/xv9yC2HHjMeWWWPoF9Qtpo9x1XipkxYIeJYTulqaPXTd6YGSdQ7NENTuX +f0o5Sgy6JwKBgQCYKT+5To8kpvNESn9G4i8EmMobUIAd0ZRASkCGWQJ1XPrdQJsa +OmIwAcyodoL16vRBNaG7StFHkAVDIrTKKVIVw+Wcva1C2ZrMdXo8eEznd/+LVT67 +f2vQRI8PgpwOvrg+mbnhmEknyht0BvXjR2LDJodtzL4QkLguanCA72hOrQKBgBQw +w88eaO4S3DNNwQq2ZIBDNzhHmX3ReDEeVq/avAWZ1sHynbFbJi7Sc4iprE4Om7Bj +Xkk3XAnPKJeCVo3Sq6HDfgtum1VJnDQW+7RxFOM3JqqImwQJIFl19PgKhgFdXarx +0Oy5XozoUmdpIM5ZOMDXdyq5rltz+gF2TwEMVcPdAoGAVAr+aCEmFmvo1B+VoUYg +wMef7vU8CmSVXWFN9G+7vsJ5xbk6D1VLQf8+hTO2VvOjWfjMqxPuOT5Bz6lsKNh6 +wgzXljUx0ZB/9c6rXCf06IT+CvFWWr2j1hgaCSmDQ6x/FV0H6tV4oSIAztEuJfGk +Hl/FLnRCHMe1OqG/Gh/WjTQ= +-----END PRIVATE KEY----- diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/enabled_plugins b/deps/rabbitmq_management/selenium/test/multi-oauth/enabled_plugins new file mode 100644 index 000000000000..c91f7ba880c3 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/enabled_plugins @@ -0,0 +1,16 @@ +[accept,amqp10_client,amqp_client,base64url,cowboy,cowlib,eetcd,gun,jose, + oauth2_client,prometheus,rabbitmq_amqp1_0,rabbitmq_auth_backend_cache, + 
rabbitmq_auth_backend_http,rabbitmq_auth_backend_ldap, + rabbitmq_auth_backend_oauth2,rabbitmq_auth_mechanism_ssl,rabbitmq_aws, + rabbitmq_consistent_hash_exchange,rabbitmq_event_exchange, + rabbitmq_federation,rabbitmq_federation_management, + rabbitmq_jms_topic_exchange,rabbitmq_management,rabbitmq_management_agent, + rabbitmq_mqtt,rabbitmq_peer_discovery_aws,rabbitmq_peer_discovery_common, + rabbitmq_peer_discovery_consul,rabbitmq_peer_discovery_etcd, + rabbitmq_peer_discovery_k8s,rabbitmq_prometheus,rabbitmq_random_exchange, + rabbitmq_recent_history_exchange,rabbitmq_sharding,rabbitmq_shovel, + rabbitmq_shovel_management,rabbitmq_stomp,rabbitmq_stream, + rabbitmq_stream_common,rabbitmq_stream_management,rabbitmq_top, + rabbitmq_tracing,rabbitmq_trust_store,rabbitmq_web_dispatch, + rabbitmq_web_mqtt,rabbitmq_web_mqtt_examples,rabbitmq_web_stomp, + rabbitmq_web_stomp_examples]. diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker b/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker new file mode 100644 index 000000000000..3df42ae5f91d --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker @@ -0,0 +1 @@ +export OAUTH_SERVER_CONFIG_BASEDIR=/config diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.devkeycloak b/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.devkeycloak new file mode 100644 index 000000000000..8b1ad3234463 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.devkeycloak @@ -0,0 +1,2 @@ +export DEVKEYCLOAK_URL=https://devkeycloak:8442/realms/dev +export DEVKEYCLOAK_CA_CERT=/config/oauth/keycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.prodkeycloak b/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.prodkeycloak new file mode 100644 index 000000000000..e929f0246e9f --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.prodkeycloak @@ -0,0 +1,2 @@ +export PRODKEYCLOAK_URL=https://prodkeycloak:8443/realms/prod +export PRODKEYCLOAK_CA_CERT=/config/oauth/keycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.local b/deps/rabbitmq_management/selenium/test/multi-oauth/env.local new file mode 100644 index 000000000000..d61f528c4e4a --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/env.local @@ -0,0 +1 @@ +export OAUTH_SERVER_CONFIG_BASEDIR=deps/rabbitmq_management/selenium/test diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.devkeycloak b/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.devkeycloak new file mode 100644 index 000000000000..a1e2d5d596c2 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.devkeycloak @@ -0,0 +1,2 @@ +export DEVKEYCLOAK_URL=https://localhost:8442/realms/dev +export DEVKEYCLOAK_CA_CERT=deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.prodkeycloak b/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.prodkeycloak new file mode 100644 index 000000000000..e267b558cd49 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.prodkeycloak @@ -0,0 +1,2 @@ +export PRODKEYCLOAK_URL=https://localhost:8443/realms/prod +export PRODKEYCLOAK_CA_CERT=deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem diff --git 
a/deps/rabbitmq_management/selenium/test/multi-oauth/imports/users.json b/deps/rabbitmq_management/selenium/test/multi-oauth/imports/users.json new file mode 100644 index 000000000000..b17df9ea0c68 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/imports/users.json @@ -0,0 +1,71 @@ +{ + "users": [ + { + "name": "guest", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "administrator" + ], + "limits": {} + }, + { + "name": "administrator-only", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "administrator" + ], + "limits": {} + }, + { + "name": "management-only", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "management" + ], + "limits": {} + }, + { + "name": "management", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "management" + ], + "limits": {} + }, + { + "name": "monitoring-only", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "monitoring" + ], + "limits": {} + } + ], + "vhosts": [ + { + "name": "/" + } + ], + "permissions": [ + { + "user": "guest", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" + }, + { + "user": "management", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" + } + ] + +} diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem new file mode 100644 index 000000000000..cd37bea304f5 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIJAOA06nrAwraBMA0GCSqGSIb3DQEBCwUAMEwxOzA5BgNV +BAMMMlRMU0dlblNlbGZTaWduZWR0Um9vdENBIDIwMjMtMTEtMTZUMTI6MjQ6NDcu +Mjg5MDkzMQ0wCwYDVQQHDAQkJCQkMB4XDTIzMTExNjExMjQ0N1oXDTMzMTExMzEx +MjQ0N1owTDE7MDkGA1UEAwwyVExTR2VuU2VsZlNpZ25lZHRSb290Q0EgMjAyMy0x +MS0xNlQxMjoyNDo0Ny4yODkwOTMxDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDWJrvvUvpkiAhvIiciuTbFHRMC7VdOXdIM3y3I +Vt56Voj3dkCVitFcvTc+pkuqoQUaWRTc5M+875CaQSRIDfVyFTIGTyVXv6cZRcoz +0gcmYvopIJ4Wi5/xG9Qp8uJMtr+UBJ57ez6Urau/L3zETAVZA+y1bTylAlh4tjMH +I24bvyy4yNQbPtG4y5F9x484fn3H4x7lf6O/Xulcvy8vL1kyc/EgrF4fpjogwj58 +eQ5HLwbAlMRRxXxXX2U5tXlrv475WItp/1mhZ+j2yCMKB4tJ8tXbtpgou0JDtlN0 +8Jwm3+d5a6PxqynmgRAXStZ4Fda93Pa3FJfw1u63JrmOprG9AgMBAAGjYDBeMA8G +A1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBS2Icxjr1ucGCIx +ikeSG9igJf558jAfBgNVHSMEGDAWgBS2Icxjr1ucGCIxikeSG9igJf558jANBgkq +hkiG9w0BAQsFAAOCAQEAR0iG00uE2GnoWtaXEHYJTdvBBcStBB8qnRk19Qu/b8qd +HAhRGb31IiuYzNJxLxhOtXWQMKvsKPAKpPXP3c5XVAf2O156GoXEPkKQktF738Pp +rRlrQPqU9Qpm84rMC54EB7coxEs7HMx4do/kNaVPdqq++JIEAcWOEVKfudN+8TMR +XyUJT54jBacsTpAZNfY6boJmuQ+G6tkpQvlHOU6388IFuLPkYRO7h7CHVbDsMEXD +Ptg3PCK97nCVgs4xfQGR7nT2pawfEUQVMon/XShtXY0RIKpynwrgICHDdvMXRXlG +a4haA7sz8Wyroy6Ub5+X3s4YRumSQrhiwRzqU+f75A== +-----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/prod-realm.json b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/prod-realm.json new file mode 100644 index 000000000000..3bd835e33f5d --- /dev/null +++ 
b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/prod-realm.json @@ -0,0 +1,2690 @@ +{ + "id" : "prod", + "realm" : "prod", + "notBefore" : 0, + "defaultSignatureAlgorithm" : "RS256", + "revokeRefreshToken" : false, + "refreshTokenMaxReuse" : 0, + "accessTokenLifespan" : 300, + "accessTokenLifespanForImplicitFlow" : 900, + "ssoSessionIdleTimeout" : 1800, + "ssoSessionMaxLifespan" : 36000, + "ssoSessionIdleTimeoutRememberMe" : 0, + "ssoSessionMaxLifespanRememberMe" : 0, + "offlineSessionIdleTimeout" : 2592000, + "offlineSessionMaxLifespanEnabled" : false, + "offlineSessionMaxLifespan" : 5184000, + "clientSessionIdleTimeout" : 0, + "clientSessionMaxLifespan" : 0, + "clientOfflineSessionIdleTimeout" : 0, + "clientOfflineSessionMaxLifespan" : 0, + "accessCodeLifespan" : 60, + "accessCodeLifespanUserAction" : 300, + "accessCodeLifespanLogin" : 1800, + "actionTokenGeneratedByAdminLifespan" : 43200, + "actionTokenGeneratedByUserLifespan" : 300, + "oauth2DeviceCodeLifespan" : 600, + "oauth2DevicePollingInterval" : 5, + "enabled" : true, + "sslRequired" : "external", + "registrationAllowed" : false, + "registrationEmailAsUsername" : false, + "rememberMe" : false, + "verifyEmail" : false, + "loginWithEmailAllowed" : true, + "duplicateEmailsAllowed" : false, + "resetPasswordAllowed" : false, + "editUsernameAllowed" : false, + "bruteForceProtected" : false, + "permanentLockout" : false, + "maxFailureWaitSeconds" : 900, + "minimumQuickLoginWaitSeconds" : 60, + "waitIncrementSeconds" : 60, + "quickLoginCheckMilliSeconds" : 1000, + "maxDeltaTimeSeconds" : 43200, + "failureFactor" : 30, + "roles" : { + "realm" : [ { + "id" : "2b61bc53-60cc-48fc-b89b-ee3e80204895", + "name" : "rabbitmq.tag:management", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "c28bf7ca-9fb7-485c-a68b-d5fb4bd844fb", + "name" : "rabbitmq.tag:administrator", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "d2b776e4-8c4d-4168-9d52-76aaa115ee70", + "name" : "uma_authorization", + "description" : "${role_uma_authorization}", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "6faef857-1c9b-4474-ba01-ad1946d243d6", + "name" : "rabbitmq-proxy-client-role", + "description" : "", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "0a838a26-4908-4750-a1d0-7cc322c698ae", + "name" : "producer", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "dd893988-6661-4849-a0f1-1cd1a63b51a5", + "name" : "rabbitmq.read:*/*", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "6feb7afe-2fa8-4569-8fb8-e50c2a4302d2", + "name" : "offline_access", + "description" : "${role_offline-access}", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "af1bc955-6d4d-42e9-b0d4-343e7eb075d0", + "name" : "rabbitmq-role", + "description" : "", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "77e9131f-1eb3-45a3-9f3b-f74991a99def", + "name" : "rabbitmq.configure:*/*", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "97bb2b6b-33ff-404e-b754-351604d9f34c", + "name" : "rabbitmq", + "description" : "", + "composite" : false, + 
"clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "b84ae322-7112-41d1-8a3f-0009447ded47", + "name" : "default-roles-test", + "description" : "${role_default-roles}", + "composite" : true, + "composites" : { + "realm" : [ "offline_access", "uma_authorization" ], + "client" : { + "account" : [ "view-profile", "manage-account" ] + } + }, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "5516969b-be85-490c-9715-9c1186075d60", + "name" : "rabbitmq-management", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + }, { + "id" : "216cfa85-9b8a-4fc0-bee1-814e2978d82b", + "name" : "rabbitmq.write:*/*", + "composite" : false, + "clientRole" : false, + "containerId" : "prod", + "attributes" : { } + } ], + "client" : { + "realm-management" : [ { + "id" : "6721a146-c9e3-4a24-9d26-6dbc7e3aae1f", + "name" : "manage-authorization", + "description" : "${role_manage-authorization}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "147b7e9a-d884-42b7-a970-245c2b5590b0", + "name" : "view-clients", + "description" : "${role_view-clients}", + "composite" : true, + "composites" : { + "client" : { + "realm-management" : [ "query-clients" ] + } + }, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "c25f4711-ee9b-4457-9636-7dacffceb676", + "name" : "view-events", + "description" : "${role_view-events}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "1ce1e692-4dae-498f-8ac6-ca119eb329ef", + "name" : "query-realms", + "description" : "${role_query-realms}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "78cd990d-68bd-4e71-9561-5e4412bcbfb7", + "name" : "create-client", + "description" : "${role_create-client}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "c5bc4413-71cb-43f1-b48b-c9428aed47cd", + "name" : "query-clients", + "description" : "${role_query-clients}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "21b62a0b-62fd-4a39-8b97-8ce8b89ad9d8", + "name" : "view-identity-providers", + "description" : "${role_view-identity-providers}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "9df26a88-36e9-4670-8b63-dc4e57ebcce8", + "name" : "manage-clients", + "description" : "${role_manage-clients}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "02bc109a-d318-4196-aa15-171651685b50", + "name" : "view-users", + "description" : "${role_view-users}", + "composite" : true, + "composites" : { + "client" : { + "realm-management" : [ "query-users", "query-groups" ] + } + }, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "97499d4c-fb81-4ee6-bd5e-6eb198424654", + "name" : "manage-events", + "description" : "${role_manage-events}", + "composite" : false, + "clientRole" : true, + "containerId" : 
"09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "bf64efcd-f8a4-47e6-bc5e-0ff29635b885", + "name" : "realm-admin", + "description" : "${role_realm-admin}", + "composite" : true, + "composites" : { + "client" : { + "realm-management" : [ "manage-authorization", "view-clients", "view-events", "query-realms", "create-client", "view-identity-providers", "query-clients", "view-users", "manage-clients", "manage-events", "view-realm", "manage-realm", "manage-users", "query-users", "query-groups", "manage-identity-providers", "view-authorization", "impersonation" ] + } + }, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "fb251ef8-0f7e-4e85-a423-e3bf515dbe5c", + "name" : "manage-realm", + "description" : "${role_manage-realm}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "09df7745-f99e-4961-add6-eca3e2ab9b44", + "name" : "view-realm", + "description" : "${role_view-realm}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "45a27cee-8828-4427-90b4-9394b080db18", + "name" : "manage-users", + "description" : "${role_manage-users}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "7454f27b-cabc-4160-9835-1747659f6f00", + "name" : "query-users", + "description" : "${role_query-users}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "42d9a084-b4e0-42f5-8c29-9623fb265f79", + "name" : "query-groups", + "description" : "${role_query-groups}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "893a1a00-5e1f-4dc2-983d-640a3cce58fa", + "name" : "manage-identity-providers", + "description" : "${role_manage-identity-providers}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "8c9b5f3e-2819-4dbe-81d0-fa8721ff9f1d", + "name" : "view-authorization", + "description" : "${role_view-authorization}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + }, { + "id" : "1e3044c3-fb93-48fa-9b27-eb4d8c6ccad7", + "name" : "impersonation", + "description" : "${role_impersonation}", + "composite" : false, + "clientRole" : true, + "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "attributes" : { } + } ], + "prod_producer" : [ ], + "security-admin-console" : [ ], + "account-console" : [ ], + "broker" : [ { + "id" : "147bafb6-45a8-45ba-b214-7826b1fc4856", + "name" : "read-token", + "description" : "${role_read-token}", + "composite" : false, + "clientRole" : true, + "containerId" : "f32cd0e1-5b78-412a-ba07-6ad2a9aeb007", + "attributes" : { } + } ], + "rabbitmq" : [ { + "id" : "f5caa7a5-0770-41d8-a3a3-8691470b6d82", + "name" : "rabbitmq-role", + "description" : "", + "composite" : false, + "clientRole" : true, + "containerId" : "a57c9f6a-8b64-47dc-af53-d6ccc2d4aa60", + "attributes" : { } + } ], + "rabbitmq-proxy-client" : [ { + "id" : "ba66d339-cbca-41c1-87fe-38e7b50efd52", + "name" : "rabbitmq-proxy-client-role", + "composite" : true, + "composites" : { + "realm" : [ "rabbitmq-role", 
"rabbitmq-proxy-client-role", "rabbitmq" ] + }, + "clientRole" : true, + "containerId" : "c265f3db-ed3a-4898-8800-af044b3c30f5", + "attributes" : { } + } ], + "mgt_api_client" : [ ], + "admin-cli" : [ ], + "producer" : [ ], + "rabbitmq-client-code" : [ ], + "account" : [ { + "id" : "957f712c-e735-402d-9f41-ad9832749f51", + "name" : "delete-account", + "description" : "${role_delete-account}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "9145b32b-f8ef-4ff0-b50a-be1af192a65a", + "name" : "manage-account-links", + "description" : "${role_manage-account-links}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "59cac6df-51cd-4a3c-bf77-03bc2b34fe69", + "name" : "manage-consent", + "description" : "${role_manage-consent}", + "composite" : true, + "composites" : { + "client" : { + "account" : [ "view-consent" ] + } + }, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "508ce853-78d5-428c-9589-0e310fa7fe40", + "name" : "view-profile", + "description" : "${role_view-profile}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "061542b2-67d9-4388-aadb-9c936f19d607", + "name" : "view-groups", + "description" : "${role_view-groups}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "6d326537-afdd-4f72-8973-14b164361a7e", + "name" : "view-applications", + "description" : "${role_view-applications}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "1fc2ea3e-395a-45bd-ae2f-a9eb674ed4b2", + "name" : "manage-account", + "description" : "${role_manage-account}", + "composite" : true, + "composites" : { + "client" : { + "account" : [ "manage-account-links" ] + } + }, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + }, { + "id" : "95ad82e5-1859-496e-ba95-6b38f8043efd", + "name" : "view-consent", + "description" : "${role_view-consent}", + "composite" : false, + "clientRole" : true, + "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "attributes" : { } + } ], + "rabbit_prod_mgt_ui" : [ ] + } + }, + "groups" : [ { + "id" : "6746dbec-7e2b-4540-ae00-73aa2a93a04e", + "name" : "rabbitmq", + "path" : "/rabbitmq", + "attributes" : { }, + "realmRoles" : [ "rabbitmq" ], + "clientRoles" : { }, + "subGroups" : [ ] + } ], + "defaultRole" : { + "id" : "b84ae322-7112-41d1-8a3f-0009447ded47", + "name" : "default-roles-test", + "description" : "${role_default-roles}", + "composite" : true, + "clientRole" : false, + "containerId" : "prod" + }, + "requiredCredentials" : [ "password" ], + "otpPolicyType" : "totp", + "otpPolicyAlgorithm" : "HmacSHA1", + "otpPolicyInitialCounter" : 0, + "otpPolicyDigits" : 6, + "otpPolicyLookAheadWindow" : 1, + "otpPolicyPeriod" : 30, + "otpPolicyCodeReusable" : false, + "otpSupportedApplications" : [ "totpAppGoogleName", "totpAppFreeOTPName" ], + "webAuthnPolicyRpEntityName" : "keycloak", + "webAuthnPolicySignatureAlgorithms" : [ "ES256" ], + "webAuthnPolicyRpId" : "", + "webAuthnPolicyAttestationConveyancePreference" : "not specified", + "webAuthnPolicyAuthenticatorAttachment" : "not specified", + 
"webAuthnPolicyRequireResidentKey" : "not specified", + "webAuthnPolicyUserVerificationRequirement" : "not specified", + "webAuthnPolicyCreateTimeout" : 0, + "webAuthnPolicyAvoidSameAuthenticatorRegister" : false, + "webAuthnPolicyAcceptableAaguids" : [ ], + "webAuthnPolicyPasswordlessRpEntityName" : "keycloak", + "webAuthnPolicyPasswordlessSignatureAlgorithms" : [ "ES256" ], + "webAuthnPolicyPasswordlessRpId" : "", + "webAuthnPolicyPasswordlessAttestationConveyancePreference" : "not specified", + "webAuthnPolicyPasswordlessAuthenticatorAttachment" : "not specified", + "webAuthnPolicyPasswordlessRequireResidentKey" : "not specified", + "webAuthnPolicyPasswordlessUserVerificationRequirement" : "not specified", + "webAuthnPolicyPasswordlessCreateTimeout" : 0, + "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister" : false, + "webAuthnPolicyPasswordlessAcceptableAaguids" : [ ], + "users" : [ { + "id" : "88063139-59de-4027-a421-d613e3bdba1f", + "createdTimestamp" : 1690974911722, + "username" : "dev_user", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "", + "lastName" : "", + "credentials" : [ { + "id" : "0b2591b9-871e-490d-9319-6314fb5dc42b", + "type" : "password", + "userLabel" : "My password", + "createdDate" : 1690974921254, + "secretData" : "{\"value\":\"txSoE1qlIryIJsd8EKHp0aE7I5bzLkEEWKGxPrcH1lVmKXeAftKnB6Rqxnh2pX4IFem/FMTF/rcmttU+FFmsUA==\",\"salt\":\"qFN5DsIvc/F4yKrXke5K5Q==\",\"additionalParameters\":{}}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\",\"additionalParameters\":{}}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:management", "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "451df525-a468-43c1-97f3-656d5d31ba68", + "createdTimestamp" : 1690974863360, + "username" : "prod_user", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "", + "lastName" : "", + "credentials" : [ { + "id" : "508707a9-08e9-4e5e-8257-b6d6466c98df", + "type" : "password", + "userLabel" : "My password", + "createdDate" : 1690974873162, + "secretData" : "{\"value\":\"iEG974FQB66ACMIKSB6WpgC+CTKL6+JU5qIyjwM4Z1TeQz89pPOeXxjrmtaqourwV5adMVurURO2oO/qL8yHRg==\",\"salt\":\"+axOgEN33yDcNdrXvT+V8Q==\",\"additionalParameters\":{}}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\",\"additionalParameters\":{}}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:management", "rabbitmq.tag:administrator", "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "4cf4d6b5-09e5-453f-bf22-c8efdc2dd1dc", + "createdTimestamp" : 1651841525973, + "username" : "rabbit_admin", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "", + "lastName" : "", + "email" : "rabbit_admin@rabbit.com", + "credentials" : [ { + "id" : "deca2be2-28ad-4f98-981f-3ec68bf12ae2", + "type" : "password", + "createdDate" : 1651841816533, + "secretData" : "{\"value\":\"bRuz2IKP4+kG3IKo258mVNqW8Nts6CkZavF3tf4M+/dlJFNPJIallxephOKUiVPtMOdO9Huq9K0uwTBYSZY3fg==\",\"salt\":\"v2qUXLV0n8402Ef8brQg1Q==\",\"additionalParameters\":{}}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\",\"additionalParameters\":{}}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:administrator", "rabbitmq.configure:*/*", "rabbitmq", "rabbitmq.write:*/*", "rabbitmq.read:*/*" ], 
+ "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "15f03347-e2fc-4f8c-9743-f4dfd59f67fe", + "createdTimestamp" : 1652084304711, + "username" : "service-account-mgt_api_client", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "mgt_api_client", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:management", "default-roles-test", "rabbitmq-management" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "826065e7-bb58-4b65-bbf7-8982d6cca6c8", + "createdTimestamp" : 1690973663764, + "username" : "service-account-prod_producer", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "prod_producer", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test", "producer" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "63ec2047-6689-45c0-981d-f9b127a6bb7f", + "createdTimestamp" : 1652084012762, + "username" : "service-account-producer", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "producer", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test", "producer" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "7a51406b-d6d8-4c77-9b8a-135a2f07d8d5", + "createdTimestamp" : 1677053286393, + "username" : "service-account-rabbitmq-proxy-client", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "rabbitmq-proxy-client", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] + } ], + "scopeMappings" : [ { + "client" : "producer", + "roles" : [ "producer" ] + }, { + "clientScope" : "rabbitmq.read:*/*", + "roles" : [ "producer" ] + }, { + "clientScope" : "offline_access", + "roles" : [ "offline_access" ] + }, { + "clientScope" : "rabbitmq.configure:*/*", + "roles" : [ "producer" ] + }, { + "clientScope" : "rabbitmq.tag:management", + "roles" : [ "rabbitmq.tag:management" ] + }, { + "clientScope" : "rabbitmq.write:*/*", + "roles" : [ "producer" ] + }, { + "clientScope" : "rabbitmq.tag:administrator", + "roles" : [ "rabbitmq.tag:administrator" ] + } ], + "clientScopeMappings" : { + "account" : [ { + "client" : "account-console", + "roles" : [ "manage-account", "view-groups" ] + } ] + }, + "clients" : [ { + "id" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", + "clientId" : "account", + "name" : "${client_account}", + "rootUrl" : "${authBaseUrl}", + "baseUrl" : "/realms/test/account/", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "/realms/test/account/*" ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", 
"microprofile-jwt" ] + }, { + "id" : "80023652-2709-4646-9367-b6114aa73bae", + "clientId" : "account-console", + "name" : "${client_account-console}", + "rootUrl" : "${authBaseUrl}", + "baseUrl" : "/realms/test/account/", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "/realms/test/account/*" ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+", + "pkce.code.challenge.method" : "S256" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "protocolMappers" : [ { + "id" : "ebcf72c5-f58a-48cb-a6fb-db44e8735d7e", + "name" : "audience resolve", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-resolve-mapper", + "consentRequired" : false, + "config" : { } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "e5484264-82ff-46df-b38e-d5456439f413", + "clientId" : "admin-cli", + "name" : "${client_admin-cli}", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : true, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "f32cd0e1-5b78-412a-ba07-6ad2a9aeb007", + "clientId" : "broker", + "name" : "${client_broker}", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : true, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : false, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "c5be3c24-0c88-4672-a77a-79002fcc9a9d", + "clientId" : "mgt_api_client", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "LWOuYqJ8gjKg3D2U8CJZDuID3KiRZVDa", + "redirectUris" : [ ], + 
"webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "saml.force.post.binding" : "false", + "saml.multivalued.roles" : "false", + "frontchannel.logout.session.required" : "false", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false", + "saml.server.signature.keyinfo.ext" : "false", + "use.refresh.tokens" : "true", + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "client_credentials.use_refresh_token" : "false", + "require.pushed.authorization.requests" : "false", + "saml.client.signature" : "false", + "saml.allow.ecp.flow" : "false", + "id.token.as.detached.signature" : "false", + "saml.assertion.signature" : "false", + "client.secret.creation.time" : "1652084304", + "saml.encrypt" : "false", + "saml.server.signature" : "false", + "exclude.session.state.from.auth.response" : "false", + "saml.artifact.binding" : "false", + "saml_force_name_id_format" : "false", + "acr.loa.map" : "{}", + "tls.client.certificate.bound.access.tokens" : "false", + "saml.authnstatement" : "false", + "display.on.consent.screen" : "false", + "token.response.type.bearer.lower-case" : "false", + "saml.onetimeuse.condition" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "33fd8faf-3ea6-4669-beea-45b9655cf6ab", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + }, { + "id" : "aae7e2aa-72e7-4d29-ae68-a66b846d62ab", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbitmq", + "userinfo.token.claim" : "false" + } + }, { + "id" : "f7e826de-e651-4080-8e97-feba46b8a0a2", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + }, { + "id" : "545a1d71-5dc8-491c-bf7b-1c672d50e606", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "rabbitmq.tag:administrator", "rabbitmq.tag:management", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "f1c75ad4-2182-4e67-b2a4-5cac93ad7939", + "clientId" : 
"prod_producer", + "name" : "", + "description" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "PdLHb1w8RH1oD5bpppgy8OF9G6QeRpL9", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "client.secret.creation.time" : "1690973663", + "backchannel.logout.session.required" : "true", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "d25e25ae-5653-4806-a9c3-4f95ab17ca84", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + }, { + "id" : "6195b57b-755c-492b-8dda-bb2c5e4418c4", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbit_prod", + "userinfo.token.claim" : "false" + } + }, { + "id" : "c337d632-52cc-4c46-87e9-5f541f98b2af", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + }, { + "id" : "978d0198-3d5c-4fe7-b222-1da9ccdf6153", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "rabbitmq.read:*/*", "web-origins", "acr", "rabbitmq.write:*/*", "profile", "roles", "email", "rabbitmq.configure:*/*" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "3e96bddd-95f9-4277-b3ad-f8f6f5d5bb59", + "clientId" : "producer", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "kbOFBXI9tANgKUq8vXHLhT6YhbivgXxn", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "saml.force.post.binding" : "false", + 
"saml.multivalued.roles" : "false", + "frontchannel.logout.session.required" : "false", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false", + "saml.server.signature.keyinfo.ext" : "false", + "use.refresh.tokens" : "true", + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "false", + "client_credentials.use_refresh_token" : "true", + "require.pushed.authorization.requests" : "false", + "saml.client.signature" : "false", + "saml.allow.ecp.flow" : "false", + "id.token.as.detached.signature" : "false", + "saml.assertion.signature" : "false", + "client.secret.creation.time" : "1652081901", + "saml.encrypt" : "false", + "saml.server.signature" : "false", + "exclude.session.state.from.auth.response" : "false", + "saml.artifact.binding" : "false", + "saml_force_name_id_format" : "false", + "acr.loa.map" : "{}", + "tls.client.certificate.bound.access.tokens" : "false", + "saml.authnstatement" : "false", + "display.on.consent.screen" : "false", + "token.response.type.bearer.lower-case" : "false", + "saml.onetimeuse.condition" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "72928dd9-10c9-4049-bfa7-4cc05e650f46", + "name" : "realm roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-realm-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "realm_access.roles", + "jsonType.label" : "String", + "multivalued" : "true" + } + }, { + "id" : "4c3b3c28-795f-4056-a854-5cf119b36266", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + }, { + "id" : "56b7571c-3226-4c92-8615-c99b265a42fc", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbitmq", + "userinfo.token.claim" : "false" + } + }, { + "id" : "4ca73107-b26b-46ee-985b-d2dcc099f21c", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + }, { + "id" : "26e5243a-3127-4528-9a54-8af324ac2392", + "name" : "client roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-client-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "resource_access.${client_id}.roles", + "jsonType.label" : "String", + "multivalued" : "true" + } + }, { + "id" : "d52cc6cb-08a1-4c2b-bf06-61f234a419d1", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + 
"id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "rabbitmq.read:*/*", "rabbitmq.write:*/*", "roles", "rabbitmq.configure:*/*" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "6f65dc7b-5dc8-4b37-ba05-29d924d1edff", + "clientId" : "rabbit_prod_mgt_ui", + "name" : "", + "description" : "", + "rootUrl" : "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}", + "adminUrl" : "", + "baseUrl" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}/*" ], + "webOrigins" : [ "+" ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "display.on.consent.screen" : "false", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "949fa590-6bcf-4a58-af2b-2ea598cbc0fd", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbit_prod", + "userinfo.token.claim" : "false" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "rabbitmq.tag:administrator", "rabbitmq.tag:management", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "a57c9f6a-8b64-47dc-af53-d6ccc2d4aa60", + "clientId" : "rabbitmq", + "name" : "", + "description" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : true, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "e64b05d1-0d1c-4294-85f9-52ae098ecf1f", + "clientId" : "rabbitmq-client-code", + "name" : "", + "description" : "", + "rootUrl" : "http://localhost:15672/", + "adminUrl" : "", + "baseUrl" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + 
"alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "http://localhost:15672/*" ], + "webOrigins" : [ "+" ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : true, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "saml.force.post.binding" : "false", + "saml.multivalued.roles" : "false", + "frontchannel.logout.session.required" : "false", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false", + "saml.server.signature.keyinfo.ext" : "false", + "use.refresh.tokens" : "true", + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "false", + "client_credentials.use_refresh_token" : "false", + "require.pushed.authorization.requests" : "false", + "saml.client.signature" : "false", + "saml.allow.ecp.flow" : "false", + "id.token.as.detached.signature" : "false", + "saml.assertion.signature" : "false", + "client.secret.creation.time" : "1652171962", + "saml.encrypt" : "false", + "saml.server.signature" : "false", + "exclude.session.state.from.auth.response" : "false", + "tls-client-certificate-bound-access-tokens" : "false", + "saml.artifact.binding" : "false", + "saml_force_name_id_format" : "false", + "acr.loa.map" : "{}", + "tls.client.certificate.bound.access.tokens" : "false", + "saml.authnstatement" : "false", + "display.on.consent.screen" : "false", + "token.response.type.bearer.lower-case" : "false", + "saml.onetimeuse.condition" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "e6905c3e-7ace-4b4f-9244-0f20a86da8ef", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbitmq", + "userinfo.token.claim" : "false" + } + }, { + "id" : "548a2e70-5a2b-4959-8c72-97f6455ce478", + "name" : "realm roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-realm-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "extra_scope", + "jsonType.label" : "String", + "multivalued" : "true" + } + }, { + "id" : "923edb6d-2188-4f23-a547-7e372d9cb5eb", + "name" : "username", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "username", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "user_name", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "rabbitmq.tag:administrator", "profile", "roles", "rabbitmq.tag:management", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "c265f3db-ed3a-4898-8800-af044b3c30f5", + "clientId" : "rabbitmq-proxy-client", + "name" : "", + "description" : "", + "rootUrl" : "", + "adminUrl" : "", + "baseUrl" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : 
"client-secret", + "secret" : "nt6pmZMeyrgzYgkg2MLgZQZxLveRMW5M", + "redirectUris" : [ "http://0.0.0.0:4180/*" ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : true, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "client.secret.creation.time" : "1677053168", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false", + "use.refresh.tokens" : "true", + "tls-client-certificate-bound-access-tokens" : "false", + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "client_credentials.use_refresh_token" : "false", + "acr.loa.map" : "{}", + "require.pushed.authorization.requests" : "false", + "display.on.consent.screen" : "false", + "token.response.type.bearer.lower-case" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "e1c2389a-c5ca-4a81-a5c2-67f919f2368d", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + }, { + "id" : "54b12841-4524-4b8a-8dc0-bb6f9044e11d", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + }, { + "id" : "a5c803da-af15-4fc8-ad7f-a4a900f0703b", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + }, { + "id" : "958d4a83-d5b3-4cca-af3e-fde9f9328eec", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "included.client.audience" : "rabbitmq-proxy-client", + "id.token.claim" : "true", + "access.token.claim" : "true", + "included.custom.audience" : "rabbitmq", + "userinfo.token.claim" : "true" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "rabbitmq.tag:administrator", "profile", "offline_access", "microprofile-jwt" ] + }, { + "id" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", + "clientId" : "realm-management", + "name" : "${client_realm-management}", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : true, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + 
"serviceAccountsEnabled" : false, + "publicClient" : false, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "16f67c5c-f86b-4334-93f4-fd26356cbb24", + "clientId" : "security-admin-console", + "name" : "${client_security-admin-console}", + "rootUrl" : "${authAdminUrl}", + "baseUrl" : "/admin/test/console/", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "/admin/test/console/*" ], + "webOrigins" : [ "+" ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : false, + "protocol" : "openid-connect", + "attributes" : { + "post.logout.redirect.uris" : "+", + "pkce.code.challenge.method" : "S256" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : false, + "nodeReRegistrationTimeout" : 0, + "protocolMappers" : [ { + "id" : "26e7deed-9c26-4a19-88fa-845bec2e5909", + "name" : "locale", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "locale", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "locale", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + } ], + "clientScopes" : [ { + "id" : "ec4e76a3-8597-41d4-aa7c-e4e1fee6a01a", + "name" : "profile", + "description" : "OpenID Connect built-in scope: profile", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true", + "consent.screen.text" : "${profileScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "db04690a-de25-4627-8e0c-78a018e86ce8", + "name" : "family name", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "lastName", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "family_name", + "jsonType.label" : "String" + } + }, { + "id" : "e5c72df5-7fa9-43c6-8f13-c7c2d73fe89a", + "name" : "profile", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "profile", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "profile", + "jsonType.label" : "String" + } + }, { + "id" : "5856da2a-7aa0-446c-be48-22112783e322", + "name" : "nickname", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "nickname", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "nickname", + 
"jsonType.label" : "String" + } + }, { + "id" : "5a7a208b-70eb-4f8f-b8ff-2115a615d696", + "name" : "gender", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "gender", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "gender", + "jsonType.label" : "String" + } + }, { + "id" : "b1cad309-90cd-4fed-8e62-c05dc4649b99", + "name" : "birthdate", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "birthdate", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "birthdate", + "jsonType.label" : "String" + } + }, { + "id" : "b3a08e61-6e08-4aa3-aa71-212bc13bff5d", + "name" : "picture", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "picture", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "picture", + "jsonType.label" : "String" + } + }, { + "id" : "bc59bb88-2cae-4c60-b09f-3c18fced603f", + "name" : "website", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "website", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "website", + "jsonType.label" : "String" + } + }, { + "id" : "da32f964-8b0a-4cef-babc-8b90f31b20a7", + "name" : "given name", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "firstName", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "given_name", + "jsonType.label" : "String" + } + }, { + "id" : "eb6b8e8c-1e03-497a-80b4-3e9c26a86d9a", + "name" : "username", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "username", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "preferred_username", + "jsonType.label" : "String" + } + }, { + "id" : "334e47b2-5f74-4668-b04e-9ab55513c146", + "name" : "updated at", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "updatedAt", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "updated_at", + "jsonType.label" : "long" + } + }, { + "id" : "7dad1c25-6b18-4571-8d92-bfd698c5b94b", + "name" : "full name", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-full-name-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "true", + "access.token.claim" : "true", + "userinfo.token.claim" : "true" + } + }, { + "id" : "bf52c928-4d33-4c14-8e61-969b17bed2a5", + "name" : "zoneinfo", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "zoneinfo", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "zoneinfo", + 
"jsonType.label" : "String" + } + }, { + "id" : "3a03eb21-7c20-4150-87f2-ca94c9df601c", + "name" : "middle name", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "middleName", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "middle_name", + "jsonType.label" : "String" + } + }, { + "id" : "431c9682-e4ba-4348-9d07-f8d5415ca98b", + "name" : "locale", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "locale", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "locale", + "jsonType.label" : "String" + } + } ] + }, { + "id" : "b2ced9e2-289f-44b0-8567-5218a2eee3e6", + "name" : "rabbitmq.read:*/*", + "description" : "read all", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + }, { + "id" : "46f5e514-9283-4f03-b2af-a7da506f0cbc", + "name" : "offline_access", + "description" : "OpenID Connect built-in scope: offline_access", + "protocol" : "openid-connect", + "attributes" : { + "consent.screen.text" : "${offlineAccessScopeConsentText}", + "display.on.consent.screen" : "true" + } + }, { + "id" : "a4ffacea-34a8-4eb2-961f-af78e50b1140", + "name" : "address", + "description" : "OpenID Connect built-in scope: address", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true", + "consent.screen.text" : "${addressScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "791b8544-4659-4d61-8fb8-8a18a687648d", + "name" : "address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-address-mapper", + "consentRequired" : false, + "config" : { + "user.attribute.formatted" : "formatted", + "user.attribute.country" : "country", + "user.attribute.postal_code" : "postal_code", + "userinfo.token.claim" : "true", + "user.attribute.street" : "street", + "id.token.claim" : "true", + "user.attribute.region" : "region", + "access.token.claim" : "true", + "user.attribute.locality" : "locality" + } + } ] + }, { + "id" : "7a2981c1-d606-43f6-acbf-76a8124e59b7", + "name" : "microprofile-jwt", + "description" : "Microprofile - JWT built-in scope", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "false" + }, + "protocolMappers" : [ { + "id" : "63a285df-f6d0-4e06-9f16-d4a578fce8bf", + "name" : "upn", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "username", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "upn", + "jsonType.label" : "String" + } + }, { + "id" : "1665df09-6855-420b-a649-0f0afe054b51", + "name" : "groups", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-realm-role-mapper", + "consentRequired" : false, + "config" : { + "multivalued" : "true", + "userinfo.token.claim" : "true", + "user.attribute" : "foo", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "groups", + "jsonType.label" : "String" + } + } ] + }, { + "id" : "b9c5af5d-59f3-445b-b899-c6574bc6191b", + "name" : "phone", + "description" : "OpenID Connect built-in scope: 
phone", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true", + "consent.screen.text" : "${phoneScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "e945c389-e953-431b-b3b4-882a50a8054e", + "name" : "phone number", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "phoneNumber", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "phone_number", + "jsonType.label" : "String" + } + }, { + "id" : "52b07524-1521-48d6-be23-779f8e1f8a67", + "name" : "phone number verified", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-attribute-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "phoneNumberVerified", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "phone_number_verified", + "jsonType.label" : "boolean" + } + } ] + }, { + "id" : "4dfc0d4d-654d-4e1c-8b58-64a0e1126a19", + "name" : "rabbitmq.configure:*/*", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + }, { + "id" : "93d154c5-e9fe-49ff-bca8-bc55a141a31e", + "name" : "rabbitmq.tag:management", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + }, { + "id" : "cc214fa3-0a7f-4390-9c4a-8ae14512e4a4", + "name" : "web-origins", + "description" : "OpenID Connect scope for add allowed web origins to the access token", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "false", + "display.on.consent.screen" : "false", + "consent.screen.text" : "" + }, + "protocolMappers" : [ { + "id" : "e2ac8ddb-9c19-4088-bc72-c4176e0fac3f", + "name" : "allowed web origins", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-allowed-origins-mapper", + "consentRequired" : false, + "config" : { } + } ] + }, { + "id" : "53b1a2b2-085e-4e36-bb81-c88e8d846439", + "name" : "email", + "description" : "OpenID Connect built-in scope: email", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true", + "consent.screen.text" : "${emailScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "e1a850d9-6372-4521-8d0d-acee25245c90", + "name" : "email", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "email", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "email", + "jsonType.label" : "String" + } + }, { + "id" : "43a7220e-d94d-43e6-a5e7-1a12dbbb4460", + "name" : "email verified", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-property-mapper", + "consentRequired" : false, + "config" : { + "userinfo.token.claim" : "true", + "user.attribute" : "emailVerified", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "email_verified", + "jsonType.label" : "boolean" + } + } ] + }, { + "id" : "2b745afc-cb92-4ac3-b314-8ef1d638b4b1", + "name" : "roles", + "description" : "OpenID Connect scope for add user roles to the access token", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "false", + "display.on.consent.screen" : 
"true", + "consent.screen.text" : "${rolesScopeConsentText}" + }, + "protocolMappers" : [ { + "id" : "2cac7c2c-7c9f-44e6-a76e-b8d3fad627ea", + "name" : "realm roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-realm-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "realm_access.roles", + "jsonType.label" : "String", + "multivalued" : "true" + } + }, { + "id" : "91c4a9bd-a9b9-402b-9eb6-762362d18c6b", + "name" : "client roles", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usermodel-client-role-mapper", + "consentRequired" : false, + "config" : { + "user.attribute" : "foo", + "access.token.claim" : "true", + "claim.name" : "resource_access.${client_id}.roles", + "jsonType.label" : "String", + "multivalued" : "true" + } + } ] + }, { + "id" : "8b53c714-89cf-4cfd-ac76-1b45bd841b58", + "name" : "acr", + "description" : "OpenID Connect scope for add acr (authentication context class reference) to the token", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "false", + "display.on.consent.screen" : "false" + }, + "protocolMappers" : [ { + "id" : "6381b445-4f37-434e-b982-c34a6048913b", + "name" : "acr loa level", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-acr-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "true", + "access.token.claim" : "true", + "userinfo.token.claim" : "true" + } + } ] + }, { + "id" : "3331893c-c67c-4146-b00a-b4862200628c", + "name" : "role_list", + "description" : "SAML role list", + "protocol" : "saml", + "attributes" : { + "consent.screen.text" : "${samlRoleListScopeConsentText}", + "display.on.consent.screen" : "true" + }, + "protocolMappers" : [ { + "id" : "863078ec-d37c-46fc-a70e-3fe6340fbeec", + "name" : "role list", + "protocol" : "saml", + "protocolMapper" : "saml-role-list-mapper", + "consentRequired" : false, + "config" : { + "single" : "false", + "attribute.nameformat" : "Basic", + "attribute.name" : "Role" + } + } ] + }, { + "id" : "2010b133-4bfe-4f5f-8d1a-33b2a7ad2e60", + "name" : "rabbitmq.write:*/*", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + }, { + "id" : "f6e6dd62-22bf-4421-910e-e6070908764c", + "name" : "rabbitmq.tag:administrator", + "protocol" : "openid-connect", + "attributes" : { + "include.in.token.scope" : "true", + "display.on.consent.screen" : "true" + } + } ], + "defaultDefaultClientScopes" : [ "role_list", "profile", "email", "roles", "web-origins", "acr" ], + "defaultOptionalClientScopes" : [ "offline_access", "address", "phone", "microprofile-jwt" ], + "browserSecurityHeaders" : { + "contentSecurityPolicyReportOnly" : "", + "xContentTypeOptions" : "nosniff", + "xRobotsTag" : "none", + "xFrameOptions" : "SAMEORIGIN", + "contentSecurityPolicy" : "frame-src 'self'; frame-ancestors 'self'; object-src 'none';", + "xXSSProtection" : "1; mode=block", + "strictTransportSecurity" : "max-age=31536000; includeSubDomains" + }, + "smtpServer" : { }, + "eventsEnabled" : false, + "eventsListeners" : [ "jboss-logging" ], + "enabledEventTypes" : [ ], + "adminEventsEnabled" : false, + "adminEventsDetailsEnabled" : false, + "identityProviders" : [ ], + "identityProviderMappers" : [ ], + "components" : { + "org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy" : [ { + "id" : "72a90aea-e732-467d-ade9-34e73c993209", + "name" : "Allowed Client Scopes", + 
"providerId" : "allowed-client-templates", + "subType" : "authenticated", + "subComponents" : { }, + "config" : { + "allow-default-scopes" : [ "true" ] + } + }, { + "id" : "5ed9083b-fdb5-4fc5-97b5-0b31189f8ad2", + "name" : "Max Clients Limit", + "providerId" : "max-clients", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { + "max-clients" : [ "200" ] + } + }, { + "id" : "4f81b0b9-6a42-4128-b7a1-4a814acf5875", + "name" : "Full Scope Disabled", + "providerId" : "scope", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { } + }, { + "id" : "2c06a7de-50eb-439f-b592-6bbda10d6af3", + "name" : "Allowed Protocol Mapper Types", + "providerId" : "allowed-protocol-mappers", + "subType" : "authenticated", + "subComponents" : { }, + "config" : { + "allowed-protocol-mapper-types" : [ "saml-user-property-mapper", "saml-user-attribute-mapper", "oidc-sha256-pairwise-sub-mapper", "oidc-usermodel-attribute-mapper", "oidc-usermodel-property-mapper", "saml-role-list-mapper", "oidc-address-mapper", "oidc-full-name-mapper" ] + } + }, { + "id" : "693f0625-c453-40c0-b38e-80b7b7deaefa", + "name" : "Consent Required", + "providerId" : "consent-required", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { } + }, { + "id" : "7c971805-14f2-4eb0-b2af-90c2db4c2e41", + "name" : "Allowed Client Scopes", + "providerId" : "allowed-client-templates", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { + "allow-default-scopes" : [ "true" ] + } + }, { + "id" : "e4376815-05e5-4675-9b11-6e18d5712849", + "name" : "Allowed Protocol Mapper Types", + "providerId" : "allowed-protocol-mappers", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { + "allowed-protocol-mapper-types" : [ "saml-role-list-mapper", "saml-user-property-mapper", "oidc-sha256-pairwise-sub-mapper", "oidc-usermodel-property-mapper", "oidc-full-name-mapper", "saml-user-attribute-mapper", "oidc-usermodel-attribute-mapper", "oidc-address-mapper" ] + } + }, { + "id" : "bbadf932-a286-4841-be1b-ed845e2131cb", + "name" : "Trusted Hosts", + "providerId" : "trusted-hosts", + "subType" : "anonymous", + "subComponents" : { }, + "config" : { + "host-sending-registration-request-must-match" : [ "true" ], + "client-uris-must-match" : [ "true" ] + } + } ], + "org.keycloak.keys.KeyProvider" : [ { + "id" : "307ede55-7647-498c-b1ba-4be80fb609cc", + "name" : "rsa-generated", + "providerId" : "rsa-generated", + "subComponents" : { }, + "config" : { + "privateKey" : [ 
"MIIEogIBAAKCAQEAx5RCXJqU+e2646hYCduHBANJvpYN3Uv1gq15LjTlIXYqBCfm4SLUX1y1lOKDoOVl5j/flAgdgF4I9P3G4drc36NuaaocQguu3xWOsG9UZheiDD4wJANC7F6FMdWqSiBySA+EXyQ2zkoUBkNxKecqWhqVmaY9IVyxbQhdXsKQH0hBnvd3NQaem0RXuUeadUK5TGI9VqTe96sN7lLdE+T94n5cubqtBbc14kZ6YZsi1Pa+2xe/ZsDlXgMGAqTxgSy5l6cnxzxE+ndAp+pIR83BxfRFijo0LNKxyiZQ0X+QZ4Y3sTGadLCZzr6R/N/1QQnhOTXmTsVyNdmhQMr6A7lBFwIDAQABAoIBAAP7tYdbnnWOhRheF8O6mes+lY40OHqeNXwyMiT18UzFqvkCQt1jcJGmrAkYrD/d1DbQN4ogz1Xsiok5N2ryj033WRDK0F2RFiBlsb9buXeAKT/NTfCqD//fsxDXjtqD40QE60Nq2Z0sZVHqrquDbZj2xt2WL8omq3Pdot9tSqsVIQMbRIfH+I9+9kQ8Ob7t423I06AFiXJg5h3qjLx1jP6qQWsC4ippY6QmUve/d3PWqSd4GQ4sb2KQKvfT1VU4HPvIQdf+OurXaF/lPR+6XDU3RmA1qY61JS0O0ul+jTUGUHRxtgI51IU16+jcKiAzjWZ53HI9jLODP6gzyn+KC2ECgYEA/2SN4sNHM8aL/i7hdcbq+c+UFKK5AyWjewbMWGC2ZCzEzW/zqn2lmQHZK7hDAuAhvnEh6rvxxFKQw2PXb9N9IGIaAEJmGrJYg5QaLspNs5/4a3+GZh8lfZgwaEBetazrIFSOrVFhYb/pRz6m2x7oKNIpVXXUXdNr0DD4mDAwLqUCgYEAyA272k5rlTXj2IX/MvAtAuD2l7R6DfDirolzNdLVEXKLWpZwqdAuLWhDNGvNtmeAQcIuGFUvHhv5ZdnAwOKjbJsVDmr8vUCegCZJPuEGuOXjqZq+a1a84lhSSyWWiiz/yuIVh2Bnu8TD2Xb3igNFa0ipWga9nfZm7usifVumQAsCgYA/z+kfyrkkt6xM83vECNK3XmVajpn5rlLdr4IpZujLuN/nkNxqMgDJbUvM/7pGoqfrxKq70wAClLq1B2JR/57ZE4n5nJ2UeBFjtwKfxE6L3iGdAn0bURb1/avCsKq5bB5Hsrj/l0DkwqXP9liMkXlikbhgMRPB6cybdVD3/bpcPQKBgFxZIqeC3dzSNKymJokwH8cdbBfZwyIeWbXyU1G8UoksVDHsEnQBWt+xKpzPSvXxz14LslWfNRH0HeurWnRv6rre2BiAwMzoQIKtqdAx9nVyAecwPMi2EJl35f00i8qbPTU9qmyEzz35deM0LM7z9Z6xuyOIyw1ZSmjt+Ezf+t3DAoGAB7rT8E7lc036+L1RzKF+T8XwXReHcDkXhKt5V/RQBzRL5GY2mqQQ1rb037KlGXRHAwPkQqmYpMhY9ccRF5UqA05IT/KApvc36m7DAXPaNy1CwZHrr0l3rR6fhpUvtgrt1uyCXvaLJxUAd/5MTw2ffqOsMSxiuRCrpUC+dxCXtG8=" ], + "keyUse" : [ "SIG" ], + "certificate" : [ "MIIClzCCAX8CBgGAmWyoEzANBgkqhkiG9w0BAQsFADAPMQ0wCwYDVQQDDAR0ZXN0MB4XDTIyMDUwNjEyNDkzNloXDTMyMDUwNjEyNTExNlowDzENMAsGA1UEAwwEdGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMeUQlyalPntuuOoWAnbhwQDSb6WDd1L9YKteS405SF2KgQn5uEi1F9ctZTig6DlZeY/35QIHYBeCPT9xuHa3N+jbmmqHEILrt8VjrBvVGYXogw+MCQDQuxehTHVqkogckgPhF8kNs5KFAZDcSnnKloalZmmPSFcsW0IXV7CkB9IQZ73dzUGnptEV7lHmnVCuUxiPVak3verDe5S3RPk/eJ+XLm6rQW3NeJGemGbItT2vtsXv2bA5V4DBgKk8YEsuZenJ8c8RPp3QKfqSEfNwcX0RYo6NCzSscomUNF/kGeGN7ExmnSwmc6+kfzf9UEJ4Tk15k7FcjXZoUDK+gO5QRcCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAp2oy7aEJWJ1/BBUlFArzHqXZ/aYzuRUdyCTQn6+tjRsB9IrPwRI4p9sN4mCl0EUDvureNxCiY9XBHWmEteb5n952XWcSWi6tzAj5BQL4LHIFFzMLHr1+HYpmMwYHFgmR9MhoKFINEEGpOvCRokurEN2uU/tNcBX5HGnEWRc5ZNPRwQcJggnHHWxAmuNKIf73pPAECcffrTJO6cePt++TaU2j2qA6S5A/p/0Za10EtKcNeL1EIvwuFxewBjJQjXEqvmN4VlcVadj+pQ7AwtzujdtCuHvO/9zxRo5xq9KFl/VVMk4fUvwKA+vjcMwjDn2qXZwKmslX6YkV18gbmnSCOg==" ], + "priority" : [ "100" ] + } + }, { + "id" : "1ae80491-123b-4d2d-8018-7ceb6971d07d", + "name" : "aes-generated", + "providerId" : "aes-generated", + "subComponents" : { }, + "config" : { + "kid" : [ "e43bf5a9-a6eb-46e6-a529-2174e96536fd" ], + "secret" : [ "2cQajcd-396pH2TSx6TC-Q" ], + "priority" : [ "100" ] + } + }, { + "id" : "b7b404ce-f1db-4fba-9037-d43bbd5fa584", + "name" : "hmac-generated", + "providerId" : "hmac-generated", + "subComponents" : { }, + "config" : { + "kid" : [ "2a9eb5b7-3df3-4fe8-980b-93808456c392" ], + "secret" : [ "ghLjXQyrSvgpx1wi2-YYgGSOUH2-FteA8BN_FujK8wywdQ-LiMd-WDfnw-F6GexWHqNrv95VRjHzvlojRbDluQ" ], + "priority" : [ "100" ], + "algorithm" : [ "HS256" ] + } + }, { + "id" : "f7ed445f-b2bd-4cbd-9985-eba8b36ed733", + "name" : "rsa-enc-generated", + "providerId" : "rsa-enc-generated", + "subComponents" : { }, + "config" : { + "privateKey" : [ 
"MIIEowIBAAKCAQEAjn/EV027lKv/NvYuCxCqUrAsVKTHpKLyFZCeJfrMcGn9XDTjMMYVx5/2wguPJFpX+nUzFSCjGuejRWamVFOoHiaXB+mrdPoAZbBNrgdeqwb2+7+G0iqsmU+Lfi/pxsubTTLZIZB1PBfV/4DmWz3vRU+uapiHo5pn+h0mbeOCLltuZmygiKlkzpTLfZxdmSXMqCwZv9J6Zdvrpio88Ca2F1dD4w3bPWvcbLmftAkpByucyHlo0v3jLPbq9LQF7fRD3WqHDGgQjLFJJKvN16tTnVkLWzFE4tumsQirCp8MoAuC3fKa5X5AKjZRarcrml9bmnFId+VGs764FY8THVuqtQIDAQABAoIBAC2FBOw6iYxRcSJWe5jPRwlI+7BCPwAJiTl4S/yn4/yY7vUwc86VyElPgRy1YpFjRq1cGOhL1651hkNaKhHP82lGPIKrkzi3z76sFfRcqFIL1IPQx7mFJkOHFHFHzu7RBZgggRnmsnxyxMpsm6lT3IYQkQ++D74kScis3STHQ0OZOVGkx9Sbznx1+i2rc7QUkWy8b7G8/ByVJsAu2SBLfbQ4wFAhJKtr+uDP6Tt2Cyn4GpzE68B5cA7htJI49uVvbGTBLInH5PXejXKyLfEJjbH2brx9hVStmaaV/sD1hroZ0sjhM54MamctPq2o0H33S2WFZJaUXwDWHzOSyUmY5SECgYEAxTJmhuCYz/P2AZP4DGDNwFHLar1K719m+RxUu0ndIDKeGhBtX3oSQXaKQS+JM+dmbF3M7ub18OgCHF+raPXAE6en1tdh5ZQJbRQEX9dnaXVpjCPFciZMgP0eJ1CpMuKo7qb3IXv7YtcN6pFjMNchz3MyVqbNBFN8ocQWoe4VJu0CgYEAuP3Z45ce6XI+7274yTXMEHqmCZU8krKanmppgIj39iJGbkEBo9QSB32XhG7dU0tWJhj4QtQcMKFCMcqckw9Jetyb68J2vreCVti9CqSrSLL4VFhMzVdoRPflzXB29gdJTe4TxBAiSryVICVblW3giM4UWhzNAuJOC8f5r1xrkekCgYBmKyXJreYeoBSOXr6+kw0nHnnZFLgVa4Vrfc08uBlUTEVz1Z0FQbbhqewZt+pLNRHxBWxfPtSf+2TUlJC3sdPRmySvgCodi2SS2jMmAPF4RzfnPsVWzhcHIZ2U2wq+7YZ/F4ylEZp+bFOue6M7s8q1s8aZ9JP2MNc67OCZB0R4RQKBgQC3YfZIVfuvwbA/3ItFs03KnDrSTx2P8vuxxJ03bRAZ8BpPm6OLi2QgBtFX2CsRMiKBe8lHPkt/rawX/dk/My1NXTo3+TuLjhDoFM05qsmdNMVVn37rJBXaIMCu6ikTdV+moDb56mCEI/PUvRPPyu+FznyAZAKbNEnYBfIvc3ezWQKBgEIw8dPvzmmbGvcb1K56VlyKTcejY0tPk8spRd+BKpi+VXlhV+1xLVmyB6lzveFm+0Da2arFGxFKgi/PfT5mXfheGyX2dxMbK5cMkYg7R+pHBbm/0XXAdQnaGZ4wz0O1R/DC8t0ZDp4wSlb3fVJpQoEKkqMY9bK2H4DvToOvLFck" ], + "keyUse" : [ "ENC" ], + "certificate" : [ "MIIClzCCAX8CBgGAmWypGDANBgkqhkiG9w0BAQsFADAPMQ0wCwYDVQQDDAR0ZXN0MB4XDTIyMDUwNjEyNDkzNloXDTMyMDUwNjEyNTExNlowDzENMAsGA1UEAwwEdGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAI5/xFdNu5Sr/zb2LgsQqlKwLFSkx6Si8hWQniX6zHBp/Vw04zDGFcef9sILjyRaV/p1MxUgoxrno0VmplRTqB4mlwfpq3T6AGWwTa4HXqsG9vu/htIqrJlPi34v6cbLm00y2SGQdTwX1f+A5ls970VPrmqYh6OaZ/odJm3jgi5bbmZsoIipZM6Uy32cXZklzKgsGb/SemXb66YqPPAmthdXQ+MN2z1r3Gy5n7QJKQcrnMh5aNL94yz26vS0Be30Q91qhwxoEIyxSSSrzderU51ZC1sxROLbprEIqwqfDKALgt3ymuV+QCo2UWq3K5pfW5pxSHflRrO+uBWPEx1bqrUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAThu6AxY4bqRbl4RtPH5Xcm033xwLssXmVXh01NuQUfowtkHi77lGUWXBmvwY6UIKC4D4eYrGVYVRTjHUEknmLrxSzUKi4g38kax4pBfwWWstWyVyo89dl7hA5ZlzdZ+SFB4HasGcXdhVFG2dwVvx6lnfxBIWZkgy5GAtIOpK4oIJOIutTiR1yuku4a9zkk6yumsxTKivAs1UMsvQXzcFSUDIrdj0vfdCAB6SvjpYvf8d0wO31bb+t3vWblv29RNV4qbuA/CZkrvWZZXZ+bal0qZd06Z+Hbc4iBgPSHs/HjyAJ8xac3ljg0IWolvZxRkFBR4VSB3pgCUKxf3V4mgbPw==" ], + "priority" : [ "100" ], + "algorithm" : [ "RSA-OAEP" ] + } + }, { + "id" : "66a592ec-8657-4f53-8870-1e1693ff266c", + "name" : "rsa", + "providerId" : "rsa", + "subComponents" : { }, + "config" : { + "privateKey" : [ "MIIEpAIBAAKCAQEA2dP+vRn+Kj+S/oGd49kq6+CKNAduCC1raLfTH7B3qjmZYm45 yDl+XmgK9CNmHXkho9qvmhdksdzDVsdeDlhKIdcIWadhqDzdtn1hj/22iUwrhH0b d475hlKcsiZ+oy/sdgGgAzvmmTQmdMqEXqV2B9q9KFBmo4Ahh/6+d4wM1rH9kxl0 RvMAKLe+daoIHIjok8hCO4cKQQEw/ErBe4SF2cr3wQwCfF1qVu4eAVNVfxfy/uEv G3Q7x005P3TcK+QcYgJxav3lictSi5dyWLgGQAvkknWitpRK8KVLypEj5WKej6CF 8nq30utn15FQg0JkHoqzwiCqqeen8GIPteI7VwIDAQABAoIBAFsB5FszYepa11o3 4zSPxgv4qyUjuYf3GfoNW0rRGp3nJLtoHAIYa0CcLX9kzsQfmLtxoY46mdppxr8Z 2qUZpBdRVO7ILNfyXhthdQKI2NuyFDhtYK1p8bx6BXe095HMcvm2ohjXzPdTP4Hq HrXAYXjUndUbClbjMJ82AnPF8pM70kBq7g733UqkdfrMuv6/d95Jiyw4cC7dGsI3 Ruz9DGhiAyCBtQ0tUB+6Kqn5DChSB+ccfMJjr6GnCVYmERxEQ5DJCTIX8am8C6KX mAxUwHMTsEGBU6GzhcUgAwUFEK3I9RptdlRFp7F8E/P0LxmPkFdgaBNUhrdnB7Y4 01n1R1kCgYEA/huFJgwVWSBSK/XIouFuQrxZOI9JbBbdmpFT7SBGCdFg26Or9y7j 
+N5HE7yuoZ9PkBh17zzosZdsJhGocRYvO0LSq8cXvKXKCwn2fTMM7uJ/oQe68sxG cF/fC0M/8LvRESWShH920rrERu0s161RuasdOPre0aXu7ZQzkQ68O6MCgYEA23NO DHKNblBOdFEWsvotLqV8DrIbQ4le7sSgQr56/bdn9GScZk2JU0f+pqzpiGUy9bIt 6uujvt5ar0IvpIQVdjf3dbp6Fy+Dwhd4yTR4dMdDECest7jL++/21x8Y0ywFhBIK yEd+QxpOLXP6qaSKTGxL2rnTXRjl8/g629xQPL0CgYEAkNNOh+jLIgjxzGxA9dRV 62M91qaTyi8eDkJV+wgx4taaxZP7Jt5qwCSvjegz/5m01wOZ88hbNxx+XxQhVJK4 SKZFO/I07Sfwh2oeOi0maeBdrYGiY09ZtiJuFRU3FBV3irZHU4zyRBh+VY5HyITX 12JXPWp+JC7WhkG5QiuLzNECgYEA15OBzICLpx6Es4clAVT6JaSzJcyZM9MyyuOl e2ubbrpJCK/9ZBIvIPzMj/e0wiSH1wzeRrSM+ud7tkcSfk6ytptsIN67KSOoD3b3 VNCStEU7ABe5eBG1cRzeI52MyYWpNYBzzyNMSacBvWz9hMD6ivCn44pAtGfNHclw KKNYvxECgYBOamf25md9Jy6rtQsJVEJWw+8sB4lBlKEEadc5qekR7ZQ0hwj8CnTm WOo856ynI28Sog62iw8F/do/z0B29RuGuxw+prkBkn3lg/VQXEitzqcYvota6osa 8XSfaPiTyQwWpzbFNZzzemlTsIDiF3UqwkHvWaMYPDf4Ng3cokPPxw==" ], + "certificate" : [ "MIICmDCCAYACCQC7YJWOo6LVaDANBgkqhkiG9w0BAQsFADAOMQwwCgYDVQQDDANq d3QwHhcNMjIwNTA2MTQzNjQ5WhcNMjIwNjA1MTQzNjQ5WjAOMQwwCgYDVQQDDANq d3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ0/69Gf4qP5L+gZ3j 2Srr4Io0B24ILWtot9MfsHeqOZlibjnIOX5eaAr0I2YdeSGj2q+aF2Sx3MNWx14O WEoh1whZp2GoPN22fWGP/baJTCuEfRt3jvmGUpyyJn6jL+x2AaADO+aZNCZ0yoRe pXYH2r0oUGajgCGH/r53jAzWsf2TGXRG8wAot751qggciOiTyEI7hwpBATD8SsF7 hIXZyvfBDAJ8XWpW7h4BU1V/F/L+4S8bdDvHTTk/dNwr5BxiAnFq/eWJy1KLl3JY uAZAC+SSdaK2lErwpUvKkSPlYp6PoIXyerfS62fXkVCDQmQeirPCIKqp56fwYg+1 4jtXAgMBAAEwDQYJKoZIhvcNAQELBQADggEBACJlWtWnQqepYiFCijVgy/eM5KL0 rFZOZ6HNefoJTrYY1QYZrWxRz3M4u9JpUy4fBvGHxElBElcr3fXLXDytH9EwMJm1 E5x3o3qkQyWdXYGW6ZF58dklcJTdejOxEO373qpywVwbCFGiuIt7s5v4v+r2HOg3 D4elb2bqxmRim04xIkVZufKo+h6a8dBb5JEU3UaxyGDBR0IdyjhyBo1+HhH+RqZs xQhQ7DhlIGWUYZNCu13fb1GNSMiNqspKnMpFdQ4Bfpsb7vOeEK+aqJjCKcYbuGa6 BiwBjbKYyEF5r01Tob50dcVPfIGOqO0lQ3IsV31n9LSoAAtaVqioPK1rvDo=" ], + "active" : [ "true" ], + "priority" : [ "101" ], + "enabled" : [ "true" ], + "algorithm" : [ "RS256" ] + } + } ] + }, + "internationalizationEnabled" : false, + "supportedLocales" : [ ], + "authenticationFlows" : [ { + "id" : "7a71b7d2-4be9-45dc-a30a-ff9632e59cce", + "alias" : "Account verification options", + "description" : "Method with which to verity the existing account", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "idp-email-verification", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "ALTERNATIVE", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Verify Existing Account by Re-authentication", + "userSetupAllowed" : false + } ] + }, { + "id" : "9294dfac-f7e0-472e-8282-4ad98bd550e8", + "alias" : "Authentication Options", + "description" : "Authentication options.", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "basic-auth", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "basic-auth-otp", + "authenticatorFlow" : false, + "requirement" : "DISABLED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "auth-spnego", + "authenticatorFlow" : false, + "requirement" : "DISABLED", + "priority" : 30, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "4e9715f2-d971-4384-af92-068a59f7a8d0", + "alias" : "Browser - Conditional OTP", 
+ "description" : "Flow to determine if the OTP is required for the authentication", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "conditional-user-configured", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "auth-otp-form", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "440b9130-3aa0-4f26-b9cc-71959a85b0a0", + "alias" : "Direct Grant - Conditional OTP", + "description" : "Flow to determine if the OTP is required for the authentication", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "conditional-user-configured", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "direct-grant-validate-otp", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "e199fadc-92a6-4fb9-858d-4ab4dd839cfe", + "alias" : "First broker login - Conditional OTP", + "description" : "Flow to determine if the OTP is required for the authentication", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "conditional-user-configured", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "auth-otp-form", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "8f88c45e-5db3-402d-ab33-d95a92c87912", + "alias" : "Handle Existing Account", + "description" : "Handle what to do if there is existing account with same email/username like authenticated identity provider", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "idp-confirm-link", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Account verification options", + "userSetupAllowed" : false + } ] + }, { + "id" : "5f69b8d5-071c-4e1b-b558-5b01c5d77eb7", + "alias" : "Reset - Conditional OTP", + "description" : "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "conditional-user-configured", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "reset-otp", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "cbc77171-9924-4bcf-93ea-d1497e217142", + "alias" : "User creation or linking", + "description" : "Flow for the existing/non-existing user alternatives", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticatorConfig" : "create unique user config", + "authenticator" : "idp-create-user-if-unique", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "ALTERNATIVE", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Handle Existing Account", + "userSetupAllowed" : false + } ] + }, { + "id" : "05b75962-86e5-499d-bdd2-0f2261c3b6ab", + "alias" : "Verify Existing Account by Re-authentication", + "description" : "Reauthentication of existing account", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "idp-username-password-form", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "CONDITIONAL", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "First broker login - Conditional OTP", + "userSetupAllowed" : false + } ] + }, { + "id" : "834c1dfe-60a2-4fea-97b8-d3ca1b6b3276", + "alias" : "browser", + "description" : "browser based authentication", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "auth-cookie", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "auth-spnego", + "authenticatorFlow" : false, + "requirement" : "DISABLED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "identity-provider-redirector", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 25, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "ALTERNATIVE", + "priority" : 30, + "autheticatorFlow" : true, + "flowAlias" : "forms", + "userSetupAllowed" : false + } ] + }, { + "id" : "34f1d7a0-2b77-48df-bdf9-91650318f8bd", + "alias" : "clients", + "description" : "Base authentication for clients", + "providerId" : "client-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "client-secret", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "client-jwt", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "client-secret-jwt", + 
"authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 30, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "client-x509", + "authenticatorFlow" : false, + "requirement" : "ALTERNATIVE", + "priority" : 40, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "1304dc0c-5614-412d-873e-f6828e3e882d", + "alias" : "direct grant", + "description" : "OpenID Connect Resource Owner Grant", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "direct-grant-validate-username", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "direct-grant-validate-password", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "CONDITIONAL", + "priority" : 30, + "autheticatorFlow" : true, + "flowAlias" : "Direct Grant - Conditional OTP", + "userSetupAllowed" : false + } ] + }, { + "id" : "63b19807-ec4a-4f9e-be4a-744becb58049", + "alias" : "docker auth", + "description" : "Used by Docker clients to authenticate against the IDP", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "docker-http-basic-authenticator", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "577aa2f4-36e8-4f29-81ff-b1956a92d649", + "alias" : "first broker login", + "description" : "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticatorConfig" : "review profile config", + "authenticator" : "idp-review-profile", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "User creation or linking", + "userSetupAllowed" : false + } ] + }, { + "id" : "79324593-31cc-4cdd-a4fa-689949527553", + "alias" : "forms", + "description" : "Username, password, otp and other auth forms.", + "providerId" : "basic-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "auth-username-password-form", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "CONDITIONAL", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Browser - Conditional OTP", + "userSetupAllowed" : false + } ] + }, { + "id" : "38bede60-ec0b-49e7-86f4-cc9f4f96beee", + "alias" : "http challenge", + "description" : "An authentication flow based on challenge-response HTTP Authentication Schemes", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "no-cookie-redirect", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, 
+ "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : true, + "flowAlias" : "Authentication Options", + "userSetupAllowed" : false + } ] + }, { + "id" : "2b9026fb-ad05-42f7-a3b8-9eb3f90d768e", + "alias" : "registration", + "description" : "registration flow", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "registration-page-form", + "authenticatorFlow" : true, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : true, + "flowAlias" : "registration form", + "userSetupAllowed" : false + } ] + }, { + "id" : "7a723453-4b7b-4f7b-a3da-62745f576454", + "alias" : "registration form", + "description" : "registration form", + "providerId" : "form-flow", + "topLevel" : false, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "registration-user-creation", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "registration-profile-action", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 40, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "registration-password-action", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 50, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "registration-recaptcha-action", + "authenticatorFlow" : false, + "requirement" : "DISABLED", + "priority" : 60, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + }, { + "id" : "1b136997-0f64-423c-a57e-8d0b2bfd5352", + "alias" : "reset credentials", + "description" : "Reset credentials for a user if they forgot their password or something", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "reset-credentials-choose-user", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "reset-credential-email", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 20, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticator" : "reset-password", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 30, + "autheticatorFlow" : false, + "userSetupAllowed" : false + }, { + "authenticatorFlow" : true, + "requirement" : "CONDITIONAL", + "priority" : 40, + "autheticatorFlow" : true, + "flowAlias" : "Reset - Conditional OTP", + "userSetupAllowed" : false + } ] + }, { + "id" : "94d7200c-1c7f-42b5-83de-bda1e619c4f8", + "alias" : "saml ecp", + "description" : "SAML ECP Profile Authentication Flow", + "providerId" : "basic-flow", + "topLevel" : true, + "builtIn" : true, + "authenticationExecutions" : [ { + "authenticator" : "http-basic-authenticator", + "authenticatorFlow" : false, + "requirement" : "REQUIRED", + "priority" : 10, + "autheticatorFlow" : false, + "userSetupAllowed" : false + } ] + } ], + "authenticatorConfig" : [ { + "id" : "a297313c-0ab0-4cd1-acb4-ebf65ccd863b", + "alias" : "create unique user config", + "config" : { + "require.password.update.after.registration" : "false" + } + }, { + "id" : "b6bbfb62-4c6a-4212-ab77-73749ea0f50d", + "alias" : "review profile config", + "config" : { + "update.profile.on.first.login" : "missing" + } + } ], + "requiredActions" : [ { + "alias" : "CONFIGURE_TOTP", + 
"name" : "Configure OTP", + "providerId" : "CONFIGURE_TOTP", + "enabled" : true, + "defaultAction" : false, + "priority" : 10, + "config" : { } + }, { + "alias" : "terms_and_conditions", + "name" : "Terms and Conditions", + "providerId" : "terms_and_conditions", + "enabled" : false, + "defaultAction" : false, + "priority" : 20, + "config" : { } + }, { + "alias" : "UPDATE_PASSWORD", + "name" : "Update Password", + "providerId" : "UPDATE_PASSWORD", + "enabled" : true, + "defaultAction" : false, + "priority" : 30, + "config" : { } + }, { + "alias" : "UPDATE_PROFILE", + "name" : "Update Profile", + "providerId" : "UPDATE_PROFILE", + "enabled" : true, + "defaultAction" : false, + "priority" : 40, + "config" : { } + }, { + "alias" : "VERIFY_EMAIL", + "name" : "Verify Email", + "providerId" : "VERIFY_EMAIL", + "enabled" : true, + "defaultAction" : false, + "priority" : 50, + "config" : { } + }, { + "alias" : "delete_account", + "name" : "Delete Account", + "providerId" : "delete_account", + "enabled" : false, + "defaultAction" : false, + "priority" : 60, + "config" : { } + }, { + "alias" : "update_user_locale", + "name" : "Update User Locale", + "providerId" : "update_user_locale", + "enabled" : true, + "defaultAction" : false, + "priority" : 1000, + "config" : { } + } ], + "browserFlow" : "browser", + "registrationFlow" : "registration", + "directGrantFlow" : "direct grant", + "resetCredentialsFlow" : "reset credentials", + "clientAuthenticationFlow" : "clients", + "dockerAuthenticationFlow" : "docker auth", + "attributes" : { + "cibaBackchannelTokenDeliveryMode" : "poll", + "cibaExpiresIn" : "120", + "cibaAuthRequestedUserHint" : "login_hint", + "oauth2DeviceCodeLifespan" : "600", + "clientOfflineSessionMaxLifespan" : "0", + "oauth2DevicePollingInterval" : "5", + "clientSessionIdleTimeout" : "0", + "parRequestUriLifespan" : "60", + "clientSessionMaxLifespan" : "0", + "clientOfflineSessionIdleTimeout" : "0", + "cibaInterval" : "5", + "realmReusableOtpCode" : "false" + }, + "keycloakVersion" : "20.0.5", + "userManagedAccessAllowed" : false, + "clientProfiles" : { + "profiles" : [ ] + }, + "clientPolicies" : { + "policies" : [ ] + } +} diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak.p12 b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak.p12 new file mode 100644 index 000000000000..4e363100e505 Binary files /dev/null and b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak.p12 differ diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_certificate.pem b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_certificate.pem new file mode 100644 index 000000000000..f155d4123327 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_certificate.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIBDDANBgkqhkiG9w0BAQsFADBMMTswOQYDVQQDDDJUTFNH +ZW5TZWxmU2lnbmVkdFJvb3RDQSAyMDIzLTExLTE2VDEyOjI0OjQ3LjI4OTA5MzEN +MAsGA1UEBwwEJCQkJDAeFw0yNDAxMTMxMTU4NDNaFw0zNDAxMTAxMTU4NDNaMCgx +FTATBgNVBAMMDHByb2RrZXljbG9hazEPMA0GA1UECgwGc2VydmVyMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyRzkMDxZj7DP52nc4voOCz07tfpam9Qp +JbqJFwCb9SQkL/feGA86+IuzRJW9N3RozM5jeIa+yV7Obf+km4FYxPP6SffEEeM9 +SEqMAz1BNfUxGvo4XI6TmJ2u7YK0haVPDRSIGNmJO1tZgceOU0WeUkpNaNh4yF+f +3AQEEtd78ywdR/NHnx6wFCEtlPkSIoBLUX0/lF78YLkDZRBCRasUWP3m3/StUYzx 
+6V7LtBfiUhSd2W6AvxUo8NLRu70wNUyVuwwUthEj8AxeyX1SH3UybA/OT68c64NH +gZauVdDbz7cBVJCJU2fGUO8+Rq/dS7lwRymee/nZ5iqg2cfCEIsehwIDAQABo4Hf +MIHcMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB +BggrBgEFBQcDAjAwBgNVHREEKTAnggxwcm9ka2V5Y2xvYWuCDHByb2RrZXljbG9h +a4IJbG9jYWxob3N0MB0GA1UdDgQWBBRHLuo22l4IoKXLxGFVjbG7bi6oJzAfBgNV +HSMEGDAWgBS2Icxjr1ucGCIxikeSG9igJf558jAxBgNVHR8EKjAoMCagJKAihiBo +dHRwOi8vY3JsLXNlcnZlcjo4MDAwL2Jhc2ljLmNybDANBgkqhkiG9w0BAQsFAAOC +AQEAnawpUvXok9AVLD2JSnFT3hzc5sRkMMuLLR9nskGpmp594mgMKebVOMh7x/OT +2/pO8RnqTyA5AB3DJPb+1bDBtFmcWaktOLOuYOw7GXvNRzTIRmW0i65l7cgnHOdU +U3JW/D/FozY02w5nVh14NDhgHs0BsDOJXUmogsmlvKFfeKiaB8vIz6wdLlA2eg6L +AQZNjiACNbzzd2C3duSDD6BhoImN0j7QsksPtwDwujAIFZcjlz7J11KRniDbecjq +cCc/gU/Ms8q8aahK84fG9UcPZJe6MtFY0B9AmiEmq2ImFlWWHUh33eSwIr37jywN ++8bxzT1vgTTqskv+wMbM+mQa2w== +-----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_key.pem b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_key.pem new file mode 100644 index 000000000000..b2cf9e44c515 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJHOQwPFmPsM/n +adzi+g4LPTu1+lqb1CkluokXAJv1JCQv994YDzr4i7NElb03dGjMzmN4hr7JXs5t +/6SbgVjE8/pJ98QR4z1ISowDPUE19TEa+jhcjpOYna7tgrSFpU8NFIgY2Yk7W1mB +x45TRZ5SSk1o2HjIX5/cBAQS13vzLB1H80efHrAUIS2U+RIigEtRfT+UXvxguQNl +EEJFqxRY/ebf9K1RjPHpXsu0F+JSFJ3ZboC/FSjw0tG7vTA1TJW7DBS2ESPwDF7J +fVIfdTJsD85Prxzrg0eBlq5V0NvPtwFUkIlTZ8ZQ7z5Gr91LuXBHKZ57+dnmKqDZ +x8IQix6HAgMBAAECggEBAJ0IvzDe3rvxPtWedsiQogiqnoZA3yFQL3TzS3o3ko9+ +0fbWn4e/1LcgNjF2jpHPhsls2oTRCgYozh1cAUcfX5YiP6wkF+gzvLVG6D7bRKEC +PH6pJPs4pQ0FCwMQDS9R3gEDqCVnLt23PZO1o29oK/BrbjhQ1zb2W9erFxczROih +hHMpLucuY/X55/6QrbyosNqjXCTpoR98Bk6xnvMyuXuIwCgQCT6HD8yvKH3+gG06 +LOQ3t9jy+JIiiwX7l/JNJPYZr+ElXlZa4DGO15/91qcDZbBIsmGJsZHlaglojjUn +utyrqnai1jInZPMGvlZfuLkAuOPtJKMZdXoS8LzlcXkCgYEA+ukTVtlxYHtGb84I +xR2YQ7Zn1pYJj6Sc01wQuo+oHpFuOpi/VUGrsnKN9W1bxL7T8TJC0Rjffz7mfuGs +5YoWFOplVju0sG1KtpQ2qBKAaMiGsPoa4L2VbZnlyzQj1rDa0RYwW+zNnbGfipdg +jqfsjknvGA/aaLgbkMv0ZH5GJyMCgYEAzTE6P3EcZheU+swDUwpoOYkVRCH39xy5 +roX0VLwpU7ARUqgmBj22Z1dnh9WM1+9Rc+LYFOtY1C1IWfPy/x/edJel5hHW+8EF +80kYp3Hv6CfYWlVDDxbmzpN8lHnYKigR/eKVq32jSMoQ4NTduwBb3NkMHHQG3cft +885zPFrLU00CgYEAx7sLmwICn4PiIRQIpSiW0af85rOOrtqhwBo0ct3yPUsVTO3U +uQBKtgU8fdbsyyQAwKp6x8od90PR5cSthhcy1rlzq35hqmOFqus2yvnXYBHoLi8Z +gDdKIPH2G5jIwpkLxo78NeC+GL6ROpif009XHjk6a5QLD3sm7k98nxZpr7MCgYBD +Oj27S3PifxdwlCcCrgY305IEIJz9eYvcgkbq/DsOEEGcszrCELYSZbCl8HGUzfQB +4/Cn6fPQkIWD80lKDUb1LDpOhsnI8hThALHzKoFPrr5T2lt+NiKoy+mlO8Z3CWnb +pMEkzqUQ1CNzhkqfWh6+3N369IjLYRW1K47V12mGgQKBgCXyTridJ0HZRuspKOo0 +SGQONUEELIs9jOyqZqt3/0vhhkD9rpyEL+J1dr+pKTAFTw3G0bC8A2FlykCnD2Ph +rMUucItj6svLLPIN8GzLxI2c1h5lwbPpVDyVIkcZCqbJ9V0vLzP+JmIsDscQG3xw +SyfaSuozFOSzgIg/ZZNEGT9P +-----END PRIVATE KEY----- diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.basic-management.conf b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.basic-management.conf new file mode 100644 index 000000000000..abe6d6c0a06b --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.basic-management.conf @@ -0,0 +1,15 @@ + + +# Common management setting for all resources +management.oauth_enabled = true +management.oauth_scopes = openid profile rabbitmq.tag:administrator + +## Management ui settings for each declared resource server 
+management.oauth_resource_servers.1.id = rabbit_prod +management.oauth_resource_servers.1.oauth_client_id = rabbit_prod_mgt_ui + +management.oauth_resource_servers.2.id = rabbit_dev +management.oauth_resource_servers.2.oauth_client_id = rabbit_dev_mgt_ui + +management.oauth_resource_servers.3.id = rabbit_internal +management.oauth_resource_servers.3.disabled = true diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.conf b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.conf new file mode 100644 index 000000000000..a53547c10edf --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.conf @@ -0,0 +1,48 @@ +## RabbitMQ configuration with 2 oauth2 resources, rabbit_prod and rabbit_dev, +## rather than a single resource_server_id +## Also, each resource is owned by its own oauth provider, i.e. RabbitMQ is +## accessed by users and clients from two different providers using their dedicated +## resource_server_id. + +log.console.level = debug + +auth_backends.1 = rabbit_auth_backend_oauth2 + +# Common auth_oauth2 settings for all resources +auth_oauth2.preferred_username_claims.1 = preferred_username +auth_oauth2.preferred_username_claims.2 = user_name +auth_oauth2.preferred_username_claims.3 = email +auth_oauth2.scope_prefix = rabbitmq. + +## Resource servers hosted by this rabbitmq instance +auth_oauth2.resource_servers.1.id = rabbit_prod +auth_oauth2.resource_servers.1.oauth_provider_id = prodkeycloak +auth_oauth2.resource_servers.2.id = rabbit_dev +auth_oauth2.resource_servers.2.oauth_provider_id = devkeycloak +auth_oauth2.resource_servers.3.id = rabbit_internal +auth_oauth2.resource_servers.3.oauth_provider_id = devkeycloak + +## Oauth providers +auth_oauth2.oauth_providers.devkeycloak.issuer = ${DEVKEYCLOAK_URL} +auth_oauth2.oauth_providers.devkeycloak.https.cacertfile = ${DEVKEYCLOAK_CA_CERT} +auth_oauth2.oauth_providers.devkeycloak.https.verify = verify_peer +auth_oauth2.oauth_providers.devkeycloak.https.hostname_verification = wildcard + +auth_oauth2.oauth_providers.prodkeycloak.issuer = ${PRODKEYCLOAK_URL} +auth_oauth2.oauth_providers.prodkeycloak.https.cacertfile = ${PRODKEYCLOAK_CA_CERT} +auth_oauth2.oauth_providers.prodkeycloak.https.verify = verify_peer +auth_oauth2.oauth_providers.prodkeycloak.https.hostname_verification = wildcard + +# Common management setting for all resources +management.oauth_enabled = true +management.oauth_scopes = openid profile rabbitmq.tag:management rabbitmq.tag:administrator + +## Management ui settings for each declared resource server +management.oauth_resource_servers.1.id = rabbit_prod +management.oauth_resource_servers.1.oauth_client_id = rabbit_prod_mgt_ui + +management.oauth_resource_servers.2.id = rabbit_dev +management.oauth_resource_servers.2.oauth_client_id = rabbit_dev_mgt_ui + +management.oauth_resource_servers.3.id = rabbit_internal +management.oauth_resource_servers.3.disabled = true diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.enable-basic-auth.conf b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.enable-basic-auth.conf new file mode 100644 index 000000000000..702b20fc60b0 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.enable-basic-auth.conf @@ -0,0 +1,5 @@ + +auth_backends.2 = rabbit_auth_backend_internal + +management.oauth_disable_basic_auth = false +load_definitions = ${RABBITMQ_TEST_DIR}/imports/users.json diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.tls.conf 
b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.tls.conf new file mode 100644 index 000000000000..61107323c637 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.tls.conf @@ -0,0 +1,14 @@ +auth_backends.1 = rabbit_auth_backend_oauth2 + +listeners.ssl.1 = 5671 + +ssl_options.cacertfile = ${RABBITMQ_TEST_DIR}/certs/ca_certificate.pem +ssl_options.certfile = ${RABBITMQ_TEST_DIR}/certs/server_rabbitmq_certificate.pem +ssl_options.keyfile = ${RABBITMQ_TEST_DIR}/certs/server_rabbitmq_key.pem +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = true + +management.ssl.port = 15671 +management.ssl.cacertfile = ${RABBITMQ_TEST_DIR}/certs/ca_certificate.pem +management.ssl.certfile = ${RABBITMQ_TEST_DIR}/certs/server_rabbitmq_certificate.pem +management.ssl.keyfile = ${RABBITMQ_TEST_DIR}/certs/server_rabbitmq_key.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-label.conf b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-label.conf new file mode 100644 index 000000000000..240b19b2b7d8 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-label.conf @@ -0,0 +1,3 @@ + +management.oauth_resource_servers.1.label = RabbitMQ Production +management.oauth_resource_servers.2.label = RabbitMQ Development diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-scopes.conf b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-scopes.conf new file mode 100644 index 000000000000..54580e980383 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-scopes.conf @@ -0,0 +1,6 @@ + +management.oauth_resource_servers.1.label = RabbitMQ Production +management.oauth_resource_servers.2.label = RabbitMQ Development + +management.oauth_resource_servers.1.oauth_scopes = openid profile rabbitmq.tag:administrator +management.oauth_resource_servers.2.oauth_scopes = openid profile rabbitmq.tag:management diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/happy-login.js b/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/happy-login.js new file mode 100644 index 000000000000..3b6fccc73685 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/happy-login.js @@ -0,0 +1,36 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') +const OverviewPage = require('../../pageobjects/OverviewPage') + +describe('When basic authentication is enabled but both Idps are down', function () { + let homePage + let idpLogin + let overview + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + idpLogin = idpLoginPage(driver) + overview = new OverviewPage(driver) + captureScreen = captureScreensFor(driver, __filename) + }) + + it('can log in with Basic Auth', async function () { + await homePage.toggleBasicAuthSection() + await homePage.basicAuthLogin('guest', 'guest') + if (!await overview.isLoaded()) { + throw new Error('Failed to login') + } + await overview.logout() + }) + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git 
a/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/landing.js b/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/landing.js new file mode 100644 index 000000000000..30bc40a4436a --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/landing.js @@ -0,0 +1,44 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, teardown, captureScreensFor } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') + +describe('When basic authentication is enabled but both Idps are down', function () { + let driver + let homePage + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + captureScreen = captureScreensFor(driver, __filename) + }) + + it('should display a warning message for all oauth2 resources', async function () { + await homePage.isLoaded() + const warnings = await homePage.getWarnings() + + assert.equal(2, warnings.length) + const warning0 = await warnings[0].getText() + assert.equal(true, warning0.startsWith("OAuth resource [RabbitMQ Development] not available")) + assert.equal(true, warning0.endsWith("not reachable")) + const warning1 = await warnings[1].getText() + assert.equal(true, warning1.startsWith("OAuth resource [RabbitMQ Production] not available")) + assert.equal(true, warning1.endsWith("not reachable")) + + }) + + it('should not be presented oauth2 section', async function () { + await homePage.isLoaded() + if (await homePage.isOAuth2SectionVisible()) { + throw new Error('OAuth2 section should not be present') + } + }) + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/happy-login.js b/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/happy-login.js new file mode 100644 index 000000000000..2932d402c4b2 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/happy-login.js @@ -0,0 +1,36 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') +const OverviewPage = require('../../pageobjects/OverviewPage') + +describe('Given two oauth resources and basic auth enabled', function () { + let homePage + let idpLogin + let overview + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + idpLogin = idpLoginPage(driver) + overview = new OverviewPage(driver) + captureScreen = captureScreensFor(driver, __filename) + }) + + it('guest registered in internal db can log in with Basic Auth', async function () { + await homePage.toggleBasicAuthSection() + await homePage.basicAuthLogin('guest', 'guest') + if (!await overview.isLoaded()) { + throw new Error('Failed to login') + } + await overview.logout() + }) + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/landing.js b/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/landing.js new file mode 100644 index 
000000000000..df286b7c6349 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/landing.js @@ -0,0 +1,56 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, assertAllOptions, hasProfile } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') + +describe('Given two oauth resources and basic auth enabled, an unauthenticated user', function () { + let homePage + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + captureScreen = captureScreensFor(driver, __filename) + await homePage.isLoaded() + }) + + it('should be presented with a login button to log in using OAuth 2.0', async function () { + await homePage.getOAuth2Section() + assert.equal(await homePage.getLoginButton(), 'Click here to log in') + }) + + it('there should be two OAuth resources to choose from', async function () { + resources = await homePage.getOAuthResourceOptions() + if (hasProfile("with-resource-label")) { + assertAllOptions([ + { value : "rabbit_dev", text : "RabbitMQ Development" }, + { value : "rabbit_prod", text : "RabbitMQ Production" } + ], resources) + }else { + assertAllOptions([ + { value : "rabbit_dev", text : "rabbit_dev" }, + { value : "rabbit_prod", text : "rabbit_prod" } + ], resources) + } + }) + + + it('should be presented with a login button to log in using Basic Auth', async function () { + await homePage.toggleBasicAuthSection() + await homePage.getBasicAuthSection() + assert.equal(await homePage.getBasicAuthLoginButton(), 'Login') + }) + + it('should not have a warning message', async function () { + const visible = await homePage.isWarningVisible() + assert.ok(!visible) + }) + + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/happy-login.js b/deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/happy-login.js new file mode 100644 index 000000000000..da8915b57a7f --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/happy-login.js @@ -0,0 +1,57 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage, hasProfile } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') +const OverviewPage = require('../../pageobjects/OverviewPage') + +describe('Given there are three oauth resources but two enabled', function () { + let homePage + let idpLogin + let overview + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + idpLogin = idpLoginPage(driver) + overview = new OverviewPage(driver) + captureScreen = captureScreensFor(driver, __filename) + }) + + it('dev_user registered in devkeycloak can log in using RabbitMQ Development OAuth 2.0 resource', async function () { + if (hasProfile("with-resource-label")) { + await homePage.chooseOauthResource("RabbitMQ Development") + }else { + await homePage.chooseOauthResource("rabbit_dev") + } + await homePage.clickToLogin() + await idpLogin.login('dev_user', 'dev_user') + if (!await overview.isLoaded()) { + throw new Error('Failed to login') + } + 
assert.ok(!await overview.isPopupWarningDisplayed()) + await overview.logout() + }) + it('prod_user registered in prodkeycloak can log in using RabbitMQ Production OAuth 2.0 resource', async function () { + if (hasProfile("with-resource-label")) { + await homePage.chooseOauthResource("RabbitMQ Production") + }else { + await homePage.chooseOauthResource("rabbit_prod") + } + await homePage.clickToLogin() + await idpLogin.login('prod_user', 'prod_user') + if (!await overview.isLoaded()) { + throw new Error('Failed to login') + } + assert.ok(!await overview.isPopupWarningDisplayed()) + await overview.logout() + }) + + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/landing.js b/deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/landing.js new file mode 100644 index 000000000000..662bb09c1cdf --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/landing.js @@ -0,0 +1,53 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, assertAllOptions, hasProfile } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') + + +describe('Given three oauth resources but only two enabled, an unauthenticated user', function () { + let homePage + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + captureScreen = captureScreensFor(driver, __filename) + await homePage.isLoaded() + }) + + it('should be presented with a login button to log in using OAuth 2.0', async function () { + await homePage.getOAuth2Section() + assert.equal(await homePage.getLoginButton(), 'Click here to log in') + }) + + it('there should be two OAuth resources to choose from', async function () { + resources = await homePage.getOAuthResourceOptions() + if (hasProfile("with-resource-label")) { + assertAllOptions([ + { value : "rabbit_dev", text : "RabbitMQ Development" }, + { value : "rabbit_prod", text : "RabbitMQ Production" } + ], resources) + }else { + assertAllOptions([ + { value : "rabbit_dev", text : "rabbit_dev" }, + { value : "rabbit_prod", text : "rabbit_prod" } + ], resources) + } + }) + + it('should not be presented with a login button to log in using Basic Auth', async function () { + assert.ok(!await homePage.isBasicAuthSectionVisible()) + }) + + it('should not have a warning message', async function () { + assert.ok(!await homePage.isWarningVisible()) + }) + + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.docker b/deps/rabbitmq_management/selenium/test/oauth/.env.docker deleted file mode 100644 index a50de500c795..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/.env.docker +++ /dev/null @@ -1 +0,0 @@ -export OAUTH_SIGNING_KEY_DIR=/config diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.docker.keycloak b/deps/rabbitmq_management/selenium/test/oauth/.env.docker.keycloak deleted file mode 100644 index 7da860dcfe3d..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/.env.docker.keycloak +++ /dev/null @@ -1,2 +0,0 @@ -export KEYCLOAK_URL=http://keycloak:8080/realms/test -export OAUTH_JKWS_URL="https://keycloak:8443/realms/test/protocol/openid-connect/certs" diff --git
a/deps/rabbitmq_management/selenium/test/oauth/.env.docker.uaa b/deps/rabbitmq_management/selenium/test/oauth/.env.docker.uaa deleted file mode 100644 index cc59eed9b457..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/.env.docker.uaa +++ /dev/null @@ -1,2 +0,0 @@ -export OAUTH_SIGNING_KEY_DIR=/config -export UAA_URL=http://uaa:8080 diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.keycloak-oauth-provider b/deps/rabbitmq_management/selenium/test/oauth/.env.keycloak-oauth-provider deleted file mode 100644 index df1e3d27032b..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/.env.keycloak-oauth-provider +++ /dev/null @@ -1 +0,0 @@ -export OAUTH_PROVIDER_URL=${KEYCLOAK_URL} diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.local b/deps/rabbitmq_management/selenium/test/oauth/.env.local deleted file mode 100644 index 48b1e666cc3a..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/.env.local +++ /dev/null @@ -1 +0,0 @@ -export OAUTH_SIGNING_KEY_DIR=deps/rabbitmq_management/selenium/test/${OAUTH_SIGNING_KEY_PATH} diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.local.keycloak b/deps/rabbitmq_management/selenium/test/oauth/.env.local.keycloak deleted file mode 100644 index 635e7f766922..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/.env.local.keycloak +++ /dev/null @@ -1,2 +0,0 @@ -export OAUTH_PROVIDER_URL=http://localhost:8080/realms/test -export OAUTH_JKWS_URL="https://localhost:8443/realms/test/protocol/openid-connect/certs" diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.local.uaa b/deps/rabbitmq_management/selenium/test/oauth/.env.local.uaa deleted file mode 100644 index 7f0e1972cda7..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/.env.local.uaa +++ /dev/null @@ -1,2 +0,0 @@ -export OAUTH_SIGNING_KEY_DIR=deps/rabbitmq_management/selenium/test/${OAUTH_SIGNING_KEY_PATH} -export UAA_URL=http://localhost:8080 diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.uaa b/deps/rabbitmq_management/selenium/test/oauth/.env.uaa deleted file mode 100644 index 2fb03bb9a5f7..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/.env.uaa +++ /dev/null @@ -1,4 +0,0 @@ -export OAUTH_SIGNING_KEY_ID=legacy-token-key -export OAUTH_SIGNING_KEY_PATH=oauth/uaa -export OAUTH_CLIENT_SECRET=rabbit_client_code -export OAUTH_SCOPES="openid profile rabbitmq.*" diff --git a/deps/rabbitmq_management/selenium/test/oauth/certs/ca_certificate.pem b/deps/rabbitmq_management/selenium/test/oauth/certs/ca_certificate.pem new file mode 100644 index 000000000000..cd37bea304f5 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/certs/ca_certificate.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIJAOA06nrAwraBMA0GCSqGSIb3DQEBCwUAMEwxOzA5BgNV +BAMMMlRMU0dlblNlbGZTaWduZWR0Um9vdENBIDIwMjMtMTEtMTZUMTI6MjQ6NDcu +Mjg5MDkzMQ0wCwYDVQQHDAQkJCQkMB4XDTIzMTExNjExMjQ0N1oXDTMzMTExMzEx +MjQ0N1owTDE7MDkGA1UEAwwyVExTR2VuU2VsZlNpZ25lZHRSb290Q0EgMjAyMy0x +MS0xNlQxMjoyNDo0Ny4yODkwOTMxDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDWJrvvUvpkiAhvIiciuTbFHRMC7VdOXdIM3y3I +Vt56Voj3dkCVitFcvTc+pkuqoQUaWRTc5M+875CaQSRIDfVyFTIGTyVXv6cZRcoz +0gcmYvopIJ4Wi5/xG9Qp8uJMtr+UBJ57ez6Urau/L3zETAVZA+y1bTylAlh4tjMH +I24bvyy4yNQbPtG4y5F9x484fn3H4x7lf6O/Xulcvy8vL1kyc/EgrF4fpjogwj58 +eQ5HLwbAlMRRxXxXX2U5tXlrv475WItp/1mhZ+j2yCMKB4tJ8tXbtpgou0JDtlN0 +8Jwm3+d5a6PxqynmgRAXStZ4Fda93Pa3FJfw1u63JrmOprG9AgMBAAGjYDBeMA8G 
+A1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBS2Icxjr1ucGCIx +ikeSG9igJf558jAfBgNVHSMEGDAWgBS2Icxjr1ucGCIxikeSG9igJf558jANBgkq +hkiG9w0BAQsFAAOCAQEAR0iG00uE2GnoWtaXEHYJTdvBBcStBB8qnRk19Qu/b8qd +HAhRGb31IiuYzNJxLxhOtXWQMKvsKPAKpPXP3c5XVAf2O156GoXEPkKQktF738Pp +rRlrQPqU9Qpm84rMC54EB7coxEs7HMx4do/kNaVPdqq++JIEAcWOEVKfudN+8TMR +XyUJT54jBacsTpAZNfY6boJmuQ+G6tkpQvlHOU6388IFuLPkYRO7h7CHVbDsMEXD +Ptg3PCK97nCVgs4xfQGR7nT2pawfEUQVMon/XShtXY0RIKpynwrgICHDdvMXRXlG +a4haA7sz8Wyroy6Ub5+X3s4YRumSQrhiwRzqU+f75A== +-----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_certificate.pem b/deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_certificate.pem new file mode 100644 index 000000000000..ef57ff61a411 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_certificate.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDxDCCAqygAwIBAgIBDTANBgkqhkiG9w0BAQsFADBMMTswOQYDVQQDDDJUTFNH +ZW5TZWxmU2lnbmVkdFJvb3RDQSAyMDIzLTExLTE2VDEyOjI0OjQ3LjI4OTA5MzEN +MAsGA1UEBwwEJCQkJDAeFw0yNDAyMDkwODE3MDFaFw0zNDAyMDYwODE3MDFaMCQx +ETAPBgNVBAMMCHJhYmJpdG1xMQ8wDQYDVQQKDAZzZXJ2ZXIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCjxmYRJeYfOnQ91ZSIZsjznnPiy0yukFnapF7Y +iIXxFCygEnw/hwqSG7ddkvDjNlc6P+K4rEEBmER87mEl0YqvAZ9/C6K4OANJFuD7 +kQYH3Uyt+aXJfeyByAjr8HM/jSHDZm5DpysVlSBMkJGg4sV9h38i0aT27+J0a4xm +Yb9pH+bbWKn4QflvOQi7IcyZ+PcB54/vCDZRtlypkT/6EuqTXqRHH9wGlYaos+Jo +XMQDWykYtN2160E1gUwW1OhdRlDHj21Tej9fYObRjb326au4e3ivTPqKYLYsSz0Y +dcRoM6SjvwGiAC131n2XeHyKTQrMeKOb+TTVHzJZG7iUM5iBAgMBAAGjgdgwgdUw +CQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG +AQUFBwMCMCkGA1UdEQQiMCCCCHJhYmJpdG1xgglsb2NhbGhvc3SCCWxvY2FsaG9z +dDAdBgNVHQ4EFgQUs9vJtNmoNWybsVgMmeRqcPGXRckwHwYDVR0jBBgwFoAUtiHM +Y69bnBgiMYpHkhvYoCX+efIwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovL2NybC1z +ZXJ2ZXI6ODAwMC9iYXNpYy5jcmwwDQYJKoZIhvcNAQELBQADggEBAHxsmfxpoGZg +AlLu+Y62TQxqp2i+PqLJHuGBdB/93NV3S3P3tlDaqHwYt0mveS7ej+JXhw9wvSZz +jmejWePL08FXD9KPggRP4/SsG6Adf/5+vcofYR23I7D4y9hsrDqZezCurWZ4LY4X +dYmIQcI6IwgcjffWhsyt3CEbU+yVg6jrjVWv5sVPi3xZUu/dwpTdrdNzeUIFM8vf +H3BS8EcLwtaNR4snLJlFIhuDfDv7Ewi1FsmM4zkSe/aHboUNDduI2poRW/EPtbdM +zD1pVXNh1Q9hkqFCD7l4Vua+JVsA7PWD7yr73pm2ak6GfgjA7Enj0a6KbAfAXLMr +otRknmbKCUU= +-----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_key.pem b/deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_key.pem new file mode 100644 index 000000000000..f5df03f73df8 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCjxmYRJeYfOnQ9 +1ZSIZsjznnPiy0yukFnapF7YiIXxFCygEnw/hwqSG7ddkvDjNlc6P+K4rEEBmER8 +7mEl0YqvAZ9/C6K4OANJFuD7kQYH3Uyt+aXJfeyByAjr8HM/jSHDZm5DpysVlSBM +kJGg4sV9h38i0aT27+J0a4xmYb9pH+bbWKn4QflvOQi7IcyZ+PcB54/vCDZRtlyp +kT/6EuqTXqRHH9wGlYaos+JoXMQDWykYtN2160E1gUwW1OhdRlDHj21Tej9fYObR +jb326au4e3ivTPqKYLYsSz0YdcRoM6SjvwGiAC131n2XeHyKTQrMeKOb+TTVHzJZ +G7iUM5iBAgMBAAECggEAOdYOpW+k3NJfypZqZeEmhiIm+qig4+TGVphFhmJwKrrd +J4pfpm+iJAb1sm3588N0+nUlM+Jg8pc7WIM2e4yMVVFVaiBJzpS5VE5oFW8Zmh1k +vuuyyH1X0F08CVZY3NCSY9cAiZO3e1+2kFNdmlt7MuFu3HT8tNfyOPriEiXi2tSA +qmgUmMql305wYwjIp+mTP8X7YKKdIdCXwPC2E1Kj5SseEc9NYvHdmeJ3nZCVATbS +h8aP7HB5GpsDMHbnnFzOqPfxIPxYkJ4JqE0iGpw+SMYbIGLVkMEGodpWjBwZiaaI +EMeJJk3Qs/QvVLDxhSsFXsaLGLgYN0rItYX9dUyroQKBgQDOOLKJ9OPcm3sAWo9e +byRYegDPPM06Es5s0hF0Pr0u6X8F7fDnpS74XVMlWxZzvXWgZQNwC2nYaGfNpK5t 
+E2FxIC0S69W4m1L6sp2sTRLSJo5NiZc4kNVjGvnmgIrNqMhJK8pLOh5xx6/kAbpo +/lydhtXWP0omw5imFkh3bGQuZwKBgQDLTsCu01OCNuQs0Y9hgW/iHzRpX1aHvp8X +u8v/AtOS3z5a3WptrLah/HHM5B/4Hh9dW4uljuR0zTsk8dFD8lQ/mdxbXjPGEcN6 +QNe1Md2nV0xAZsW1Xp1iFDomS5xSn+qWDmR0EAXvs0hHMQnX1k7+dp2mK1whRwdM +z4mv0cZg1wKBgDnuzaFZ7aVs/GoGBt7FpFVCuPV/JDxbSihh/0tD0MvcBrY4uQOq +cP6O4SvOYglTwTa1CfkxC6Qi+H5Z9DJqTmaEXoVBQYIiCHarNQZRhKcK89EuhQ/8 +CCZWTrwFgnjyIIoFxkfJ5QGb0nrgTWjvhD8wwOP2VbN8IWcPPX5nMeGjAoGBAL7b +y59T3E2d4k8A3C2ZKcOJr9ZMHhuJJClPr45SxPRYh10eB0+2mC0xpFPIxQpUnPUz +f8GIh4fvMtrX+LBkyhp7ApbztH75Jh2ayeXcTk1OctLyqCBAFleAzaYtzS7z2XHN +SRh8AlaoY+4RZ0AsfDP+frkEc5T57Sx6mLNpp2Y5AoGAXG5BGedrCMa44Ugpux41 +saTIlaXUOObxdsGTLMOy1Ppb9LW5yk4kS8ObP3SksjUUZrRUO/BagLukgcaS038/ +AbNDU5lMCmMfwxPN2lulERhaIA1BeVgmOwJYY7nqXkL5Yibu0OXnvvbCkt0eLnp2 +ATZBECwIxNuB9pixRmDhXsM= +-----END PRIVATE KEY----- diff --git a/deps/rabbitmq_management/selenium/test/oauth/enabled_plugins b/deps/rabbitmq_management/selenium/test/oauth/enabled_plugins index feaf7912d783..c91f7ba880c3 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/enabled_plugins +++ b/deps/rabbitmq_management/selenium/test/oauth/enabled_plugins @@ -1,5 +1,5 @@ [accept,amqp10_client,amqp_client,base64url,cowboy,cowlib,eetcd,gun,jose, - prometheus,rabbitmq_amqp1_0,rabbitmq_auth_backend_cache, + oauth2_client,prometheus,rabbitmq_amqp1_0,rabbitmq_auth_backend_cache, rabbitmq_auth_backend_http,rabbitmq_auth_backend_ldap, rabbitmq_auth_backend_oauth2,rabbitmq_auth_mechanism_ssl,rabbitmq_aws, rabbitmq_consistent_hash_exchange,rabbitmq_event_exchange, diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.docker b/deps/rabbitmq_management/selenium/test/oauth/env.docker new file mode 100644 index 000000000000..3df42ae5f91d --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/env.docker @@ -0,0 +1 @@ +export OAUTH_SERVER_CONFIG_BASEDIR=/config diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.docker.fakeportal b/deps/rabbitmq_management/selenium/test/oauth/env.docker.fakeportal similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/.env.docker.fakeportal rename to deps/rabbitmq_management/selenium/test/oauth/env.docker.fakeportal diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.docker.fakeproxy b/deps/rabbitmq_management/selenium/test/oauth/env.docker.fakeproxy similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/.env.docker.fakeproxy rename to deps/rabbitmq_management/selenium/test/oauth/env.docker.fakeproxy diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.docker.keycloak b/deps/rabbitmq_management/selenium/test/oauth/env.docker.keycloak new file mode 100644 index 000000000000..774a99ff3c9b --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/env.docker.keycloak @@ -0,0 +1,3 @@ +export KEYCLOAK_URL=https://keycloak:8443/realms/test +export OAUTH_PROVIDER_URL=https://keycloak:8443/realms/test +export OAUTH_PROVIDER_CA_CERT=/config/oauth/keycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.docker.uaa b/deps/rabbitmq_management/selenium/test/oauth/env.docker.uaa new file mode 100644 index 000000000000..afc439185290 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/env.docker.uaa @@ -0,0 +1 @@ +export UAA_URL=http://uaa:8080 diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.enabled_basic_auth b/deps/rabbitmq_management/selenium/test/oauth/env.enabled_basic_auth new file mode 100644 index 000000000000..121f3d7ea972 --- /dev/null 
+++ b/deps/rabbitmq_management/selenium/test/oauth/env.enabled_basic_auth @@ -0,0 +1 @@ +export DISABLED_BASIC_AUTH=false diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.fakeportal-oauth-provider b/deps/rabbitmq_management/selenium/test/oauth/env.fakeportal-oauth-provider similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/.env.fakeportal-oauth-provider rename to deps/rabbitmq_management/selenium/test/oauth/env.fakeportal-oauth-provider diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.fakeproxy b/deps/rabbitmq_management/selenium/test/oauth/env.fakeproxy similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/.env.fakeproxy rename to deps/rabbitmq_management/selenium/test/oauth/env.fakeproxy diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.keycloak b/deps/rabbitmq_management/selenium/test/oauth/env.keycloak similarity index 63% rename from deps/rabbitmq_management/selenium/test/oauth/.env.keycloak rename to deps/rabbitmq_management/selenium/test/oauth/env.keycloak index 7b995063bf5a..7025c2930a28 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/.env.keycloak +++ b/deps/rabbitmq_management/selenium/test/oauth/env.keycloak @@ -1,3 +1,3 @@ -export OAUTH_SIGNING_KEY_PATH=oauth/keycloak +export OAUTH_SERVER_CONFIG_DIR=${OAUTH_SERVER_CONFIG_BASEDIR}/oauth/keycloak export OAUTH_SIGNING_KEY_ID=Gnl2ZlbRh3rAr6Wymc988_5cY7T5GuePd5dpJlXDJUk export OAUTH_SCOPES="openid profile rabbitmq.tag:management" diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.keycloak-oauth-provider b/deps/rabbitmq_management/selenium/test/oauth/env.keycloak-oauth-provider new file mode 100644 index 000000000000..74d6e94ad01d --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/env.keycloak-oauth-provider @@ -0,0 +1 @@ +# export OAUTH_PROVIDER_URL=${KEYCLOAK_URL} diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.local b/deps/rabbitmq_management/selenium/test/oauth/env.local new file mode 100644 index 000000000000..d61f528c4e4a --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/env.local @@ -0,0 +1 @@ +export OAUTH_SERVER_CONFIG_BASEDIR=deps/rabbitmq_management/selenium/test diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.local.fakeportal b/deps/rabbitmq_management/selenium/test/oauth/env.local.fakeportal similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/.env.local.fakeportal rename to deps/rabbitmq_management/selenium/test/oauth/env.local.fakeportal diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.local.fakeproxy b/deps/rabbitmq_management/selenium/test/oauth/env.local.fakeproxy similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/.env.local.fakeproxy rename to deps/rabbitmq_management/selenium/test/oauth/env.local.fakeproxy diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.local.keycloak b/deps/rabbitmq_management/selenium/test/oauth/env.local.keycloak new file mode 100644 index 000000000000..1fa28ef79232 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/env.local.keycloak @@ -0,0 +1,3 @@ +export KEYCLOAK_URL=https://localhost:8443/realms/test +export OAUTH_PROVIDER_URL=https://localhost:8443/realms/test +export OAUTH_PROVIDER_CA_CERT=deps/rabbitmq_management/selenium/test/oauth/keycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.local.uaa b/deps/rabbitmq_management/selenium/test/oauth/env.local.uaa new file mode 100644 index 
000000000000..40d8bf716099 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/env.local.uaa @@ -0,0 +1 @@ +export UAA_URL=http://localhost:8080 diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.mgt-prefix b/deps/rabbitmq_management/selenium/test/oauth/env.mgt-prefix similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/.env.mgt-prefix rename to deps/rabbitmq_management/selenium/test/oauth/env.mgt-prefix diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.uaa b/deps/rabbitmq_management/selenium/test/oauth/env.uaa new file mode 100644 index 000000000000..506e68ac66f7 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/env.uaa @@ -0,0 +1,4 @@ +export OAUTH_SIGNING_KEY_ID=legacy-token-key +export OAUTH_SERVER_CONFIG_DIR=${OAUTH_SERVER_CONFIG_BASEDIR}/oauth/uaa +export OAUTH_CLIENT_SECRET=rabbitmq_client_code +export OAUTH_SCOPES="openid profile rabbitmq.*" diff --git a/deps/rabbitmq_management/selenium/test/oauth/.env.uaa-oauth-provider b/deps/rabbitmq_management/selenium/test/oauth/env.uaa-oauth-provider similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/.env.uaa-oauth-provider rename to deps/rabbitmq_management/selenium/test/oauth/env.uaa-oauth-provider diff --git a/deps/rabbitmq_management/selenium/test/oauth/imports/users.json b/deps/rabbitmq_management/selenium/test/oauth/imports/users.json new file mode 100644 index 000000000000..e6b99e3b2b4d --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/imports/users.json @@ -0,0 +1,85 @@ +{ + "users": [ + { + "name": "guest", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "administrator" + ], + "limits": {} + }, + { + "name": "administrator-only", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "administrator" + ], + "limits": {} + }, + { + "name": "management-only", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "management" + ], + "limits": {} + }, + { + "name": "management", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "management" + ], + "limits": {} + }, + { + "name": "monitoring-only", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ + "monitoring" + ], + "limits": {} + }, + { + "name": "rabbit_no_management", + "password_hash": "Joz9zzUBOrX10lB3GisWN5oTXK+wj0gxS/nyrfTYmBOuhps5", + "hashing_algorithm": "rabbit_password_hashing_sha256", + "tags": [ ], + "limits": {} + } + ], + "vhosts": [ + { + "name": "/" + } + ], + "permissions": [ + { + "user": "guest", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" + }, + { + "user": "management", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" + }, + { + "user": "rabbit_no_management", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" + } + ] + +} diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/ca_certificate.pem b/deps/rabbitmq_management/selenium/test/oauth/keycloak/ca_certificate.pem index 40c0fd039d0c..cd37bea304f5 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/keycloak/ca_certificate.pem +++ 
b/deps/rabbitmq_management/selenium/test/oauth/keycloak/ca_certificate.pem @@ -1,21 +1,21 @@ -----BEGIN CERTIFICATE----- -MIIDezCCAmOgAwIBAgIJAIafyIrD8MVnMA0GCSqGSIb3DQEBCwUAMEwxOzA5BgNV -BAMMMlRMU0dlblNlbGZTaWduZWR0Um9vdENBIDIwMjMtMDYtMTNUMTI6MzY6NDQu -ODUwMzcxMQ0wCwYDVQQHDAQkJCQkMB4XDTIzMDYxMzEwMzY0NFoXDTMzMDYxMDEw -MzY0NFowTDE7MDkGA1UEAwwyVExTR2VuU2VsZlNpZ25lZHRSb290Q0EgMjAyMy0w -Ni0xM1QxMjozNjo0NC44NTAzNzExDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQC9iC76OIDMAGmKdz5cRPJoEW7p0je0t0MtcerY -qx4/E+Ft9ZCGaKLDCfUIit2Zv4JaGoPEI3zCcA41M0EuC3cI99s5tvOnRMM8DFTk -ARWP848shQpbCFXozD99Q9A3lriie8s3an/MZS/B401ujo9KNdg9P9rxjasIN3Oo -y0GOi306xQ1kxuXKvutuPjYZE0DWXY0GGNQk9jTVJ53qNSLJWLfsjDvOt9VnlbXM -GIX2RIQhE5/abpo6DPehXfmpfwm9pU0yNQVBBzFgXEmeqe08VfjgJ/SoWiic1AfW -auOA50ZB1MdcelVnDgbc0JFRBDwdxhfB4u4TOrTuGtC9WhNNAgMBAAGjYDBeMA8G -A1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBScoS8TfTiIl4Nv -qxxf1Q7/aNCykjAfBgNVHSMEGDAWgBScoS8TfTiIl4Nvqxxf1Q7/aNCykjANBgkq -hkiG9w0BAQsFAAOCAQEAMsWWqR5S+JIAQO1a9b1gPp3eGFM8XomLLTgUiMQEuOcH -rF5iCrQsAUXcOo4OrMnEA9sK9z0gcncRenBNzc/M79eOspKnZ3RtXouPcv+0x8Zj -TqaGoknipDfXgNcQzKjrSUIrKAr/vhRKio13GgVMsgWj8VEYlynANKtfGQiMVgvH -15UHIDP6uKjgF0HsBh/rSO9gKFTMtaurtbJ3iV20Voxq8gXyazWkwx++JrxsqDyB -rRVFddWmbPt719zikGkGZZFSZ9k6tRMyG4oiZsea8Yy2s95Jw0kLggnyJvW3mzBa -0nckXeslmE7uGuV1KeOHcofdXAfaokfFMsZZqDZgLg== +MIIDezCCAmOgAwIBAgIJAOA06nrAwraBMA0GCSqGSIb3DQEBCwUAMEwxOzA5BgNV +BAMMMlRMU0dlblNlbGZTaWduZWR0Um9vdENBIDIwMjMtMTEtMTZUMTI6MjQ6NDcu +Mjg5MDkzMQ0wCwYDVQQHDAQkJCQkMB4XDTIzMTExNjExMjQ0N1oXDTMzMTExMzEx +MjQ0N1owTDE7MDkGA1UEAwwyVExTR2VuU2VsZlNpZ25lZHRSb290Q0EgMjAyMy0x +MS0xNlQxMjoyNDo0Ny4yODkwOTMxDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDWJrvvUvpkiAhvIiciuTbFHRMC7VdOXdIM3y3I +Vt56Voj3dkCVitFcvTc+pkuqoQUaWRTc5M+875CaQSRIDfVyFTIGTyVXv6cZRcoz +0gcmYvopIJ4Wi5/xG9Qp8uJMtr+UBJ57ez6Urau/L3zETAVZA+y1bTylAlh4tjMH +I24bvyy4yNQbPtG4y5F9x484fn3H4x7lf6O/Xulcvy8vL1kyc/EgrF4fpjogwj58 +eQ5HLwbAlMRRxXxXX2U5tXlrv475WItp/1mhZ+j2yCMKB4tJ8tXbtpgou0JDtlN0 +8Jwm3+d5a6PxqynmgRAXStZ4Fda93Pa3FJfw1u63JrmOprG9AgMBAAGjYDBeMA8G +A1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBS2Icxjr1ucGCIx +ikeSG9igJf558jAfBgNVHSMEGDAWgBS2Icxjr1ucGCIxikeSG9igJf558jANBgkq +hkiG9w0BAQsFAAOCAQEAR0iG00uE2GnoWtaXEHYJTdvBBcStBB8qnRk19Qu/b8qd +HAhRGb31IiuYzNJxLxhOtXWQMKvsKPAKpPXP3c5XVAf2O156GoXEPkKQktF738Pp +rRlrQPqU9Qpm84rMC54EB7coxEs7HMx4do/kNaVPdqq++JIEAcWOEVKfudN+8TMR +XyUJT54jBacsTpAZNfY6boJmuQ+G6tkpQvlHOU6388IFuLPkYRO7h7CHVbDsMEXD +Ptg3PCK97nCVgs4xfQGR7nT2pawfEUQVMon/XShtXY0RIKpynwrgICHDdvMXRXlG +a4haA7sz8Wyroy6Ub5+X3s4YRumSQrhiwRzqU+f75A== -----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_certificate.pem b/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_certificate.pem new file mode 100644 index 000000000000..242c153987b7 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_certificate.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0zCCArugAwIBAgIBAzANBgkqhkiG9w0BAQsFADBMMTswOQYDVQQDDDJUTFNH +ZW5TZWxmU2lnbmVkdFJvb3RDQSAyMDIzLTExLTE2VDEyOjI0OjQ3LjI4OTA5MzEN +MAsGA1UEBwwEJCQkJDAeFw0yMzExMTYxMTI0NDhaFw0zMzExMTMxMTI0NDhaMCQx +ETAPBgNVBAMMCGtleWNsb2FrMQ8wDQYDVQQKDAZzZXJ2ZXIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDso0G4gflW5HDiBuwrjvyYy3rUx/24IxHQzZMT +7o1qoXA/h8C0kUX7aS6XFij8hCNHdNG0GL/QPifKxwiW8JIK2Xpy6jdxDzooHaDU ++Tyk8BDFYnQtXaMsqb5zXJ/P4u8bjBP4X2+/gnbNF/1yyOZxpRObrWxX+C2IJ+vy +ruh+TCEqokJ5jE+m6GPgiqx56bytXX0KLhuI7jXT60NKGqNVCV8qn5fO4z/fh6FY 
+tFxRc0QHy48YHBFo+I+R9nW4xq+0pbctnjTzlfRxHYEWvnsrptc4AOa6b49HSShf +qmkxgVn3G/U5Gmtzu2IjPWfGVwRjBo4hhoeG/fV9FMhqz6fjAgMBAAGjgecwgeQw +CQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG +AQUFBwMCMDgGA1UdEQQxMC+CCGtleWNsb2Frghhtcm9zYWxlczBMVkRRLnZtd2Fy +ZS5jb22CCWxvY2FsaG9zdDAdBgNVHQ4EFgQUwxjubJIZkvDwv9aDtdNcDcfmSSQw +HwYDVR0jBBgwFoAUtiHMY69bnBgiMYpHkhvYoCX+efIwMQYDVR0fBCowKDAmoCSg +IoYgaHR0cDovL2NybC1zZXJ2ZXI6ODAwMC9iYXNpYy5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAFmcToMQTRER97Mk5CK3qopzdFveJWHgyAHh35DQdCxtBadOXmC3n82p +dumNOKhSFNx6Hre38cQHBIuir2g4dvalfN7PwDttdi7TRPGS30bAbA4/VWtld9bt +66QDSh5Obsuq23dA9eEs34GfggXpTyBSyX4AWsHOmUpeoYSJEsUmxoMAgezu0p8r +kgOJQ0j63vG4S7jHMvtKHNG5LMTvIUk8FNW6SA/7AhJxmzEQiBFXMghenEqd682u +TpeRHe6+/Nyge1B1FYUgDVbaZ2/694tdT3V3tFvKhqbTZrKMdFJRpiMUjgfs1GzI ++NhzvUTa6MbV1ZgeXv3YmU+diCgiTmk= +-----END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_key.pem b/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_key.pem new file mode 100644 index 000000000000..fb461404eea9 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDso0G4gflW5HDi +BuwrjvyYy3rUx/24IxHQzZMT7o1qoXA/h8C0kUX7aS6XFij8hCNHdNG0GL/QPifK +xwiW8JIK2Xpy6jdxDzooHaDU+Tyk8BDFYnQtXaMsqb5zXJ/P4u8bjBP4X2+/gnbN +F/1yyOZxpRObrWxX+C2IJ+vyruh+TCEqokJ5jE+m6GPgiqx56bytXX0KLhuI7jXT +60NKGqNVCV8qn5fO4z/fh6FYtFxRc0QHy48YHBFo+I+R9nW4xq+0pbctnjTzlfRx +HYEWvnsrptc4AOa6b49HSShfqmkxgVn3G/U5Gmtzu2IjPWfGVwRjBo4hhoeG/fV9 +FMhqz6fjAgMBAAECggEBAM4lGqelcpUjxMLizPLVSW/CM2sSHhE/W9HOhIYklsWB +hcuSc2nZ9GEkLBYqk+IHKsShG94MgWzj+L5JzU3QnSkec/GP4GR5o2w3A1kFFrOI +/tM1BYhPvkq2RNcypXXwd+RDj1Ibsbnf6aaZc41/PmFaMU65MV0hMmkefgmYHamG +86kdCX1vZ2NwJWL1ALAf2rRb30QWl+W+/qDnDZ1qdxVbok8106HXBB3uXhLyaBIR +t9lGUqOoh3bdNsvPmma6T5y1cEXwcsVtfxB+myxils0XD0HsGa5FBGARh7/6jPeV +zs9nvcwVvruNGb4k4T6yEz0JutaFSgmWjAMu2pe5i4ECgYEA+k8mvt9JVmqqKLHv +Vr8BcLT2JK0/rrblth4fDiyZzKixaGnlZuXWOhiQF9+0lAk0zZjLXDZr6dWE/gMK +ZyRj1xrmB37f6/Z6F9M4r/n3RjzINkD6D5sA+Gg5nR6+nh7gNq3J6F33ZUaODeBh +EyTMXh7RT+Ug1G9BFg81tl0sNfECgYEA8gSI5otRI6i4zUZFg0ziwoIWJpdEyWwb +q7UgYzn8N8LprVibwkhnjfXysbulo/7gvRZ+uCw702xUfv1uyEKc5PHmOer4ElRU +iYdJeZblbrlk6eyOFEqucovPte82YnqFIQn6KJqNLKlG2KHIsYX1igVyGbMB2Pp/ +4iE32HefFxMCgYAEtJg13lyyky6/tRiauNx+EejOp7MaxbVrxwUubwg1ILa1D8iQ +NqHgVbXfvQTYA5RKiSTJhvxgWPM3EzeO2NBHqunIGkp7VRbWe9IE/N35JAtfebk5 +seBCyzLKEVnj/xCX9oxlId8UuE7TU/R/N6Hf4xRsPBJx6+V9VKvd0cKTAQKBgCZU +6Yn6TuOi+YIpuyDMsK22BOQf2Vk9sjRD/9k3eecrC+/UtPbUmPI3HjVgTx/mYpoQ +UgnBl8goxElIwp8dTdRFK/3IZXohuTH/J3gGmlgrLPyP5wD3wyGJW2CpfqeiWCuf +dOuxbuK//OSa2zqiyP0PV78SRxyisFaUhE/Ywm3ZAoGAYwa5t5kdPjVqtxRAsDuX +itQM5qEqLZIYlN7ehKPn8okTCc761ddaI/+fluH5S4YCo21itq38UssAjp6vbwpy +lHhvP03bpo63iz4RYwKDNEh2HD3z/a9eteColtXU8lPpfky360AwGQ1Bx7RaGGas +ttPmhm+mk3G6fRHYvk6rtJY= +-----END PRIVATE KEY----- diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_localhost_certificate.pem b/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_localhost_certificate.pem deleted file mode 100644 index 9f7f30e0f687..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_localhost_certificate.pem +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDyzCCArOgAwIBAgIBATANBgkqhkiG9w0BAQsFADBMMTswOQYDVQQDDDJUTFNH -ZW5TZWxmU2lnbmVkdFJvb3RDQSAyMDIzLTA2LTEzVDEyOjUzOjI3LjM4OTIzMjEN -MAsGA1UEBwwEJCQkJDAeFw0yMzA2MTMxMDUzMjdaFw0zMzA2MTAxMDUzMjdaMCUx 
-EjAQBgNVBAMMCWxvY2FsaG9zdDEPMA0GA1UECgwGc2VydmVyMIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyF6LEFxMDMIkH4/dHwAkXQj0MQE91aMqC2t+ -JoYUMQSMfQdzxhlwj3zaWWlKZouMYeIc/8ZQzqmEN8rgNhVFMox2tYyninCIaE4t -bFRTY6QHhvszdEzkrCeQA3NuzcMjoBTsrxgjPCaH8dr/8z6P4fEk5YDo/t98DRVT -P9M8HpFJ+ap8DRd7B4GGfVQmuoG6kb655KpjXqE+isCE9nQAUOkhhwofl2wyIxrN -ucgqvQd0LuuQpWPkELzwInakIBAMIMgC597yYXcGPgJWs6CcLqii3CrXM0lGB+z3 -Buv1ffm9nB04N7VUq8+Cyi17PkKNihRSACcbMgckrxxd9A3KewIDAQABo4HeMIHb -MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMDkG -A1UdEQQyMDCCCWxvY2FsaG9zdIIYbXJvc2FsZXMwTFZEUS52bXdhcmUuY29tggls -b2NhbGhvc3QwHQYDVR0OBBYEFNH/UYDO1rKz607qp7mjjMBakYJnMB8GA1UdIwQY -MBaAFKeWeZvOH7Z4DP459yUjLcXg/uW1MDEGA1UdHwQqMCgwJqAkoCKGIGh0dHA6 -Ly9jcmwtc2VydmVyOjgwMDAvYmFzaWMuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQBn -E++l6xLjW6xo6RVRLy9tsO/VGREhbb9Q7vyiNzpmRS7VVaUl06lq/MlfrxZmUFog -EDg9Ojw4YV2PJ+kAG/zp/fJBwDT2NKKJBRv+lr4puGb07yMWUuoiIw/u4aN7zrmk -TOdmqa6gDbfBlACsYIt9iu5Y2VaHnFiraTKizNO1J+NUb+UyKw7pgnJupBh3mcaw -9xJzVDzALtvOApZ8QeJF3Ev3NGnA9IHASj/rrcnxMxM7OoQtJVg7PUMDTRxjGLEV -FainQ01Hm1SjBS0Ttvj+MRibCIafGNIsTz5vECZ8VJNQUhDGXgSdRvSJ/ERuGIf0 -gssLuKG2XzI2nYm6+0ng ------END CERTIFICATE----- diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_localhost_key.pem b/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_localhost_key.pem deleted file mode 100644 index a340b96ab9e3..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_localhost_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDIXosQXEwMwiQf -j90fACRdCPQxAT3VoyoLa34mhhQxBIx9B3PGGXCPfNpZaUpmi4xh4hz/xlDOqYQ3 -yuA2FUUyjHa1jKeKcIhoTi1sVFNjpAeG+zN0TOSsJ5ADc27NwyOgFOyvGCM8Jofx -2v/zPo/h8STlgOj+33wNFVM/0zwekUn5qnwNF3sHgYZ9VCa6gbqRvrnkqmNeoT6K -wIT2dABQ6SGHCh+XbDIjGs25yCq9B3Qu65ClY+QQvPAidqQgEAwgyALn3vJhdwY+ -AlazoJwuqKLcKtczSUYH7PcG6/V9+b2cHTg3tVSrz4LKLXs+Qo2KFFIAJxsyBySv -HF30Dcp7AgMBAAECggEBALVNvx2ltnbQ8OjSP24+rai1Ymg5TF9UjcXZlUN8jSax -jAHSTXMSL+TWP6kp+dWCLhugA0d5hkMJ5oapf2nQo1WS/hNW238MRpHDM5zTTMVb -digwgyWYMk6IWeqVd8yd502BwYzKFY/m+Znh5TmQAZeboRw+IhYF2PTpt/OHyt2I -VFx5gztryMPvi/zd8sSpXjkvbjLUMf5U//k3BU4gVute6LtedSM/fUwaFpYWJ2KU -aNhbt8BYrJXSDwnl6GIybjDU3HHRmuVu7e4Mt688InV+fS9aEIMDOkhkyenPDz+5 -+JgMU/wJAeMTIrQ6V6kYJrWf92bLYnVi6x3jPfsrEikCgYEA95jECO1O2+aomZYC -k2FeCyToI0eVkJxWeUdBJ+NS0Zkvt1eCyz1wYLIkiFmrtn6JKj2k7M66OHWctqCZ -FkgEc/9j1o2cKdUjL4qiwTkFZP0cJBe/W0ugStp6LCza2cWTdQUaDnbBe2UsZCcd -xFK/3oniIsElv3zm5a6ga9vs/q0CgYEAzytxnwCEECtJ6vanG+vUPry3Z8B7p21n -Riz/X0NGHSc5M14iOnsyFZLB/4+RkADZGCAy6B2CyJD5UztzpjMjZFjtwx6UJXda -JgQpl8RiSOzGitr9/lmjN3yxzA29rsu0O8buUhgZWboIokkOkRNbWSrp6yWpTvXl -X6FbwTLXWscCgYEA5xscwAhhdziRXup6dP6JPXWxiFyk6lpDDOjJlGXHRATsWQHB -/9rVLiyZlPu+H5V0io0HiFJd151QLdcxjW6jWXKkyftcLF/Ze+K3kAudUWo//iB8 -aMbqU3QiXWFw1Zxpyux8KcwHRRpmmQU576odlaa2ASKwDVCUZQbejk61o/ECgYAY -0WCEJsCrWzQ4tKGiQ6cieOMTx8hIb1++1WcmV13P4kIE+FLrZJTEZtdcsStD8AYR -0NGoYtinBE8J/IZHM7saq1iYVlJzBpBDG56L8te/WrYSLlfdH4ng/Mwj4MWHahnG -S3eDWCW5TQL5xfy7vnDkBrMNG27j6as3wJHIXDnWQwKBgFL7+yd9jOejKgtsbXYX -8Rub4I/KS+6dv1s9mlHsT6+SX+BozZC/xwmBQmVuPexnklgqO9c0A4c7pVNLE2u4 -9RBrDFle4A6e8NDB7oq8Vo8wnpyglbBUB/0aVNU25BWl1/BhxzoxTTeF+U0pmKia -KmmMEdjVD5y0qh+ma+rHDBb1 ------END PRIVATE KEY----- diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/test-realm.json b/deps/rabbitmq_management/selenium/test/oauth/keycloak/test-realm.json index 0591151458e6..c287be00464f 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/keycloak/test-realm.json +++ 
b/deps/rabbitmq_management/selenium/test/oauth/keycloak/test-realm.json @@ -45,14 +45,6 @@ "failureFactor" : 30, "roles" : { "realm" : [ { - "id" : "6feb7afe-2fa8-4569-8fb8-e50c2a4302d2", - "name" : "offline_access", - "description" : "${role_offline-access}", - "composite" : false, - "clientRole" : false, - "containerId" : "test", - "attributes" : { } - }, { "id" : "2b61bc53-60cc-48fc-b89b-ee3e80204895", "name" : "rabbitmq.tag:management", "composite" : false, @@ -74,6 +66,44 @@ "clientRole" : false, "containerId" : "test", "attributes" : { } + }, { + "id" : "6faef857-1c9b-4474-ba01-ad1946d243d6", + "name" : "rabbitmq-proxy-client-role", + "description" : "", + "composite" : false, + "clientRole" : false, + "containerId" : "test", + "attributes" : { } + }, { + "id" : "0a838a26-4908-4750-a1d0-7cc322c698ae", + "name" : "producer", + "composite" : false, + "clientRole" : false, + "containerId" : "test", + "attributes" : { } + }, { + "id" : "dd893988-6661-4849-a0f1-1cd1a63b51a5", + "name" : "rabbitmq.read:*/*", + "composite" : false, + "clientRole" : false, + "containerId" : "test", + "attributes" : { } + }, { + "id" : "6feb7afe-2fa8-4569-8fb8-e50c2a4302d2", + "name" : "offline_access", + "description" : "${role_offline-access}", + "composite" : false, + "clientRole" : false, + "containerId" : "test", + "attributes" : { } + }, { + "id" : "af1bc955-6d4d-42e9-b0d4-343e7eb075d0", + "name" : "rabbitmq-role", + "description" : "", + "composite" : false, + "clientRole" : false, + "containerId" : "test", + "attributes" : { } }, { "id" : "77e9131f-1eb3-45a3-9f3b-f74991a99def", "name" : "rabbitmq.configure:*/*", @@ -103,13 +133,6 @@ "clientRole" : false, "containerId" : "test", "attributes" : { } - }, { - "id" : "0a838a26-4908-4750-a1d0-7cc322c698ae", - "name" : "producer", - "composite" : false, - "clientRole" : false, - "containerId" : "test", - "attributes" : { } }, { "id" : "5516969b-be85-490c-9715-9c1186075d60", "name" : "rabbitmq-management", @@ -124,13 +147,6 @@ "clientRole" : false, "containerId" : "test", "attributes" : { } - }, { - "id" : "dd893988-6661-4849-a0f1-1cd1a63b51a5", - "name" : "rabbitmq.read:*/*", - "composite" : false, - "clientRole" : false, - "containerId" : "test", - "attributes" : { } } ], "client" : { "realm-management" : [ { @@ -301,10 +317,8 @@ "containerId" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", "attributes" : { } } ], - "mgt_api_client" : [ ], + "prod_producer" : [ ], "security-admin-console" : [ ], - "admin-cli" : [ ], - "producer" : [ ], "account-console" : [ ], "broker" : [ { "id" : "147bafb6-45a8-45ba-b214-7826b1fc4856", @@ -315,7 +329,32 @@ "containerId" : "f32cd0e1-5b78-412a-ba07-6ad2a9aeb007", "attributes" : { } } ], - "rabbit_client_code" : [ ], + "rabbitmq" : [ { + "id" : "f5caa7a5-0770-41d8-a3a3-8691470b6d82", + "name" : "rabbitmq-role", + "description" : "", + "composite" : false, + "clientRole" : true, + "containerId" : "a57c9f6a-8b64-47dc-af53-d6ccc2d4aa60", + "attributes" : { } + } ], + "keycloak-producer" : [ ], + "rabbitmq-proxy-client" : [ { + "id" : "ba66d339-cbca-41c1-87fe-38e7b50efd52", + "name" : "rabbitmq-proxy-client-role", + "composite" : true, + "composites" : { + "realm" : [ "rabbitmq-role", "rabbitmq-proxy-client-role", "rabbitmq" ] + }, + "clientRole" : true, + "containerId" : "c265f3db-ed3a-4898-8800-af044b3c30f5", + "attributes" : { } + } ], + "mgt_api_client" : [ ], + "dev_producer" : [ ], + "admin-cli" : [ ], + "producer" : [ ], + "rabbitmq_client_code" : [ ], "account" : [ { "id" : "957f712c-e735-402d-9f41-ad9832749f51", "name" : 
"delete-account", @@ -391,10 +430,19 @@ "containerId" : "bd6c76be-d33d-43d6-9cbb-965df4f0c025", "attributes" : { } } ], - "rabbitmq-proxy-client" : [ ] + "rabbit_prod_mgt_ui" : [ ], + "rabbit_dev_mgt_ui" : [ ] } }, - "groups" : [ ], + "groups" : [ { + "id" : "6746dbec-7e2b-4540-ae00-73aa2a93a04e", + "name" : "rabbitmq", + "path" : "/rabbitmq", + "attributes" : { }, + "realmRoles" : [ "rabbitmq" ], + "clientRoles" : { }, + "subGroups" : [ ] + } ], "defaultRole" : { "id" : "b84ae322-7112-41d1-8a3f-0009447ded47", "name" : "default-roles-test", @@ -411,7 +459,7 @@ "otpPolicyLookAheadWindow" : 1, "otpPolicyPeriod" : 30, "otpPolicyCodeReusable" : false, - "otpSupportedApplications" : [ "totpAppFreeOTPName", "totpAppGoogleName" ], + "otpSupportedApplications" : [ "totpAppGoogleName", "totpAppFreeOTPName" ], "webAuthnPolicyRpEntityName" : "keycloak", "webAuthnPolicySignatureAlgorithms" : [ "ES256" ], "webAuthnPolicyRpId" : "", @@ -433,6 +481,50 @@ "webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister" : false, "webAuthnPolicyPasswordlessAcceptableAaguids" : [ ], "users" : [ { + "id" : "88063139-59de-4027-a421-d613e3bdba1f", + "createdTimestamp" : 1690974911722, + "username" : "dev_user", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "", + "lastName" : "", + "credentials" : [ { + "id" : "0b2591b9-871e-490d-9319-6314fb5dc42b", + "type" : "password", + "userLabel" : "My password", + "createdDate" : 1690974921254, + "secretData" : "{\"value\":\"txSoE1qlIryIJsd8EKHp0aE7I5bzLkEEWKGxPrcH1lVmKXeAftKnB6Rqxnh2pX4IFem/FMTF/rcmttU+FFmsUA==\",\"salt\":\"qFN5DsIvc/F4yKrXke5K5Q==\",\"additionalParameters\":{}}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\",\"additionalParameters\":{}}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:management", "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "451df525-a468-43c1-97f3-656d5d31ba68", + "createdTimestamp" : 1690974863360, + "username" : "prod_user", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "", + "lastName" : "", + "credentials" : [ { + "id" : "508707a9-08e9-4e5e-8257-b6d6466c98df", + "type" : "password", + "userLabel" : "My password", + "createdDate" : 1690974873162, + "secretData" : "{\"value\":\"iEG974FQB66ACMIKSB6WpgC+CTKL6+JU5qIyjwM4Z1TeQz89pPOeXxjrmtaqourwV5adMVurURO2oO/qL8yHRg==\",\"salt\":\"+axOgEN33yDcNdrXvT+V8Q==\",\"additionalParameters\":{}}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\",\"additionalParameters\":{}}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "rabbitmq.tag:management", "rabbitmq.tag:administrator", "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] + }, { "id" : "4cf4d6b5-09e5-453f-bf22-c8efdc2dd1dc", "createdTimestamp" : 1651841525973, "username" : "rabbit_admin", @@ -454,6 +546,57 @@ "realmRoles" : [ "rabbitmq.tag:administrator", "rabbitmq.configure:*/*", "rabbitmq", "rabbitmq.write:*/*", "rabbitmq.read:*/*" ], "notBefore" : 0, "groups" : [ ] + }, { + "id" : "181d7149-077b-4c2d-a0df-0425536ae9e4", + "createdTimestamp" : 1691407834581, + "username" : "rabbit_no_management", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "", + "lastName" : "", + "credentials" : [ { + "id" : "198b49de-e406-436b-8bd3-bba1401b9367", + "type" : "password", + "userLabel" : "My password", + "createdDate" : 1691407843410, + "secretData" : 
"{\"value\":\"3VzxuftR6h5KV1AlO+1YKHZMS4c3w3pCarGr7WVdNk3ey6d1nMqrqv/H1PowW8+olBdq6lf8y4KamshgRTuUTw==\",\"salt\":\"MJMytWRnSnE36G51QVbM4g==\",\"additionalParameters\":{}}", + "credentialData" : "{\"hashIterations\":27500,\"algorithm\":\"pbkdf2-sha256\",\"additionalParameters\":{}}" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] + }, + { + "id" : "c15b9b9a-1e20-45b1-8d0c-15d1e805615b", + "createdTimestamp" : 1690973977084, + "username" : "service-account-dev_producer", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "dev_producer", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test", "producer" ], + "notBefore" : 0, + "groups" : [ ] + }, { + "id" : "dd187d2d-698f-4107-b0b8-d6cd21fa2f9c", + "createdTimestamp" : 1690971595361, + "username" : "service-account-keycloak-producer", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "keycloak-producer", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test" ], + "notBefore" : 0, + "groups" : [ ] }, { "id" : "15f03347-e2fc-4f8c-9743-f4dfd59f67fe", "createdTimestamp" : 1652084304711, @@ -468,6 +611,20 @@ "realmRoles" : [ "default-roles-test", "rabbitmq-management" ], "notBefore" : 0, "groups" : [ ] + }, { + "id" : "826065e7-bb58-4b65-bbf7-8982d6cca6c8", + "createdTimestamp" : 1690973663764, + "username" : "service-account-prod_producer", + "enabled" : true, + "totp" : false, + "emailVerified" : false, + "serviceAccountClientId" : "prod_producer", + "credentials" : [ ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "default-roles-test", "producer" ], + "notBefore" : 0, + "groups" : [ ] }, { "id" : "63ec2047-6689-45c0-981d-f9b127a6bb7f", "createdTimestamp" : 1652084012762, @@ -511,7 +668,7 @@ "roles" : [ "producer" ] }, { "clientScope" : "rabbitmq.tag:management", - "roles" : [ "rabbitmq-management" ] + "roles" : [ "rabbitmq.tag:management" ] }, { "clientScope" : "rabbitmq.write:*/*", "roles" : [ "producer" ] @@ -650,6 +807,186 @@ "nodeReRegistrationTimeout" : 0, "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "d8dddaf0-31a6-4b0c-a6e1-d28cd2eb6256", + "clientId" : "dev_producer", + "name" : "", + "description" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "z1PNm47wfWyulTnAaDOf1AggTy3MxX2H", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "client.secret.creation.time" : "1690973977", + "backchannel.logout.session.required" : "true", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" 
: -1, + "protocolMappers" : [ { + "id" : "8ce01162-04dc-4e31-9103-5fa7d1be2fb2", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + }, { + "id" : "693021a3-6a1a-434b-8e7c-9358dfbfad61", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbit_dev", + "userinfo.token.claim" : "false" + } + }, { + "id" : "a2c871ac-e9fe-4082-99bf-78ddcf118661", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + }, { + "id" : "414f346b-4c6f-4e41-a810-827f60470ba4", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "rabbitmq.read:*/*", "web-origins", "acr", "rabbitmq.write:*/*", "profile", "roles", "email", "rabbitmq.configure:*/*" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "168cf7cf-91dd-466f-bd6a-247a500c13a3", + "clientId" : "keycloak-producer", + "name" : "", + "description" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "iWr83ogRaT5CdplCeQzkDDFcMqXSJjHH", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "client.secret.creation.time" : "1689856122", + "backchannel.logout.session.required" : "true", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "7f86920f-9e63-4d82-b790-b43d28097755", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + }, { + "id" : "cc7540fc-ffe4-4f93-94bf-1ae018915069", + "name" : "keycloak-producer", + "protocol" : "openid-connect", + "protocolMapper" : 
"oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "included.client.audience" : "keycloak-producer", + "id.token.claim" : "false", + "access.token.claim" : "true", + "userinfo.token.claim" : "false" + } + }, { + "id" : "40486e5a-c741-4017-8748-057d496966e6", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + }, { + "id" : "0fd38337-8809-422a-9223-4c61eaf76bf1", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] }, { "id" : "c5be3c24-0c88-4672-a77a-79002fcc9a9d", "clientId" : "mgt_api_client", @@ -760,6 +1097,96 @@ } ], "defaultClientScopes" : [ "rabbitmq.tag:administrator", "rabbitmq.tag:management", "email" ], "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "f1c75ad4-2182-4e67-b2a4-5cac93ad7939", + "clientId" : "prod_producer", + "name" : "", + "description" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "secret" : "PdLHb1w8RH1oD5bpppgy8OF9G6QeRpL9", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : false, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : true, + "publicClient" : false, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "client.secret.creation.time" : "1690973663", + "backchannel.logout.session.required" : "true", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "d25e25ae-5653-4806-a9c3-4f95ab17ca84", + "name" : "Client IP Address", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientAddress", + "jsonType.label" : "String" + } + }, { + "id" : "6195b57b-755c-492b-8dda-bb2c5e4418c4", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbit_prod", + "userinfo.token.claim" : "false" + } + }, { + "id" : "c337d632-52cc-4c46-87e9-5f541f98b2af", + "name" : "Client ID", + "protocol" : "openid-connect", + "protocolMapper" : 
"oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientId", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientId", + "jsonType.label" : "String" + } + }, { + "id" : "978d0198-3d5c-4fe7-b222-1da9ccdf6153", + "name" : "Client Host", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-usersessionmodel-note-mapper", + "consentRequired" : false, + "config" : { + "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", + "id.token.claim" : "true", + "access.token.claim" : "true", + "claim.name" : "clientHost", + "jsonType.label" : "String" + } + } ], + "defaultClientScopes" : [ "rabbitmq.read:*/*", "web-origins", "acr", "rabbitmq.write:*/*", "profile", "roles", "email", "rabbitmq.configure:*/*" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] }, { "id" : "3e96bddd-95f9-4277-b3ad-f8f6f5d5bb59", "clientId" : "producer", @@ -896,9 +1323,142 @@ } ], "defaultClientScopes" : [ "rabbitmq.read:*/*", "rabbitmq.write:*/*", "roles", "rabbitmq.configure:*/*" ], "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "9e59fee6-c772-4244-a807-58d157cde3ea", + "clientId" : "rabbit_dev_mgt_ui", + "name" : "", + "description" : "", + "rootUrl" : "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}", + "adminUrl" : "", + "baseUrl" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}/*" ], + "webOrigins" : [ "+" ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "display.on.consent.screen" : "false", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "65461f51-e45e-4de0-9981-974402d599e6", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbit_dev", + "userinfo.token.claim" : "false" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "rabbitmq.tag:management", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "6f65dc7b-5dc8-4b37-ba05-29d924d1edff", + "clientId" : "rabbit_prod_mgt_ui", + "name" : "", + "description" : "", + "rootUrl" : "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}", + "adminUrl" : "", + "baseUrl" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}/*" ], + "webOrigins" : [ "+" ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, 
+ "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : false, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "display.on.consent.screen" : "false", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "protocolMappers" : [ { + "id" : "949fa590-6bcf-4a58-af2b-2ea598cbc0fd", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "id.token.claim" : "false", + "access.token.claim" : "true", + "included.custom.audience" : "rabbit_prod", + "userinfo.token.claim" : "false" + } + } ], + "defaultClientScopes" : [ "web-origins", "acr", "rabbitmq.tag:administrator", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] + }, { + "id" : "a57c9f6a-8b64-47dc-af53-d6ccc2d4aa60", + "clientId" : "rabbitmq", + "name" : "", + "description" : "", + "surrogateAuthRequired" : false, + "enabled" : true, + "alwaysDisplayInConsole" : false, + "clientAuthenticatorType" : "client-secret", + "redirectUris" : [ ], + "webOrigins" : [ ], + "notBefore" : 0, + "bearerOnly" : false, + "consentRequired" : false, + "standardFlowEnabled" : true, + "implicitFlowEnabled" : false, + "directAccessGrantsEnabled" : true, + "serviceAccountsEnabled" : false, + "publicClient" : true, + "frontchannelLogout" : true, + "protocol" : "openid-connect", + "attributes" : { + "oidc.ciba.grant.enabled" : "false", + "post.logout.redirect.uris" : "+", + "oauth2.device.authorization.grant.enabled" : "false", + "backchannel.logout.session.required" : "true", + "backchannel.logout.revoke.offline.tokens" : "false" + }, + "authenticationFlowBindingOverrides" : { }, + "fullScopeAllowed" : true, + "nodeReRegistrationTimeout" : -1, + "defaultClientScopes" : [ "web-origins", "acr", "profile", "roles", "email" ], + "optionalClientScopes" : [ "address", "phone", "offline_access", "microprofile-jwt" ] }, { "id" : "e64b05d1-0d1c-4294-85f9-52ae098ecf1f", - "clientId" : "rabbit_client_code", + "clientId" : "rabbitmq_client_code", "name" : "", "description" : "", "rootUrl" : "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}", @@ -909,7 +1469,7 @@ "alwaysDisplayInConsole" : false, "clientAuthenticatorType" : "client-secret", "redirectUris" : [ "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}/*" ], - "webOrigins" : [ "*" ], + "webOrigins" : [ "+" ], "notBefore" : 0, "bearerOnly" : false, "consentRequired" : false, @@ -1023,6 +1583,7 @@ "protocol" : "openid-connect", "attributes" : { "client.secret.creation.time" : "1677053168", + "post.logout.redirect.uris" : "+", "oauth2.device.authorization.grant.enabled" : "false", "backchannel.logout.revoke.offline.tokens" : "false", "use.refresh.tokens" : "true", @@ -1046,6 +1607,7 @@ "consentRequired" : false, "config" : { "user.session.note" : "clientId", + "userinfo.token.claim" : "true", "id.token.claim" : "true", "access.token.claim" : "true", "claim.name" : "clientId", @@ -1059,6 +1621,7 @@ "consentRequired" : false, "config" : { "user.session.note" : "clientAddress", + "userinfo.token.claim" : "true", "id.token.claim" : 
"true", "access.token.claim" : "true", "claim.name" : "clientAddress", @@ -1072,13 +1635,27 @@ "consentRequired" : false, "config" : { "user.session.note" : "clientHost", + "userinfo.token.claim" : "true", "id.token.claim" : "true", "access.token.claim" : "true", "claim.name" : "clientHost", "jsonType.label" : "String" } + }, { + "id" : "958d4a83-d5b3-4cca-af3e-fde9f9328eec", + "name" : "aud", + "protocol" : "openid-connect", + "protocolMapper" : "oidc-audience-mapper", + "consentRequired" : false, + "config" : { + "included.client.audience" : "rabbitmq-proxy-client", + "id.token.claim" : "true", + "access.token.claim" : "true", + "included.custom.audience" : "rabbitmq", + "userinfo.token.claim" : "true" + } } ], - "defaultClientScopes" : [ "web-origins", "acr", "roles", "rabbitmq", "email" ], + "defaultClientScopes" : [ "web-origins", "acr", "roles", "email" ], "optionalClientScopes" : [ "address", "phone", "rabbitmq.tag:administrator", "profile", "offline_access", "microprofile-jwt" ] }, { "id" : "09de26c0-0a1b-4e5f-8724-23af07a0e54a", @@ -1579,13 +2156,6 @@ "jsonType.label" : "String", "multivalued" : "true" } - }, { - "id" : "43145559-8ab9-4630-a1af-794f5a39b865", - "name" : "audience resolve", - "protocol" : "openid-connect", - "protocolMapper" : "oidc-audience-resolve-mapper", - "consentRequired" : false, - "config" : { } }, { "id" : "91c4a9bd-a9b9-402b-9eb6-762362d18c6b", "name" : "client roles", @@ -1642,30 +2212,6 @@ "attribute.name" : "Role" } } ] - }, { - "id" : "b6b7be88-3c0f-4cda-b122-0c14960f5a6d", - "name" : "rabbitmq", - "description" : "", - "protocol" : "openid-connect", - "attributes" : { - "include.in.token.scope" : "true", - "display.on.consent.screen" : "true", - "gui.order" : "", - "consent.screen.text" : "" - }, - "protocolMappers" : [ { - "id" : "acbeeaa3-34b1-45e5-9f07-14ea595ca9cb", - "name" : "rabbitmq-audience", - "protocol" : "openid-connect", - "protocolMapper" : "oidc-audience-mapper", - "consentRequired" : false, - "config" : { - "included.client.audience" : "rabbitmq-proxy-client", - "id.token.claim" : "false", - "access.token.claim" : "true", - "included.custom.audience" : "rabbitmq" - } - } ] }, { "id" : "2010b133-4bfe-4f5f-8d1a-33b2a7ad2e60", "name" : "rabbitmq.write:*/*", @@ -1683,7 +2229,7 @@ "display.on.consent.screen" : "true" } } ], - "defaultDefaultClientScopes" : [ "role_list", "profile", "email", "roles", "web-origins", "acr", "rabbitmq" ], + "defaultDefaultClientScopes" : [ "role_list", "profile", "email", "roles", "web-origins", "acr" ], "defaultOptionalClientScopes" : [ "offline_access", "address", "phone", "microprofile-jwt" ], "browserSecurityHeaders" : { "contentSecurityPolicyReportOnly" : "", @@ -1735,7 +2281,7 @@ "subType" : "authenticated", "subComponents" : { }, "config" : { - "allowed-protocol-mapper-types" : [ "oidc-full-name-mapper", "saml-role-list-mapper", "oidc-sha256-pairwise-sub-mapper", "saml-user-attribute-mapper", "oidc-usermodel-property-mapper", "saml-user-property-mapper", "oidc-address-mapper", "oidc-usermodel-attribute-mapper" ] + "allowed-protocol-mapper-types" : [ "saml-role-list-mapper", "saml-user-attribute-mapper", "oidc-usermodel-attribute-mapper", "oidc-address-mapper", "oidc-full-name-mapper", "oidc-sha256-pairwise-sub-mapper", "oidc-usermodel-property-mapper", "saml-user-property-mapper" ] } }, { "id" : "693f0625-c453-40c0-b38e-80b7b7deaefa", @@ -1760,7 +2306,7 @@ "subType" : "anonymous", "subComponents" : { }, "config" : { - "allowed-protocol-mapper-types" : [ "oidc-sha256-pairwise-sub-mapper", 
"saml-role-list-mapper", "saml-user-attribute-mapper", "oidc-address-mapper", "oidc-usermodel-property-mapper", "oidc-full-name-mapper", "saml-user-property-mapper", "oidc-usermodel-attribute-mapper" ] + "allowed-protocol-mapper-types" : [ "oidc-full-name-mapper", "oidc-usermodel-property-mapper", "saml-user-attribute-mapper", "saml-role-list-mapper", "oidc-sha256-pairwise-sub-mapper", "saml-user-property-mapper", "oidc-usermodel-attribute-mapper", "oidc-address-mapper" ] } }, { "id" : "bbadf932-a286-4841-be1b-ed845e2131cb", @@ -1835,7 +2381,7 @@ "internationalizationEnabled" : false, "supportedLocales" : [ ], "authenticationFlows" : [ { - "id" : "f51ca13f-1a57-4cfb-b7c0-62ecd60555cc", + "id" : "bdbdb959-482d-4473-ba85-ddd0d1eb1f45", "alias" : "Account verification options", "description" : "Method with which to verity the existing account", "providerId" : "basic-flow", @@ -1857,7 +2403,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "be1cda35-73c3-49d9-9898-0d7aaf738550", + "id" : "fe87c03d-2f20-46f8-a4e6-2f1546ce6130", "alias" : "Authentication Options", "description" : "Authentication options.", "providerId" : "basic-flow", @@ -1886,7 +2432,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "0692db81-4d34-4ed5-8607-193166c92f81", + "id" : "e07614b8-bde3-4be9-a27a-7ca27b7e3320", "alias" : "Browser - Conditional OTP", "description" : "Flow to determine if the OTP is required for the authentication", "providerId" : "basic-flow", @@ -1908,7 +2454,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "9058f38b-ded0-4805-8cd9-b92f9d638a85", + "id" : "5e16c2c2-39e8-4c11-bbb0-c9fce0554a1d", "alias" : "Direct Grant - Conditional OTP", "description" : "Flow to determine if the OTP is required for the authentication", "providerId" : "basic-flow", @@ -1930,7 +2476,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "d88c67ef-a320-42d3-9ea7-c4a0f1e91e71", + "id" : "325b06ff-9e11-42c3-9737-876f1ac59ffb", "alias" : "First broker login - Conditional OTP", "description" : "Flow to determine if the OTP is required for the authentication", "providerId" : "basic-flow", @@ -1952,7 +2498,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "d6b14498-7293-49c3-a114-9d54543eaa69", + "id" : "5ee22c9c-1a64-4716-82c5-4bc35a45c43c", "alias" : "Handle Existing Account", "description" : "Handle what to do if there is existing account with same email/username like authenticated identity provider", "providerId" : "basic-flow", @@ -1974,7 +2520,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "06f73dfd-78b2-464a-be5a-9b79edd65c25", + "id" : "6dc7651e-80db-431e-918d-6e81b39967c8", "alias" : "Reset - Conditional OTP", "description" : "Flow to determine if the OTP should be reset or not. 
Set to REQUIRED to force.", "providerId" : "basic-flow", @@ -1996,7 +2542,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "a39403d3-1547-42da-aabd-2a04afec3c35", + "id" : "0ec67320-988c-429a-8873-88db8fcdf719", "alias" : "User creation or linking", "description" : "Flow for the existing/non-existing user alternatives", "providerId" : "basic-flow", @@ -2019,7 +2565,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "75dbe9e4-84ee-4c31-b4e0-9be4d381b4a1", + "id" : "bb56bf6a-96e7-4276-9cf6-efeeb36618e2", "alias" : "Verify Existing Account by Re-authentication", "description" : "Reauthentication of existing account", "providerId" : "basic-flow", @@ -2041,7 +2587,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "ca32d1e5-8c8d-4b30-9b4c-21785023d788", + "id" : "1cea1cd0-eca9-4618-8ad2-cb104d9994a4", "alias" : "browser", "description" : "browser based authentication", "providerId" : "basic-flow", @@ -2077,7 +2623,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "7f6f9377-1881-42ae-b8e9-2383dec03058", + "id" : "f12ba0ee-e280-452a-8e51-c58126d9daef", "alias" : "clients", "description" : "Base authentication for clients", "providerId" : "client-flow", @@ -2113,7 +2659,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "f25f0cb8-ebb5-4ead-8a0e-00efa42af1ee", + "id" : "a1ccb467-8d27-4974-b7dc-1cc036fcc2be", "alias" : "direct grant", "description" : "OpenID Connect Resource Owner Grant", "providerId" : "basic-flow", @@ -2142,7 +2688,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "8be2646f-aacb-4ccb-ab84-4fb2c221d965", + "id" : "86b95fb2-55ee-4e72-a7ad-96ff462719f2", "alias" : "docker auth", "description" : "Used by Docker clients to authenticate against the IDP", "providerId" : "basic-flow", @@ -2157,7 +2703,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "f033f929-0126-4a9e-872b-420464c86fc8", + "id" : "0a55f090-cd93-4d56-8cb2-d1f5eb5ea230", "alias" : "first broker login", "description" : "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", "providerId" : "basic-flow", @@ -2180,7 +2726,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "622a4d14-0f15-405c-b6ff-3cc4a47a2ac4", + "id" : "3f22c16a-9901-4ef5-b324-b880237275d5", "alias" : "forms", "description" : "Username, password, otp and other auth forms.", "providerId" : "basic-flow", @@ -2202,7 +2748,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "ffc5b288-f220-42c5-b484-4ef38acfa613", + "id" : "55bff30b-4577-4a04-a716-7916dc242978", "alias" : "http challenge", "description" : "An authentication flow based on challenge-response HTTP Authentication Schemes", "providerId" : "basic-flow", @@ -2224,7 +2770,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "2fe5d3e1-9fb8-4310-a425-ebec27791ed3", + "id" : "9437c219-d080-43c0-bc32-cf99ae19cbca", "alias" : "registration", "description" : "registration flow", "providerId" : "basic-flow", @@ -2240,7 +2786,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "50e1a75f-d18b-4f06-99c1-a2bee1ea8f13", + "id" : "4f7768d7-3b66-4398-a6c0-03f308cc17ee", "alias" : "registration form", "description" : "registration form", "providerId" : "form-flow", @@ -2276,7 +2822,7 @@ "userSetupAllowed" : false } ] }, { - "id" : "2ead0f34-79b1-4500-a00f-147a2d3a80c8", + "id" : "45c71d83-12a5-4e9b-b4c2-827cc691b31d", "alias" : "reset credentials", "description" : "Reset credentials for a user if they forgot their password or something", "providerId" : "basic-flow", @@ -2312,7 +2858,7 @@ "userSetupAllowed" : false } ] }, { - "id" : 
"5584a878-8c18-4a66-b834-4151bc2c3fef", + "id" : "610dcc49-ff34-4e9d-97dc-28d3d3b558f0", "alias" : "saml ecp", "description" : "SAML ECP Profile Authentication Flow", "providerId" : "basic-flow", @@ -2328,13 +2874,13 @@ } ] } ], "authenticatorConfig" : [ { - "id" : "4772be2b-26eb-4126-8fb4-8cd58f136a74", + "id" : "3d3eff66-1e1e-4b2e-8401-612fe7439dd9", "alias" : "create unique user config", "config" : { "require.password.update.after.registration" : "false" } }, { - "id" : "370961ed-7742-4751-93e2-f3a9ad585f9e", + "id" : "4ea7cfe8-d04b-445e-9d1b-0caf71e21f10", "alias" : "review profile config", "config" : { "update.profile.on.first.login" : "missing" diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.conf index fbd4b72296d6..d8534a9a1fe0 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.conf +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.conf @@ -2,10 +2,11 @@ auth_backends.1 = rabbit_auth_backend_oauth2 management.login_session_timeout = 1 management.oauth_enabled = true -management.oauth_client_id = rabbit_client_code +management.oauth_client_id = rabbitmq_client_code management.oauth_scopes = ${OAUTH_SCOPES} -management.oauth_provider_url = ${OAUTH_PROVIDER_URL} management.cors.allow_origins.1 = * auth_oauth2.resource_server_id = rabbitmq auth_oauth2.preferred_username_claims.1 = user_name +auth_oauth2.preferred_username_claims.2 = preferred_username +auth_oauth2.preferred_username_claims.3 = email diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.enable-basic-auth.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.enable-basic-auth.conf new file mode 100644 index 000000000000..4f7e04ab7973 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.enable-basic-auth.conf @@ -0,0 +1,4 @@ + +auth_backends.2 = rabbit_auth_backend_internal + +management.oauth_disable_basic_auth = false diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.fakeportal-mgt-oauth-provider.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.fakeportal-mgt-oauth-provider.conf new file mode 100644 index 000000000000..a28dc253ab86 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.fakeportal-mgt-oauth-provider.conf @@ -0,0 +1 @@ +management.oauth_provider_url = ${FAKEPORTAL_URL} diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.jwks.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.jwks.conf deleted file mode 100644 index 6d528e339f54..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.jwks.conf +++ /dev/null @@ -1 +0,0 @@ -auth_oauth2.jwks_url = ${OAUTH_JKWS_URL} diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-mgt-oauth-provider.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-mgt-oauth-provider.conf new file mode 100644 index 000000000000..9e6e55f94073 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-mgt-oauth-provider.conf @@ -0,0 +1,2 @@ +# uaa requires a secret in order to renew tokens +management.oauth_provider_url = ${KEYCLOAK_URL} diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-oauth-provider.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-oauth-provider.conf new file mode 100644 index 000000000000..69adfc409a1f --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-oauth-provider.conf @@ -0,0 +1,2 @@ 
+auth_oauth2.issuer = ${OAUTH_PROVIDER_URL} +auth_oauth2.https.cacertfile = ${OAUTH_PROVIDER_CA_CERT} diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-verify-none-oauth-provider.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-verify-none-oauth-provider.conf new file mode 100644 index 000000000000..601720623775 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-verify-none-oauth-provider.conf @@ -0,0 +1,2 @@ +auth_oauth2.issuer = ${OAUTH_PROVIDER_URL} +auth_oauth2.https.peer_verification = verify_none diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.load-user-definitions.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.load-user-definitions.conf new file mode 100644 index 000000000000..efe162082bf2 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.load-user-definitions.conf @@ -0,0 +1,2 @@ + +load_definitions = ${RABBITMQ_TEST_DIR}/imports/users.json diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.tls.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.tls.conf new file mode 100644 index 000000000000..61107323c637 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.tls.conf @@ -0,0 +1,14 @@ +auth_backends.1 = rabbit_auth_backend_oauth2 + +listeners.ssl.1 = 5671 + +ssl_options.cacertfile = ${RABBITMQ_TEST_DIR}/certs/ca_certificate.pem +ssl_options.certfile = ${RABBITMQ_TEST_DIR}/certs/server_rabbitmq_certificate.pem +ssl_options.keyfile = ${RABBITMQ_TEST_DIR}/certs/server_rabbitmq_key.pem +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = true + +management.ssl.port = 15671 +management.ssl.cacertfile = ${RABBITMQ_TEST_DIR}/certs/ca_certificate.pem +management.ssl.certfile = ${RABBITMQ_TEST_DIR}/certs/server_rabbitmq_certificate.pem +management.ssl.keyfile = ${RABBITMQ_TEST_DIR}/certs/server_rabbitmq_key.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-mgt-oauth-provider.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-mgt-oauth-provider.conf new file mode 100644 index 000000000000..e50200cbeefd --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-mgt-oauth-provider.conf @@ -0,0 +1,2 @@ +# uaa requires a secret in order to renew tokens +management.oauth_provider_url = ${UAA_URL} diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-oauth-provider.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-oauth-provider.conf new file mode 100644 index 000000000000..46f67a598bd0 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-oauth-provider.conf @@ -0,0 +1,6 @@ +# uaa requires a secret in order to renew tokens +management.oauth_client_secret = ${OAUTH_CLIENT_SECRET} + +# configure static signing keys and the oauth provider used by the plugin +auth_oauth2.default_key = ${OAUTH_SIGNING_KEY_ID} +auth_oauth2.signing_keys.${OAUTH_SIGNING_KEY_ID} = ${OAUTH_SERVER_CONFIG_DIR}/signing-key.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa.conf b/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa.conf deleted file mode 100644 index 4bb752078dd8..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa.conf +++ /dev/null @@ -1,3 +0,0 @@ -management.oauth_client_secret = ${OAUTH_CLIENT_SECRET} -auth_oauth2.default_key = ${OAUTH_SIGNING_KEY_ID} -auth_oauth2.signing_keys.${OAUTH_SIGNING_KEY_ID} = ${OAUTH_SIGNING_KEY_DIR}/signing-key.pem 
diff --git a/deps/rabbitmq_management/selenium/test/oauth/uaa/uaa.yml b/deps/rabbitmq_management/selenium/test/oauth/uaa/uaa.yml index a45de36aead8..546a78402f2a 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/uaa/uaa.yml +++ b/deps/rabbitmq_management/selenium/test/oauth/uaa/uaa.yml @@ -97,6 +97,8 @@ jwt: scim: users: - rabbit_admin|rabbit_admin|scim.read,openid,rabbitmq.read:*/*,rabbitmq.write:*/*,rabbitmq.configure:*/*,rabbitmq.tag:administrator + - rabbit_admin_1|rabbit_admin_1|scim.read,openid,rmq-uaa-1.read:*/*,rmq-uaa-1.write:*/*,rmq-uaa-1.configure:*/*,rmq-uaa-1.tag:administrator + - rabbit_admin_2|rabbit_admin_2|scim.read,openid,rmq-uaa-2.read:*/*,rmq-uaa-2.write:*/*,rmq-uaa-2.configure:*/*,rmq-uaa-2.tag:administrator - rabbitmq_management|rabbitmq_management|scim.read,openid,rabbitmq.read:*/*,rabbitmq.write:*/*,rabbitmq.configure:*/*,rabbitmq.tag:management - rabbit_monitor|rabbit_monitor|scim.read,openid,rabbitmq.tag:monitoring - rabbit_no_management|rabbit_no_management|scim.read,openid,rabbitmq.read:*/* @@ -108,6 +110,7 @@ scim: 'rabbitmq.tag:monitoring': Monitoring 'rabbitmq.tag:administrator': Administrator + oauth: # Always override clients on startup client: @@ -126,9 +129,9 @@ oauth: secret: mgt_api_client authorized-grant-types: client_credentials authorities: rabbitmq.tag:monitoring - rabbit_client_code: - id: rabbit_client_code - secret: rabbit_client_code + rabbitmq_client_code: + id: rabbitmq_client_code + secret: rabbitmq_client_code authorized-grant-types: authorization_code,refresh_token scope: rabbitmq.*,openid,profile authorities: uaa.resource,rabbitmq @@ -178,3 +181,37 @@ oauth: secret: consumer_with_roles_secret authorities: rabbitmq.* api://rabbitmq:Read.All,api://rabbitmq:Configure.All,api://rabbitmq:Write.All authorized-grant-types: client_credentials + +cors: + xhr: + allowed: + headers: + - Accept + - Authorization + - Content-Type + - authorization + - content-type + - X-Requested-With + origin: + - ^*$ + methods: + - GET + - PUT + - POST + - DELETE + default: + allowed: + headers: + - Accept + - Authorization + - Content-Type + - authorization + - content-type + - X-Requested-With + origin: + - ^*$ + methods: + - GET + - PUT + - POST + - DELETE diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/happy-login.js b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/happy-login.js new file mode 100644 index 000000000000..73d1ccd4a650 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/happy-login.js @@ -0,0 +1,36 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') +const OverviewPage = require('../../pageobjects/OverviewPage') + +describe('When basic authentication is enabled but UAA is down', function () { + let homePage + let idpLogin + let overview + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + idpLogin = idpLoginPage(driver) + overview = new OverviewPage(driver) + captureScreen = captureScreensFor(driver, __filename) + }) + + it('can log in with Basic Auth', async function () { + await homePage.toggleBasicAuthSection() + await homePage.basicAuthLogin('guest', 'guest') + if (!await overview.isLoaded()) { + throw new Error('Failed 
to login') + } + await overview.logout() + }) + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/landing.js b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/landing.js new file mode 100644 index 000000000000..5a0b6bc13026 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/landing.js @@ -0,0 +1,37 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, teardown, captureScreensFor } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') + +describe('When basic authentication is enabled but UAA is down', function () { + let driver + let homePage + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + captureScreen = captureScreensFor(driver, __filename) + }) + + it('should display warning message that UAA is down', async function () { + await homePage.isLoaded() + const message = await homePage.getWarning() + assert.equal(true, message.startsWith('OAuth resource [rabbitmq] not available')) + assert.equal(true, message.endsWith(' not reachable')) + }) + + it('should not be presented oauth2 section', async function () { + await homePage.isLoaded() + if (await homePage.isOAuth2SectionVisible()) { + throw new Error('OAuth2 section should not be present') + } + }) + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/happy-login.js b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/happy-login.js new file mode 100644 index 000000000000..54c8531259b0 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/happy-login.js @@ -0,0 +1,45 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') +const OverviewPage = require('../../pageobjects/OverviewPage') + +describe('An user with administrator tag', function () { + let homePage + let idpLogin + let overview + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + idpLogin = idpLoginPage(driver) + overview = new OverviewPage(driver) + captureScreen = captureScreensFor(driver, __filename) + }) + + it('can log in with OAuth 2.0', async function () { + await homePage.clickToLogin() + await idpLogin.login('rabbit_admin', 'rabbit_admin') + if (!await overview.isLoaded()) { + throw new Error('Failed to login') + } + await overview.logout() + }) + + it('can log in with Basic Auth', async function () { + await homePage.toggleBasicAuthSection() + await homePage.basicAuthLogin('guest', 'guest') + if (!await overview.isLoaded()) { + throw new Error('Failed to login') + } + await overview.logout() + }) + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/landing.js b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/landing.js new file mode 100644 index 000000000000..508b5f0555ca --- /dev/null +++ 
b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/landing.js @@ -0,0 +1,41 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') + +describe('A user which accesses any protected URL without a session where basic auth is enabled', function () { + let homePage + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + captureScreen = captureScreensFor(driver, __filename) + await homePage.isLoaded() + }) + + it('should be presented with a login button to log in using OAuth 2.0', async function () { + await homePage.getOAuth2Section() + assert.equal(await homePage.getLoginButton(), 'Click here to log in') + }) + + + it('should be presented with a login button to log in using Basic Auth', async function () { + await homePage.toggleBasicAuthSection() + await homePage.getBasicAuthSection() + assert.equal(await homePage.getBasicAuthLoginButton(), 'Login') + }) + + it('should not have a warning message', async function () { + const visible = await homePage.isWarningVisible() + assert.ok(!visible) + }) + + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/unauthorized.js b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/unauthorized.js new file mode 100644 index 000000000000..fec2757f0a32 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/unauthorized.js @@ -0,0 +1,59 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') +const OverviewPage = require('../../pageobjects/OverviewPage') + +describe('An user without management tag', function () { + let homePage + let idpLogin + let overview + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + idpLogin = idpLoginPage(driver) + overview = new OverviewPage(driver) + captureScreen = captureScreensFor(driver, __filename) + + await homePage.clickToLogin() + await idpLogin.login('rabbit_no_management', 'rabbit_no_management') + if (!await homePage.isLoaded()) { + throw new Error('Failed to login') + } + }) + + it('cannot log in into the management ui', async function () { + const visible = await homePage.isWarningVisible() + assert.ok(visible) + }) + + it('should get "Not authorized" warning message', async function(){ + assert.equal('Not authorized', await homePage.getWarning()) + assert.equal('Click here to logout', await homePage.getLogoutButton()) + assert.ok(!await homePage.isBasicAuthSectionVisible()) + assert.ok(!await homePage.isOAuth2SectionVisible()) + }) + + describe("After clicking on logout button", function() { + + before(async function () { + await homePage.clickToLogout() + }) + + it('should get redirected to home page again without error message', async function(){ + const visible = await homePage.isWarningVisible() + assert.ok(!visible) + }) + + }) + + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git 
a/deps/rabbitmq_management/selenium/test/oauth/with-idp-down/landing.js b/deps/rabbitmq_management/selenium/test/oauth/with-idp-down/landing.js index 30e81a714b9d..5e23e8df807c 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/with-idp-down/landing.js +++ b/deps/rabbitmq_management/selenium/test/oauth/with-idp-down/landing.js @@ -5,7 +5,7 @@ const { buildDriver, goToHome, teardown, captureScreensFor } = require('../../ut const SSOHomePage = require('../../pageobjects/SSOHomePage') -describe('A user which accesses management ui without a session', function () { +describe('When UAA is down', function () { let driver let homePage let captureScreen @@ -17,16 +17,16 @@ describe('A user which accesses management ui without a session', function () { captureScreen = captureScreensFor(driver, __filename) }) - it('should have a warning message when UAA is down', async function () { + it('should display warning message that UAA is down', async function () { await homePage.isLoaded() const message = await homePage.getWarning() - assert.equal(true, message.endsWith('does not appear to be a running OAuth2.0 instance or may not have a trusted SSL certificate')) + assert.equal(true, message.startsWith('OAuth resource [rabbitmq] not available')) + assert.equal(true, message.endsWith(' not reachable')) }) - it('should be presented with a login button to log in', async function () { + it('should not be presented with a login button to log in', async function () { await homePage.isLoaded() - const value = await homePage.getLoginButton() - assert.equal(value, 'Click here to log in') + assert.equal(false, await homePage.isLoginButtonVisible()) }) after(async function () { diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/happy-login.js b/deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/happy-login.js new file mode 100644 index 000000000000..01f9057db62d --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/happy-login.js @@ -0,0 +1,69 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') +const OverviewPage = require('../../pageobjects/OverviewPage') +const FakePortalPage = require('../../pageobjects/FakePortalPage') + +describe('When there two OAuth resources', function () { + let homePage + let idpLogin + let overview + let fakePortal + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + idpLogin = idpLoginPage(driver, "keycloak") + overview = new OverviewPage(driver) + fakePortal = new FakePortalPage(driver) + captureScreen = captureScreensFor(driver, __filename) + }) + + it('can log using first resource (sp_initiated)', async function () { + await homePage.chooseOauthResource('RabbitMQ Production') + await homePage.clickToLogin() + await idpLogin.login('prod_user', 'prod_user') + if (!await overview.isLoaded()) { + throw new Error('Failed to login') + } + assert.equal(await overview.getUser(), 'User prod_user') + await overview.logout() + }) + + it('can log using third resource (idp_initiated)', async function () { + if (!await homePage.isLoaded()) { + throw new Error('Failed to load home page') + } + await homePage.chooseOauthResource('RabbitMQ X_Idp') + await homePage.clickToLogin() + if (!await 
fakePortal.isLoaded()) { + throw new Error('Failed to redirect to IDP') + } + }) +/* put back once webdriver fixes a known issue + it('can log using second resource (sp_initiated)', async function () { + if (!await homePage.isLoaded()) { + throw new Error('Failed to load home page') + } + await homePage.chooseOauthResource('RabbitMQ Production') + await homePage.clickToLogin() + await idpLogin.login('prod_user', 'prod_user') + + if (!await overview.isLoaded()) { + throw new Error('Failed to login') + } + assert.equal(await overview.getUser(), 'User prod_user') + await overview.logout() + + }) +*/ + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/landing.js b/deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/landing.js new file mode 100644 index 000000000000..ce74f527bd2a --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/landing.js @@ -0,0 +1,45 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') + +describe('A user which accesses any protected URL without a session', function () { + let homePage + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + captureScreen = captureScreensFor(driver, __filename) + }) + + it('should be presented with a combo box and a button to log in', async function () { + await homePage.isLoaded() + const resources = await homePage.getOAuthResourceOptions() + + assert.equal("rabbit_prod", resources[1].value) + assert.equal("RabbitMQ Production", resources[1].text) + + assert.equal("rabbit_dev", resources[0].value) + assert.equal("RabbitMQ Development", resources[0].text) + + assert.equal("rabbit_x", resources[2].value) + assert.equal("RabbitMQ X_Idp", resources[2].text) + + const value = await homePage.getLoginButton() + assert.equal(value, 'Click here to log in') + }) + + it('should not have a warning message', async function () { + await homePage.isLoaded() + const visible = await homePage.isWarningVisible() + assert.ok(!visible) + }) + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/happy-login.js b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/happy-login.js index 78c550c865c3..763c22202ac1 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/happy-login.js +++ b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/happy-login.js @@ -1,15 +1,14 @@ const { By, Key, until, Builder } = require('selenium-webdriver') require('chromedriver') const assert = require('assert') -const { buildDriver, goToHome, captureScreensFor, teardown } = require('../../utils') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage } = require('../../utils') const SSOHomePage = require('../../pageobjects/SSOHomePage') -const UAALoginPage = require('../../pageobjects/UAALoginPage') const OverviewPage = require('../../pageobjects/OverviewPage') describe('An user with administrator tag', function () { let homePage - let uaaLogin + let idpLogin let overview let captureScreen @@ -17,14 +16,14 @@ describe('An user with administrator tag', 
function () { driver = buildDriver() await goToHome(driver) homePage = new SSOHomePage(driver) - uaaLogin = new UAALoginPage(driver) + idpLogin = idpLoginPage(driver) overview = new OverviewPage(driver) captureScreen = captureScreensFor(driver, __filename) }) it('can log in into the management ui', async function () { await homePage.clickToLogin() - await uaaLogin.login('rabbit_admin', 'rabbit_admin') + await idpLogin.login('rabbit_admin', 'rabbit_admin') if (!await overview.isLoaded()) { throw new Error('Failed to login') } diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/logout.js b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/logout.js index cfcdada50bc7..f8b40fe0abe2 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/logout.js +++ b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/logout.js @@ -1,32 +1,33 @@ const { By, Key, until, Builder } = require('selenium-webdriver') require('chromedriver') const assert = require('assert') -const { buildDriver, goToHome, captureScreensFor, teardown } = require('../../utils') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage } = require('../../utils') const SSOHomePage = require('../../pageobjects/SSOHomePage') -const UAALoginPage = require('../../pageobjects/UAALoginPage') const OverviewPage = require('../../pageobjects/OverviewPage') describe('When a logged in user', function () { let overview let homePage let captureScreen + let idpLogin before(async function () { driver = buildDriver() await goToHome(driver) homePage = new SSOHomePage(driver) - uaaLogin = new UAALoginPage(driver) + idpLogin = idpLoginPage(driver) overview = new OverviewPage(driver) captureScreen = captureScreensFor(driver, __filename) }) it('logs out', async function () { await homePage.clickToLogin() - await uaaLogin.login('rabbit_admin', 'rabbit_admin') + await idpLogin.login('rabbit_admin', 'rabbit_admin') await overview.isLoaded() await overview.logout() await homePage.isLoaded() + }) after(async function () { diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/redirection-after-login.js b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/redirection-after-login.js new file mode 100644 index 000000000000..eb9d49b9d6c4 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/redirection-after-login.js @@ -0,0 +1,43 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToExchanges, captureScreensFor, teardown, goToHome, idpLoginPage } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') +const UAALoginPage = require('../../pageobjects/UAALoginPage') +const ExchangesPage = require('../../pageobjects/ExchangesPage') + +describe('A user which accesses a protected URL without a session', function () { + let homePage + let idpLogin + let exchanges + let captureScreen + + before(async function () { + driver = buildDriver() + homePage = new SSOHomePage(driver) + idpLogin = idpLoginPage(driver) + exchanges = new ExchangesPage(driver) + + await goToExchanges(driver) + + captureScreen = captureScreensFor(driver, __filename) + }) + + it('redirect to previous accessed page after login ', async function () { + await homePage.clickToLogin() + + await idpLogin.login('rabbit_admin', 'rabbit_admin') + + if (!await exchanges.isLoaded()) { + throw new Error('Failed to login') + } + + 
assert.equal("All exchanges (8)", await exchanges.getPagingSectionHeaderText()) + }) + + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/token-refresh.js b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/token-refresh.js index 17b2b696d330..d14e009c1e8f 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/token-refresh.js +++ b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/token-refresh.js @@ -1,31 +1,32 @@ const { By, Key, until, Builder } = require('selenium-webdriver') require('chromedriver') const assert = require('assert') -const { buildDriver, goToHome, captureScreensFor, teardown, delay } = require('../../utils') +const { buildDriver, goToHome, captureScreensFor, teardown, delay, idpLoginPage } = require('../../utils') const SSOHomePage = require('../../pageobjects/SSOHomePage') const UAALoginPage = require('../../pageobjects/UAALoginPage') +const KeycloakLoginPage = require('../../pageobjects/KeycloakLoginPage') const OverviewPage = require('../../pageobjects/OverviewPage') describe('Once user is logged in', function () { let homePage - let uaaLogin + let idpLogin let overview let captureScreen - this.timeout(25000) // hard-coded to 25secs because this test requires 25sec to run + this.timeout(45000) // hard-coded to 25secs because this test requires 35sec to run before(async function () { driver = buildDriver() await goToHome(driver) homePage = new SSOHomePage(driver) - uaaLogin = new UAALoginPage(driver) + idpLogin = idpLoginPage(driver) overview = new OverviewPage(driver) captureScreen = captureScreensFor(driver, __filename) }) it('its token is automatically renewed', async function () { await homePage.clickToLogin() - await uaaLogin.login('rabbit_admin', 'rabbit_admin') + await idpLogin.login('rabbit_admin', 'rabbit_admin') await overview.isLoaded() await delay(15000) diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/unauthorized.js b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/unauthorized.js new file mode 100644 index 000000000000..846f2f91f158 --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/unauthorized.js @@ -0,0 +1,61 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') +require('chromedriver') +const assert = require('assert') +const { buildDriver, goToHome, captureScreensFor, teardown, idpLoginPage } = require('../../utils') + +const SSOHomePage = require('../../pageobjects/SSOHomePage') +const OverviewPage = require('../../pageobjects/OverviewPage') + +describe('An user without management tag', function () { + let homePage + let idpLogin + let overview + let captureScreen + + before(async function () { + driver = buildDriver() + await goToHome(driver) + homePage = new SSOHomePage(driver) + idpLogin = idpLoginPage(driver) + overview = new OverviewPage(driver) + captureScreen = captureScreensFor(driver, __filename) + + await homePage.clickToLogin() + await idpLogin.login('rabbit_no_management', 'rabbit_no_management') + + }) + + it('cannot log in into the management ui', async function () { + if (!await homePage.isLoaded()) { + throw new Error('Failed to login') + } + const visible = await homePage.isWarningVisible() + assert.ok(visible) + }) + + it('should get "Not authorized" warning message and logout button but no login button', async function(){ + assert.equal('Not authorized', await homePage.getWarning()) + 
assert.equal('Click here to logout', await homePage.getLogoutButton()) + assert.ok(!await homePage.isBasicAuthSectionVisible()) + assert.ok(!await homePage.isOAuth2SectionVisible()) + }) + + describe("After clicking on logout button", function() { + + before(async function () { + await homePage.clickToLogout() + }) + + it('should get redirected to home page again without error message', async function(){ + await homePage.isLoaded() + const visible = await homePage.isWarningVisible() + assert.ok(!visible) + }) + + }) + + + after(async function () { + await teardown(driver, this, captureScreen) + }) +}) diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/AdminTab.js b/deps/rabbitmq_management/selenium/test/pageobjects/AdminTab.js index d6c92e8f14c8..b8eb247d507e 100644 --- a/deps/rabbitmq_management/selenium/test/pageobjects/AdminTab.js +++ b/deps/rabbitmq_management/selenium/test/pageobjects/AdminTab.js @@ -111,7 +111,7 @@ module.exports = class AdminTab extends OverviewPage { async searchForUser(user, regex = false) { - var filtered_user = By.css('a[href="#/users/' + user + '"]') + const filtered_user = By.css('a[href="#/users/' + user + '"]') await this.sendKeys(FILTER_USER, user) await this.sendKeys(FILTER_USER, Key.RETURN) diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/BasePage.js b/deps/rabbitmq_management/selenium/test/pageobjects/BasePage.js index b07520c24329..2f00a5e67d71 100644 --- a/deps/rabbitmq_management/selenium/test/pageobjects/BasePage.js +++ b/deps/rabbitmq_management/selenium/test/pageobjects/BasePage.js @@ -1,10 +1,10 @@ -const { By, Key, until, Builder } = require('selenium-webdriver') +const { By, Key, until, Builder, Select } = require('selenium-webdriver') const MENU_TABS = By.css('div#menu ul#tabs') const USER = By.css('li#logout') const LOGOUT_FORM = By.css('li#logout form') const SELECT_VHOSTS = By.css('select#show-vhost') - +const SELECT_REFRESH = By.css('ul#topnav li#interval select#update-every') const OVERVIEW_TAB = By.css('div#menu ul#tabs li#overview') const CONNECTIONS_TAB = By.css('div#menu ul#tabs li#connections') const CHANNELS_TAB = By.css('div#menu ul#tabs li#channels') @@ -13,18 +13,23 @@ const EXCHANGES_TAB = By.css('div#menu ul#tabs li#exchanges') const ADMIN_TAB = By.css('div#menu ul#tabs li#admin') const STREAM_CONNECTIONS_TAB = By.css('div#menu ul#tabs li#stream-connections') +const FORM_POPUP = By.css('div.form-popup-warn') +const FORM_POPUP_CLOSE_BUTTON = By.css('div.form-popup-warn span') + module.exports = class BasePage { driver timeout polling + interactionDelay constructor (webdriver) { this.driver = webdriver - // this is another timeout (--timeout 10000) which is the maximum test execution time - this.timeout = parseInt(process.env.TIMEOUT) || 5000 // max time waiting to locate an element. Should be less that test timeout - this.polling = parseInt(process.env.POLLING) || 1000 // how frequent selenium searches for an element + this.timeout = parseInt(process.env.SELENIUM_TIMEOUT) || 1000 // max time waiting to locate an element. 
Should be less that test timeout + this.polling = parseInt(process.env.SELENIUM_POLLING) || 500 // how frequent selenium searches for an element + this.interactionDelay = parseInt(process.env.SELENIUM_INTERACTION_DELAY) || 0 // slow down interactions (when rabbit is behind a http proxy) } + async isLoaded () { return this.waitForDisplayed(MENU_TABS) } @@ -36,7 +41,9 @@ module.exports = class BasePage { async getUser () { return this.getText(USER) } - + async selectRefreshOption(option) { + return this.selectOption(SELECT_REFRESH, option) + } async waitForOverviewTab() { return this.waitForDisplayed(OVERVIEW_TAB) } @@ -77,6 +84,7 @@ module.exports = class BasePage { return this.click(QUEUES_AND_STREAMS_TAB) } async waitForQueuesTab() { + await this.driver.sleep(250) return this.waitForDisplayed(QUEUES_AND_STREAMS_TAB) } @@ -87,19 +95,39 @@ module.exports = class BasePage { return this.waitForDisplayed(STREAM_CONNECTIONS_TAB) } - async getSelectableVhosts() { - let selectable = await this.waitForDisplayed(SELECT_VHOSTS) - let options = await selectable.findElements(By.css('option')) + async getSelectableOptions(locator) { + let selectable = await this.waitForDisplayed(locator) + const select = await new Select(selectable) + const optionList = await select.getOptions() + let table_model = [] - for (let option of options) { - table_model.push(await option.getText()) + for (const index in optionList) { + const t = await optionList[index].getText() + const v = await optionList[index].getAttribute('value') + table_model.push({"text":t, "value": v}) } + return table_model } + async selectOption(locator, text) { + let selectable = await this.waitForDisplayed(locator) + const select = await new Select(selectable) + return select.selectByVisibleText(text) + } + + async getSelectableVhosts() { + const table_model = await this.getSelectableOptions(SELECT_VHOSTS) + let new_table_model = [] + for (let i = 0; i < table_model.length; i++) { + new_table_model.push(await table_model[i].text) + } + return new_table_model + } + async getTable(locator, firstNColumns) { - let table = await this.waitForDisplayed(locator) - let rows = await table.findElements(By.css('tbody tr')) + const table = await this.waitForDisplayed(locator) + const rows = await table.findElements(By.css('tbody tr')) let table_model = [] for (let row of rows) { let columns = await row.findElements(By.css('td')) @@ -111,11 +139,50 @@ module.exports = class BasePage { } return table_model } + async isPopupWarningDisplayed() { + try { + let element = await driver.findElement(FORM_POPUP) + return element.isDisplayed() + } catch(e) { + return Promise.resolve(false) + } + /* + let element = await driver.findElement(FORM_POPUP) + return this.driver.wait(until.elementIsVisible(element), this.timeout / 2, + 'Timed out after [timeout=' + this.timeout + ';polling=' + this.polling + '] awaiting till visible ' + element, + this.polling / 2).then(function onWarningVisible(e) { + return Promise.resolve(true) + }, function onError(e) { + return Promise.resolve(false) + }) + */ + } + async getPopupWarning() { + let element = await driver.findElement(FORM_POPUP) + return this.driver.wait(until.elementIsVisible(element), this.timeout, + 'Timed out after [timeout=' + this.timeout + ';polling=' + this.polling + '] awaiting till visible ' + element, + this.polling).getText().then((value) => value.substring(0, value.search('\n\nClose'))) + } + async closePopupWarning() { + return this.click(FORM_POPUP_CLOSE_BUTTON) + } + async isDisplayed(locator) { + try { + let 
element = await driver.findElement(locator) + + return this.driver.wait(until.elementIsVisible(element), this.timeout, + 'Timed out after [timeout=' + this.timeout + ';polling=' + this.polling + '] awaiting till visible ' + element, + this.polling / 2) + }catch(error) { + return Promise.resolve(false) + } + } async waitForLocated (locator) { try { return this.driver.wait(until.elementLocated(locator), this.timeout, - 'Timed out after 30 seconds locating ' + locator, this.polling) + 'Timed out after [timeout=' + this.timeout + ';polling=' + this.polling + '] seconds locating ' + locator, + this.polling) }catch(error) { console.error("Failed to locate element " + locator) throw error @@ -125,15 +192,23 @@ module.exports = class BasePage { async waitForVisible (element) { try { return this.driver.wait(until.elementIsVisible(element), this.timeout, - 'Timed out after 30 seconds awaiting till visible ' + element, this.polling) + 'Timed out after [timeout=' + this.timeout + ';polling=' + this.polling + '] awaiting till visible ' + element, + this.polling) }catch(error) { console.error("Failed to find visible element " + element) throw error } } + async waitForDisplayed (locator) { - return this.waitForVisible(await this.waitForLocated(locator)) + if (this.interactionDelay && this.interactionDelay > 0) await this.driver.sleep(this.interactionDelay) + try { + return this.waitForVisible(await this.waitForLocated(locator)) + }catch(error) { + console.error("Failed to waitForDisplayed for locator " + locator) + throw error + } } async getText (locator) { @@ -147,6 +222,8 @@ module.exports = class BasePage { } async click (locator) { + if (this.interactionDelay) await this.driver.sleep(this.interactionDelay) + const element = await this.waitForDisplayed(locator) try { return element.click() @@ -170,17 +247,20 @@ module.exports = class BasePage { async chooseFile (locator, file) { const element = await this.waitForDisplayed(locator) - var remote = require('selenium-webdriver/remote'); + const remote = require('selenium-webdriver/remote'); driver.setFileDetector(new remote.FileDetector); return element.sendKeys(file) } async acceptAlert () { await this.driver.wait(until.alertIsPresent(), this.timeout); await this.driver.sleep(250) - let alert = await this.driver.switchTo().alert(); + const alert = await this.driver.switchTo().alert(); await this.driver.sleep(250) return alert.accept(); } + log(message) { + console.log(new Date() + " " + message) + } capture () { this.driver.takeScreenshot().then( diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/KeycloakLoginPage.js b/deps/rabbitmq_management/selenium/test/pageobjects/KeycloakLoginPage.js new file mode 100644 index 000000000000..e05eea44521b --- /dev/null +++ b/deps/rabbitmq_management/selenium/test/pageobjects/KeycloakLoginPage.js @@ -0,0 +1,21 @@ +const { By, Key, until, Builder } = require('selenium-webdriver') + +const BasePage = require('./BasePage') + +const FORM = By.css('form#kc-form-login') +const USERNAME = By.css('input[name="username"]') +const PASSWORD = By.css('input[name="password"]') + +module.exports = class KeycloakLoginPage extends BasePage { + async isLoaded () { + return this.waitForDisplayed(FORM) + } + + async login (username, password) { + await this.isLoaded() + + await this.sendKeys(USERNAME, username) + await this.sendKeys(PASSWORD, password) + return this.submit(FORM) + } +} diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/LimitsAdminTab.js 
b/deps/rabbitmq_management/selenium/test/pageobjects/LimitsAdminTab.js index 590b6b3f1ce8..09ddbf9c5807 100644 --- a/deps/rabbitmq_management/selenium/test/pageobjects/LimitsAdminTab.js +++ b/deps/rabbitmq_management/selenium/test/pageobjects/LimitsAdminTab.js @@ -19,8 +19,7 @@ module.exports = class LimitsAdminTab extends AdminTab { await this.click(VIRTUAL_HOST_LIMITS_SECTION) try { - rows = driver.findElements(VIRTUAL_HOST_LIMITS_TABLE_ROWS) - return rows + return driver.findElements(VIRTUAL_HOST_LIMITS_TABLE_ROWS) } catch (NoSuchElement) { return Promise.resolve([]) } @@ -29,8 +28,7 @@ module.exports = class LimitsAdminTab extends AdminTab { await this.click(USER_LIMITS_SECTION) try { - rows = driver.findElements(VIRTUAL_HOST_LIMITS_TABLE_ROWS) - return rows + return driver.findElements(VIRTUAL_HOST_LIMITS_TABLE_ROWS) } catch (NoSuchElement) { return Promise.resolve([]) } diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/LoginPage.js b/deps/rabbitmq_management/selenium/test/pageobjects/LoginPage.js index 9b435f0b6c34..5e69e15dfbd6 100644 --- a/deps/rabbitmq_management/selenium/test/pageobjects/LoginPage.js +++ b/deps/rabbitmq_management/selenium/test/pageobjects/LoginPage.js @@ -6,6 +6,7 @@ const FORM = By.css('div#login form') const USERNAME = By.css('input[name="username"]') const PASSWORD = By.css('input[name="password"]') const LOGIN_BUTTON = By.css('div#outer div#login form input[type=submit]') +const WARNING = By.css('div#outer div#login div#login-status p') module.exports = class LoginPage extends BasePage { async isLoaded () { @@ -22,4 +23,26 @@ module.exports = class LoginPage extends BasePage { async getLoginButton () { return this.getValue(LOGIN_BUTTON) } + + + async isWarningVisible () { + try { + await this.waitForDisplayed(WARNING) + return Promise.resolve(true) + } catch (e) { + return Promise.resolve(false) + } + } + async getWarnings() { + try + { + return driver.findElements(WARNING) + } catch (NoSuchElement) { + return Promise.resolve([]) + } + } + async getWarning () { + return this.getText(WARNING) + } + } diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/SSOHomePage.js b/deps/rabbitmq_management/selenium/test/pageobjects/SSOHomePage.js index 22e127b73b00..4bb074a52597 100644 --- a/deps/rabbitmq_management/selenium/test/pageobjects/SSOHomePage.js +++ b/deps/rabbitmq_management/selenium/test/pageobjects/SSOHomePage.js @@ -2,38 +2,113 @@ const { By, Key, until, Builder } = require('selenium-webdriver') const BasePage = require('./BasePage') -const LOGIN_BUTTON = By.css('div#outer div#login div#login-status button#loginWindow') -const WARNING = By.css('p.warning') +const LOGIN = By.css('div#outer div#login') +const LOGOUT_BUTTON = By.css('div#outer div#login-status button#logout') +const OAUTH2_LOGIN_BUTTON = By.css('div#outer div#login button#login') +const SELECT_RESOURCES = By.css('div#outer div#login select#oauth2-resource') +const WARNING = By.css('div#outer div#login div#login-status p.warning') + +const SECTION_LOGIN_WITH_OAUTH = By.css('div#outer div#login div#login-with-oauth2') +const SECTION_LOGIN_WITH_BASIC_AUTH = By.css('div#outer div#login div#login-with-basic-auth') +const BASIC_AUTH_LOGIN_BUTTON = By.css('form#basic-auth-form input[type=submit]') + +const BASIC_AUTH_LOGIN_FORM = By.css('form#basic-auth-form') +const BASIC_AUTH_LOGIN_USERNAME = By.css('form#basic-auth-form input#username') +const BASIC_AUTH_LOGIN_PASSWORD = By.css('form#basic-auth-form input#password') module.exports = class SSOHomePage extends BasePage { 
async isLoaded () { - return this.waitForDisplayed(LOGIN_BUTTON) + return this.waitForDisplayed(LOGIN) } async clickToLogin () { await this.isLoaded() - if (!await this.isWarningVisible()) { - return this.click(LOGIN_BUTTON) - } else { - this.capture() - const message = await this.getWarning() - throw new Error('Warning message "' + message + '" is visible. Idp is probably down or not reachable') - } + return this.click(OAUTH2_LOGIN_BUTTON) + } + async clickToBasicAuthLogin () { + await this.isLoaded() + return this.click(BASIC_AUTH_LOGIN_BUTTON) } + async clickToLogout() { + await this.isLoaded() + return this.click(LOGOUT_BUTTON) + } async getLoginButton () { - return this.getText(LOGIN_BUTTON) + return this.getText(OAUTH2_LOGIN_BUTTON) + } + async getLogoutButton () { + return this.getText(LOGOUT_BUTTON) + } + async getBasicAuthLoginButton () { + return this.getValue(BASIC_AUTH_LOGIN_BUTTON) } - async isWarningVisible () { + async chooseOauthResource(text) { + return this.selectOption(SELECT_RESOURCES, text) + } + + async getOAuthResourceOptions () { + return this.getSelectableOptions(SELECT_RESOURCES) + } + async isLoginButtonVisible() { + try { + await this.waitForDisplayed(OAUTH2_LOGIN_BUTTON) + return Promise.resolve(true) + } catch (e) { + return Promise.resolve(false) + } + } + async isLogoutButtonVisible() { try { - await this.getText(WARNING) + await this.waitForDisplayed(LOGOUT_BUTTON) return Promise.resolve(true) } catch (e) { return Promise.resolve(false) } } + async isOAuth2SectionVisible() { + return this.isDisplayed(SECTION_LOGIN_WITH_OAUTH) + } + async getOAuth2Section() { + return this.waitForDisplayed(SECTION_LOGIN_WITH_OAUTH) + } + async isBasicAuthSectionVisible() { + return this.isDisplayed(SECTION_LOGIN_WITH_BASIC_AUTH) + } + async getBasicAuthSection() { + return this.waitForDisplayed(SECTION_LOGIN_WITH_BASIC_AUTH) + } + + async toggleBasicAuthSection() { + await this.click(SECTION_LOGIN_WITH_BASIC_AUTH) + } + + async basicAuthLogin (username, password) { + await this.isLoaded() + await this.sendKeys(BASIC_AUTH_LOGIN_USERNAME, username) + await this.sendKeys(BASIC_AUTH_LOGIN_PASSWORD, password) + return this.submit(BASIC_AUTH_LOGIN_FORM) + } + + + async isWarningVisible () { + try { + await this.waitForDisplayed(WARNING) + return Promise.resolve(true) + } catch (e) { + return Promise.resolve(false) + } + } + async getWarnings() { + try + { + return driver.findElements(WARNING) + } catch (NoSuchElement) { + return Promise.resolve([]) + } + } async getWarning () { return this.getText(WARNING) } diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/UAALoginPage.js b/deps/rabbitmq_management/selenium/test/pageobjects/UAALoginPage.js index 8e75b39ce14c..f0dc3c0708eb 100644 --- a/deps/rabbitmq_management/selenium/test/pageobjects/UAALoginPage.js +++ b/deps/rabbitmq_management/selenium/test/pageobjects/UAALoginPage.js @@ -2,7 +2,7 @@ const { By, Key, until, Builder } = require('selenium-webdriver') const BasePage = require('./BasePage') -const FORM = By.css('form') +const FORM = By.css('div.content form') const USERNAME = By.css('input[name="username"]') const PASSWORD = By.css('input[name="password"]') diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/VhostsAdminTab.js b/deps/rabbitmq_management/selenium/test/pageobjects/VhostsAdminTab.js index 71dd14dc00bd..34ae729da33d 100644 --- a/deps/rabbitmq_management/selenium/test/pageobjects/VhostsAdminTab.js +++ b/deps/rabbitmq_management/selenium/test/pageobjects/VhostsAdminTab.js @@ -25,7 +25,7 @@ 
module.exports = class VhostsAdminTab extends AdminTab { return await this.searchForVhosts(vhost, regex) != undefined } async clickOnVhost(vhost_rows, vhost) { - let links = await vhost_rows.findElements(By.css("td a")) + const links = await vhost_rows.findElements(By.css("td a")) for (let link of links) { let text = await link.getText() if ( text === "/" ) return link.click() diff --git a/deps/rabbitmq_management/selenium/test/utils.js b/deps/rabbitmq_management/selenium/test/utils.js index ae08f5ed5b66..efa9a5196c95 100644 --- a/deps/rabbitmq_management/selenium/test/utils.js +++ b/deps/rabbitmq_management/selenium/test/utils.js @@ -2,15 +2,19 @@ const fs = require('fs') const XMLHttpRequest = require('xmlhttprequest').XMLHttpRequest const fsp = fs.promises const path = require('path') -const { By, Key, until, Builder, logging } = require('selenium-webdriver') +const { By, Key, until, Builder, logging, Capabilities } = require('selenium-webdriver') require('chromedriver') +const UAALoginPage = require('./pageobjects/UAALoginPage') +const KeycloakLoginPage = require('./pageobjects/KeycloakLoginPage') +const assert = require('assert') const uaaUrl = process.env.UAA_URL || 'http://localhost:8080' -const baseUrl = process.env.RABBITMQ_URL || 'http://localhost:15672' +const baseUrl = process.env.RABBITMQ_URL || 'http://localhost:15672/' const hostname = process.env.RABBITMQ_HOSTNAME || 'localhost' const runLocal = String(process.env.RUN_LOCAL).toLowerCase() != 'false' const seleniumUrl = process.env.SELENIUM_URL || 'http://selenium:4444' const screenshotsDir = process.env.SCREENSHOTS_DIR || '/screens' +const profiles = process.env.PROFILES || '' class CaptureScreenshot { driver @@ -32,12 +36,25 @@ class CaptureScreenshot { } module.exports = { + log: (message) => { + console.log(new Date() + " " + message) + }, + + hasProfile: (profile) => { + return profiles.includes(profile) + }, + buildDriver: (caps) => { builder = new Builder() if (!runLocal) { builder = builder.usingServer(seleniumUrl) } - driver = builder.forBrowser('chrome').build() + var chromeCapabilities = Capabilities.chrome(); + chromeCapabilities.setAcceptInsecureCerts(true); + driver = builder + .forBrowser('chrome') + .withCapabilities(chromeCapabilities) + .build() driver.manage().setTimeouts( { pageLoad: 35000 } ) return driver }, @@ -55,7 +72,11 @@ module.exports = { }, goToLogin: (driver, token) => { - return driver.get(baseUrl + '/#/login?access_token=' + token) + return driver.get(baseUrl + '#/login?access_token=' + token) + }, + + goToExchanges: (driver) => { + return driver.get(baseUrl + '#/exchanges') }, goTo: (driver, address) => { @@ -72,9 +93,35 @@ module.exports = { return new CaptureScreenshot(driver, require('path').basename(test)) }, - tokenFor: (client_id, client_secret) => { + idpLoginPage: (driver, preferredIdp) => { + if (!preferredIdp) { + if (process.env.PROFILES.includes("uaa")) { + preferredIdp = "uaa" + } else if (process.env.PROFILES.includes("keycloak")) { + preferredIdp = "keycloak" + } else { + throw new Error("Missing uaa or keycloak profiles") + } + } + switch(preferredIdp) { + case "uaa": return new UAALoginPage(driver) + case "keycloak": return new KeycloakLoginPage(driver) + default: new Error("Unsupported ipd " + preferredIdp) + } + }, + openIdConfiguration: (url) => { + const req = new XMLHttpRequest() + req.open('GET', url + "/.well-known/openid-configuration", false) + req.send() + if (req.status == 200) return JSON.parse(req.responseText) + else { + console.error(req.responseText) + throw 
new Error(req.responseText) + } + }, + + tokenFor: (client_id, client_secret, url = uaaUrl) => { const req = new XMLHttpRequest() - const url = uaaUrl + '/oauth/token' const params = 'client_id=' + client_id + '&client_secret=' + client_secret + '&grant_type=client_credentials' + @@ -92,6 +139,15 @@ module.exports = { } }, + assertAllOptions: (expectedOptions, actualOptions) => { + assert.equal(expectedOptions.length, actualOptions.length) + for (let i = 0; i < expectedOptions.length; i++) { + assert.ok(actualOptions.find((actualOption) => + actualOption.value == expectedOptions[i].value + && actualOption.text == expectedOptions[i].text)) + } + }, + teardown: async (driver, test, captureScreen = null) => { driver.manage().logs().get(logging.Type.BROWSER).then(function(entries) { entries.forEach(function(entry) { diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_app.erl b/deps/rabbitmq_management/src/rabbit_mgmt_app.erl index 600ee9886808..c34bff1ceda9 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_app.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_app). @@ -14,7 +14,6 @@ -export([get_listeners_config/0]). -endif. --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -define(TCP_CONTEXT, rabbitmq_management_tcp). @@ -26,6 +25,13 @@ [{description, "Imports definition file at management.load_definitions"}, {mfa, {rabbit_mgmt_load_definitions, boot, []}}]}). +-rabbit_feature_flag( + {detailed_queues_endpoint, + #{desc => "Add a detailed queues HTTP API endpoint. Reduce number of metrics in the default endpoint.", + stability => stable, + depends_on => [feature_flags_v2] + }}). + start(_Type, _StartArgs) -> case rabbit_mgmt_agent_config:is_metrics_collector_enabled() of true -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl b/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl index 12d123728667..0decb066d415 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Useful documentation about CORS: diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_csp.erl b/deps/rabbitmq_management/src/rabbit_mgmt_csp.erl index 7939acb97bba..02fde2e2ae5d 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_csp.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_csp.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% Sets CSP header(s) on the response if configured, diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_db.erl b/deps/rabbitmq_management/src/rabbit_mgmt_db.erl index 7383b1f45775..0814ede743e9 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_db.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_db.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_db). --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit_core_metrics.hrl"). @@ -145,13 +144,20 @@ augment_exchanges(Xs, Ranges, _) -> %% we can only cache if no ranges are requested. %% The mgmt ui doesn't use ranges for queue listings --spec augment_queues([proplists:proplist()], ranges(), basic | full) -> any(). -augment_queues(Qs, ?NO_RANGES = Ranges, basic) -> +-spec augment_queues([proplists:proplist()], ranges(), basic | detailed | full) -> any(). +augment_queues(Qs, ?NO_RANGES = Ranges, basic) -> + submit_cached(queues, + fun(Interval, Queues) -> + list_basic_queue_stats(Ranges, Queues, Interval) + end, Qs, max(60000, length(Qs) * 2)); +augment_queues(Qs, ?NO_RANGES = Ranges, detailed) -> submit_cached(queues, fun(Interval, Queues) -> list_queue_stats(Ranges, Queues, Interval) end, Qs, max(60000, length(Qs) * 2)); -augment_queues(Qs, Ranges, basic) -> +augment_queues(Qs, Ranges, basic) -> + submit(fun(Interval) -> list_basic_queue_stats(Ranges, Qs, Interval) end); +augment_queues(Qs, Ranges, detailed) -> submit(fun(Interval) -> list_queue_stats(Ranges, Qs, Interval) end); augment_queues(Qs, Ranges, _) -> submit(fun(Interval) -> detail_queue_stats(Ranges, Qs, Interval) end). @@ -349,8 +355,16 @@ consumers_stats(VHost) -> -spec list_queue_stats(ranges(), [proplists:proplist()], integer()) -> [proplists:proplist()]. list_queue_stats(Ranges, Objs, Interval) -> + list_queue_stats(Ranges, Objs, Interval, all_list_queue_data). + +-spec list_basic_queue_stats(ranges(), [proplists:proplist()], integer()) -> + [proplists:proplist()]. +list_basic_queue_stats(Ranges, Objs, Interval) -> + list_queue_stats(Ranges, Objs, Interval, all_list_basic_queue_data). + +list_queue_stats(Ranges, Objs, Interval, Fun) -> Ids = [id_lookup(queue_stats, Obj) || Obj <- Objs], - DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_list_queue_data, [Ids, Ranges]}), + DataLookup = get_data_from_nodes({rabbit_mgmt_data, Fun, [Ids, Ranges]}), adjust_hibernated_memory_use( [begin Id = id_lookup(queue_stats, Obj), @@ -652,10 +666,13 @@ node_stats(Ranges, Objs, Interval) -> combine(New, Old) -> case pget(state, Old) of unknown -> New ++ Old; - live -> New ++ lists:keydelete(state, 1, Old); + live -> New ++ delete_keys([state, online], Old); _ -> lists:keydelete(state, 1, New) ++ Old end. +delete_keys(Keys, List) -> + [I || I <- List, not lists:member(element(1, I), Keys)]. 
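The hunk above splits queue listing into basic and detailed flavours (list_basic_queue_stats/3 fetches all_list_basic_queue_data) and widens combine/2 so that a live entry drops both the state and online keys from the stale proplist. A minimal sketch of what the new delete_keys/2 helper does, with invented values (it is an internal function, so this is illustrative only):

%% Keep only the tuples whose key is not in the exclusion list.
Old = [{state, unknown}, {online, true}, {messages, 3}],
delete_keys([state, online], Old).
%% => [{messages, 3}]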
+ revert({'_', _}, {Id, _}) -> Id; revert({_, '_'}, {_, Id}) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_db_cache.erl b/deps/rabbitmq_management/src/rabbit_mgmt_db_cache.erl index aa603150fbbf..dc2c55066f2c 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_db_cache.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_db_cache.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. -module(rabbit_mgmt_db_cache). @@ -65,9 +65,14 @@ fetch(Key, FetchFun, FunArgs, Timeout) -> ProcName = process_name(Key), Pid = case whereis(ProcName) of undefined -> - {ok, P} = supervisor:start_child(rabbit_mgmt_db_cache_sup, - ?CHILD(Key)), - P; + case supervisor:start_child(rabbit_mgmt_db_cache_sup, + ?CHILD(Key)) of + {ok, P} -> + P; + {error, {already_started, P}} -> + %% A parallel request started the cache meanwhile + P + end; P -> P end, gen_server:call(Pid, {fetch, FetchFun, FunArgs}, Timeout). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_db_cache_sup.erl b/deps/rabbitmq_management/src/rabbit_mgmt_db_cache_sup.erl index 9a8d4a62a8a0..92f5353ea523 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_db_cache_sup.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_db_cache_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_mgmt_db_cache_sup). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl b/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl index 270a81aee3f6..ef73bd7cfca8 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_dispatcher). @@ -63,8 +63,18 @@ build_module_routes(Ignore) -> Routes = [Module:dispatcher() || Module <- modules(Ignore)], [{"/api" ++ Path, Mod, Args} || {Path, Mod, Args} <- lists:append(Routes)]. -modules(IgnoreApps) -> - [Module || {App, Module, Behaviours} <- +modules(IgnoreApps0) -> + Apps0 = rabbit_misc:rabbitmq_related_apps(), + Apps = case IgnoreApps0 of + [] -> + Apps0; + _ -> + IgnoreApps = sets:from_list(IgnoreApps0, [{version, 2}]), + lists:filter( + fun(App) -> not sets:is_element(App, IgnoreApps) end, + Apps0) + end, + [Module || {_App, Module, Behaviours} <- %% Sort rabbitmq_management modules first. This is %% a microoptimization because most files belong to %% this application. 
Making it first avoids several @@ -76,8 +86,7 @@ modules(IgnoreApps) -> (_, {rabbitmq_management, _, _}) -> false; ({A, _, _}, {B, _, _}) -> A =< B end, - rabbit_misc:all_module_attributes(behaviour)), - not lists:member(App, IgnoreApps), + rabbit_misc:module_attributes_from_apps(behaviour, Apps)), lists:member(rabbit_mgmt_extension, Behaviours)]. module_app(Module) -> @@ -134,6 +143,7 @@ dispatcher() -> {"/exchanges/:vhost/:exchange/bindings/source", rabbit_mgmt_wm_bindings, [exchange_source]}, {"/exchanges/:vhost/:exchange/bindings/destination", rabbit_mgmt_wm_bindings, [exchange_destination]}, {"/queues", rabbit_mgmt_wm_queues, []}, + {"/queues/detailed", rabbit_mgmt_wm_queues, [detailed]}, {"/queues/:vhost", rabbit_mgmt_wm_queues, []}, {"/queues/:vhost/:queue", rabbit_mgmt_wm_queue, []}, {"/queues/:vhost/:destination/bindings", rabbit_mgmt_wm_bindings, [queue]}, @@ -168,6 +178,8 @@ dispatcher() -> {"/user-limits/:user", rabbit_mgmt_wm_user_limits, []}, {"/feature-flags", rabbit_mgmt_wm_feature_flags, []}, {"/feature-flags/:name/enable", rabbit_mgmt_wm_feature_flag_enable, []}, + {"/deprecated-features", rabbit_mgmt_wm_deprecated_features, [all]}, + {"/deprecated-features/used", rabbit_mgmt_wm_deprecated_features, [used]}, {"/whoami", rabbit_mgmt_wm_whoami, []}, {"/permissions", rabbit_mgmt_wm_permissions, []}, {"/permissions/:vhost/:user", rabbit_mgmt_wm_permission, []}, @@ -185,7 +197,6 @@ dispatcher() -> {"/health/checks/port-listener/:port", rabbit_mgmt_wm_health_check_port_listener, []}, {"/health/checks/protocol-listener/:protocol", rabbit_mgmt_wm_health_check_protocol_listener, []}, {"/health/checks/virtual-hosts", rabbit_mgmt_wm_health_check_virtual_hosts, []}, - {"/health/checks/node-is-mirror-sync-critical", rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical, []}, {"/health/checks/node-is-quorum-critical", rabbit_mgmt_wm_health_check_node_is_quorum_critical, []}, {"/reset", rabbit_mgmt_wm_reset, []}, {"/reset/:node", rabbit_mgmt_wm_reset, []}, diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_extension.erl b/deps/rabbitmq_management/src/rabbit_mgmt_extension.erl index b481e95a745d..bdf04c673ea6 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_extension.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_extension.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_extension). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_features.erl b/deps/rabbitmq_management/src/rabbit_mgmt_features.erl index aaecb0a30b0b..70c820de7dc2 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_features.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_features.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_features). 
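The dispatcher hunk above mounts two new route groups, /queues/detailed (rabbit_mgmt_wm_queues with the detailed flag) and /deprecated-features plus /deprecated-features/used, and removes the classic-mirroring health check. A hedged sketch of calling them from an Erlang shell, assuming a broker listening on localhost:15672 with the default guest credentials and the detailed_queues_endpoint feature flag enabled (build_module_routes/1 prefixes every path with /api):

inets:start(),
Auth = {"authorization", "Basic " ++ base64:encode_to_string("guest:guest")},
{ok, {{_, 200, _}, _, DetailedQueues}} =
    httpc:request(get, {"http://localhost:15672/api/queues/detailed", [Auth]}, [], []),
{ok, {{_, 200, _}, _, UsedDeprecatedFeatures}} =
    httpc:request(get, {"http://localhost:15672/api/deprecated-features/used", [Auth]}, [], []).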
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_headers.erl b/deps/rabbitmq_management/src/rabbit_mgmt_headers.erl index caf0d4a8c0d6..b4d348d74d55 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_headers.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_headers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module contains helper functions that control diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_hsts.erl b/deps/rabbitmq_management/src/rabbit_mgmt_hsts.erl index 7954397ed557..ecc30f5f24c4 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_hsts.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_hsts.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Sets HSTS header(s) on the response if configured, diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_load_definitions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_load_definitions.erl index 1cfcba764ebb..f765df4ed123 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_load_definitions.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_load_definitions.erl @@ -2,20 +2,17 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_load_definitions). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([boot/0, maybe_load_definitions/0, maybe_load_definitions_from/2]). %% This module exists for backwards compatibility only. %% Definition import functionality is now a core server feature. boot() -> - rabbit_log:debug("Will import definitions file from management.load_definitions"), rabbit_definitions:maybe_load_definitions(rabbitmq_management, load_definitions). maybe_load_definitions() -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_login.erl b/deps/rabbitmq_management/src/rabbit_mgmt_login.erl index 7770bccc38b7..a78d79c4666f 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_login.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_login.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2022 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_login). @@ -10,8 +10,6 @@ -export([init/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). 
- %%-------------------------------------------------------------------- init(Req0, State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_nodes.erl b/deps/rabbitmq_management/src/rabbit_mgmt_nodes.erl new file mode 100644 index 000000000000..2382f031c6b3 --- /dev/null +++ b/deps/rabbitmq_management/src/rabbit_mgmt_nodes.erl @@ -0,0 +1,26 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_mgmt_nodes). + +-export([ + node_name_from_req/1, + node_exists/1 +]). + +%% +%% API +%% + +node_name_from_req(ReqData) -> + list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))). + +%% To be used in resource_exists/2 +node_exists(ReqData) -> + Node = node_name_from_req(ReqData), + AllNodes = rabbit_nodes:list_members(), + lists:member(Node, AllNodes). \ No newline at end of file diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_oauth_bootstrap.erl b/deps/rabbitmq_management/src/rabbit_mgmt_oauth_bootstrap.erl index f79e8279a200..f31704acdacc 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_oauth_bootstrap.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_oauth_bootstrap.erl @@ -2,36 +2,56 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2022 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_oauth_bootstrap). -export([init/2]). --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - %%-------------------------------------------------------------------- init(Req0, State) -> - bootstrap_oauth(rabbit_mgmt_headers:set_no_cache_headers( - rabbit_mgmt_headers:set_common_permission_headers(Req0, ?MODULE), ?MODULE), State). + bootstrap_oauth(rabbit_mgmt_headers:set_no_cache_headers( + rabbit_mgmt_headers:set_common_permission_headers(Req0, ?MODULE), ?MODULE), State). bootstrap_oauth(Req0, State) -> - JSContent = oauth_initialize_if_required() ++ set_token_auth(Req0), - {ok, cowboy_req:reply(200, #{<<"content-type">> => <<"text/javascript; charset=utf-8">>}, JSContent, Req0), State}. - -oauth_initialize_if_required() -> - ["function oauth_initialize_if_required() { return oauth_initialize(" , - rabbit_json:encode(rabbit_mgmt_format:format_nulls(rabbit_mgmt_wm_auth:authSettings())) , ") }" ]. - -set_token_auth(Req0) -> - case application:get_env(rabbitmq_management, oauth_enabled, false) of - true -> - case cowboy_req:parse_header(<<"authorization">>, Req0) of - {bearer, Token} -> ["set_token_auth('", Token, "');"]; - _ -> [] - end; - false -> [] - end. + AuthSettings = rabbit_mgmt_wm_auth:authSettings(), + Dependencies = oauth_dependencies(), + JSContent = import_dependencies(Dependencies) ++ + set_oauth_settings(AuthSettings) ++ + set_token_auth(AuthSettings, Req0) ++ + export_dependencies(Dependencies), + {ok, cowboy_req:reply(200, #{<<"content-type">> => <<"text/javascript; charset=utf-8">>}, + JSContent, Req0), State}. 
+ +set_oauth_settings(AuthSettings) -> + JsonAuthSettings = rabbit_json:encode(rabbit_mgmt_format:format_nulls(AuthSettings)), + ["set_oauth_settings(", JsonAuthSettings, ");"]. + +set_token_auth(AuthSettings, Req0) -> + case proplists:get_value(oauth_enabled, AuthSettings, false) of + true -> + case cowboy_req:parse_header(<<"authorization">>, Req0) of + {bearer, Token} -> ["set_token_auth('", Token, "');"]; + _ -> [] + end; + false -> + [] + end. + +import_dependencies(Dependencies) -> + ["import {", string:join(Dependencies, ","), "} from './helper.js';"]. + +oauth_dependencies() -> + ["oauth_initialize_if_required", + "hasAnyResourceServerReady", + "oauth_initialize", "oauth_initiate", + "oauth_initiateLogin", + "oauth_initiateLogout", + "oauth_completeLogin", + "oauth_completeLogout", + "set_oauth_settings"]. + +export_dependencies(Dependencies) -> + [ io_lib:format("window.~s = ~s;", [Dep, Dep]) || Dep <- Dependencies ]. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_reset_handler.erl b/deps/rabbitmq_management/src/rabbit_mgmt_reset_handler.erl index 9d45fddb79d7..6afd99fad1d3 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_reset_handler.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_reset_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% When management extensions are enabled and/or disabled at runtime, the diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_stats.erl b/deps/rabbitmq_management/src/rabbit_mgmt_stats.erl index 3d839f9ef319..2990c7238de0 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_stats.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_stats.erl @@ -2,14 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_stats). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl"). - -export([format_range/6]). -define(MICRO_TO_MILLI, 1000). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_sup.erl b/deps/rabbitmq_management/src/rabbit_mgmt_sup.erl index 4a23c58ebe39..dbed1b18c434 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_sup.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_sup). @@ -13,7 +13,6 @@ -export([start_link/0]). -export([setup_wm_logging/0]). --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit_core_metrics.hrl"). 
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_sup_sup.erl b/deps/rabbitmq_management/src/rabbit_mgmt_sup_sup.erl index cfda761e4b05..b6c2ec96cbc6 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_sup_sup.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_sup_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_sup_sup). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_util.erl b/deps/rabbitmq_management/src/rabbit_mgmt_util.erl index 9ae365aff27d..99a8436e16ea 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_util.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_util). @@ -18,7 +18,8 @@ is_authorized_vhost_visible_for_monitoring/2, is_authorized_global_parameters/2]). -export([user/1]). --export([bad_request/3, service_unavailable/3, bad_request_exception/4, internal_server_error/4, +-export([bad_request/3, service_unavailable/3, bad_request_exception/4, + internal_server_error/3, internal_server_error/4, id/2, parse_bool/1, parse_int/1, redirect_to_home/3]). -export([with_decode/4, not_found/3]). -export([with_channel/4, with_channel/5]). @@ -272,13 +273,23 @@ get_value_param(Name, ReqData) -> Bin -> binary_to_list(Bin) end. +get_sorts_param(ReqData, Def) -> + case get_value_param(<<"sort">>, ReqData) of + undefined -> + Def; + [] -> + Def; + S -> + [S] + end. + reply_list(Facts, DefaultSorts, ReqData, Context, Pagination) -> SortList = - sort_list_and_paginate( - extract_columns_list(Facts, ReqData), - DefaultSorts, - get_value_param(<<"sort">>, ReqData), - get_sort_reverse(ReqData), Pagination), + sort_list_and_paginate( + extract_columns_list(Facts, ReqData), + DefaultSorts, + get_sorts_param(ReqData, undefined), + get_sort_reverse(ReqData), Pagination), reply(SortList, ReqData, Context). @@ -320,7 +331,9 @@ reply_list_or_paginate(Facts, ReqData, Context) -> merge_sorts(DefaultSorts, Extra) -> case Extra of undefined -> DefaultSorts; - Extra -> [Extra | DefaultSorts] + Extra -> + %% it is possible that the extra sorts have an overlap with default + lists:uniq(Extra ++ DefaultSorts) end. %% Resource augmentation. 
Works out the most optimal configuration of the operations: @@ -352,9 +365,9 @@ augment_resources0(Resources, DefaultSort, BasicColumns, Pagination, ReqData, SortFun = fun (AugCtx) -> sort(DefaultSort, AugCtx) end, AugFun = fun (AugCtx) -> augment(AugmentFun, AugCtx) end, PageFun = fun page/1, - Pagination = pagination_params(ReqData), - Sort = def(get_value_param(<<"sort">>, ReqData), DefaultSort), - Columns = def(columns(ReqData), all), + %% Sort needs to be a list of, erm, strings which are lists + Sort = get_sorts_param(ReqData, DefaultSort), + Columns = columns(ReqData), ColumnsAsStrings = columns_as_strings(Columns), Pipeline = case {Pagination =/= undefined, @@ -368,7 +381,10 @@ augment_resources0(Resources, DefaultSort, BasicColumns, Pagination, ReqData, {true, basic, basic} -> [SortFun, PageFun]; {true, extended, _} -> - % pagination with extended sort columns - SLOW + Path = cowboy_req:path(ReqData), + rabbit_log:debug("HTTP API: ~s slow query mode requested - extended sort on ~0p", + [Path, Sort]), + % pagination with extended sort columns - SLOW! [AugFun, SortFun, PageFun]; {true, basic, extended} -> % pagination with extended columns and sorting on basic @@ -524,7 +540,7 @@ pagination_params(ReqData) -> [PageNum, PageSize])}) end. --spec maybe_reverse([any()], string() | true | false) -> [any()]. +-spec maybe_reverse([any()], string() | boolean()) -> [any()]. maybe_reverse([], _) -> []; maybe_reverse(RangeList, true) when is_list(RangeList) -> @@ -659,6 +675,9 @@ not_found(Reason, ReqData, Context) -> method_not_allowed(Reason, ReqData, Context) -> halt_response(405, method_not_allowed, Reason, ReqData, Context). +internal_server_error(Reason, ReqData, Context) -> + internal_server_error(internal_server_error, Reason, ReqData, Context). + internal_server_error(Error, Reason, ReqData, Context) -> rabbit_log:error("~ts~n~ts", [Error, Reason]), halt_response(500, Error, Reason, ReqData, Context). @@ -686,15 +705,27 @@ id(Key, ReqData) -> read_complete_body(Req) -> read_complete_body(Req, <<"">>). -read_complete_body(Req0, Acc) -> - case cowboy_req:read_body(Req0) of - {ok, Data, Req} -> {ok, <<Acc/binary, Data/binary>>, Req}; - {more, Data, Req} -> read_complete_body(Req, <<Acc/binary, Data/binary>>) +read_complete_body(Req, Acc) -> + BodySizeLimit = application:get_env(rabbitmq_management, max_http_body_size, ?MANAGEMENT_DEFAULT_HTTP_MAX_BODY_SIZE), + read_complete_body(Req, Acc, BodySizeLimit). +read_complete_body(Req0, Acc, BodySizeLimit) -> + case bit_size(Acc) > BodySizeLimit of + true -> + {error, "Exceeded HTTP request body size limit"}; + false -> + case cowboy_req:read_body(Req0) of + {ok, Data, Req} -> {ok, <<Acc/binary, Data/binary>>, Req}; + {more, Data, Req} -> read_complete_body(Req, <<Acc/binary, Data/binary>>) + end end. with_decode(Keys, ReqData, Context, Fun) -> - {ok, Body, ReqData1} = read_complete_body(ReqData), - with_decode(Keys, Body, ReqData1, Context, Fun). + case read_complete_body(ReqData) of + {error, Reason} -> + bad_request(Reason, ReqData, Context); + {ok, Body, ReqData1} -> + with_decode(Keys, Body, ReqData1, Context, Fun) + end.
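read_complete_body/2,3 above now caps how much request body the API will buffer: the guard compares bit_size/1 of the accumulated binary with the configured limit, and with_decode/4 turns the {error, ...} result into a 400 via bad_request/3. A hypothetical advanced.config override (the number is invented; the shipped default comes from ?MANAGEMENT_DEFAULT_HTTP_MAX_BODY_SIZE):

[
 {rabbitmq_management, [
   %% Requests whose accumulated body exceeds this limit are rejected with
   %% "Exceeded HTTP request body size limit".
   {max_http_body_size, 20000000}
 ]}
].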
with_decode(Keys, Body, ReqData, Context, Fun) -> case decode(Keys, Body) of @@ -784,7 +815,8 @@ direct_request(MethodName, Transformers, Extra, ErrorMsg, ReqData, rabbit_log:warning(ErrorMsg, [Explanation]), bad_request(list_to_binary(Explanation), ReqData1, Context); {badrpc, Reason} -> - rabbit_log:warning(ErrorMsg, [Reason]), + Msg = io_lib:format("~tp", [Reason]), + rabbit_log:warning(ErrorMsg, [Msg]), bad_request( list_to_binary( io_lib:format("Request to node ~ts failed with ~tp", @@ -1096,9 +1128,6 @@ int(Name, ReqData) -> end end. -def(undefined, Def) -> Def; -def(V, _) -> V. - -spec qs_val(binary(), cowboy_req:req()) -> any() | undefined. qs_val(Name, ReqData) -> Qs = cowboy_req:parse_qs(ReqData), diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_aliveness_test.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_aliveness_test.erl index 04a691fc6ae1..e0efa6f1b08e 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_aliveness_test.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_aliveness_test.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_aliveness_test). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth.erl index 93bf1b98d583..cc3f0b3f486f 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth.erl @@ -2,17 +2,17 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_auth). -export([init/2, to_json/2, content_types_provided/2, is_authorized/2]). -export([variances/2]). --export([authSettings/0]). +-export([authSettings/0]). %% for testing only -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("oauth2_client/include/oauth2_client.hrl"). %%-------------------------------------------------------------------- @@ -25,47 +25,124 @@ variances(Req, Context) -> content_types_provided(ReqData, Context) -> {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}. +merge_oauth_provider_info(OAuthResourceServer, MgtResourceServer, ManagementProps) -> + OAuthProviderResult = case proplists:get_value(oauth_provider_id, OAuthResourceServer) of + undefined -> oauth2_client:get_oauth_provider([issuer]); + OauthProviderId -> oauth2_client:get_oauth_provider(OauthProviderId, [issuer]) + end, + OAuthProviderInfo0 = case OAuthProviderResult of + {ok, OAuthProvider} -> oauth_provider_to_map(OAuthProvider); + {error, _} -> #{} + end, + OAuthProviderInfo1 = maps:merge(OAuthProviderInfo0, + case proplists:get_value(oauth_provider_url, ManagementProps) of + undefined -> #{}; + V1 -> #{oauth_provider_url => V1} + end), + maps:merge(OAuthProviderInfo1, proplists:to_map(MgtResourceServer)). 
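merge_oauth_provider_info/3 above layers three sources, in increasing precedence: the issuer (and optional end_session_endpoint) resolved through oauth2_client, a top-level management oauth_provider_url, and the per-resource management entry itself. A rough sketch of the outcome when no provider can be resolved and only the global URL is set (values invented; the function is not exported, so this is illustrative rather than a runnable call):

merge_oauth_provider_info(
    [],                                                        %% OAuth2-side entry
    [{id, <<"rabbitmq-prod">>}],                               %% management-side entry
    [{oauth_provider_url, <<"https://idp.example.local">>}]).  %% management props
%% => #{id => <<"rabbitmq-prod">>,
%%      oauth_provider_url => <<"https://idp.example.local">>}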
+ +oauth_provider_to_map(OAuthProvider) -> + % only include issuer and end_session_endpoint for now. The other endpoints are resolved by oidc-client library + Map0 = #{ oauth_provider_url => OAuthProvider#oauth_provider.issuer }, + case OAuthProvider#oauth_provider.end_session_endpoint of + undefined -> Map0; + V -> maps:put(end_session_endpoint, V, Map0) + end. + +skip_unknown_mgt_resource_servers(MgtOauthResources, OAuth2Resources) -> + maps:filter(fun(Key, _Value) -> maps:is_key(Key, OAuth2Resources) end, MgtOauthResources). +skip_disabled_mgt_resource_servers(MgtOauthResources) -> + maps:filter(fun(_Key, Value) -> not proplists:get_value(disabled, Value, false) end, MgtOauthResources). + +extract_oauth2_and_mgt_resources(OAuth2BackendProps, ManagementProps) -> + OAuth2Resources = getAllDeclaredOauth2Resources(OAuth2BackendProps), + MgtResources0 = skip_unknown_mgt_resource_servers(proplists:get_value(oauth_resource_servers, + ManagementProps, #{}), OAuth2Resources), + MgtResources1 = maps:merge(MgtResources0, maps:filtermap(fun(K,_V) -> + case maps:is_key(K, MgtResources0) of + true -> false; + false -> {true, [{id, K}]} + end end, OAuth2Resources)), + MgtResources = maps:map( + fun(K,V) -> merge_oauth_provider_info(maps:get(K, OAuth2Resources, #{}), V, ManagementProps) end, + skip_disabled_mgt_resource_servers(MgtResources1)), + case maps:size(MgtResources) of + 0 -> {}; + _ -> {MgtResources} + end. + +getAllDeclaredOauth2Resources(OAuth2BackendProps) -> + OAuth2Resources = proplists:get_value(resource_servers, OAuth2BackendProps, #{}), + case proplists:get_value(resource_server_id, OAuth2BackendProps) of + undefined -> OAuth2Resources; + Id -> maps:put(Id, [{id, Id}], OAuth2Resources) + end. + authSettings() -> - EnableOAUTH = application:get_env(rabbitmq_management, oauth_enabled, false), + ManagementProps = application:get_all_env(rabbitmq_management), + OAuth2BackendProps = application:get_all_env(rabbitmq_auth_backend_oauth2), + EnableOAUTH = proplists:get_value(oauth_enabled, ManagementProps, false), case EnableOAUTH of + false -> [{oauth_enabled, false}]; true -> - OAuthInitiatedLogonType = application:get_env(rabbitmq_management, oauth_initiated_logon_type, sp_initiated), - OAuthProviderUrl = application:get_env(rabbitmq_management, oauth_provider_url, ""), - case OAuthInitiatedLogonType of - sp_initiated -> - OAuthClientId = application:get_env(rabbitmq_management, oauth_client_id, ""), - OAuthClientSecret = application:get_env(rabbitmq_management, oauth_client_secret, ""), - OAuthMetadataUrl = application:get_env(rabbitmq_management, oauth_metadata_url, ""), - OAuthScopes = application:get_env(rabbitmq_management, oauth_scopes, ""), - OAuthResourceId = application:get_env(rabbitmq_auth_backend_oauth2, resource_server_id, ""), - case is_invalid([OAuthResourceId]) of - true -> - [{oauth_enabled, false}]; - false -> - case is_invalid([OAuthClientId, OAuthProviderUrl]) of - true -> - [{oauth_enabled, false}, {oauth_client_id, <<>>}, {oauth_provider_url, <<>>}]; - false -> - append_oauth_optional_secret([ - {oauth_enabled, true}, - {oauth_client_id, rabbit_data_coercion:to_binary(OAuthClientId)}, - {oauth_provider_url, rabbit_data_coercion:to_binary(OAuthProviderUrl)}, - {oauth_scopes, rabbit_data_coercion:to_binary(OAuthScopes)}, - {oauth_metadata_url, rabbit_data_coercion:to_binary(OAuthMetadataUrl)}, - {oauth_resource_id, rabbit_data_coercion:to_binary(OAuthResourceId)} - ], OAuthClientSecret) - end - end; - idp_initiated -> - [{oauth_enabled, true}, - 
{oauth_initiated_logon_type, rabbit_data_coercion:to_binary(OAuthInitiatedLogonType)}, - {oauth_provider_url, rabbit_data_coercion:to_binary(OAuthProviderUrl)} - ] - end; - false -> - [{oauth_enabled, false}] + case extract_oauth2_and_mgt_resources(OAuth2BackendProps, ManagementProps) of + {MgtResources} -> produce_auth_settings(MgtResources, ManagementProps); + {} -> [{oauth_enabled, false}] + end end. +skip_mgt_resource_servers_without_oauth_client_id_with_sp_initiated_logon(MgtResourceServers, ManagementProps) -> + DefaultOauthInitiatedLogonType = proplists:get_value(oauth_initiated_logon_type, ManagementProps, sp_initiated), + maps:filter(fun(_K,ResourceServer) -> + SpInitiated = case maps:get(oauth_initiated_logon_type, ResourceServer, DefaultOauthInitiatedLogonType) of + sp_initiated -> true; + _ -> false + end, + not SpInitiated or + not is_invalid([maps:get(oauth_client_id, ResourceServer, undefined)]) end, MgtResourceServers). + + +filter_mgt_resource_servers_without_oauth_client_id_for_sp_initiated(MgtResourceServers, ManagementProps) -> + case is_invalid([proplists:get_value(oauth_client_id, ManagementProps)]) of + true -> skip_mgt_resource_servers_without_oauth_client_id_with_sp_initiated_logon(MgtResourceServers, ManagementProps); + false -> MgtResourceServers + end. + +filter_mgt_resource_servers_without_oauth_provider_url(MgtResourceServers) -> + maps:filter(fun(_K1,V1) -> maps:is_key(oauth_provider_url, V1) end, MgtResourceServers). + +produce_auth_settings(MgtResourceServers, ManagementProps) -> + ConvertValuesToBinary = fun(_K,V) -> [ {K1, to_binary(V1)} || {K1,V1} <- maps:to_list(V) ] end, + FilteredMgtResourceServers = filter_mgt_resource_servers_without_oauth_provider_url( + filter_mgt_resource_servers_without_oauth_client_id_for_sp_initiated(MgtResourceServers, ManagementProps)), + + case maps:size(FilteredMgtResourceServers) of + 0 -> [{oauth_enabled, false}]; + _ -> + filter_empty_properties([ + {oauth_enabled, true}, + {oauth_resource_servers, maps:map(ConvertValuesToBinary, FilteredMgtResourceServers)}, + to_tuple(oauth_disable_basic_auth, ManagementProps, true), + to_tuple(oauth_client_id, ManagementProps), + to_tuple(oauth_client_secret, ManagementProps), + to_tuple(oauth_scopes, ManagementProps), + case proplists:get_value(oauth_initiated_logon_type, ManagementProps, sp_initiated) of + sp_initiated -> {}; + idp_initiated -> {oauth_initiated_logon_type, <<"idp_initiated">>} + end + ]) + end. + +filter_empty_properties(ListOfProperties) -> + lists:filter(fun(Prop) -> + case Prop of + {} -> false; + _ -> true + end + end, ListOfProperties). + +to_binary(Value) -> rabbit_data_coercion:to_binary(Value). + to_json(ReqData, Context) -> rabbit_mgmt_util:reply(authSettings(), ReqData, Context). @@ -73,9 +150,17 @@ is_authorized(ReqData, Context) -> {true, ReqData, Context}. is_invalid(List) -> - lists:any(fun(V) -> V == "" end, List). + lists:any(fun(V) -> case V of + "" -> true; + undefined -> true; + {error, _} -> true; + _ -> false + end end, List). -append_oauth_optional_secret(List, OAuthClientSecret) when OAuthClientSecret == "" -> - List; -append_oauth_optional_secret(List, OAuthClientSecret) -> - lists:append(List, [{oauth_client_secret, rabbit_data_coercion:to_binary(OAuthClientSecret)}]). +to_tuple(Key, Proplist) -> + case proplists:is_defined(Key, Proplist) of + true -> {Key, rabbit_data_coercion:to_binary(proplists:get_value(Key, Proplist))}; + false -> {} + end. 
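Putting the rewritten authSettings/0 pipeline together, a hypothetical advanced.config fragment (identifiers and URLs invented) and the proplist it would roughly produce:

[
 {rabbitmq_auth_backend_oauth2, [
   {resource_servers, #{<<"rabbitmq-prod">> => [], <<"rabbitmq-dev">> => []}}
 ]},
 {rabbitmq_management, [
   {oauth_enabled, true},
   {oauth_client_id, <<"rabbitmq-mgmt">>},
   {oauth_provider_url, <<"https://idp.example.local">>},
   %% resource servers flagged as disabled are dropped from the UI settings
   {oauth_resource_servers, #{<<"rabbitmq-dev">> => [{disabled, true}]}}
 ]}
].
%% authSettings/0 then returns, roughly:
%% [{oauth_enabled, true},
%%  {oauth_resource_servers,
%%   #{<<"rabbitmq-prod">> => [{id, <<"rabbitmq-prod">>},
%%                             {oauth_provider_url, <<"https://idp.example.local">>}]}},
%%  {oauth_disable_basic_auth, true},
%%  {oauth_client_id, <<"rabbitmq-mgmt">>}]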
+to_tuple(Key, Proplist, DefaultValue) -> + {Key, proplists:get_value(Key, Proplist, DefaultValue)}. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth_attempts.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth_attempts.erl index ad2b66651b1d..3f062954ac33 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth_attempts.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth_attempts.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_auth_attempts). @@ -14,8 +14,6 @@ -import(rabbit_misc, [pget/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, [Mode]) -> {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_binding.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_binding.erl index 104a000767b6..019e4dbf7a0b 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_binding.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_binding.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_binding). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_bindings.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_bindings.erl index 9f46ad07d51d..299eebb90a0c 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_bindings.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_bindings.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_bindings). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_channel.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channel.erl index 97b5317af08e..18cda923484e 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_channel.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channel.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_channel). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). 
- %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels.erl index cb30ac819ece..3ca0c7acad2b 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_channels). @@ -14,8 +14,6 @@ -import(rabbit_misc, [pget/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels_vhost.erl index 7cf2f7758f93..11941095d387 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels_vhost.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels_vhost.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_channels_vhost). @@ -16,8 +16,6 @@ -import(rabbit_misc, [pget/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_cluster_name.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_cluster_name.erl index 4dc68945aa86..c886866c80a4 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_cluster_name.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_cluster_name.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_cluster_name). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection.erl index 11d8fc02018a..07728ac438c5 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_connection). 
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_channels.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_channels.erl index f9d84c44bb7a..d655d6d2964a 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_channels.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_channels.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_connection_channels). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_user_name.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_user_name.erl index 12fecdc4ffb6..9c02de5b62d2 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_user_name.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_user_name.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_connection_user_name). @@ -85,7 +85,8 @@ force_close_connection(ReqData, Conn, Pid) -> network -> rabbit_networking:close_connection(Pid, Reason); _ -> - % best effort, this will work for connections to the stream plugin - gen_server:cast(Pid, {shutdown, Reason}) - end, - ok. + %% Best effort; this will work for the following plugins: + %% rabbitmq_stream, rabbitmq_mqtt, rabbitmq_web_mqtt + _ = Pid ! {shutdown, Reason}, + ok + end. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections.erl index 0b3764f35342..caeb7ab42af3 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_connections). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections_vhost.erl index 9aff6b318c82..b847bc520587 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections_vhost.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections_vhost.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom.
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_connections_vhost). @@ -16,8 +16,6 @@ -import(rabbit_misc, [pget/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_consumers.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_consumers.erl index 50af76a0327b..ad375e50c3e1 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_consumers.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_consumers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_mgmt_wm_consumers). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl index b6905ab7e74b..335081c7ad55 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_definitions). @@ -63,7 +63,7 @@ all_definitions(ReqData, Context) -> {rabbitmq_version, rabbit_data_coercion:to_binary(Vsn)}, {product_name, rabbit_data_coercion:to_binary(ProductName)}, {product_version, rabbit_data_coercion:to_binary(ProductVersion)}] ++ - filter( + retain_whitelisted( [{users, rabbit_mgmt_wm_users:users(all)}, {vhosts, rabbit_mgmt_wm_vhosts:basic()}, {permissions, rabbit_mgmt_wm_permissions:permissions()}, @@ -84,26 +84,34 @@ all_definitions(ReqData, Context) -> Context). accept_json(ReqData0, Context) -> - {ok, Body, ReqData} = rabbit_mgmt_util:read_complete_body(ReqData0), - accept(Body, ReqData, Context). + case rabbit_mgmt_util:read_complete_body(ReqData0) of + {error, Reason} -> + BodySizeLimit = application:get_env(rabbitmq_management, max_http_body_size, ?MANAGEMENT_DEFAULT_HTTP_MAX_BODY_SIZE), + _ = rabbit_log:warning("HTTP API: uploaded definition file exceeded the maximum request body limit of ~p bytes. " + "Use the 'management.http.max_body_size' key in rabbitmq.conf to increase the limit if necessary", [BodySizeLimit]), + rabbit_mgmt_util:bad_request(Reason, ReqData0, Context); + {ok, Body, ReqData} -> + accept(Body, ReqData, Context) + end. vhost_definitions(ReqData, VHost, Context) -> - %% rabbit_mgmt_wm_<>:basic/1 filters by VHost if it is available + %% rabbit_mgmt_wm_<>:basic/1 filters by VHost if it is available. + %% TODO: should we stop stripping virtual host? Such files cannot be imported on boot, for example. 
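The accept_json/2 change above turns an oversized definitions upload into a logged warning plus a 400 response. For operators who need a larger limit, a hedged sketch of the corresponding setting, using the application env key read by the code (the value is purely illustrative; the rabbitmq.conf form, management.http.max_body_size, is the one named in the warning message and is assumed to map onto this key via the usual config schema):

%% advanced.config (illustrative value, in bytes)
[
  {rabbitmq_management, [
    {max_http_body_size, 30000000}
  ]}
].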
Xs = [strip_vhost(X) || X <- rabbit_mgmt_wm_exchanges:basic(ReqData), export_exchange(X)], VQs = [Q || Q <- rabbit_mgmt_wm_queues:basic(ReqData), export_queue(Q)], Qs = [strip_vhost(Q) || Q <- VQs], QNames = [{pget(name, Q), pget(vhost, Q)} || Q <- VQs], + %% TODO: should we stop stripping virtual host? Such files cannot be imported on boot, for example. Bs = [strip_vhost(B) || B <- rabbit_mgmt_wm_bindings:basic(ReqData), export_binding(B, QNames)], {ok, Vsn} = application:get_key(rabbit, vsn), Parameters = [strip_vhost( - rabbit_mgmt_format:parameter( - rabbit_mgmt_wm_parameters:fix_shovel_publish_properties(P))) + rabbit_mgmt_format:parameter(P)) || P <- rabbit_runtime_parameters:list(VHost)], rabbit_mgmt_util:reply( [{rabbit_version, rabbit_data_coercion:to_binary(Vsn)}] ++ - filter( + retain_whitelisted( [{parameters, Parameters}, {policies, [strip_vhost(P) || P <- rabbit_mgmt_wm_policies:basic(ReqData)]}, {queues, Qs}, @@ -245,7 +253,7 @@ export_name(_Name) -> true. rw_state() -> [{users, [name, password_hash, hashing_algorithm, tags, limits]}, - {vhosts, [name]}, + {vhosts, [name, description, tags, default_queue_type, metadata]}, {permissions, [user, vhost, configure, write, read]}, {topic_permissions, [user, vhost, exchange, write, read]}, {parameters, [vhost, component, name, value]}, @@ -257,14 +265,14 @@ rw_state() -> {bindings, [source, vhost, destination, destination_type, routing_key, arguments]}]. -filter(Items) -> - [filter_items(N, V, proplists:get_value(N, rw_state())) || {N, V} <- Items]. +retain_whitelisted(Items) -> + [retain_whitelisted_items(N, V, proplists:get_value(N, rw_state())) || {N, V} <- Items]. -filter_items(Name, List, Allowed) -> - {Name, [filter_item(I, Allowed) || I <- List]}. +retain_whitelisted_items(Name, List, Allowed) -> + {Name, [only_whitelisted_for_item(I, Allowed) || I <- List]}. -filter_item(Item, Allowed) -> - [{K, Fact} || {K, Fact} <- Item, lists:member(K, Allowed)]. +only_whitelisted_for_item(Item, Allowed) -> + [{K, Fact} || {K, Fact} <- Item, lists:member(K, Allowed), Fact =/= undefined]. strip_vhost(Item) -> lists:keydelete(vhost, 1, Item). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_deprecated_features.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_deprecated_features.erl new file mode 100644 index 000000000000..23bde21c6b2d --- /dev/null +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_deprecated_features.erl @@ -0,0 +1,55 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_mgmt_wm_deprecated_features). + +-export([init/2, to_json/2, + content_types_provided/2, + is_authorized/2, allowed_methods/2]). +-export([variances/2]). +-export([deprecated_features/1]). + +-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). +-ifdef(TEST). +-export([feature_is_used/1, + feature_is_unused/1]). +-endif. +%%-------------------------------------------------------------------- + +init(Req, [Mode]) -> + {cowboy_rest, + rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), + {Mode, #context{}}}. + +variances(Req, Context) -> + {[<<"accept-encoding">>, <<"origin">>], Req, Context}. 
+ +content_types_provided(ReqData, Context) -> + {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}. + +allowed_methods(ReqData, Context) -> + {[<<"HEAD">>, <<"GET">>, <<"OPTIONS">>], ReqData, Context}. + +to_json(ReqData, {Mode, Context}) -> + rabbit_mgmt_util:reply_list(deprecated_features(Mode), ReqData, Context). + +is_authorized(ReqData, {Mode, Context}) -> + {Res, Req2, Context2} = rabbit_mgmt_util:is_authorized(ReqData, Context), + {Res, Req2, {Mode, Context2}}. + +%%-------------------------------------------------------------------- + +deprecated_features(Mode) -> + rabbit_depr_ff_extra:cli_info(Mode). + +-ifdef(TEST). +feature_is_used(_Args) -> + true. + +feature_is_unused(_Args) -> + false. +-endif. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_environment.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_environment.erl index 58e2e635eff2..0cc931e971b2 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_environment.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_environment.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_environment). @@ -11,8 +11,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange.erl index faa28899e5ea..d6e4f9bf93d0 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_exchange). @@ -14,8 +14,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange_publish.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange_publish.erl index fcdca19ae5ec..5a2dc27aa360 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange_publish.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange_publish.erl @@ -2,12 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_exchange_publish). 
--export([init/2, resource_exists/2, is_authorized/2, +-export([init/2, resource_exists/2, allow_missing_post/2, is_authorized/2, allowed_methods/2, content_types_provided/2, accept_content/2, content_types_accepted/2]). -export([variances/2]). @@ -31,10 +31,13 @@ content_types_provided(ReqData, Context) -> resource_exists(ReqData, Context) -> {case rabbit_mgmt_wm_exchange:exchange(ReqData) of - not_found -> false; + not_found -> raise_not_found(ReqData, Context); _ -> true end, ReqData, Context}. +allow_missing_post(ReqData, Context) -> + {false, ReqData, Context}. + content_types_accepted(ReqData, Context) -> {[{'*', accept_content}], ReqData, Context}. @@ -101,6 +104,18 @@ bad({{coordinator_unavailable, _}, _}, ReqData, Context) -> is_authorized(ReqData, Context) -> rabbit_mgmt_util:is_authorized_vhost(ReqData, Context). +raise_not_found(ReqData, Context) -> + ErrorMessage = case rabbit_mgmt_util:vhost(ReqData) of + not_found -> + "vhost_not_found"; + _ -> + "exchange_not_found" + end, + rabbit_mgmt_util:not_found( + rabbit_data_coercion:to_binary(ErrorMessage), + ReqData, + Context). + %%-------------------------------------------------------------------- decode(Payload, <<"string">>) -> Payload; diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchanges.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchanges.erl index 3b1b875b2608..57816670e12b 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchanges.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchanges.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_exchanges). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - -define(DEFAULT_SORT, ["vhost", "name"]). -define(BASIC_COLUMNS, ["vhost", "name", "type", "durable", "auto_delete", diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_extensions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_extensions.erl index ca615a762dd4..15d5b86e2a27 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_extensions.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_extensions.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_extensions). @@ -11,8 +11,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). 
- %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flag_enable.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flag_enable.erl index 60f044d83bf3..b0c0af0e514b 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flag_enable.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flag_enable.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_feature_flag_enable). @@ -13,8 +13,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _Args) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flags.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flags.erl index 84376a082f6f..4fc295da4241 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flags.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flags.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_feature_flags). @@ -14,8 +14,6 @@ -export([feature_flags/0]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _Args) -> @@ -36,8 +34,7 @@ to_json(ReqData, Context) -> rabbit_mgmt_util:reply_list(feature_flags(), ReqData, Context). is_authorized(ReqData, Context) -> - {Res, Req2, Context2} = rabbit_mgmt_util:is_authorized_admin(ReqData, Context), - {Res, Req2, Context2}. + rabbit_mgmt_util:is_authorized(ReqData, Context). %%-------------------------------------------------------------------- diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameter.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameter.erl index c7b464b150cc..fc3252c31685 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameter.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameter.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_global_parameter). 
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameters.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameters.erl index fcdc07496b44..ec8f26d5c95a 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameters.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameters.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_global_parameters). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_hash_password.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_hash_password.erl index ff7fee225d19..404d7635891a 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_hash_password.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_hash_password.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_hash_password). @@ -11,8 +11,6 @@ -export([variances/2, allowed_methods/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_alarms.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_alarms.erl index 78bc36848547..7a7e6bad227f 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_alarms.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_alarms.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% An HTTP API counterpart of 'rabbitmq-diagnostics check_alarms' diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_certificate_expiration.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_certificate_expiration.erl index 9d1299095d8c..8ccb9e222128 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_certificate_expiration.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_certificate_expiration.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% An HTTP API counterpart of 'rabbitmq-diagnostics check_certificate_expiration' diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_local_alarms.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_local_alarms.erl index a8f5ed36c7a1..098474afd5fe 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_local_alarms.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_local_alarms.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% An HTTP API counterpart of 'rabbitmq-dignoastics check_local_alarms' diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl deleted file mode 100644 index eea5c6f8dc17..000000000000 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl +++ /dev/null @@ -1,54 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - -%% An HTTP API counterpart of 'rabbitmq-diagnostics check_if_node_is_quorum_critical' --module(rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical). - --export([init/2, to_json/2, content_types_provided/2, is_authorized/2]). --export([resource_exists/2]). --export([variances/2]). - --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). - -%%-------------------------------------------------------------------- - -init(Req, _State) -> - {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}. - -variances(Req, Context) -> - {[<<"accept-encoding">>, <<"origin">>], Req, Context}. - -content_types_provided(ReqData, Context) -> - {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}. - -resource_exists(ReqData, Context) -> - {true, ReqData, Context}. - -to_json(ReqData, Context) -> - case rabbit_nodes:is_single_node_cluster() of - true -> - rabbit_mgmt_util:reply([{status, ok}, - {reason, <<"single node cluster">>}], ReqData, Context); - false -> - case rabbit_amqqueue:list_local_mirrored_classic_without_synchronised_mirrors_for_cli() of - [] -> - rabbit_mgmt_util:reply([{status, ok}], ReqData, Context); - Qs when length(Qs) > 0 -> - Msg = <<"There are classic mirrored queues without online synchronised mirrors">>, - failure(Msg, Qs, ReqData, Context) - end - end. - -failure(Message, Qs, ReqData, Context) -> - {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed}, - {reason, Message}, - {queues, Qs}], - ReqData, Context), - {stop, cowboy_req:reply(503, #{}, Response, ReqData1), Context1}. - -is_authorized(ReqData, Context) -> - rabbit_mgmt_util:is_authorized(ReqData, Context). 
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl index 97726721c9dc..8f5f30f41b3e 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% An HTTP API counterpart of 'rabbitmq-diagnostics check_if_node_is_quorum_critical' diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_port_listener.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_port_listener.erl index b2a0fb303ac1..f6ca4656188d 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_port_listener.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_port_listener.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% An HTTP API counterpart of 'rabbitmq-diagnostics check_port_listener' diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_protocol_listener.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_protocol_listener.erl index 1da87bb4b6cc..7e1f6ab9a2b0 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_protocol_listener.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_protocol_listener.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% An HTTP API counterpart of 'rabbitmq-diagnostics check_protocol_listener' diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_virtual_hosts.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_virtual_hosts.erl index 3e6d1d005c54..e642e1f75f81 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_virtual_hosts.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_virtual_hosts.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% An HTTP API counterpart of 'rabbitmq-diagnostics check_virtual_hosts' diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl index 78a18909258e..b5ee8cf58b9e 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This original One True Health Check™ has been deprecated as too coarse-grained, diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_limit.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limit.erl index 844c0b6a51d8..afe7b5190d7e 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_limit.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limit.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_limit). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl index 9d9568538f92..8f6230fc55e7 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_limits). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_login.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_login.erl index 03477224df30..be8fb9f2fd94 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_login.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_login.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_login). @@ -13,8 +13,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). 
- %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_node.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node.erl index f08eb45b9aea..3a3c6e5b8bc6 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_node.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_node). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> @@ -26,10 +24,7 @@ content_types_provided(ReqData, Context) -> {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}. resource_exists(ReqData, Context) -> - {case node0(ReqData) of - not_found -> false; - _ -> true - end, ReqData, Context}. + {rabbit_mgmt_nodes:node_exists(ReqData), ReqData, Context}. to_json(ReqData, Context) -> rabbit_mgmt_util:reply(node0(ReqData), ReqData, Context). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl index 4b4fd022ebb7..0609dba2dbb0 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_node_memory). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, [Mode]) -> @@ -26,7 +24,7 @@ content_types_provided(ReqData, Context) -> {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}. resource_exists(ReqData, Context) -> - {node_exists(ReqData, get_node(ReqData)), ReqData, Context}. + {rabbit_mgmt_nodes:node_exists(ReqData), ReqData, Context}. to_json(ReqData, {Mode, Context}) -> rabbit_mgmt_util:reply(augment(Mode, ReqData), ReqData, {Mode, Context}). @@ -39,16 +37,9 @@ is_authorized(ReqData, {Mode, Context}) -> get_node(ReqData) -> list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))). -node_exists(ReqData, Node) -> - case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData), - proplists:get_value(name, N) == Node] of - [] -> false; - [_] -> true - end. 
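Several node-scoped handlers in this patch (rabbit_mgmt_wm_node, rabbit_mgmt_wm_node_memory, rabbit_mgmt_wm_node_memory_ets) replace their local lookups with a shared rabbit_mgmt_nodes:node_exists/1. That module is not part of this hunk; going by the helper deleted above, a minimal sketch of the behaviour it has to provide would be:

%% Sketch only, derived from the removed node_exists/2 and get_node/1 above;
%% the real rabbit_mgmt_nodes module is not shown in this diff.
node_exists(ReqData) ->
    Node = list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))),
    lists:any(fun(N) -> proplists:get_value(name, N) =:= Node end,
              rabbit_mgmt_wm_nodes:all_nodes(ReqData)).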
- augment(Mode, ReqData) -> Node = get_node(ReqData), - case node_exists(ReqData, Node) of + case rabbit_mgmt_nodes:node_exists(ReqData) of false -> not_found; true -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl index cd602226ee10..92efb193a3bb 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_node_memory_ets). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, [Mode]) -> @@ -26,7 +24,7 @@ content_types_provided(ReqData, Context) -> {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}. resource_exists(ReqData, Context) -> - {node_exists(ReqData, get_node(ReqData)), ReqData, Context}. + {rabbit_mgmt_nodes:node_exists(ReqData), ReqData, Context}. to_json(ReqData, {Mode, Context}) -> rabbit_mgmt_util:reply(augment(Mode, ReqData), ReqData, {Mode, Context}). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_nodes.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_nodes.erl index feb583b0c821..6d898737459b 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_nodes.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_nodes.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_nodes). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policies.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policies.erl index 48979cc662bd..777389faac2a 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policies.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policies.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_operator_policies). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). 
- %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policy.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policy.erl index 8df812c0a6e7..0fe7f20957a3 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policy.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policy.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_operator_policy). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_overview.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_overview.erl index 4b2458555767..24ab67ce8f49 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_overview.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_overview.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_overview). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl index 88f49f872357..a30430261a56 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_parameter). @@ -40,8 +40,7 @@ resource_exists(ReqData, Context) -> end, ReqData, Context}. to_json(ReqData, Context) -> - rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter( - rabbit_mgmt_wm_parameters:fix_shovel_publish_properties(parameter(ReqData))), + rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter(parameter(ReqData)), ReqData, Context). accept_content(ReqData0, Context = #context{user = User}) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl index e88a179657cc..cf0ddb357470 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl @@ -2,19 +2,16 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_parameters). -export([init/2, to_json/2, content_types_provided/2, is_authorized/2, resource_exists/2, basic/1]). --export([fix_shovel_publish_properties/1]). 
-export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> @@ -42,25 +39,6 @@ is_authorized(ReqData, Context) -> %%-------------------------------------------------------------------- -%% Hackish fix to make sure we return a JSON object instead of an empty list -%% when the publish-properties value is empty. Should be removed in 3.7.0 -%% when we switch to a new JSON library. -fix_shovel_publish_properties(P) -> - case lists:keyfind(component, 1, P) of - {_, <<"shovel">>} -> - case lists:keytake(value, 1, P) of - {value, {_, Values}, P2} -> - case lists:keytake(<<"publish-properties">>, 1, Values) of - {_, {_, []}, Values2} -> - P2 ++ [{value, Values2 ++ [{<<"publish-properties">>, empty_struct}]}]; - _ -> - P - end; - _ -> P - end; - _ -> P - end. - basic(ReqData) -> Raw = case rabbit_mgmt_util:id(component, ReqData) of none -> rabbit_runtime_parameters:list(); @@ -74,5 +52,5 @@ basic(ReqData) -> end, case Raw of not_found -> not_found; - _ -> [rabbit_mgmt_format:parameter(fix_shovel_publish_properties(P)) || P <- Raw] + _ -> [rabbit_mgmt_format:parameter(P) || P <- Raw] end. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permission.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permission.erl index 64885d623e40..48284b9fde22 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permission.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permission.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_permission). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions.erl index f39744a5bfbc..192add97ece6 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_permissions). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_user.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_user.erl index f08f5e0f180b..cbfdc887af9e 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_user.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_user.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_permissions_user). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_vhost.erl index 1253585df0b3..5915ab121cff 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_vhost.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_vhost.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_permissions_vhost). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_policies.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_policies.erl index 0f9a788beea1..352c2da6b7fd 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_policies.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_policies.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_policies). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_policy.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_policy.erl index 467f6f6f14ec..bc1bf31b5905 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_policy.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_policy.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_policy). @@ -34,10 +34,11 @@ allowed_methods(ReqData, Context) -> {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}. resource_exists(ReqData, Context) -> - {case policy(ReqData) of - not_found -> false; - _ -> true - end, ReqData, Context}. + Result = case policy(ReqData) of + not_found -> false; + _ -> true + end, + {Result, ReqData, Context}. 
to_json(ReqData, Context) -> rabbit_mgmt_util:reply(policy(ReqData), ReqData, Context). @@ -76,7 +77,10 @@ is_authorized(ReqData, Context) -> %%-------------------------------------------------------------------- policy(ReqData) -> - rabbit_policy:lookup( - rabbit_mgmt_util:vhost(ReqData), name(ReqData)). + case rabbit_mgmt_util:vhost(ReqData) of + not_found -> not_found; + none -> not_found; + Value -> rabbit_policy:lookup(Value, name(ReqData)) + end. name(ReqData) -> rabbit_mgmt_util:id(name, ReqData). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue.erl index 0184802f6e7b..5e17ca81ed30 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_queue). @@ -14,8 +14,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> @@ -50,8 +48,13 @@ to_json(ReqData, Context) -> rabbit_mgmt_format:strip_pids(Q)), rabbit_mgmt_util:reply(ensure_defaults(Payload), ReqData, Context); true -> - rabbit_mgmt_util:reply(rabbit_mgmt_format:strip_pids(queue(ReqData)), - ReqData, Context) + Q = case rabbit_mgmt_util:enable_queue_totals(ReqData) of + false -> queue(ReqData); + true -> queue_with_totals(ReqData) + end, + rabbit_mgmt_util:reply( + rabbit_mgmt_format:strip_pids(Q), + ReqData, Context) end catch {error, invalid_range_parameters, Reason} -> @@ -110,10 +113,26 @@ queue(ReqData) -> VHost -> queue(VHost, rabbit_mgmt_util:id(queue, ReqData)) end. - queue(VHost, QName) -> Name = rabbit_misc:r(VHost, queue, QName), case rabbit_amqqueue:lookup(Name) of {ok, Q} -> rabbit_mgmt_format:queue(Q); {error, not_found} -> not_found end. + +queue_with_totals(ReqData) -> + case rabbit_mgmt_util:vhost(ReqData) of + not_found -> not_found; + VHost -> queue_with_totals(VHost, rabbit_mgmt_util:id(queue, ReqData)) + end. + +queue_with_totals(VHost, QName) -> + Name = rabbit_misc:r(VHost, queue, QName), + case rabbit_amqqueue:lookup(Name) of + {ok, Q} -> QueueInfo = rabbit_amqqueue:info(Q, + [name, durable, auto_delete, exclusive, + owner_pid, arguments, type, state, + policy, totals, online, type_specific]), + rabbit_mgmt_format:queue_info(QueueInfo); + {error, not_found} -> not_found + end. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_actions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_actions.erl index 5f464da1faf1..82e0a0ea5a86 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_actions.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_actions.erl @@ -2,17 +2,16 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -module(rabbit_mgmt_wm_queue_actions). --export([init/2, resource_exists/2, is_authorized/2, +-export([init/2, resource_exists/2, is_authorized/2, allow_missing_post/2, allowed_methods/2, content_types_accepted/2, accept_content/2]). -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("rabbit/include/amqqueue.hrl"). %%-------------------------------------------------------------------- @@ -28,10 +27,13 @@ allowed_methods(ReqData, Context) -> resource_exists(ReqData, Context) -> {case rabbit_mgmt_wm_queue:queue(ReqData) of - not_found -> false; + not_found -> raise_not_found(ReqData, Context); _ -> true end, ReqData, Context}. +allow_missing_post(ReqData, Context) -> + {false, ReqData, Context}. + content_types_accepted(ReqData, Context) -> {[{'*', accept_content}], ReqData, Context}. @@ -52,17 +54,18 @@ do_it(ReqData0, Context) -> is_authorized(ReqData, Context) -> rabbit_mgmt_util:is_authorized_admin(ReqData, Context). +raise_not_found(ReqData, Context) -> + ErrorMessage = case rabbit_mgmt_util:vhost(ReqData) of + not_found -> + "vhost_not_found"; + _ -> + "queue_not_found" + end, + rabbit_mgmt_util:not_found( + rabbit_data_coercion:to_binary(ErrorMessage), + ReqData, + Context). %%-------------------------------------------------------------------- -action(<<"sync">>, Q, ReqData, Context) when ?is_amqqueue(Q) -> - QPid = amqqueue:get_pid(Q), - spawn(fun() -> rabbit_amqqueue:sync_mirrors(QPid) end), - {true, ReqData, Context}; - -action(<<"cancel_sync">>, Q, ReqData, Context) when ?is_amqqueue(Q) -> - QPid = amqqueue:get_pid(Q), - _ = rabbit_amqqueue:cancel_sync_mirrors(QPid), - {true, ReqData, Context}; - action(Else, _Q, ReqData, Context) -> rabbit_mgmt_util:bad_request({unknown, Else}, ReqData, Context). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_get.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_get.erl index d5562e9fb9cd..d08439b3a8a4 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_get.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_get.erl @@ -2,12 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_queue_get). --export([init/2, resource_exists/2, is_authorized/2, +-export([init/2, resource_exists/2, is_authorized/2, allow_missing_post/2, allowed_methods/2, accept_content/2, content_types_provided/2, content_types_accepted/2]). -export([variances/2]). @@ -31,10 +31,13 @@ content_types_provided(ReqData, Context) -> resource_exists(ReqData, Context) -> {case rabbit_mgmt_wm_queue:queue(ReqData) of - not_found -> false; + not_found -> raise_not_found(ReqData, Context); _ -> true end, ReqData, Context}. +allow_missing_post(ReqData, Context) -> + {false, ReqData, Context}. + content_types_accepted(ReqData, Context) -> {[{'*', accept_content}], ReqData, Context}. @@ -64,7 +67,7 @@ do_it(ReqData0, Context) -> end, Reply = basic_gets(Count, Ch, Q, AckMode, Enc, Trunc), - maybe_rejects(Reply, Ch, AckMode), + maybe_return(Reply, Ch, AckMode), rabbit_mgmt_util:reply(remove_delivery_tag(Reply), ReqData, Context) end) @@ -96,11 +99,11 @@ parse_ackmode(reject_requeue_true) -> false. 
% the messages must rejects later, % because we get always the same message if the % messages are requeued inside basic_get/5 -maybe_rejects(R, Ch, AckMode) -> +maybe_return(R, Ch, AckMode) -> lists:foreach(fun(X) -> - maybe_reject(Ch, AckMode, - proplists:get_value(delivery_tag, X)) - end, R). + maybe_reject_or_nack(Ch, AckMode, + proplists:get_value(delivery_tag, X)) + end, R). % removes the delivery_tag from the reply. % it is not necessary @@ -109,12 +112,18 @@ remove_delivery_tag([H|T]) -> [proplists:delete(delivery_tag, H) | [X || X <- remove_delivery_tag(T)]]. -maybe_reject(Ch, AckMode, DeliveryTag) when AckMode == reject_requeue_true; - AckMode == reject_requeue_false -> +maybe_reject_or_nack(Ch, AckMode, DeliveryTag) + when AckMode == reject_requeue_true; + AckMode == reject_requeue_false -> + amqp_channel:call(Ch, + #'basic.reject'{delivery_tag = DeliveryTag, + requeue = ackmode_to_requeue(AckMode)}); +maybe_reject_or_nack(Ch, ack_requeue_true, DeliveryTag) -> amqp_channel:call(Ch, - #'basic.reject'{delivery_tag = DeliveryTag, - requeue = ackmode_to_requeue(AckMode)}); -maybe_reject(_Ch, _AckMode, _DeliveryTag) -> ok. + #'basic.nack'{delivery_tag = DeliveryTag, + multiple = false, + requeue = true}); +maybe_reject_or_nack(_Ch, _AckMode, _DeliveryTag) -> ok. basic_get(Ch, Q, AckMode, Enc, Trunc) -> @@ -143,6 +152,17 @@ basic_get(Ch, Q, AckMode, Enc, Trunc) -> is_authorized(ReqData, Context) -> rabbit_mgmt_util:is_authorized_vhost(ReqData, Context). +raise_not_found(ReqData, Context) -> + ErrorMessage = case rabbit_mgmt_util:vhost(ReqData) of + not_found -> + "vhost_not_found"; + _ -> + "queue_not_found" + end, + rabbit_mgmt_util:not_found( + rabbit_data_coercion:to_binary(ErrorMessage), + ReqData, + Context). %%-------------------------------------------------------------------- maybe_truncate(Payload, none) -> Payload; diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_purge.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_purge.erl index 4020ec2f83e2..9c82f4614b37 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_purge.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_purge.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_queue_purge). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queues.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queues.erl index 858750d49d3c..30962ca830ff 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queues.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queues.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -module(rabbit_mgmt_wm_queues). @@ -13,18 +13,30 @@ augmented/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbit/include/amqqueue.hrl"). --define(BASIC_COLUMNS, ["vhost", "name", "durable", "auto_delete", "exclusive", - "owner_pid", "arguments", "pid", "state"]). +-define(BASIC_COLUMNS, + ["vhost", + "name", + "node", + "durable", + "auto_delete", + "exclusive", + "owner_pid", + "arguments", + "type", + "pid", + "state"]). -define(DEFAULT_SORT, ["vhost", "name"]). %%-------------------------------------------------------------------- -init(Req, _State) -> - {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}. +init(Req, State) -> + Mode = case State of + [] -> basic; + [detailed] -> detailed + end, + {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), {Mode, #context{}}}. variances(Req, Context) -> {[<<"accept-encoding">>, <<"origin">>], Req, Context}. @@ -32,79 +44,131 @@ variances(Req, Context) -> content_types_provided(ReqData, Context) -> {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}. -resource_exists(ReqData, Context) -> - {case queues0(ReqData) of +resource_exists(ReqData, {Mode, Context}) -> + %% just checking that the vhost requested exists + {case rabbit_mgmt_util:all_or_one_vhost(ReqData, fun (_) -> [] end) of vhost_not_found -> false; _ -> true - end, ReqData, Context}. + end, ReqData, {Mode, Context}}. -to_json(ReqData, Context) -> +to_json(ReqData, {Mode, Context}) -> try Basic = basic_vhost_filtered(ReqData, Context), Data = rabbit_mgmt_util:augment_resources(Basic, ?DEFAULT_SORT, ?BASIC_COLUMNS, ReqData, - Context, fun augment/2), - rabbit_mgmt_util:reply(Data, ReqData, Context) + Context, augment(Mode)), + rabbit_mgmt_util:reply(Data, ReqData, {Mode, Context}) catch {error, invalid_range_parameters, Reason} -> rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, - Context) + {Mode, Context}) end. -is_authorized(ReqData, Context) -> - rabbit_mgmt_util:is_authorized_vhost(ReqData, Context). +is_authorized(ReqData, {Mode, Context}) -> + {Res, RD2, C2} = rabbit_mgmt_util:is_authorized_vhost(ReqData, Context), + {Res, RD2, {Mode, C2}}. %%-------------------------------------------------------------------- %% Exported functions basic(ReqData) -> + %% rabbit_nodes:list_running/1 is a potentially slow function that performs + %% a cluster wide query with a reasonably long (10s) timeout. + %% TODO: replace with faster approximate function + Running = rabbit_nodes:list_running(), + Ctx = #{running_nodes => Running}, + FmtQ = fun (Q) -> rabbit_mgmt_format:queue(Q, Ctx) end, case rabbit_mgmt_util:disable_stats(ReqData) of false -> - [rabbit_mgmt_format:queue(Q) || Q <- queues0(ReqData)] ++ - [rabbit_mgmt_format:queue(amqqueue:set_state(Q, down)) || - Q <- down_queues(ReqData)]; + list_queues(ReqData, Running, FmtQ, FmtQ); true -> case rabbit_mgmt_util:enable_queue_totals(ReqData) of false -> - [rabbit_mgmt_format:queue(Q) ++ policy(Q) || Q <- queues0(ReqData)] ++ - [rabbit_mgmt_format:queue(amqqueue:set_state(Q, down)) || - Q <- down_queues(ReqData)]; + list_queues(ReqData, Running, + fun(Q) -> + FmtQ(Q) ++ + %% TODO: just add policy name in + %% rabbit_mgmt_format:queue/1? 
+ policy(Q) + end, + FmtQ); true -> - [rabbit_mgmt_format:queue_info(Q) || Q <- queues_with_totals(ReqData)] ++ - [rabbit_mgmt_format:queue(amqqueue:set_state(Q, down)) || - Q <- down_queues(ReqData)] + %% TODO: this is not optimised like the other code paths + %% most likely we can avoid the collector pattern by + %% simply querying the queue_metrics table for infos + [rabbit_mgmt_format:queue_info(Q) + || Q <- queues_with_totals(ReqData)] ++ + [FmtQ(amqqueue:set_state(Q, down)) || + Q <- down_queues(ReqData, Running)] end end. +list_queues(ReqData, Running, FormatRunningFun, FormatDownFun) -> + [begin + Pid = amqqueue:get_pid(Q), + %% only queues whose leader pid is a on a non running node + %% are considered "down", all other states should be passed + %% as they are and the queue type impl will decide how to + %% emit them. + case not rabbit_amqqueue:is_local_to_node_set(Pid, Running) of + false -> + FormatRunningFun(Q); + true -> + FormatDownFun(amqqueue:set_state(Q, down)) + end + end || Q <- all_queues(ReqData)]. + + augmented(ReqData, Context) -> - augment(rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context), ReqData). + Fun = augment(basic), + Fun(rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context), ReqData). %%-------------------------------------------------------------------- %% Private helpers -augment(Basic, ReqData) -> - case rabbit_mgmt_util:disable_stats(ReqData) of - false -> - rabbit_mgmt_db:augment_queues(Basic, rabbit_mgmt_util:range_ceil(ReqData), - basic); - true -> - Basic +augment(Mode) -> + fun(Basic, ReqData) -> + case rabbit_mgmt_util:disable_stats(ReqData) of + false -> + %% The reduced endpoint needs to sit behind a feature flag, + %% as it calls a different data aggregation function + %% that is used against all cluster nodes. + %% Data can be collected locally even if other nodes in the + %% cluster do not, it's just a local ETS table. + %% But it can't be queried until all nodes enable the FF. + IsEnabled = rabbit_feature_flags:is_enabled(detailed_queues_endpoint), + Stats = case {IsEnabled, Mode, rabbit_mgmt_util:columns(ReqData)} of + {false, _, _} -> detailed; + {_, detailed, _} -> detailed; + {_, _, all} -> basic; + _ -> detailed + end, + rabbit_mgmt_db:augment_queues(Basic, + rabbit_mgmt_util:range_ceil(ReqData), + Stats); + true -> + Basic + end end. basic_vhost_filtered(ReqData, Context) -> rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context). -queues0(ReqData) -> - rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_amqqueue:list/1). +all_queues(ReqData) -> + rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_amqqueue:list_all/1). queues_with_totals(ReqData) -> rabbit_mgmt_util:all_or_one_vhost(ReqData, fun collect_info_all/1). collect_info_all(VHostPath) -> - rabbit_amqqueue:collect_info_all(VHostPath, [name, durable, auto_delete, exclusive, owner_pid, arguments, type, state, policy, totals, type_specific]). - -down_queues(ReqData) -> - rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_amqqueue:list_down/1). + rabbit_amqqueue:collect_info_all(VHostPath, + [name, durable, auto_delete, exclusive, + owner_pid, arguments, type, state, + policy, totals, online, type_specific]). + +down_queues(ReqData, Running) -> + Fun = fun(VhostPath) -> rabbit_amqqueue:list_down(VhostPath, Running) end, + rabbit_mgmt_util:all_or_one_vhost(ReqData, Fun). 
policy(Q) -> case rabbit_policy:name(Q) of diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_add_member.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_add_member.erl index 2e6376189f9c..15a7d3ff621c 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_add_member.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_add_member.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_quorum_queue_replicas_add_member). @@ -11,8 +11,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - -define(TIMEOUT, 30_000). init(Req, _State) -> @@ -38,11 +36,17 @@ accept_content(ReqData, Context) -> QName = rabbit_mgmt_util:id(queue, ReqData), Res = rabbit_mgmt_util:with_decode( [node], ReqData, Context, - fun([NewReplicaNode], _Body, _ReqData) -> + fun([NewReplicaNode], Body, _ReqData) -> + Membership = maps:get(<<"membership">>, Body, promotable), rabbit_amqqueue:with( rabbit_misc:r(VHost, queue, QName), fun(_Q) -> - rabbit_quorum_queue:add_member(VHost, QName, rabbit_data_coercion:to_atom(NewReplicaNode), ?TIMEOUT) + rabbit_quorum_queue:add_member( + VHost, + QName, + rabbit_data_coercion:to_atom(NewReplicaNode), + rabbit_data_coercion:to_atom(Membership), + ?TIMEOUT) end) end), case Res of diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_delete_member.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_delete_member.erl index ef4235cc32f8..6d5630342ee6 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_delete_member.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_delete_member.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_quorum_queue_replicas_delete_member). @@ -11,8 +11,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - init(Req, _State) -> {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_grow.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_grow.erl index 7b87604fab40..226709d745e3 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_grow.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_grow.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_quorum_queue_replicas_grow). @@ -11,8 +11,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - -define(TIMEOUT, 30_000). init(Req, _State) -> @@ -39,12 +37,14 @@ accept_content(ReqData, Context) -> NewReplicaNode = rabbit_mgmt_util:id(node, ReqData), rabbit_mgmt_util:with_decode( [vhost_pattern, queue_pattern, strategy], ReqData, Context, - fun([VHPattern, QPattern, Strategy], _Body, _ReqData) -> + fun([VHPattern, QPattern, Strategy], Body, _ReqData) -> + Membership = maps:get(<<"membership">>, Body, promotable), rabbit_quorum_queue:grow( rabbit_data_coercion:to_atom(NewReplicaNode), VHPattern, QPattern, - rabbit_data_coercion:to_atom(Strategy)) + rabbit_data_coercion:to_atom(Strategy), + rabbit_data_coercion:to_atom(Membership)) end), {true, ReqData, Context}. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_shrink.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_shrink.erl index 19925dfc1f0b..68647ba82cfa 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_shrink.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_replicas_shrink.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_quorum_queue_replicas_shrink). @@ -11,8 +11,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - init(Req, _State) -> {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_rebalance_queues.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_rebalance_queues.erl index 66c2a3b7bb94..a6265da7536c 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_rebalance_queues.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_rebalance_queues.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_rebalance_queues). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_redirect.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_redirect.erl index cf54b2412206..d8fb3783437d 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_redirect.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_redirect.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_redirect). 
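%% Illustrative sketch (not part of the patch above, shown only to clarify the
%% quorum-queue replica changes in the preceding hunks): the replicas/add and
%% grow endpoints now read an optional "membership" field from the decoded
%% request body, defaulting to promotable when the field is absent, and pass it
%% through to rabbit_quorum_queue. A hedged approximation of the add-member
%% flow, assuming Body is the decoded JSON payload as a map:
%%
%%   add_member_example(VHost, QName, Body) ->
%%       %% node is required by the endpoint; membership is optional
%%       Node = maps:get(<<"node">>, Body),
%%       Membership = maps:get(<<"membership">>, Body, promotable),
%%       rabbit_quorum_queue:add_member(
%%         VHost, QName,
%%         rabbit_data_coercion:to_atom(Node),
%%         rabbit_data_coercion:to_atom(Membership),
%%         30_000).
%%
%% The grow endpoint follows the same pattern, forwarding the coerced
%% membership atom as the extra argument added to rabbit_quorum_queue:grow/5.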
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_reset.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_reset.erl index 0a50149146f2..429ef294de50 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_reset.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_reset.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_reset). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_static.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_static.erl index 7b9b1049877a..12ebf4b2e130 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_static.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_static.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2010-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Alias for cowboy_static that accepts a list of directories diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permission.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permission.erl index a1a527d27e0d..4db8e9e92aa3 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permission.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permission.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_topic_permission). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions.erl index 9fe2285d6160..6128e8778424 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_topic_permissions). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). 
- %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_user.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_user.erl index 8428bbef33e7..0b30d9d9dc99 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_user.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_user.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_topic_permissions_user). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_vhost.erl index bff931332cf0..e3185cb6097c 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_vhost.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_vhost.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_topic_permissions_vhost). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_user.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user.erl index 5dbb208649b1..d6a1c9242c11 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_user.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_user). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limit.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limit.erl index d64ed199a472..adb398bc8665 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limit.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limit.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. %% -module(rabbit_mgmt_wm_user_limit). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limits.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limits.erl index 1dc8aa9881c7..2feb8d2a0384 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limits.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limits.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_user_limits). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_users.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_users.erl index 2e5604da9ec9..4ec8988991ae 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_users.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_users.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_users). @@ -16,8 +16,6 @@ -import(rabbit_misc, [pget/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - -define(BASIC_COLUMNS, ["hashing_algorithm", "rabbit_password_hashing_sha256", "limits", diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_users_bulk_delete.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_users_bulk_delete.erl index dc293dd6f794..247f663ecf9c 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_users_bulk_delete.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_users_bulk_delete.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_users_bulk_delete). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost.erl index 57ab7ecd69ae..3d15f116e9a0 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_vhost). 
@@ -72,14 +72,18 @@ accept_content(ReqData0, Context = #context{user = #user{username = Username}}) case rabbit_vhost:put_vhost(Name, Description, Tags, DefaultQT, Trace, Username) of ok -> {true, ReqData, Context}; - {error, timeout} = E -> + {error, timeout} -> rabbit_mgmt_util:internal_server_error( - "Timed out while waiting for the vhost to initialise", E, + timeout, + "Timed out waiting for the vhost to initialise", ReqData0, Context); {error, E} -> + Reason = iolist_to_binary( + io_lib:format( + "Error occurred while adding vhost: ~tp", + [E])), rabbit_mgmt_util:internal_server_error( - "Error occured while adding vhost", E, - ReqData0, Context); + Reason, ReqData0, Context); {'EXIT', {vhost_limit_exceeded, Explanation}} -> rabbit_mgmt_util:bad_request(list_to_binary(Explanation), ReqData, Context) @@ -88,12 +92,22 @@ accept_content(ReqData0, Context = #context{user = #user{username = Username}}) delete_resource(ReqData, Context = #context{user = #user{username = Username}}) -> VHost = id(ReqData), - try - rabbit_vhost:delete(VHost, Username) - catch _:{error, {no_such_vhost, _}} -> - ok - end, - {true, ReqData, Context}. + case rabbit_vhost:delete(VHost, Username) of + ok -> + {true, ReqData, Context}; + {error, timeout} -> + rabbit_mgmt_util:internal_server_error( + timeout, + "Timed out waiting for the vhost to be deleted", + ReqData, Context); + {error, E} -> + Reason = iolist_to_binary( + io_lib:format( + "Error occurred while deleting vhost: ~tp", + [E])), + rabbit_mgmt_util:internal_server_error( + Reason, ReqData, Context) + end. is_authorized(ReqData, Context) -> rabbit_mgmt_util:is_authorized_admin(ReqData, Context). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost_restart.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost_restart.erl index 985d09a680d1..7533dfde1a61 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost_restart.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost_restart.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2011-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_vhost_restart). @@ -12,8 +12,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhosts.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhosts.erl index 52c13d164a5c..a0779a2af36a 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhosts.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhosts.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_vhosts). @@ -12,8 +12,6 @@ -export([basic/0, augmented/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). 
- -define(BASIC_COLUMNS, ["name", "tracing", "pid"]). -define(DEFAULT_SORT, ["name"]). @@ -57,7 +55,7 @@ augment(Basic, ReqData) -> augmented(ReqData, #context{user = User}) -> case rabbit_mgmt_util:disable_stats(ReqData) of - false -> + false -> rabbit_mgmt_db:augment_vhosts( [rabbit_vhost:info(V) || V <- rabbit_mgmt_util:list_visible_vhosts(User)], rabbit_mgmt_util:range(ReqData)); @@ -66,4 +64,4 @@ augmented(ReqData, #context{user = User}) -> end. basic() -> - rabbit_vhost:info_all([name]). + rabbit_vhost:info_all([name, description, tags, default_queue_type, metadata]). diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_whoami.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_whoami.erl index 8193da04178b..40dce655f51f 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_whoami.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_whoami.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_wm_whoami). @@ -11,8 +11,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_management/test/cache_SUITE.erl b/deps/rabbitmq_management/test/cache_SUITE.erl index df2949e8bbbb..525752a48462 100644 --- a/deps/rabbitmq_management/test/cache_SUITE.erl +++ b/deps/rabbitmq_management/test/cache_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(cache_SUITE). diff --git a/deps/rabbitmq_management/test/clustering_SUITE.erl b/deps/rabbitmq_management/test/clustering_SUITE.erl index 92a1e3456581..fa7804d9174b 100644 --- a/deps/rabbitmq_management/test/clustering_SUITE.erl +++ b/deps/rabbitmq_management/test/clustering_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(clustering_SUITE). @@ -11,8 +11,8 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit_common/include/rabbit_core_metrics.hrl"). --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). -import(rabbit_ct_broker_helpers, [get_node_config/3, restart_node/2]). -import(rabbit_mgmt_test_util, [http_get/2, http_put/4, http_post/4, http_delete/3, http_delete/4]). @@ -21,6 +21,8 @@ -compile(nowarn_export_all). -compile(export_all). +-define(STATS_INTERVAL, 250). 
+ all() -> [ {group, non_parallel_tests} @@ -29,9 +31,6 @@ all() -> groups() -> [{non_parallel_tests, [], [ list_cluster_nodes_test, - multi_node_case1_test, - ha_queue_hosted_on_other_node, - ha_queue_with_multiple_consumers, queue_on_other_node, queue_with_multiple_consumers, queue_consumer_cancelled, @@ -66,11 +65,11 @@ groups() -> %% ------------------------------------------------------------------- merge_app_env(Config) -> - Config1 = rabbit_ct_helpers:merge_app_env(Config, - {rabbit, [ - {collect_statistics, fine}, - {collect_statistics_interval, 500} - ]}), + Config1 = rabbit_ct_helpers:merge_app_env( + Config, {rabbit, [ + {collect_statistics, fine}, + {collect_statistics_interval, ?STATS_INTERVAL} + ]}), rabbit_ct_helpers:merge_app_env(Config1, {rabbitmq_management_agent, [ {rates_mode, detailed}, @@ -101,8 +100,6 @@ init_per_group(_, Config) -> end_per_group(_, Config) -> Config. -init_per_testcase(multi_node_case1_test = Testcase, Config) -> - rabbit_ct_helpers:testcase_started(Config, Testcase); init_per_testcase(Testcase, Config) -> rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, clear_all_table_data, []), rabbit_ct_broker_helpers:rpc(Config, 1, ?MODULE, clear_all_table_data, []), @@ -111,9 +108,6 @@ init_per_testcase(Testcase, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, {conn, Conn}), rabbit_ct_helpers:testcase_started(Config1, Testcase). -end_per_testcase(multi_node_case1_test = Testcase, Config) -> - rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"clustering_SUITE:end_per_testcase">>), - rabbit_ct_helpers:testcase_finished(Config, Testcase); end_per_testcase(Testcase, Config) -> rabbit_ct_client_helpers:close_connection(?config(conn, Config)), rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"clustering_SUITE:end_per_testcase">>), @@ -128,107 +122,6 @@ list_cluster_nodes_test(Config) -> ?assertEqual(2, length(http_get(Config, "/nodes"))), passed. -multi_node_case1_test(Config) -> - Nodename1 = rabbit_data_coercion:to_binary(get_node_config(Config, 0, nodename)), - Nodename2 = rabbit_data_coercion:to_binary(get_node_config(Config, 1, nodename)), - Policy = [{pattern, <<".*">>}, - {definition, [{'ha-mode', <<"all">>}]}], - http_put(Config, "/policies/%2F/HA", Policy, [?CREATED, ?NO_CONTENT]), - http_delete(Config, "/queues/%2F/multi-node-test-queue", [?NO_CONTENT, ?NOT_FOUND]), - - Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1), - {ok, Chan} = amqp_connection:open_channel(Conn), - _ = queue_declare(Chan, <<"multi-node-test-queue">>), - Q = wait_for_mirrored_queue(Config, "/queues/%2F/multi-node-test-queue"), - - ?assert(lists:member(maps:get(node, Q), [Nodename1, Nodename2])), - [Mirror] = maps:get(slave_nodes, Q), - [Mirror] = maps:get(synchronised_slave_nodes, Q), - ?assert(lists:member(Mirror, [Nodename1, Nodename2])), - - %% restart node2 so that queue master migrates - restart_node(Config, 1), - - Q2 = wait_for_mirrored_queue(Config, "/queues/%2F/multi-node-test-queue"), - http_delete(Config, "/queues/%2F/multi-node-test-queue", ?NO_CONTENT), - http_delete(Config, "/policies/%2F/HA", ?NO_CONTENT), - - ?assert(lists:member(maps:get(node, Q2), [Nodename1, Nodename2])), - - rabbit_ct_client_helpers:close_connection(Conn), - - passed. 
- -ha_queue_hosted_on_other_node(Config) -> - Policy = [{pattern, <<".*">>}, - {definition, [{'ha-mode', <<"all">>}]}], - http_put(Config, "/policies/%2F/HA", Policy, [?CREATED, ?NO_CONTENT]), - - Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1), - {ok, Chan} = amqp_connection:open_channel(Conn), - _ = queue_declare_durable(Chan, <<"ha-queue">>), - _ = wait_for_mirrored_queue(Config, "/queues/%2F/ha-queue"), - - {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)), - consume(Chan, <<"ha-queue">>), - - timer:sleep(5100), - force_stats(), - Res = http_get(Config, "/queues/%2F/ha-queue"), - - % assert some basic data is there - [Cons] = maps:get(consumer_details, Res), - #{} = maps:get(channel_details, Cons), % channel details proplist must not be empty - 0 = maps:get(prefetch_count, Cons), % check one of the augmented properties - <<"ha-queue">> = maps:get(name, Res), - - amqp_channel:close(Chan), - amqp_channel:close(Chan2), - rabbit_ct_client_helpers:close_connection(Conn), - - http_delete(Config, "/queues/%2F/ha-queue", ?NO_CONTENT), - http_delete(Config, "/policies/%2F/HA", ?NO_CONTENT), - - ok. - -ha_queue_with_multiple_consumers(Config) -> - Policy = [{pattern, <<".*">>}, - {definition, [{'ha-mode', <<"all">>}]}], - http_put(Config, "/policies/%2F/HA", Policy, [?CREATED, ?NO_CONTENT]), - - {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)), - _ = queue_declare_durable(Chan, <<"ha-queue3">>), - _ = wait_for_mirrored_queue(Config, "/queues/%2F/ha-queue3"), - - consume(Chan, <<"ha-queue3">>), - force_stats(), - - {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)), - consume(Chan2, <<"ha-queue3">>), - - timer:sleep(5100), - force_stats(), - - Res = http_get(Config, "/queues/%2F/ha-queue3"), - - % assert some basic data is there - [C1, C2] = maps:get(consumer_details, Res), - % channel details proplist must not be empty - #{} = maps:get(channel_details, C1), - #{} = maps:get(channel_details, C2), - % check one of the augmented properties - 0 = maps:get(prefetch_count, C1), - 0 = maps:get(prefetch_count, C2), - <<"ha-queue3">> = maps:get(name, Res), - - amqp_channel:close(Chan), - amqp_channel:close(Chan2), - - http_delete(Config, "/queues/%2F/ha-queue3", ?NO_CONTENT), - http_delete(Config, "/policies/%2F/HA", ?NO_CONTENT), - - ok. 
- qq_replicas_add(Config) -> Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), {ok, Chan} = amqp_connection:open_channel(Conn), @@ -250,15 +143,17 @@ qq_replicas_delete(Config) -> Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), {ok, Chan} = amqp_connection:open_channel(Conn), _ = queue_declare_quorum(Chan, <<"qq.23">>), - _ = wait_for_queue(Config, "/queues/%2F/qq.23"), + + ?awaitMatch(#{members := [_, _]}, http_get(Config, "/queues/%2F/qq.23"), 30000), Nodename1 = rabbit_data_coercion:to_binary(get_node_config(Config, 1, nodename)), Body = [{node, Nodename1}], - http_post(Config, "/queues/quorum/%2F/qq.23/replicas/add", Body, ?NO_CONTENT), - timer:sleep(1100), http_delete(Config, "/queues/quorum/%2F/qq.23/replicas/delete", ?ACCEPTED, Body), - timer:sleep(1100), + ?awaitMatch(#{members := [_]}, http_get(Config, "/queues/%2F/qq.23"), 30000), + + http_post(Config, "/queues/quorum/%2F/qq.23/replicas/add", Body, ?NO_CONTENT), + ?awaitMatch(#{members := [_, _]}, http_get(Config, "/queues/%2F/qq.23"), 30000), http_delete(Config, "/queues/%2F/qq.23", ?NO_CONTENT), @@ -271,16 +166,19 @@ qq_replicas_grow(Config) -> Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), {ok, Chan} = amqp_connection:open_channel(Conn), _ = queue_declare_quorum(Chan, <<"qq.24">>), - _ = wait_for_queue(Config, "/queues/%2F/qq.24"), Nodename1 = rabbit_data_coercion:to_list(get_node_config(Config, 1, nodename)), - Body = [ - {strategy, <<"all">>}, - {queue_pattern, <<"qq.24">>}, - {vhost_pattern, <<".*">>} - ], - http_post(Config, "/queues/quorum/replicas/on/" ++ Nodename1 ++ "/grow", Body, ?NO_CONTENT), - timer:sleep(1100), + ?awaitMatch(#{members := [_, _]}, http_get(Config, "/queues/%2F/qq.24"), 30000), + http_delete(Config, "/queues/quorum/%2F/qq.24/replicas/delete", ?ACCEPTED, + [{node, Nodename1}]), + ?awaitMatch(#{members := [_]}, http_get(Config, "/queues/%2F/qq.24"), 30000), + + Body = [{strategy, <<"all">>}, + {queue_pattern, <<"qq.24">>}, + {vhost_pattern, <<".*">>}], + http_post(Config, "/queues/quorum/replicas/on/" ++ Nodename1 ++ "/grow", + Body, ?NO_CONTENT), + ?awaitMatch(#{members := [_, _]}, http_get(Config, "/queues/%2F/qq.24"), 30000), http_delete(Config, "/queues/%2F/qq.24", ?NO_CONTENT), @@ -292,22 +190,17 @@ qq_replicas_grow(Config) -> qq_replicas_shrink(Config) -> Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), {ok, Chan} = amqp_connection:open_channel(Conn), - _ = queue_declare_quorum(Chan, <<"qq.24">>), - _ = wait_for_queue(Config, "/queues/%2F/qq.24"), + _ = queue_declare_quorum(Chan, <<"qq.25">>), + ?awaitMatch(#{members := [_, _]}, http_get(Config, "/queues/%2F/qq.25"), 30000), + ?awaitMatch(#{members := [_, _]}, http_get(Config, "/queues/%2F/qq.25"), 30000), Nodename1 = rabbit_data_coercion:to_list(get_node_config(Config, 1, nodename)), - Body = [ - {strategy, <<"all">>}, - {queue_pattern, <<"qq.24">>}, - {vhost_pattern, <<".*">>} - ], - http_post(Config, "/queues/quorum/replicas/on/" ++ Nodename1 ++ "/grow", Body, ?NO_CONTENT), - timer:sleep(1100), - http_delete(Config, "/queues/quorum/replicas/on/" ++ Nodename1 ++ "/shrink", ?ACCEPTED), - timer:sleep(1100), + http_delete(Config, "/queues/quorum/replicas/on/" ++ Nodename1 ++ "/shrink", + ?ACCEPTED), + ?awaitMatch(#{members := [_]}, http_get(Config, "/queues/%2F/qq.25"), 30000), - http_delete(Config, "/queues/%2F/qq.24", ?NO_CONTENT), + http_delete(Config, "/queues/%2F/qq.25", ?NO_CONTENT), amqp_channel:close(Chan), rabbit_ct_client_helpers:close_connection(Conn), 
@@ -323,8 +216,7 @@ queue_on_other_node(Config) -> {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)), consume(Chan2, <<"some-queue">>), - timer:sleep(5100), - force_stats(), + force_stats(Config), Res = http_get(Config, "/queues/%2F/some-queue"), % assert some basic data is present @@ -359,8 +251,7 @@ queue_with_multiple_consumers(Config) -> amqp_channel:cast(Chan, #'basic.ack'{delivery_tag = T}) end, - timer:sleep(5100), - force_stats(), + force_stats(Config), Res = http_get(Config, "/queues/%2F/multi-consumer-queue1"), http_delete(Config, "/queues/%2F/multi-consumer-queue1", ?NO_CONTENT), @@ -390,7 +281,7 @@ queue_consumer_cancelled(Config) -> #'basic.cancel_ok'{} = amqp_channel:call(Chan, #'basic.cancel'{consumer_tag = Tag}), - force_stats(), + force_stats(Config), Res = http_get(Config, "/queues/%2F/some-queue"), amqp_channel:close(Chan), @@ -407,10 +298,10 @@ queue_consumer_channel_closed(Config) -> _ = wait_for_queue(Config, "/queues/%2F/some-queue"), consume(Chan, <<"some-queue">>), - force_stats(), % ensure channel stats have been written + force_stats(Config), % ensure channel stats have been written amqp_channel:close(Chan), - force_stats(), + force_stats(Config), Res = http_get(Config, "/queues/%2F/some-queue"), % assert there are no consumer details @@ -431,8 +322,8 @@ queue(Config) -> basic_get(Chan, <<"some-queue">>), publish(Chan2, <<"some-queue">>), basic_get(Chan2, <<"some-queue">>), - force_stats(), - timer:sleep(5100), + force_stats(Config), + Res = http_get(Config, "/queues/%2F/some-queue"), % assert single queue is returned [#{} | _] = maps:get(deliveries, Res), @@ -447,7 +338,7 @@ queues_single(Config) -> http_put(Config, "/queues/%2F/some-queue", none, [?CREATED, ?NO_CONTENT]), _ = wait_for_queue(Config, "/queues/%2F/some-queue"), - force_stats(), + force_stats(Config), Res = http_get(Config, "/queues/%2F"), http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT), @@ -463,8 +354,7 @@ queues_multiple(Config) -> _ = wait_for_queue(Config, "/queues/%2F/some-queue"), _ = wait_for_queue(Config, "/queues/%2F/some-other-queue"), - force_stats(), - timer:sleep(5100), + force_stats(Config), Res = http_get(Config, "/queues/%2F"), [Q1, Q2 | _] = Res, @@ -480,10 +370,10 @@ queues_multiple(Config) -> queues_removed(Config) -> http_put(Config, "/queues/%2F/some-queue", none, [?CREATED, ?NO_CONTENT]), - force_stats(), + force_stats(Config), N = length(http_get(Config, "/queues/%2F")), http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT), - force_stats(), + force_stats(Config), ?assertEqual(N - 1, length(http_get(Config, "/queues/%2F"))), ok. 
@@ -497,8 +387,7 @@ channels_multiple_on_different_nodes(Config) -> {ok, Chan2} = amqp_connection:open_channel(Conn2), consume(Chan, <<"some-queue">>), - timer:sleep(5100), - force_stats(), + force_stats(Config), Res = http_get(Config, "/channels"), % assert two channels are present @@ -519,13 +408,12 @@ channel_closed(Config) -> _ = wait_for_queue(Config, "/queues/%2F/some-queue"), {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)), - force_stats(), + force_stats(Config), consume(Chan2, <<"some-queue">>), amqp_channel:close(Chan), - timer:sleep(5100), - force_stats(), + force_stats(Config), Res = http_get(Config, "/channels"), % assert one channel is present @@ -542,8 +430,8 @@ channel(Config) -> [{_, ChData}] = rabbit_ct_broker_helpers:rpc(Config, 0, ets, tab2list, [channel_created]), ChName = uri_string:recompose(#{path => binary_to_list(pget(name, ChData))}), - timer:sleep(5100), - force_stats(), + + force_stats(Config), Res = http_get(Config, "/channels/" ++ ChName ), % assert channel is non empty #{} = Res, @@ -562,8 +450,8 @@ channel_other_node(Config) -> consume(Chan, Q), publish(Chan, Q), - timer:sleep(5100), - force_stats(), + wait_for_collect_statistics_interval(), + force_stats(Config), Res = http_get(Config, "/channels/" ++ ChName ), % assert channel is non empty @@ -586,8 +474,7 @@ channel_with_consumer_on_other_node(Config) -> consume(Chan, Q), publish(Chan, Q), - timer:sleep(5100), - force_stats(), + force_stats(Config), Res = http_get(Config, "/channels/" ++ ChName), http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT), @@ -608,8 +495,7 @@ channel_with_consumer_on_one_node(Config) -> ChName = get_channel_name(Config, 0), consume(Chan, Q), - timer:sleep(5100), - force_stats(), + force_stats(Config), Res = http_get(Config, "/channels/" ++ ChName), amqp_channel:close(Chan), @@ -631,8 +517,7 @@ consumers(Config) -> consume(Chan, <<"some-queue">>), consume(Chan2, <<"some-queue">>), - timer:sleep(5100), - force_stats(), + force_stats(Config), Res = http_get(Config, "/consumers"), % assert there are two non-empty consumer records @@ -657,8 +542,8 @@ connections(Config) -> {ok, _Chan2} = amqp_connection:open_channel(Conn2), %% channel count needs a bit longer for 2nd chan - timer:sleep(5100), - force_stats(), + wait_for_collect_statistics_interval(), + force_stats(Config), Res = http_get(Config, "/connections"), @@ -685,7 +570,7 @@ exchanges(Config) -> consume(Chan, QName), publish_to(Chan, XName, <<"some-key">>), - force_stats(), + force_stats(Config), Res = http_get(Config, "/exchanges"), [X] = [X || X <- Res, maps:get(name, X) =:= XName], @@ -709,8 +594,8 @@ exchange(Config) -> consume(Chan, QName), publish_to(Chan, XName, <<"some-key">>), - force_stats(), - force_stats(), + force_stats(Config), + force_stats(Config), Res = http_get(Config, "/exchanges/%2F/some-other-exchange"), ?assertEqual(<<"direct">>, maps:get(type, Res)), @@ -729,8 +614,9 @@ vhosts(Config) -> Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1), {ok, Chan2} = amqp_connection:open_channel(Conn2), publish(Chan2, <<"some-queue">>), - timer:sleep(5100), % TODO force stat emission - force_stats(), + + wait_for_collect_statistics_interval(), + force_stats(Config), Res = http_get(Config, "/vhosts"), http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT), @@ -753,8 +639,9 @@ nodes(Config) -> {ok, Chan2} = amqp_connection:open_channel(Conn), publish(Chan2, <<"some-queue">>), - timer:sleep(5100), % TODO force stat emission - force_stats(), + + 
wait_for_collect_statistics_interval(), + force_stats(Config), Res = http_get(Config, "/nodes"), http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT), @@ -782,8 +669,8 @@ overview(Config) -> {ok, Chan2} = amqp_connection:open_channel(Conn2), publish(Chan, <<"queue-n1">>), publish(Chan2, <<"queue-n2">>), - timer:sleep(5100), % TODO force stat emission - force_stats(), % channel count needs a bit longer for 2nd chan + wait_for_collect_statistics_interval(), + force_stats(Config), % channel count needs a bit longer for 2nd chan Res = http_get(Config, "/overview"), http_delete(Config, "/queues/%2F/queue-n1", ?NO_CONTENT), @@ -827,7 +714,7 @@ disable_plugin(Config) -> clear_all_table_data() -> [ets:delete_all_objects(T) || {T, _} <- ?CORE_TABLES], - [ets:delete_all_objects(T) || {T, _} <- ?TABLES], + rabbit_mgmt_storage:reset(), [gen_server:call(P, purge_cache) || {_, P, _, _} <- supervisor:which_children(rabbit_mgmt_db_cache_sup)], send_to_all_collectors(purge_old_stats). @@ -894,9 +781,6 @@ queue_bind(Chan, Ex, Q, Key) -> routing_key = Key}, #'queue.bind_ok'{} = amqp_channel:call(Chan, Binding). -wait_for_mirrored_queue(Config, Path) -> - wait_for_queue(Config, Path, [slave_nodes, synchronised_slave_nodes]). - wait_for_queue(Config, Path) -> wait_for_queue(Config, Path, []). @@ -946,16 +830,24 @@ dump_table(Config, Table) -> Data0 = rabbit_ct_broker_helpers:rpc(Config, 1, ets, tab2list, [Table]), ct:pal(?LOW_IMPORTANCE, "Node 1: Dump of table ~tp:~n~tp~n", [Table, Data0]). -force_stats() -> - force_all(), - timer:sleep(2000). +force_stats(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + force_all(Nodes), + ok. -force_all() -> - [begin - {rabbit_mgmt_external_stats, N} ! emit_update, - timer:sleep(125) - end || N <- [node() | nodes()]], - send_to_all_collectors(collect_metrics). +force_all(Nodes) -> + lists:append( + [begin + ExtStats = {rabbit_mgmt_external_stats, N}, + ExtStats ! emit_update, + [ExtStats | + [begin + Name = {rabbit_mgmt_metrics_collector:name(Table), N}, + Name ! collect_metrics, + Name + end + || {Table, _} <- ?CORE_TABLES]] + end || N <- Nodes]). send_to_all_collectors(Msg) -> [begin @@ -973,3 +865,6 @@ listener_proto(Proto) when is_atom(Proto) -> %% rabbit:status/0 used this formatting before rabbitmq/rabbitmq-cli#340 listener_proto({Proto, _Port, _Interface}) -> Proto. + +wait_for_collect_statistics_interval() -> + timer:sleep(?STATS_INTERVAL * 2). diff --git a/deps/rabbitmq_management/test/clustering_prop_SUITE.erl b/deps/rabbitmq_management/test/clustering_prop_SUITE.erl index 0d87a492a175..7ade6ece5b40 100644 --- a/deps/rabbitmq_management/test/clustering_prop_SUITE.erl +++ b/deps/rabbitmq_management/test/clustering_prop_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(clustering_prop_SUITE). @@ -11,7 +11,6 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("proper/include/proper.hrl"). -include_lib("rabbit_common/include/rabbit_core_metrics.hrl"). --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). @@ -19,7 +18,9 @@ -import(rabbit_mgmt_test_util, [http_get/2, http_get_from_node/3]). 
-import(rabbit_misc, [pget/2]). --compile([export_all, nowarn_format]). +-compile([export_all, + nowarn_format, + nowarn_export_all]). -export_type([rmqnode/0, queues/0]). @@ -34,6 +35,8 @@ groups() -> ]} ]. +-define(COLLECT_INTERVAL, 500). + %% ------------------------------------------------------------------- %% Testsuite setup/teardown. %% ------------------------------------------------------------------- @@ -42,7 +45,8 @@ merge_app_env(Config) -> Config1 = rabbit_ct_helpers:merge_app_env(Config, {rabbit, [ {collect_statistics, fine}, - {collect_statistics_interval, 500} + {collect_statistics_interval, + ?COLLECT_INTERVAL} ]}), rabbit_ct_helpers:merge_app_env(Config1, {rabbitmq_management, [ @@ -108,10 +112,13 @@ prop_connection_channel_counts(Config) -> Cons = lists:foldl(fun (Op, Agg) -> execute_op(Config, Op, Agg) end, [], Ops), - force_stats(), + force_stats(Config), Res = validate_counts(Config, Cons), cleanup(Cons), - force_stats(), + rabbit_ct_helpers:await_condition( + fun () -> validate_counts(Config, []) end, + 60000), + force_stats(Config), Res end). @@ -148,29 +155,36 @@ execute_op(_Config, rem_conn, []) -> execute_op(_Config, rem_conn, [{conn, Conn, _Chans} | Rem]) -> rabbit_ct_client_helpers:close_connection(Conn), Rem; -execute_op(_Config, force_stats, State) -> - force_stats(), +execute_op(Config, force_stats, State) -> + force_stats(Config), State. %%---------------------------------------------------------------------------- %% -force_stats() -> - force_all(), - timer:sleep(5000). - -force_all() -> - [begin - {rabbit_mgmt_external_stats, N} ! emit_update, - timer:sleep(100), - [{rabbit_mgmt_metrics_collector:name(Table), N} ! collect_metrics - || {Table, _} <- ?CORE_TABLES] - end - || N <- [node() | nodes()]]. +force_stats(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Names = force_all(Nodes), + %% wait for all collectors to do their work + %% need to catch as mixed versions tests may timeout + [catch gen_server:call(Name, wait, ?COLLECT_INTERVAL * 2) + || Name <- Names], + ok. + +force_all(Nodes) -> + lists:append( + [begin + [begin + Name = {rabbit_mgmt_metrics_collector:name(Table), N}, + Name ! collect_metrics, + Name + end + || {Table, _} <- ?CORE_TABLES] + end || N <- Nodes]). clear_all_table_data() -> [ets:delete_all_objects(T) || {T, _} <- ?CORE_TABLES], - [ets:delete_all_objects(T) || {T, _} <- ?TABLES], + rabbit_mgmt_storage:reset(), [gen_server:call(P, purge_cache) || {_, P, _, _} <- supervisor:which_children(rabbit_mgmt_db_cache_sup)]. @@ -219,23 +233,6 @@ queue_bind(Chan, Ex, Q, Key) -> routing_key = Key}, #'queue.bind_ok'{} = amqp_channel:call(Chan, Binding). -wait_for(Config, Path) -> - wait_for(Config, Path, [slave_nodes, synchronised_slave_nodes]). - -wait_for(Config, Path, Keys) -> - wait_for(Config, Path, Keys, 1000). - -wait_for(_Config, Path, Keys, 0) -> - exit({timeout, {Path, Keys}}); - -wait_for(Config, Path, Keys, Count) -> - Res = http_get(Config, Path), - case present(Keys, Res) of - false -> timer:sleep(10), - wait_for(Config, Path, Keys, Count - 1); - true -> Res - end. - present(Keys, Res) -> lists:all(fun (Key) -> X = pget(Key, Res), diff --git a/deps/rabbitmq_management/test/config_schema_SUITE.erl b/deps/rabbitmq_management/test/config_schema_SUITE.erl index a62e34c7be76..c944d7090ed1 100644 --- a/deps/rabbitmq_management/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_management/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbitmq_management.snippets b/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbitmq_management.snippets index edbc2d4819b0..d26639620bb8 100644 --- a/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbitmq_management.snippets +++ b/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbitmq_management.snippets @@ -632,6 +632,44 @@ {oauth_initiated_logon_type, idp_initiated} ]} ], [rabbitmq_management] + }, + {oauth2_with_multiple_resource_servers, + "management.oauth_enabled = true + management.oauth_resource_servers.1.oauth_provider_url = http://one:8080 + management.oauth_resource_servers.1.id = resource-one + management.oauth_resource_servers.1.label = One + management.oauth_resource_servers.1.oauth_client_id = one + management.oauth_resource_servers.1.oauth_scopes = openid profile rabbitmq.* + management.oauth_resource_servers.2.oauth_provider_url = http://two + management.oauth_resource_servers.2.id = resource-two + management.oauth_resource_servers.2.oauth_client_id = two + management.oauth_resource_servers.3.oauth_initiated_logon_type = idp_initiated + management.oauth_resource_servers.3.oauth_provider_url = http://three", + [ + {rabbitmq_management, [ + {oauth_enabled, true}, + {oauth_resource_servers, + #{ + <<"resource-one">> => [ + {oauth_scopes, "openid profile rabbitmq.*"}, + {oauth_client_id, "one"}, + {id, "resource-one"}, + {label, "One"}, + {oauth_provider_url, "http://one:8080"} + ], + <<"resource-two">> => [ + {oauth_client_id, "two"}, + {id, "resource-two"}, + {oauth_provider_url, "http://two"} + ], + <<"3">> => [ + {oauth_initiated_logon_type, idp_initiated}, + {oauth_provider_url, "http://three"} + ] + } + } + ]} + ], [rabbitmq_management] } ]. diff --git a/deps/rabbitmq_management/test/listener_config_SUITE.erl b/deps/rabbitmq_management/test/listener_config_SUITE.erl index 2f70310cf181..ab0920536380 100644 --- a/deps/rabbitmq_management/test/listener_config_SUITE.erl +++ b/deps/rabbitmq_management/test/listener_config_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(listener_config_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl index 38b0f7984dd3..7d354bae1e2f 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. 
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_http_SUITE). @@ -11,9 +11,14 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). -import(rabbit_ct_client_helpers, [close_connection/1, close_channel/1, open_unmanaged_connection/1]). +-import(rabbit_ct_broker_helpers, [rpc/4]). +-import(rabbit_ct_helpers, + [eventually/3, + eventually/1]). -import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2, test_item/2, assert_keys/2, assert_no_keys/2, http_get/2, http_get/3, http_get/5, @@ -31,23 +36,74 @@ -import(rabbit_misc, [pget/2]). --define(COLLECT_INTERVAL, 1000). +-define(COLLECT_INTERVAL, 256). -define(PATH_PREFIX, "/custom-prefix"). --compile(export_all). +-define(AWAIT(Body), + await_condition(fun () -> Body end)). + +-compile([export_all, nowarn_export_all]). all() -> [ - {group, all_tests_with_prefix}, - {group, all_tests_without_prefix} + {group, all_tests_with_prefix}, + {group, all_tests_without_prefix}, + {group, definitions_group1_without_prefix}, + {group, definitions_group2_without_prefix}, + {group, definitions_group3_without_prefix}, + {group, definitions_group4_without_prefix} ]. groups() -> [ - {all_tests_with_prefix, [], all_tests()}, - {all_tests_without_prefix, [], all_tests()} + {all_tests_with_prefix, [], some_tests() ++ all_tests()}, + {all_tests_without_prefix, [], some_tests()}, + %% We have several groups because their interference is + %% way above average. It is easier to use separate groups + %% that get a blank node each than to try to untangle multiple + %% definitions-related tests. MK. + {definitions_group1_without_prefix, [], definitions_group1_tests()}, + {definitions_group2_without_prefix, [], definitions_group2_tests()}, + {definitions_group3_without_prefix, [], definitions_group3_tests()}, + {definitions_group4_without_prefix, [], definitions_group4_tests()} + ]. + +some_tests() -> + [ + users_test, + exchanges_test, + queues_test, + bindings_test, + policy_test, + policy_permissions_test ]. +definitions_group1_tests() -> + [ + definitions_test, + definitions_password_test, + long_definitions_test, + long_definitions_multipart_test + ]. + +definitions_group2_tests() -> + [ + definitions_default_queue_type_test, + definitions_vhost_metadata_test + ]. + +definitions_group3_tests() -> + [ + definitions_server_named_queue_test, + definitions_with_charset_test + ]. + +definitions_group4_tests() -> + [ + definitions_vhost_test + ]. 
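A minimal usage sketch for the ?AWAIT(Body) macro defined above, assuming a hypothetical helper name and queue path: the body is re-evaluated by await_condition/1 until it returns true, which is the pattern that replaces the fixed timer:sleep/1 calls elsewhere in this suite.

%% Hypothetical helper, for illustration only: poll the HTTP API until the
%% default vhost reports at least one queue, instead of sleeping for a
%% guessed amount of time.
await_queue_listed_example(Config) ->
    ?AWAIT(
       begin
           Queues = http_get(Config, "/queues/%2F", ?OK),
           is_list(Queues) andalso length(Queues) > 0
       end).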
+ + all_tests() -> [ cli_redirect_test, api_redirect_test, @@ -61,7 +117,6 @@ all_tests() -> [ vhosts_test, vhosts_description_test, vhosts_trace_test, - users_test, users_legacy_administrator_test, adding_a_user_with_password_test, adding_a_user_with_password_hash_test, @@ -78,32 +133,21 @@ all_tests() -> [ permissions_validation_test, permissions_list_test, permissions_test, - connections_test, + connections_test_amqpl, + connections_test_amqp, multiple_invalid_connections_test, - exchanges_test, - queues_test, quorum_queues_test, stream_queues_have_consumers_field, - bindings_test, bindings_post_test, bindings_null_routing_key_test, bindings_e2e_test, permissions_administrator_test, permissions_vhost_test, permissions_amqp_test, + permissions_queue_delete_test, permissions_connection_channel_consumer_test, consumers_cq_test, consumers_qq_test, - definitions_test, - definitions_vhost_test, - definitions_password_test, - definitions_remove_things_test, - definitions_server_named_queue_test, - definitions_with_charset_test, - definitions_default_queue_type_test, - long_definitions_test, - long_definitions_multipart_test, - aliveness_test, arguments_test, arguments_table_test, queue_purge_test, @@ -113,6 +157,7 @@ all_tests() -> [ connections_channels_pagination_test, exchanges_pagination_test, exchanges_pagination_permissions_test, + queues_detailed_test, queue_pagination_test, queue_pagination_columns_test, queues_pagination_permissions_test, @@ -125,6 +170,7 @@ all_tests() -> [ get_fail_test, publish_test, publish_large_message_test, + publish_large_message_exceeding_http_request_body_size_test, publish_accept_json_test, publish_fail_test, publish_base64_test, @@ -134,8 +180,6 @@ all_tests() -> [ global_parameters_test, disabled_operator_policy_test, operator_policy_test, - policy_test, - policy_permissions_test, issue67_test, extensions_test, cors_test, @@ -144,7 +188,8 @@ all_tests() -> [ rates_test, single_active_consumer_cq_test, single_active_consumer_qq_test, -%% oauth_test, %% disabled until we are able to enable oauth2 plugin + %% This test needs the OAuth 2 plugin to be enabled + %% oauth_test, disable_basic_auth_test, login_test, csp_headers_test, @@ -152,7 +197,9 @@ all_tests() -> [ user_limits_list_test, user_limit_set_test, config_environment_test, - disabled_qq_replica_opers_test + disabled_qq_replica_opers_test, + list_deprecated_features_test, + list_used_deprecated_features_test ]. %% ------------------------------------------------------------------- @@ -160,9 +207,13 @@ all_tests() -> [ %% ------------------------------------------------------------------- merge_app_env(Config) -> Config1 = rabbit_ct_helpers:merge_app_env(Config, - {rabbit, [ - {collect_statistics_interval, ?COLLECT_INTERVAL} - ]}), + {rabbit, + [ + {collect_statistics_interval, + ?COLLECT_INTERVAL}, + {quorum_tick_interval, 256}, + {stream_tick_interval, 256} + ]}), rabbit_ct_helpers:merge_app_env(Config1, {rabbitmq_management, [ {sample_retention_policies, @@ -184,6 +235,13 @@ finish_init(Group, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, NodeConf), merge_app_env(Config1). +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + Config. + +end_per_suite(Config) -> + Config. 
+ init_per_group(all_tests_with_prefix=Group, Config0) -> PathConfig = {rabbitmq_management, [{path_prefix, ?PATH_PREFIX}]}, Config1 = rabbit_ct_helpers:merge_app_env(Config0, PathConfig), @@ -222,14 +280,20 @@ init_per_testcase(Testcase = disabled_qq_replica_opers_test, Config) -> rabbit_ct_broker_helpers:rpc_all(Config, application, set_env, [rabbitmq_management, restrictions, Restrictions]), rabbit_ct_helpers:testcase_started(Config, Testcase); +init_per_testcase(queues_detailed_test, Config) -> + IsEnabled = rabbit_ct_broker_helpers:is_feature_flag_enabled( + Config, detailed_queues_endpoint), + case IsEnabled of + true -> Config; + false -> {skip, "The detailed queues endpoint is not available."} + end; init_per_testcase(Testcase, Config) -> rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"rabbit_mgmt_SUITE:init_per_testcase">>), rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(Testcase, Config) -> rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"rabbit_mgmt_SUITE:end_per_testcase">>), - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_management, disable_basic_auth, false]), + rpc(Config, application, set_env, [rabbitmq_management, disable_basic_auth, false]), Config1 = end_per_testcase0(Testcase, Config), rabbit_ct_helpers:testcase_finished(Config1, Testcase). @@ -240,9 +304,7 @@ end_per_testcase0(T, Config) || #{name := Name} <- Vhosts], Config; end_per_testcase0(definitions_password_test, Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, - application, unset_env, - [rabbit, password_hashing_module]), + rpc(Config, application, unset_env, [rabbit, password_hashing_module]), Config; end_per_testcase0(queues_test, Config) -> rabbit_ct_broker_helpers:delete_vhost(Config, <<"downvhost">>), @@ -276,16 +338,18 @@ end_per_testcase0(permissions_vhost_test, Config) -> rabbit_ct_broker_helpers:delete_user(Config, <<"myuser2">>), Config; end_per_testcase0(config_environment_test, Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, - [rabbit, config_environment_test_env]), + rpc(Config, application, unset_env, [rabbit, config_environment_test_env]), Config; end_per_testcase0(disabled_operator_policy_test, Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, - [rabbitmq_management, restrictions]), + rpc(Config, application, unset_env, [rabbitmq_management, restrictions]), Config; end_per_testcase0(disabled_qq_replica_opers_test, Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, - [rabbitmq_management, restrictions]), + rpc(Config, application, unset_env, [rabbitmq_management, restrictions]), + Config; +end_per_testcase0(Testcase, Config) + when Testcase == list_deprecated_features_test; + Testcase == list_used_deprecated_features_test -> + ok = rpc(Config, rabbit_feature_flags, clear_injected_test_feature_flags, []), Config; end_per_testcase0(_, Config) -> Config. 
@@ -321,6 +385,7 @@ nodes_test(Config) -> assert_list([DiscNode], http_get(Config, "/nodes")), assert_list([DiscNode], http_get(Config, "/nodes", "monitor", "monitor", ?OK)), http_get(Config, "/nodes", "user", "user", ?NOT_AUTHORISED), + http_get(Config, "/nodes/does-not-exist", ?NOT_FOUND), [Node] = http_get(Config, "/nodes"), Path = "/nodes/" ++ binary_to_list(maps:get(name, Node)), assert_item(DiscNode, http_get(Config, Path, ?OK)), @@ -336,7 +401,7 @@ memory_test(Config) -> Result = http_get(Config, Path, ?OK), assert_keys([memory], Result), Keys = [total, connection_readers, connection_writers, connection_channels, - connection_other, queue_procs, queue_slave_procs, plugins, + connection_other, queue_procs, plugins, other_proc, mnesia, mgmt_db, msg_index, other_ets, binary, code, atom, other_system, allocated_unused, reserved_unallocated], assert_keys(Keys, maps:get(memory, Result)), @@ -359,7 +424,7 @@ ets_tables_memory_test(Config) -> Path = "/nodes/" ++ binary_to_list(maps:get(name, Node)) ++ "/memory/ets", Result = http_get(Config, Path, ?OK), assert_keys([ets_tables_memory], Result), - NonMgmtKeys = [rabbit_vhost,rabbit_user_permission], + NonMgmtKeys = [tracked_connection, tracked_channel], Keys = [queue_stats, vhost_stats_coarse_conn_stats, connection_created_stats, channel_process_stats, consumer_stats, queue_msg_rates], @@ -416,8 +481,7 @@ auth_test(Config) -> %% NOTE: this one won't have www-authenticate in the response, %% because user/password are ok, tags are not test_auth(Config, ?NOT_AUTHORISED, [auth_header("user", "user")]), - WrongAuthResponseHeaders = test_auth(Config, ?NOT_AUTHORISED, [auth_header("guest", "gust")]), - ?assertEqual(true, lists:keymember("www-authenticate", 1, WrongAuthResponseHeaders)), + %?assertEqual(true, lists:keymember("www-authenticate", 1, WrongAuthResponseHeaders)), test_auth(Config, ?OK, [auth_header("guest", "guest")]), http_delete(Config, "/users/user", {group, '2xx'}), passed. 
@@ -491,49 +555,59 @@ users_test(Config) -> assert_item(#{name => <<"guest">>, tags => [<<"administrator">>]}, http_get(Config, "/whoami")), rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_management, login_session_timeout, 100]), + [rabbitmq_management, login_session_timeout, 100]), assert_item(#{name => <<"guest">>, - tags => [<<"administrator">>], - login_session_timeout => 100}, + tags => [<<"administrator">>], + login_session_timeout => 100}, http_get(Config, "/whoami")), - http_get(Config, "/users/myuser", ?NOT_FOUND), - http_put_raw(Config, "/users/myuser", "Something not JSON", ?BAD_REQUEST), - http_put(Config, "/users/myuser", [{flim, <<"flam">>}], ?BAD_REQUEST), - http_put(Config, "/users/myuser", [{tags, [<<"management">>]}, - {password, <<"myuser">>}], - {group, '2xx'}), - http_put(Config, "/users/myuser", [{password_hash, <<"not_hash">>}], ?BAD_REQUEST), - http_put(Config, "/users/myuser", [{password_hash, + http_delete(Config, "/users/users_test", [?NO_CONTENT, ?NOT_FOUND]), + http_get(Config, "/users/users_test", [?NO_CONTENT, ?NOT_FOUND]), + http_put_raw(Config, "/users/users_test", "Something not JSON", ?BAD_REQUEST), + http_put(Config, "/users/users_test", [{flim, <<"flam">>}], ?BAD_REQUEST), + http_put(Config, "/users/users_test", [{tags, [<<"management">>]}, + {password, <<"users_test">>}], + {group, '2xx'}), + http_put(Config, "/users/users_test", [{password_hash, <<"not_hash">>}], ?BAD_REQUEST), + http_put(Config, "/users/users_test", [{password_hash, <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>}, - {tags, <<"management">>}], {group, '2xx'}), - assert_item(#{name => <<"myuser">>, tags => [<<"management">>], - password_hash => <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>, - hashing_algorithm => <<"rabbit_password_hashing_sha256">>}, - http_get(Config, "/users/myuser")), + {tags, <<"management">>}], {group, '2xx'}), + assert_item(#{name => <<"users_test">>, tags => [<<"management">>], + password_hash => <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>, + hashing_algorithm => <<"rabbit_password_hashing_sha256">>}, + http_get(Config, "/users/users_test")), - http_put(Config, "/users/myuser", [{password_hash, + http_put(Config, "/users/users_test", [{password_hash, <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>}, - {hashing_algorithm, <<"rabbit_password_hashing_md5">>}, - {tags, [<<"management">>]}], {group, '2xx'}), - assert_item(#{name => <<"myuser">>, tags => [<<"management">>], - password_hash => <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>, - hashing_algorithm => <<"rabbit_password_hashing_md5">>}, - http_get(Config, "/users/myuser")), - http_put(Config, "/users/myuser", [{password, <<"password">>}, - {tags, [<<"administrator">>, <<"foo">>]}], {group, '2xx'}), - assert_item(#{name => <<"myuser">>, tags => [<<"administrator">>, <<"foo">>]}, - http_get(Config, "/users/myuser")), - assert_list(lists:sort([#{name => <<"myuser">>, tags => [<<"administrator">>, <<"foo">>]}, - #{name => <<"guest">>, tags => [<<"administrator">>]}]), - lists:sort(http_get(Config, "/users"))), - test_auth(Config, ?OK, [auth_header("myuser", "password")]), - http_put(Config, "/users/myuser", [{password, <<"password">>}, - {tags, []}], {group, '2xx'}), - assert_item(#{name => <<"myuser">>, tags => []}, - http_get(Config, "/users/myuser")), - http_delete(Config, "/users/myuser", {group, '2xx'}), - test_auth(Config, ?NOT_AUTHORISED, [auth_header("myuser", "password")]), - http_get(Config, "/users/myuser", ?NOT_FOUND), + {hashing_algorithm, <<"rabbit_password_hashing_md5">>}, + {tags, [<<"management">>]}], {group, '2xx'}), + 
assert_item(#{name => <<"users_test">>, tags => [<<"management">>], + password_hash => <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>, + hashing_algorithm => <<"rabbit_password_hashing_md5">>}, + http_get(Config, "/users/users_test")), + http_put(Config, "/users/users_test", [{password, <<"password">>}, + {tags, [<<"administrator">>, <<"foo">>]}], {group, '2xx'}), + assert_item(#{name => <<"users_test">>, tags => [<<"administrator">>, <<"foo">>]}, + http_get(Config, "/users/users_test")), + Listed = lists:sort(http_get(Config, "/users")), + ct:pal("Listed users: ~tp", [Listed]), + User1 = #{name => <<"users_test">>, tags => [<<"administrator">>, <<"foo">>]}, + User2 = #{name => <<"guest">>, tags => [<<"administrator">>]}, + ?assert(lists:any(fun(U) -> + maps:get(name, U) =:= maps:get(name, User1) andalso + maps:get(tags, U) =:= maps:get(tags, User1) + end, Listed)), + ?assert(lists:any(fun(U) -> + maps:get(name, U) =:= maps:get(name, User2) andalso + maps:get(tags, U) =:= maps:get(tags, User2) + end, Listed)), + test_auth(Config, ?OK, [auth_header("users_test", "password")]), + http_put(Config, "/users/users_test", [{password, <<"password">>}, + {tags, []}], {group, '2xx'}), + assert_item(#{name => <<"users_test">>, tags => []}, + http_get(Config, "/users/users_test")), + http_delete(Config, "/users/users_test", {group, '2xx'}), + test_auth(Config, ?NOT_AUTHORISED, [auth_header("users_test", "password")]), + http_get(Config, "/users/users_test", ?NOT_FOUND), passed. without_permissions_users_test(Config) -> @@ -902,19 +976,22 @@ topic_permissions_test(Config) -> http_delete(Config, "/vhosts/myvhost2", {group, '2xx'}), passed. -connections_test(Config) -> +connections_test_amqpl(Config) -> {Conn, _Ch} = open_connection_and_channel(Config), LocalPort = local_port(Conn), Path = binary_to_list( rabbit_mgmt_format:print( "/connections/127.0.0.1%3A~w%20-%3E%20127.0.0.1%3A~w", [LocalPort, amqp_port(Config)])), - timer:sleep(1500), - Connection = http_get(Config, Path, ?OK), - ?assert(maps:is_key(recv_oct, Connection)), - ?assert(maps:is_key(garbage_collection, Connection)), - ?assert(maps:is_key(send_oct_details, Connection)), - ?assert(maps:is_key(reductions, Connection)), + await_condition( + fun () -> + Connection = http_get(Config, Path, ?OK), + ?assert(maps:is_key(recv_oct, Connection)), + ?assert(maps:is_key(garbage_collection, Connection)), + ?assert(maps:is_key(send_oct_details, Connection)), + ?assert(maps:is_key(reductions, Connection)), + true + end), http_delete(Config, Path, {group, '2xx'}), %% TODO rabbit_reader:shutdown/2 returns before the connection is %% closed. It may not be worth fixing. @@ -927,10 +1004,77 @@ connections_test(Config) -> false end end, - wait_until(Fun, 60), + await_condition(Fun), close_connection(Conn), passed. +%% Test that AMQP 1.0 connection can be listed and closed via the rabbitmq_management plugin. 
+connections_test_amqp(Config) -> + Node = atom_to_binary(rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + User = <<"guest">>, + OpnConf = #{address => ?config(rmq_hostname, Config), + port => Port, + container_id => <<"my container">>, + sasl => {plain, User, <<"guest">>}}, + {ok, C1} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, C1, opened}} -> ok + after 5000 -> ct:fail(opened_timeout) + end, + eventually(?_assertEqual(1, length(http_get(Config, "/connections"))), 1000, 10), + ?assertEqual(1, length(rpc(Config, rabbit_amqp1_0, list_local, []))), + [Connection1] = http_get(Config, "/connections"), + ?assertMatch(#{node := Node, + vhost := <<"/">>, + user := User, + auth_mechanism := <<"PLAIN">>, + protocol := <<"AMQP 1-0">>, + client_properties := #{version := _, + product := <<"AMQP 1.0 client">>, + platform := _}}, + Connection1), + ConnectionName = maps:get(name, Connection1), + http_delete(Config, + "/connections/" ++ binary_to_list(uri_string:quote(ConnectionName)), + ?NO_CONTENT), + receive {amqp10_event, + {connection, C1, + {closed, + {internal_error, + <<"Connection forced: \"Closed via management plugin\"">>}}}} -> ok + after 5000 -> ct:fail(closed_timeout) + end, + eventually(?_assertNot(is_process_alive(C1))), + eventually(?_assertEqual([], http_get(Config, "/connections")), 10, 5), + + {ok, C2} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, C2, opened}} -> ok + after 5000 -> ct:fail(opened_timeout) + end, + eventually(?_assertEqual(1, length(http_get(Config, "/connections"))), 1000, 10), + http_delete(Config, + "/connections/username/guest", + ?NO_CONTENT), + receive {amqp10_event, + {connection, C2, + {closed, + {internal_error, + <<"Connection forced: \"Closed via management plugin\"">>}}}} -> ok + after 5000 -> ct:fail(closed_timeout) + end, + eventually(?_assertNot(is_process_alive(C2))), + eventually(?_assertEqual([], http_get(Config, "/connections")), 10, 5), + ?assertEqual(0, length(rpc(Config, rabbit_amqp1_0, list_local, []))). + +flush(Prefix) -> + receive + Msg -> + ct:pal("~ts flushed: ~p~n", [Prefix, Msg]), + flush(Prefix) + after 1 -> + ok + end. 
+ multiple_invalid_connections_test(Config) -> Count = 100, spawn_invalid(Config, Count), @@ -1031,6 +1175,10 @@ queues_test(Config) -> ?BAD_REQUEST), http_put(Config, "/queues/%2F/baz", Good, {group, '2xx'}), + %% Wait until metrics are emitted and stats collected + ?awaitMatch(true, maps:is_key(storage_version, + http_get(Config, "/queues/%2F/baz")), + 30000), Queues = http_get(Config, "/queues/%2F"), Queue = http_get(Config, "/queues/%2F/foo"), assert_list([#{name => <<"baz">>, @@ -1038,19 +1186,22 @@ queues_test(Config) -> durable => true, auto_delete => false, exclusive => false, - arguments => #{}}, + arguments => #{}, + storage_version => 2}, #{name => <<"foo">>, vhost => <<"/">>, durable => true, auto_delete => false, exclusive => false, - arguments => #{}}], Queues), + arguments => #{}, + storage_version => 2}], Queues), assert_item(#{name => <<"foo">>, vhost => <<"/">>, durable => true, auto_delete => false, exclusive => false, - arguments => #{}}, Queue), + arguments => #{}, + storage_version => 2}, Queue), http_delete(Config, "/queues/%2F/foo", {group, '2xx'}), http_delete(Config, "/queues/%2F/baz", {group, '2xx'}), @@ -1063,7 +1214,7 @@ queues_test(Config) -> quorum_queues_test(Config) -> %% Test in a loop that no metrics are left behing after deleting a queue - quorum_queues_test_loop(Config, 5). + quorum_queues_test_loop(Config, 2). quorum_queues_test_loop(_Config, 0) -> passed; @@ -1081,18 +1232,19 @@ quorum_queues_test_loop(Config, N) -> end, Publish(), Publish(), - wait_until(fun() -> - Num = maps:get(messages, http_get(Config, "/queues/%2f/qq?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"), undefined), - ct:pal("wait_until got ~w", [N]), - 2 == Num - end, 100), + rabbit_ct_helpers:await_condition( + fun() -> + Num = maps:get(messages, http_get(Config, "/queues/%2f/qq?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"), undefined), + 2 == Num + end, ?COLLECT_INTERVAL * 100), http_delete(Config, "/queues/%2f/qq", {group, '2xx'}), http_put(Config, "/queues/%2f/qq", Good, {group, '2xx'}), - wait_until(fun() -> - 0 == maps:get(messages, http_get(Config, "/queues/%2f/qq?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"), undefined) - end, 100), + rabbit_ct_helpers:await_condition( + fun() -> + 0 == maps:get(messages, http_get(Config, "/queues/%2f/qq?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"), undefined) + end, ?COLLECT_INTERVAL * 100), http_delete(Config, "/queues/%2f/qq", {group, '2xx'}), close_connection(Conn), @@ -1103,10 +1255,11 @@ stream_queues_have_consumers_field(Config) -> http_get(Config, "/queues/%2f/sq", ?NOT_FOUND), http_put(Config, "/queues/%2f/sq", Good, {group, '2xx'}), - wait_until(fun() -> - Qs = http_get(Config, "/queues/%2F"), - length(Qs) == 1 andalso maps:is_key(consumers, lists:nth(1, Qs)) - end, 50), + rabbit_ct_helpers:await_condition( + fun() -> + Qs = http_get(Config, "/queues/%2F"), + length(Qs) == 1 andalso maps:is_key(consumers, lists:nth(1, Qs)) + end, ?COLLECT_INTERVAL * 100), Queues = http_get(Config, "/queues/%2F"), assert_list([#{name => <<"sq">>, @@ -1114,7 +1267,6 @@ stream_queues_have_consumers_field(Config) -> consumers => 0}], Queues), - http_delete(Config, "/queues/%2f/sq", {group, '2xx'}), ok. @@ -1370,6 +1522,18 @@ permissions_amqp_test(Config) -> http_delete(Config, "/users/myuser", {group, '2xx'}), passed. 
+permissions_queue_delete_test(Config) -> + QArgs = #{}, + PermArgs = [{configure, <<"foo.*">>}, {write, <<".*">>}, {read, <<".*">>}], + http_put(Config, "/users/myuser", [{password, <<"myuser">>}, + {tags, <<"management">>}], {group, '2xx'}), + http_put(Config, "/permissions/%2F/myuser", PermArgs, {group, '2xx'}), + http_put(Config, "/queues/%2F/bar-queue", QArgs, {group, '2xx'}), + http_delete(Config, "/queues/%2F/bar-queue", "myuser", "myuser", ?NOT_AUTHORISED), + http_delete(Config, "/queues/%2F/bar-queue", {group, '2xx'}), + http_delete(Config, "/users/myuser", {group, '2xx'}), + passed. + %% Opens a new connection and a channel on it. %% The channel is not managed by rabbit_ct_client_helpers and %% should be explicitly closed by the caller. @@ -1415,16 +1579,19 @@ permissions_connection_channel_consumer_test(Config) -> [amqp_channel:subscribe( Ch, #'basic.consume'{queue = <<"test">>}, self()) || Ch <- [Ch1, Ch2, Ch3]], - timer:sleep(1500), AssertLength = fun (Path, User, Len) -> Res = http_get(Config, Path, User, User, ?OK), ?assertEqual(Len, length(Res)) end, - [begin - AssertLength(P, "user", 1), - AssertLength(P, "monitor", 3), - AssertLength(P, "guest", 3) - end || P <- ["/connections", "/channels", "/consumers", "/consumers/%2F"]], + await_condition( + fun () -> + [begin + AssertLength(P, "user", 1), + AssertLength(P, "monitor", 3), + AssertLength(P, "guest", 3) + end || P <- ["/connections", "/channels", "/consumers", "/consumers/%2F"]], + true + end), AssertRead = fun(Path, UserStatus) -> http_get(Config, Path, "user", "user", UserStatus), @@ -1473,12 +1640,15 @@ consumers_test(Config, Args) -> Ch, #'basic.consume'{queue = <<"test">>, no_ack = false, consumer_tag = <<"my-ctag">> }, self()), - timer:sleep(1500), - assert_list([#{exclusive => false, - ack_required => true, - active => true, - activity_status => <<"up">>, - consumer_tag => <<"my-ctag">>}], http_get(Config, "/consumers")), + await_condition( + fun () -> + assert_list([#{exclusive => false, + ack_required => true, + active => true, + activity_status => <<"up">>, + consumer_tag => <<"my-ctag">>}], http_get(Config, "/consumers")), + true + end), amqp_connection:close(Conn), http_delete(Config, "/queues/%2F/test", {group, '2xx'}), passed. 
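The quorum queue checks above scale their polling timeout from ?COLLECT_INTERVAL rather than hard-coding a sleep; a hypothetical helper capturing that pattern (the helper name and Path argument are assumptions, not part of the suite):

%% Illustration only: wait until the queue at Path reports Expected messages,
%% giving the stats collectors a generous multiple of the collection interval,
%% as quorum_queues_test_loop/2 does above.
await_message_count_example(Config, Path, Expected) ->
    rabbit_ct_helpers:await_condition(
      fun () ->
              Expected =:= maps:get(messages, http_get(Config, Path), undefined)
      end, ?COLLECT_INTERVAL * 100).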
@@ -1510,24 +1680,30 @@ single_active_consumer(Config, Url, QName, Args) -> Ch2, #'basic.consume'{queue = QName, no_ack = true, consumer_tag = <<"2">> }, self()), - timer:sleep(1500), - assert_list([#{exclusive => false, - ack_required => false, - active => true, - activity_status => <<"single_active">>, - consumer_tag => <<"1">>}, - #{exclusive => false, - ack_required => false, - active => false, - activity_status => <<"waiting">>, - consumer_tag => <<"2">>}], http_get(Config, "/consumers")), + await_condition( + fun () -> + assert_list([#{exclusive => false, + ack_required => false, + active => true, + activity_status => <<"single_active">>, + consumer_tag => <<"1">>}, + #{exclusive => false, + ack_required => false, + active => false, + activity_status => <<"waiting">>, + consumer_tag => <<"2">>}], http_get(Config, "/consumers")), + true + end), amqp_channel:close(Ch), - timer:sleep(1500), - assert_list([#{exclusive => false, - ack_required => false, - active => true, - activity_status => <<"single_active">>, - consumer_tag => <<"2">>}], http_get(Config, "/consumers")), + await_condition( + fun () -> + assert_list([#{exclusive => false, + ack_required => false, + active => true, + activity_status => <<"single_active">>, + consumer_tag => <<"2">>}], http_get(Config, "/consumers")), + true + end), amqp_connection:close(Conn), http_delete(Config, Url, {group, '2xx'}), passed. @@ -1592,12 +1768,12 @@ defs(Config, Key, URI, CreateMethod, Args, DeleteFun0, DeleteFun1) -> passed. register_parameters_and_policy_validator(Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, register, []), - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, register_policy_validator, []). + rpc(Config, rabbit_mgmt_runtime_parameters_util, register, []), + rpc(Config, rabbit_mgmt_runtime_parameters_util, register_policy_validator, []). unregister_parameters_and_policy_validator(Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, unregister_policy_validator, []), - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, unregister, []). + rpc(Config, rabbit_mgmt_runtime_parameters_util, unregister_policy_validator, []), + rpc(Config, rabbit_mgmt_runtime_parameters_util, unregister, []). definitions_test(Config) -> register_parameters_and_policy_validator(Config), @@ -1707,25 +1883,67 @@ long_definitions_vhosts(long_definitions_multipart_test) -> [#{name => <<"long_definitions_test-", Bin/binary, (integer_to_binary(N))/binary>>} || N <- lists:seq(1, 16)]. 
-defs_default_queue_type_vhost(Config, QueueType) -> + defs_default_queue_type_vhost(Config, QueueType) -> register_parameters_and_policy_validator(Config), %% Create a test vhost - http_put(Config, "/vhosts/test-vhost", #{default_queue_type => QueueType}, {group, '2xx'}), + http_put(Config, "/vhosts/definitions-dqt-vhost-test-vhost", #{default_queue_type => QueueType}, {group, '2xx'}), PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}], - http_put(Config, "/permissions/test-vhost/guest", PermArgs, {group, '2xx'}), + http_put(Config, "/permissions/definitions-dqt-vhost-test-vhost/guest", PermArgs, {group, '2xx'}), %% Import queue definition without an explicit queue type - http_post(Config, "/definitions/test-vhost", - #{queues => [#{name => <<"test-queue">>, durable => true}]}, - {group, '2xx'}), + http_post(Config, "/definitions/definitions-dqt-vhost-test-vhost", + #{queues => [#{name => <<"test-queue">>, durable => true}]}, + {group, '2xx'}), %% And check whether it was indeed created with the default type - Q = http_get(Config, "/queues/test-vhost/test-queue", ?OK), + Q = http_get(Config, "/queues/definitions-dqt-vhost-test-vhost/test-queue", ?OK), ?assertEqual(QueueType, maps:get(type, Q)), %% Remove the test vhost - http_delete(Config, "/vhosts/test-vhost", {group, '2xx'}), + http_delete(Config, "/vhosts/definitions-dqt-vhost-test-vhost", {group, '2xx'}), + ok. + +definitions_vhost_metadata_test(Config) -> + register_parameters_and_policy_validator(Config), + + VHostName = <<"definitions-vhost-metadata-test">>, + Desc = <<"Created by definitions_vhost_metadata_test">>, + DQT = <<"quorum">>, + Tags = [<<"one">>, <<"tag-two">>], + Metadata = #{ + description => Desc, + default_queue_type => DQT, + tags => Tags + }, + + %% Create a test vhost + http_put(Config, "/vhosts/definitions-vhost-metadata-test", Metadata, {group, '2xx'}), + PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}], + http_put(Config, "/permissions/definitions-vhost-metadata-test/guest", PermArgs, {group, '2xx'}), + + %% Get the definitions + Definitions = http_get(Config, "/definitions", ?OK), + + %% Check if vhost definition is correct + VHosts = maps:get(vhosts, Definitions), + {value, VH} = lists:search(fun(VH) -> + maps:get(name, VH) =:= VHostName + end, VHosts), + ct:pal("VHost: ~p", [VH]), + ?assertEqual(#{ + name => VHostName, + description => Desc, + default_queue_type => DQT, + tags => Tags, + metadata => Metadata + }, VH), + + %% Post the definitions back + http_post(Config, "/definitions", Definitions, {group, '2xx'}), + + %% Remove the test vhost + http_delete(Config, "/vhosts/definitions-vhost-metadata-test", {group, '2xx'}), ok. 
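For reference, a sketch of the definitions payload shape that definitions_vhost_metadata_test asserts and re-imports; the vhost name and values below are made up for illustration.

%% Illustrative only: a vhost entry in an exported definitions document
%% carries its metadata both as top-level keys and under the metadata map,
%% mirroring the assertion in definitions_vhost_metadata_test/1 above.
example_vhost_definitions() ->
    Desc = <<"An example vhost">>,
    DQT = <<"quorum">>,
    Tags = [<<"one">>, <<"tag-two">>],
    #{vhosts =>
          [#{name => <<"example-vhost">>,
             description => Desc,
             default_queue_type => DQT,
             tags => Tags,
             metadata => #{description => Desc,
                           default_queue_type => DQT,
                           tags => Tags}}]}.

Posting such a document back is the same call the test makes: http_post(Config, "/definitions", Definitions, {group, '2xx'}).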
definitions_default_queue_type_test(Config) -> @@ -1735,21 +1953,21 @@ definitions_default_queue_type_test(Config) -> defs_vhost(Config, Key, URI, CreateMethod, Args) -> Rep1 = fun (S, S2) -> re:replace(S, "", S2, [{return, list}]) end, - %% Create test vhost - http_put(Config, "/vhosts/test", none, {group, '2xx'}), + %% Create a vhost host + http_put(Config, "/vhosts/defs-vhost-1298379187", none, {group, '2xx'}), PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}], - http_put(Config, "/permissions/test/guest", PermArgs, {group, '2xx'}), + http_put(Config, "/permissions/defs-vhost-1298379187/guest", PermArgs, {group, '2xx'}), - %% Test against default vhost - defs_vhost(Config, Key, URI, Rep1, "%2F", "test", CreateMethod, Args, - fun(URI2) -> http_delete(Config, URI2, {group, '2xx'}) end), + %% Test against the default vhost + defs_vhost(Config, Key, URI, Rep1, "%2F", "defs-vhost-1298379187", CreateMethod, Args, + fun(URI2) -> http_delete(Config, URI2, {group, '2xx'}) end), - %% Test against test vhost - defs_vhost(Config, Key, URI, Rep1, "test", "%2F", CreateMethod, Args, - fun(URI2) -> http_delete(Config, URI2, {group, '2xx'}) end), + %% Test against the newly created vhost + defs_vhost(Config, Key, URI, Rep1, "defs-vhost-1298379187", "%2F", CreateMethod, Args, + fun(URI2) -> http_delete(Config, URI2, {group, '2xx'}) end), - %% Remove test vhost - http_delete(Config, "/vhosts/test", {group, '2xx'}). + %% Remove the newly created vhost + http_delete(Config, "/vhosts/defs-vhost-1298379187", {group, '2xx'}). defs_vhost(Config, Key, URI0, Rep1, VHost1, VHost2, CreateMethod, Args, DeleteFun) -> @@ -1793,24 +2011,24 @@ definitions_vhost_test(Config) -> register_parameters_and_policy_validator(Config), - defs_vhost(Config, queues, "/queues//my-queue", put, - #{name => <<"my-queue">>, - durable => true}), - defs_vhost(Config, exchanges, "/exchanges//my-exchange", put, - #{name => <<"my-exchange">>, - type => <<"direct">>}), + defs_vhost(Config, queues, "/queues//definitions-vhost-test-imported-q", put, + #{name => <<"definitions-vhost-test-imported-q">>, + durable => true}), + defs_vhost(Config, exchanges, "/exchanges//definitions-vhost-test-imported-dx", put, + #{name => <<"definitions-vhost-test-imported-dx">>, + type => <<"direct">>}), defs_vhost(Config, bindings, "/bindings//e/amq.direct/e/amq.fanout", post, - #{routing_key => <<"routing">>, arguments => #{}}), - defs_vhost(Config, policies, "/policies//my-policy", put, - #{name => <<"my-policy">>, - pattern => <<".*">>, - definition => #{testpos => [1, 2, 3]}, - priority => 1}), + #{routing_key => <<"routing">>, arguments => #{}}), + defs_vhost(Config, policies, "/policies//definitions-vhost-test-policy", put, + #{name => <<"definitions-vhost-test-policy">>, + pattern => <<".*">>, + definition => #{testpos => [1, 2, 3]}, + priority => 1}), defs_vhost(Config, parameters, "/parameters/vhost-limits//limits", put, - #{name => <<"limits">>, - component => <<"vhost-limits">>, - value => #{ 'max-connections' => 100 }}), + #{name => <<"limits">>, + component => <<"vhost-limits">>, + value => #{ 'max-connections' => 100 }}), Upload = #{queues => [], exchanges => [], @@ -1861,9 +2079,7 @@ definitions_password_test(Config) -> password_hash => <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>, tags => <<"management">>} ]}, - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbit, - password_hashing_module, - rabbit_password_hashing_sha512]), + rpc(Config, application, set_env, [rabbit, password_hashing_module, 
rabbit_password_hashing_sha512]), ExpectedDefault = #{name => <<"myuser">>, password_hash => <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>, @@ -1917,31 +2133,22 @@ definitions_with_charset_test(Config) -> {ok, {{_, ?NO_CONTENT, _}, _, []}} = httpc:request(post, Request, ?HTTPC_OPTS, []), passed. -aliveness_test(Config) -> - #{status := <<"ok">>} = http_get(Config, "/aliveness-test/%2F", ?OK), - http_get(Config, "/aliveness-test/foo", ?NOT_FOUND), - http_delete(Config, "/queues/%2F/aliveness-test", {group, '2xx'}), - passed. - arguments_test(Config) -> XArgs = [{type, <<"headers">>}, - {arguments, [{'alternate-exchange', <<"amq.direct">>}]}], + {arguments, [{'alternate-exchange', <<"amq.direct">>}]}], QArgs = [{arguments, [{'x-expires', 1800000}]}], BArgs = [{routing_key, <<"">>}, - {arguments, [{'x-match', <<"all">>}, - {foo, <<"bar">>}]}], - http_delete(Config, "/exchanges/%2F/myexchange", {one_of, [201, 404]}), - http_put(Config, "/exchanges/%2F/myexchange", XArgs, {group, '2xx'}), - http_put(Config, "/queues/%2F/arguments_test", QArgs, {group, '2xx'}), - http_post(Config, "/bindings/%2F/e/myexchange/q/arguments_test", BArgs, {group, '2xx'}), - Definitions = http_get(Config, "/definitions", ?OK), - http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}), - http_delete(Config, "/queues/%2F/arguments_test", {group, '2xx'}), - http_post(Config, "/definitions", Definitions, {group, '2xx'}), + {arguments, [{'x-match', <<"all">>}, + {foo, <<"bar">>}]}], + http_delete(Config, "/exchanges/%2F/arguments-test-x", {one_of, [201, 404]}), + http_put(Config, "/exchanges/%2F/arguments-test-x", XArgs, {group, '2xx'}), + http_put(Config, "/queues/%2F/arguments-test", QArgs, {group, '2xx'}), + http_post(Config, "/bindings/%2F/e/arguments-test-x/q/arguments-test", BArgs, {group, '2xx'}), + #{'alternate-exchange' := <<"amq.direct">>} = - maps:get(arguments, http_get(Config, "/exchanges/%2F/myexchange", ?OK)), + maps:get(arguments, http_get(Config, "/exchanges/%2F/arguments-test-x", ?OK)), #{'x-expires' := 1800000} = - maps:get(arguments, http_get(Config, "/queues/%2F/arguments_test", ?OK)), + maps:get(arguments, http_get(Config, "/queues/%2F/arguments-test", ?OK)), ArgsTable = [{<<"foo">>,longstr,<<"bar">>}, {<<"x-match">>, longstr, <<"all">>}], Hash = table_hash(ArgsTable), @@ -1950,11 +2157,11 @@ arguments_test(Config) -> assert_item( #{'x-match' => <<"all">>, foo => <<"bar">>}, maps:get(arguments, - http_get(Config, "/bindings/%2F/e/myexchange/q/arguments_test/" ++ + http_get(Config, "/bindings/%2F/e/arguments-test-x/q/arguments-test/" ++ PropertiesKey, ?OK)) ), - http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}), - http_delete(Config, "/queues/%2F/arguments_test", {group, '2xx'}), + http_delete(Config, "/exchanges/%2F/arguments-test-x", {group, '2xx'}), + http_delete(Config, "/queues/%2F/arguments-test", {group, '2xx'}), passed. 
table_hash(Table) -> @@ -1962,16 +2169,13 @@ table_hash(Table) -> arguments_table_test(Config) -> Args = #{'upstreams' => [<<"amqp://localhost/%2F/upstream1">>, - <<"amqp://localhost/%2F/upstream2">>]}, + <<"amqp://localhost/%2F/upstream2">>]}, XArgs = #{type => <<"headers">>, - arguments => Args}, - http_delete(Config, "/exchanges/%2F/myexchange", {one_of, [201, 404]}), - http_put(Config, "/exchanges/%2F/myexchange", XArgs, {group, '2xx'}), - Definitions = http_get(Config, "/definitions", ?OK), - http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}), - http_post(Config, "/definitions", Definitions, {group, '2xx'}), - Args = maps:get(arguments, http_get(Config, "/exchanges/%2F/myexchange", ?OK)), - http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}), + arguments => Args}, + http_delete(Config, "/exchanges/%2F/arguments-table-test-x", {one_of, [201, 404]}), + http_put(Config, "/exchanges/%2F/arguments-table-test-x", XArgs, {group, '2xx'}), + Args = maps:get(arguments, http_get(Config, "/exchanges/%2F/arguments-table-test-x", ?OK)), + http_delete(Config, "/exchanges/%2F/arguments-table-test-x", {group, '2xx'}), passed. queue_purge_test(Config) -> @@ -2003,8 +2207,6 @@ queue_purge_test(Config) -> queue_actions_test(Config) -> http_put(Config, "/queues/%2F/q", #{}, {group, '2xx'}), - http_post(Config, "/queues/%2F/q/actions", [{action, sync}], {group, '2xx'}), - http_post(Config, "/queues/%2F/q/actions", [{action, cancel_sync}], {group, '2xx'}), http_post(Config, "/queues/%2F/q/actions", [{action, change_colour}], ?BAD_REQUEST), http_delete(Config, "/queues/%2F/q", {group, '2xx'}), passed. @@ -2015,8 +2217,11 @@ exclusive_consumer_test(Config) -> amqp_channel:call(Ch, #'queue.declare'{exclusive = true}), amqp_channel:subscribe(Ch, #'basic.consume'{queue = QName, exclusive = true}, self()), - timer:sleep(1500), %% Sadly we need to sleep to let the stats update - http_get(Config, "/queues/%2F/"), %% Just check we don't blow up + await_condition( + fun () -> + http_get(Config, "/queues/%2F/"), %% Just check we don't blow up + true + end), close_channel(Ch), close_connection(Conn), passed. @@ -2026,15 +2231,18 @@ exclusive_queue_test(Config) -> {Conn, Ch} = open_connection_and_channel(Config), #'queue.declare_ok'{ queue = QName } = amqp_channel:call(Ch, #'queue.declare'{exclusive = true}), - timer:sleep(1500), %% Sadly we need to sleep to let the stats update Path = "/queues/%2F/" ++ rabbit_http_util:quote_plus(QName), - Queue = http_get(Config, Path), - assert_item(#{name => QName, - vhost => <<"/">>, - durable => false, - auto_delete => false, - exclusive => true, - arguments => #{}}, Queue), + await_condition( + fun () -> + Queue = http_get(Config, Path), + assert_item(#{name => QName, + vhost => <<"/">>, + durable => false, + auto_delete => false, + exclusive => true, + arguments => #{}}, Queue), + true + end), amqp_channel:close(Ch), close_connection(Conn), passed. 
@@ -2049,24 +2257,26 @@ connections_channels_pagination_test(Config) -> Conn2 = open_unmanaged_connection(Config), {ok, Ch2} = amqp_connection:open_channel(Conn2), - %% for stats to update - timer:sleep(1500), - PageOfTwo = http_get(Config, "/connections?page=1&page_size=2", ?OK), - ?assertEqual(3, maps:get(total_count, PageOfTwo)), - ?assertEqual(3, maps:get(filtered_count, PageOfTwo)), - ?assertEqual(2, maps:get(item_count, PageOfTwo)), - ?assertEqual(1, maps:get(page, PageOfTwo)), - ?assertEqual(2, maps:get(page_size, PageOfTwo)), - ?assertEqual(2, maps:get(page_count, PageOfTwo)), - - - TwoOfTwo = http_get(Config, "/channels?page=2&page_size=2", ?OK), - ?assertEqual(3, maps:get(total_count, TwoOfTwo)), - ?assertEqual(3, maps:get(filtered_count, TwoOfTwo)), - ?assertEqual(1, maps:get(item_count, TwoOfTwo)), - ?assertEqual(2, maps:get(page, TwoOfTwo)), - ?assertEqual(2, maps:get(page_size, TwoOfTwo)), - ?assertEqual(2, maps:get(page_count, TwoOfTwo)), + await_condition( + fun () -> + PageOfTwo = http_get(Config, "/connections?page=1&page_size=2", ?OK), + ?assertEqual(3, maps:get(total_count, PageOfTwo)), + ?assertEqual(3, maps:get(filtered_count, PageOfTwo)), + ?assertEqual(2, maps:get(item_count, PageOfTwo)), + ?assertEqual(1, maps:get(page, PageOfTwo)), + ?assertEqual(2, maps:get(page_size, PageOfTwo)), + ?assertEqual(2, maps:get(page_count, PageOfTwo)), + + + TwoOfTwo = http_get(Config, "/channels?page=2&page_size=2", ?OK), + ?assertEqual(3, maps:get(total_count, TwoOfTwo)), + ?assertEqual(3, maps:get(filtered_count, TwoOfTwo)), + ?assertEqual(1, maps:get(item_count, TwoOfTwo)), + ?assertEqual(2, maps:get(page, TwoOfTwo)), + ?assertEqual(2, maps:get(page_size, TwoOfTwo)), + ?assertEqual(2, maps:get(page_count, TwoOfTwo)), + true + end), amqp_channel:close(Ch), amqp_connection:close(Conn), @@ -2088,45 +2298,45 @@ exchanges_pagination_test(Config) -> http_put(Config, "/exchanges/%2F/test2_reg", QArgs, {group, '2xx'}), http_put(Config, "/exchanges/vh1/reg_test3", QArgs, {group, '2xx'}), - %% for stats to update - timer:sleep(1500), - - Total = length(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list_names, [])), - - PageOfTwo = http_get(Config, "/exchanges?page=1&page_size=2", ?OK), - ?assertEqual(Total, maps:get(total_count, PageOfTwo)), - ?assertEqual(Total, maps:get(filtered_count, PageOfTwo)), - ?assertEqual(2, maps:get(item_count, PageOfTwo)), - ?assertEqual(1, maps:get(page, PageOfTwo)), - ?assertEqual(2, maps:get(page_size, PageOfTwo)), - ?assertEqual(round(Total / 2), maps:get(page_count, PageOfTwo)), - assert_list([#{name => <<"">>, vhost => <<"/">>}, - #{name => <<"amq.direct">>, vhost => <<"/">>} - ], maps:get(items, PageOfTwo)), - - ByName = http_get(Config, "/exchanges?page=1&page_size=2&name=reg", ?OK), - ?assertEqual(Total, maps:get(total_count, ByName)), - ?assertEqual(2, maps:get(filtered_count, ByName)), - ?assertEqual(2, maps:get(item_count, ByName)), - ?assertEqual(1, maps:get(page, ByName)), - ?assertEqual(2, maps:get(page_size, ByName)), - ?assertEqual(1, maps:get(page_count, ByName)), - assert_list([#{name => <<"test2_reg">>, vhost => <<"/">>}, - #{name => <<"reg_test3">>, vhost => <<"vh1">>} - ], maps:get(items, ByName)), - - - RegExByName = http_get(Config, - "/exchanges?page=1&page_size=2&name=%5E(?=%5Ereg)&use_regex=true", - ?OK), - ?assertEqual(Total, maps:get(total_count, RegExByName)), - ?assertEqual(1, maps:get(filtered_count, RegExByName)), - ?assertEqual(1, maps:get(item_count, RegExByName)), - ?assertEqual(1, maps:get(page, RegExByName)), - 
?assertEqual(2, maps:get(page_size, RegExByName)), - ?assertEqual(1, maps:get(page_count, RegExByName)), - assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>} - ], maps:get(items, RegExByName)), + Total = length(rpc(Config, rabbit_exchange, list_names, [])), + await_condition( + fun () -> + PageOfTwo = http_get(Config, "/exchanges?page=1&page_size=2", ?OK), + ?assertEqual(Total, maps:get(total_count, PageOfTwo)), + ?assertEqual(Total, maps:get(filtered_count, PageOfTwo)), + ?assertEqual(2, maps:get(item_count, PageOfTwo)), + ?assertEqual(1, maps:get(page, PageOfTwo)), + ?assertEqual(2, maps:get(page_size, PageOfTwo)), + ?assertEqual(round(Total / 2), maps:get(page_count, PageOfTwo)), + assert_list([#{name => <<"">>, vhost => <<"/">>}, + #{name => <<"amq.direct">>, vhost => <<"/">>} + ], maps:get(items, PageOfTwo)), + + ByName = http_get(Config, "/exchanges?page=1&page_size=2&name=reg", ?OK), + ?assertEqual(Total, maps:get(total_count, ByName)), + ?assertEqual(2, maps:get(filtered_count, ByName)), + ?assertEqual(2, maps:get(item_count, ByName)), + ?assertEqual(1, maps:get(page, ByName)), + ?assertEqual(2, maps:get(page_size, ByName)), + ?assertEqual(1, maps:get(page_count, ByName)), + assert_list([#{name => <<"test2_reg">>, vhost => <<"/">>}, + #{name => <<"reg_test3">>, vhost => <<"vh1">>} + ], maps:get(items, ByName)), + + + RegExByName = http_get(Config, + "/exchanges?page=1&page_size=2&name=%5E(?=%5Ereg)&use_regex=true", + ?OK), + ?assertEqual(Total, maps:get(total_count, RegExByName)), + ?assertEqual(1, maps:get(filtered_count, RegExByName)), + ?assertEqual(1, maps:get(item_count, RegExByName)), + ?assertEqual(1, maps:get(page, RegExByName)), + ?assertEqual(2, maps:get(page_size, RegExByName)), + ?assertEqual(1, maps:get(page_count, RegExByName)), + assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>} + ], maps:get(items, RegExByName)), + true + end), http_get(Config, "/exchanges?page=1000", ?BAD_REQUEST), @@ -2158,18 +2368,19 @@ exchanges_pagination_permissions_test(Config) -> http_put(Config, "/exchanges/%2F/test0", QArgs, "admin", "admin", {group, '2xx'}), http_put(Config, "/exchanges/vh1/test1", QArgs, "non-admin", "non-admin", {group, '2xx'}), - %% for stats to update - timer:sleep(1500), - - FirstPage = http_get(Config, "/exchanges?page=1&name=test1", "non-admin", "non-admin", ?OK), - - ?assertEqual(8, maps:get(total_count, FirstPage)), - ?assertEqual(1, maps:get(item_count, FirstPage)), - ?assertEqual(1, maps:get(page, FirstPage)), - ?assertEqual(100, maps:get(page_size, FirstPage)), - ?assertEqual(1, maps:get(page_count, FirstPage)), - assert_list([#{name => <<"test1">>, vhost => <<"vh1">>} - ], maps:get(items, FirstPage)), + await_condition( + fun () -> + FirstPage = http_get(Config, "/exchanges?page=1&name=test1", "non-admin", "non-admin", ?OK), + + ?assertEqual(8, maps:get(total_count, FirstPage)), + ?assertEqual(1, maps:get(item_count, FirstPage)), + ?assertEqual(1, maps:get(page, FirstPage)), + ?assertEqual(100, maps:get(page_size, FirstPage)), + ?assertEqual(1, maps:get(page_count, FirstPage)), + assert_list([#{name => <<"test1">>, vhost => <<"vh1">>} + ], maps:get(items, FirstPage)), + true + end), http_delete(Config, "/exchanges/%2F/test0", {group, '2xx'}), http_delete(Config, "/exchanges/vh1/test1", {group, '2xx'}), http_delete(Config, "/users/admin", {group, '2xx'}), @@ -2181,94 +2392,106 @@ exchanges_pagination_permissions_test(Config) -> queue_pagination_test(Config) -> QArgs = #{}, PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, 
<<".*">>}], - http_put(Config, "/vhosts/vh1", none, {group, '2xx'}), - http_put(Config, "/permissions/vh1/guest", PermArgs, {group, '2xx'}), + http_put(Config, "/vhosts/vh.tests.queue_pagination_test", none, {group, '2xx'}), + http_put(Config, "/permissions/vh.tests.queue_pagination_test/guest", PermArgs, {group, '2xx'}), - http_get(Config, "/queues/vh1?page=1&page_size=2", ?OK), + http_get(Config, "/queues/vh.tests.queue_pagination_test?page=1&page_size=2", ?OK), http_put(Config, "/queues/%2F/test0", QArgs, {group, '2xx'}), - http_put(Config, "/queues/vh1/test1", QArgs, {group, '2xx'}), + http_put(Config, "/queues/vh.tests.queue_pagination_test/test1", QArgs, {group, '2xx'}), http_put(Config, "/queues/%2F/test2_reg", QArgs, {group, '2xx'}), - http_put(Config, "/queues/vh1/reg_test3", QArgs, {group, '2xx'}), - - %% for stats to update - timer:sleep(1500), - - Total = length(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, list_names, [])), - - PageOfTwo = http_get(Config, "/queues?page=1&page_size=2", ?OK), - ?assertEqual(Total, maps:get(total_count, PageOfTwo)), - ?assertEqual(Total, maps:get(filtered_count, PageOfTwo)), - ?assertEqual(2, maps:get(item_count, PageOfTwo)), - ?assertEqual(1, maps:get(page, PageOfTwo)), - ?assertEqual(2, maps:get(page_size, PageOfTwo)), - ?assertEqual(2, maps:get(page_count, PageOfTwo)), - assert_list([#{name => <<"test0">>, vhost => <<"/">>}, - #{name => <<"test2_reg">>, vhost => <<"/">>} - ], maps:get(items, PageOfTwo)), - - SortedByName = http_get(Config, "/queues?sort=name&page=1&page_size=2", ?OK), - ?assertEqual(Total, maps:get(total_count, SortedByName)), - ?assertEqual(Total, maps:get(filtered_count, SortedByName)), - ?assertEqual(2, maps:get(item_count, SortedByName)), - ?assertEqual(1, maps:get(page, SortedByName)), - ?assertEqual(2, maps:get(page_size, SortedByName)), - ?assertEqual(2, maps:get(page_count, SortedByName)), - assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>}, - #{name => <<"test0">>, vhost => <<"/">>} - ], maps:get(items, SortedByName)), - - - FirstPage = http_get(Config, "/queues?page=1", ?OK), - ?assertEqual(Total, maps:get(total_count, FirstPage)), - ?assertEqual(Total, maps:get(filtered_count, FirstPage)), - ?assertEqual(4, maps:get(item_count, FirstPage)), - ?assertEqual(1, maps:get(page, FirstPage)), - ?assertEqual(100, maps:get(page_size, FirstPage)), - ?assertEqual(1, maps:get(page_count, FirstPage)), - assert_list([#{name => <<"test0">>, vhost => <<"/">>}, - #{name => <<"test1">>, vhost => <<"vh1">>}, - #{name => <<"test2_reg">>, vhost => <<"/">>}, - #{name => <<"reg_test3">>, vhost =><<"vh1">>} - ], maps:get(items, FirstPage)), - - - ReverseSortedByName = http_get(Config, - "/queues?page=2&page_size=2&sort=name&sort_reverse=true", - ?OK), - ?assertEqual(Total, maps:get(total_count, ReverseSortedByName)), - ?assertEqual(Total, maps:get(filtered_count, ReverseSortedByName)), - ?assertEqual(2, maps:get(item_count, ReverseSortedByName)), - ?assertEqual(2, maps:get(page, ReverseSortedByName)), - ?assertEqual(2, maps:get(page_size, ReverseSortedByName)), - ?assertEqual(2, maps:get(page_count, ReverseSortedByName)), - assert_list([#{name => <<"test0">>, vhost => <<"/">>}, - #{name => <<"reg_test3">>, vhost => <<"vh1">>} - ], maps:get(items, ReverseSortedByName)), - - - ByName = http_get(Config, "/queues?page=1&page_size=2&name=reg", ?OK), - ?assertEqual(Total, maps:get(total_count, ByName)), - ?assertEqual(2, maps:get(filtered_count, ByName)), - ?assertEqual(2, maps:get(item_count, ByName)), - ?assertEqual(1, 
maps:get(page, ByName)), - ?assertEqual(2, maps:get(page_size, ByName)), - ?assertEqual(1, maps:get(page_count, ByName)), - assert_list([#{name => <<"test2_reg">>, vhost => <<"/">>}, - #{name => <<"reg_test3">>, vhost => <<"vh1">>} - ], maps:get(items, ByName)), - - RegExByName = http_get(Config, - "/queues?page=1&page_size=2&name=%5E(?=%5Ereg)&use_regex=true", - ?OK), - ?assertEqual(Total, maps:get(total_count, RegExByName)), - ?assertEqual(1, maps:get(filtered_count, RegExByName)), - ?assertEqual(1, maps:get(item_count, RegExByName)), - ?assertEqual(1, maps:get(page, RegExByName)), - ?assertEqual(2, maps:get(page_size, RegExByName)), - ?assertEqual(1, maps:get(page_count, RegExByName)), - assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>} - ], maps:get(items, RegExByName)), + http_put(Config, "/queues/vh.tests.queue_pagination_test/reg_test3", QArgs, {group, '2xx'}), + + ?AWAIT( + begin + Total = length(rpc(Config, rabbit_amqqueue, list_names, [])), + + PageOfTwo = http_get(Config, "/queues?page=1&page_size=2", ?OK), + ?assertEqual(Total, maps:get(total_count, PageOfTwo)), + ?assertEqual(Total, maps:get(filtered_count, PageOfTwo)), + ?assertEqual(2, maps:get(item_count, PageOfTwo)), + ?assertEqual(1, maps:get(page, PageOfTwo)), + ?assertEqual(2, maps:get(page_size, PageOfTwo)), + ?assertEqual(2, maps:get(page_count, PageOfTwo)), + assert_list([#{name => <<"test0">>, vhost => <<"/">>, storage_version => 2}, + #{name => <<"test2_reg">>, vhost => <<"/">>, storage_version => 2} + ], maps:get(items, PageOfTwo)), + + SortedByName = http_get(Config, "/queues?sort=name&page=1&page_size=2", ?OK), + ?assertEqual(Total, maps:get(total_count, SortedByName)), + ?assertEqual(Total, maps:get(filtered_count, SortedByName)), + ?assertEqual(2, maps:get(item_count, SortedByName)), + ?assertEqual(1, maps:get(page, SortedByName)), + ?assertEqual(2, maps:get(page_size, SortedByName)), + ?assertEqual(2, maps:get(page_count, SortedByName)), + assert_list([#{name => <<"reg_test3">>, vhost => <<"vh.tests.queue_pagination_test">>}, + #{name => <<"test0">>, vhost => <<"/">>} + ], maps:get(items, SortedByName)), + + + FirstPage = http_get(Config, "/queues?page=1", ?OK), + ?assertEqual(Total, maps:get(total_count, FirstPage)), + ?assertEqual(Total, maps:get(filtered_count, FirstPage)), + ?assertEqual(4, maps:get(item_count, FirstPage)), + ?assertEqual(1, maps:get(page, FirstPage)), + ?assertEqual(100, maps:get(page_size, FirstPage)), + ?assertEqual(1, maps:get(page_count, FirstPage)), + assert_list([#{name => <<"test0">>, vhost => <<"/">>}, + #{name => <<"test1">>, vhost => <<"vh.tests.queue_pagination_test">>}, + #{name => <<"test2_reg">>, vhost => <<"/">>}, + #{name => <<"reg_test3">>, vhost =><<"vh.tests.queue_pagination_test">>} + ], maps:get(items, FirstPage)), + %% The reduced API version just has the most useful fields. 
+ %% garbage_collection is not one of them + IsEnabled = rabbit_ct_broker_helpers:is_feature_flag_enabled( + Config, detailed_queues_endpoint), + case IsEnabled of + true -> + [?assertNot(maps:is_key(garbage_collection, Item)) || + Item <- maps:get(items, FirstPage)]; + false -> + [?assert(maps:is_key(garbage_collection, Item)) || + Item <- maps:get(items, FirstPage)] + end, + ReverseSortedByName = http_get(Config, + "/queues?page=2&page_size=2&sort=name&sort_reverse=true", + ?OK), + ?assertEqual(Total, maps:get(total_count, ReverseSortedByName)), + ?assertEqual(Total, maps:get(filtered_count, ReverseSortedByName)), + ?assertEqual(2, maps:get(item_count, ReverseSortedByName)), + ?assertEqual(2, maps:get(page, ReverseSortedByName)), + ?assertEqual(2, maps:get(page_size, ReverseSortedByName)), + ?assertEqual(2, maps:get(page_count, ReverseSortedByName)), + assert_list([#{name => <<"test0">>, vhost => <<"/">>}, + #{name => <<"reg_test3">>, vhost => <<"vh.tests.queue_pagination_test">>} + ], maps:get(items, ReverseSortedByName)), + + + ByName = http_get(Config, "/queues?page=1&page_size=2&name=reg", ?OK), + ?assertEqual(Total, maps:get(total_count, ByName)), + ?assertEqual(2, maps:get(filtered_count, ByName)), + ?assertEqual(2, maps:get(item_count, ByName)), + ?assertEqual(1, maps:get(page, ByName)), + ?assertEqual(2, maps:get(page_size, ByName)), + ?assertEqual(1, maps:get(page_count, ByName)), + assert_list([#{name => <<"test2_reg">>, vhost => <<"/">>}, + #{name => <<"reg_test3">>, vhost => <<"vh.tests.queue_pagination_test">>} + ], maps:get(items, ByName)), + + RegExByName = http_get(Config, + "/queues?page=1&page_size=2&name=%5E(?=%5Ereg)&use_regex=true", + ?OK), + ?assertEqual(Total, maps:get(total_count, RegExByName)), + ?assertEqual(1, maps:get(filtered_count, RegExByName)), + ?assertEqual(1, maps:get(item_count, RegExByName)), + ?assertEqual(1, maps:get(page, RegExByName)), + ?assertEqual(2, maps:get(page_size, RegExByName)), + ?assertEqual(1, maps:get(page_count, RegExByName)), + assert_list([#{name => <<"reg_test3">>, vhost => <<"vh.tests.queue_pagination_test">>} + ], maps:get(items, RegExByName)), + true + end + ), http_get(Config, "/queues?page=1000", ?BAD_REQUEST), @@ -2278,23 +2501,23 @@ queue_pagination_test(Config) -> http_get(Config, "/queues?page=1&page_size=501", ?BAD_REQUEST), %% max 500 allowed http_get(Config, "/queues?page=-1&page_size=-2", ?BAD_REQUEST), http_delete(Config, "/queues/%2F/test0", {group, '2xx'}), - http_delete(Config, "/queues/vh1/test1", {group, '2xx'}), + http_delete(Config, "/queues/vh.tests.queue_pagination_test/test1", {group, '2xx'}), http_delete(Config, "/queues/%2F/test2_reg", {group, '2xx'}), - http_delete(Config, "/queues/vh1/reg_test3", {group, '2xx'}), - http_delete(Config, "/vhosts/vh1", {group, '2xx'}), + http_delete(Config, "/queues/vh.tests.queue_pagination_test/reg_test3", {group, '2xx'}), + http_delete(Config, "/vhosts/vh.tests.queue_pagination_test", {group, '2xx'}), passed. 
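The ?AWAIT/1 macro wrapped around the pagination assertions above is not defined in this part of rabbit_mgmt_http_SUITE. Judging by the identical macro added to rabbit_mgmt_test_db_SUITE later in this patch, a plausible definition is a thin wrapper over the suite's await_condition/1 helper:

    -define(AWAIT(Body),
            await_condition(fun () ->
                                    Body,
                                    true
                            end)).

With that shape, any failed ?assertEqual inside the body throws, await_condition/1 turns the exception into false, and the whole block is retried until the management stats catch up or the timeout expires, which is what allows the fixed timer:sleep(1500) calls to be removed.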
queue_pagination_columns_test(Config) -> QArgs = #{}, PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}], - http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]), - http_put(Config, "/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]), + http_put(Config, "/vhosts/vh.tests.queue_pagination_columns_test", none, [?CREATED, ?NO_CONTENT]), + http_put(Config, "/permissions/vh.tests.queue_pagination_columns_test/guest", PermArgs, [?CREATED, ?NO_CONTENT]), - http_get(Config, "/queues/vh1?columns=name&page=1&page_size=2", ?OK), + http_get(Config, "/queues/vh.tests.queue_pagination_columns_test?columns=name&page=1&page_size=2", ?OK), http_put(Config, "/queues/%2F/queue_a", QArgs, {group, '2xx'}), - http_put(Config, "/queues/vh1/queue_b", QArgs, {group, '2xx'}), + http_put(Config, "/queues/vh.tests.queue_pagination_columns_test/queue_b", QArgs, {group, '2xx'}), http_put(Config, "/queues/%2F/queue_c", QArgs, {group, '2xx'}), - http_put(Config, "/queues/vh1/queue_d", QArgs, {group, '2xx'}), + http_put(Config, "/queues/vh.tests.queue_pagination_columns_test/queue_d", QArgs, {group, '2xx'}), PageOfTwo = http_get(Config, "/queues?columns=name&page=1&page_size=2", ?OK), ?assertEqual(4, maps:get(total_count, PageOfTwo)), ?assertEqual(4, maps:get(filtered_count, PageOfTwo)), @@ -2306,7 +2529,7 @@ queue_pagination_columns_test(Config) -> #{name => <<"queue_c">>} ], maps:get(items, PageOfTwo)), - ColumnNameVhost = http_get(Config, "/queues/vh1?columns=name&page=1&page_size=2", ?OK), + ColumnNameVhost = http_get(Config, "/queues/vh.tests.queue_pagination_columns_test?columns=name&page=1&page_size=2", ?OK), ?assertEqual(2, maps:get(total_count, ColumnNameVhost)), ?assertEqual(2, maps:get(filtered_count, ColumnNameVhost)), ?assertEqual(2, maps:get(item_count, ColumnNameVhost)), @@ -2326,17 +2549,66 @@ queue_pagination_columns_test(Config) -> ?assertEqual(2, maps:get(page_count, ColumnsNameVhost)), assert_list([ #{name => <<"queue_b">>, - vhost => <<"vh1">>}, + vhost => <<"vh.tests.queue_pagination_columns_test">>}, #{name => <<"queue_d">>, - vhost => <<"vh1">>} + vhost => <<"vh.tests.queue_pagination_columns_test">>} ], maps:get(items, ColumnsNameVhost)), + ?awaitMatch( + true, + begin + ColumnsGarbageCollection = http_get(Config, "/queues?columns=name,garbage_collection&page=2&page_size=2", ?OK), + %% The reduced API version just has the most useful fields, + %% but we can still query any info item using `columns` + lists:all(fun(Item) -> + maps:is_key(garbage_collection, Item) + end, + maps:get(items, ColumnsGarbageCollection)) + end, 30000), + + http_delete(Config, "/queues/%2F/queue_a", {group, '2xx'}), + http_delete(Config, "/queues/vh.tests.queue_pagination_columns_test/queue_b", {group, '2xx'}), + http_delete(Config, "/queues/%2F/queue_c", {group, '2xx'}), + http_delete(Config, "/queues/vh.tests.queue_pagination_columns_test/queue_d", {group, '2xx'}), + http_delete(Config, "/vhosts/vh.tests.queue_pagination_columns_test", {group, '2xx'}), + passed. 
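The ?awaitMatch(true, begin ... end, 30000) checks used for the garbage_collection column above and in queues_detailed_test below appear to come from the rabbitmq_ct_helpers assertion macros (an assumption; the relevant include is outside this excerpt). Conceptually they behave like the following hand-rolled polling loop, re-evaluating the expression until it returns true or the deadline passes:

    %% Illustrative sketch only; names are hypothetical.
    await_true(Fun, TimeoutMs) ->
        Deadline = erlang:monotonic_time(millisecond) + TimeoutMs,
        await_true_loop(Fun, Deadline).

    await_true_loop(Fun, Deadline) ->
        case Fun() of
            true ->
                ok;
            Other ->
                case erlang:monotonic_time(millisecond) >= Deadline of
                    true  -> error({await_timeout, Other});
                    false -> timer:sleep(100),
                             await_true_loop(Fun, Deadline)
                end
        end.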
+ +queues_detailed_test(Config) -> + QArgs = #{}, + http_put(Config, "/queues/%2F/queue_a", QArgs, {group, '2xx'}), + http_put(Config, "/queues/%2F/queue_c", QArgs, {group, '2xx'}), + + ?awaitMatch( + true, + begin + Detailed = http_get(Config, "/queues/detailed", ?OK), + lists:all(fun(Item) -> + maps:is_key(garbage_collection, Item) + end, Detailed) + end, 30000), + + Detailed = http_get(Config, "/queues/detailed", ?OK), + ?assertNot(lists:any(fun(Item) -> + maps:is_key(backing_queue_status, Item) + end, Detailed)), + %% It's null + ?assert(lists:any(fun(Item) -> + maps:is_key(single_active_consumer_tag, Item) + end, Detailed)), + + Reduced = http_get(Config, "/queues", ?OK), + ?assertNot(lists:any(fun(Item) -> + maps:is_key(garbage_collection, Item) + end, Reduced)), + ?assertNot(lists:any(fun(Item) -> + maps:is_key(backing_queue_status, Item) + end, Reduced)), + ?assertNot(lists:any(fun(Item) -> + maps:is_key(single_active_consumer_tag, Item) + end, Reduced)), http_delete(Config, "/queues/%2F/queue_a", {group, '2xx'}), - http_delete(Config, "/queues/vh1/queue_b", {group, '2xx'}), http_delete(Config, "/queues/%2F/queue_c", {group, '2xx'}), - http_delete(Config, "/queues/vh1/queue_d", {group, '2xx'}), - http_delete(Config, "/vhosts/vh1", {group, '2xx'}), passed. queues_pagination_permissions_test(Config) -> @@ -2383,34 +2655,37 @@ samples_range_test(Config) -> {Conn, Ch} = open_connection_and_channel(Config), %% Channels - timer:sleep(2000), - [ConnInfo | _] = http_get(Config, "/channels?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + ?AWAIT( + begin + [ConnInfo | _] = http_get(Config, "/channels?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), - ConnDetails = maps:get(connection_details, ConnInfo), - ConnName0 = maps:get(name, ConnDetails), - ConnName = uri_string:recompose(#{path => binary_to_list(ConnName0)}), - ChanName = ConnName ++ uri_string:recompose(#{path => " (1)"}), + ConnDetails = maps:get(connection_details, ConnInfo), + ConnName0 = maps:get(name, ConnDetails), + ConnName = uri_string:recompose(#{path => binary_to_list(ConnName0)}), + ChanName = ConnName ++ uri_string:recompose(#{path => " (1)"}), - http_get(Config, "/channels/" ++ ChanName ++ "?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/channels/" ++ ChanName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + http_get(Config, "/channels/" ++ ChanName ++ "?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/channels/" ++ ChanName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), - http_get(Config, "/vhosts/%2F/channels?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/vhosts/%2F/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + http_get(Config, "/vhosts/%2F/channels?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/vhosts/%2F/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), - %% Connections. + %% Connections. 
- http_get(Config, "/connections?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + http_get(Config, "/connections?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), - http_get(Config, "/connections/" ++ ConnName ++ "?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/connections/" ++ ConnName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + http_get(Config, "/connections/" ++ ConnName ++ "?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/connections/" ++ ConnName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), - http_get(Config, "/connections/" ++ ConnName ++ "/channels?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/connections/" ++ ConnName ++ "/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + http_get(Config, "/connections/" ++ ConnName ++ "/channels?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/connections/" ++ ConnName ++ "/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), - http_get(Config, "/vhosts/%2F/connections?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/vhosts/%2F/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + http_get(Config, "/vhosts/%2F/connections?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/vhosts/%2F/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + true + end), amqp_channel:close(Ch), amqp_connection:close(Conn), @@ -2433,23 +2708,29 @@ samples_range_test(Config) -> %% Queues http_put(Config, "/queues/%2F/test-001", #{}, {group, '2xx'}), - timer:sleep(2000), - http_get(Config, "/queues/%2F?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/queues/%2F?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), - http_get(Config, "/queues/%2F/test-001?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/queues/%2F/test-001?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + ?AWAIT( + begin + http_get(Config, "/queues/%2F?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/queues/%2F?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + http_get(Config, "/queues/%2F/test-001?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/queues/%2F/test-001?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + true + end), http_delete(Config, "/queues/%2F/test-001", {group, '2xx'}), %% Vhosts http_put(Config, "/vhosts/vh1", none, {group, '2xx'}), - timer:sleep(2000), - http_get(Config, "/vhosts?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/vhosts?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), - http_get(Config, "/vhosts/vh1?lengths_age=60&lengths_incr=1", ?OK), - http_get(Config, "/vhosts/vh1?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + ?AWAIT( + begin + http_get(Config, "/vhosts?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/vhosts?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + http_get(Config, "/vhosts/vh1?lengths_age=60&lengths_incr=1", ?OK), + http_get(Config, "/vhosts/vh1?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST), + true + end), http_delete(Config, "/vhosts/vh1", {group, '2xx'}), @@ -2464,31 +2745,34 @@ sorting_test(Config) -> http_put(Config, "/queues/vh19/test1", QArgs, {group, '2xx'}), http_put(Config, "/queues/%2F/test2", QArgs, {group, '2xx'}), http_put(Config, "/queues/vh19/test3", QArgs, {group, '2xx'}), - timer:sleep(2000), - assert_list([#{name => <<"test0">>}, - #{name => <<"test2">>}, - #{name => <<"test1">>}, - #{name => <<"test3">>}], 
http_get(Config, "/queues", ?OK)), - assert_list([#{name => <<"test0">>}, - #{name => <<"test1">>}, - #{name => <<"test2">>}, - #{name => <<"test3">>}], http_get(Config, "/queues?sort=name", ?OK)), - assert_list([#{name => <<"test0">>}, - #{name => <<"test2">>}, - #{name => <<"test1">>}, - #{name => <<"test3">>}], http_get(Config, "/queues?sort=vhost", ?OK)), - assert_list([#{name => <<"test3">>}, - #{name => <<"test1">>}, - #{name => <<"test2">>}, - #{name => <<"test0">>}], http_get(Config, "/queues?sort_reverse=true", ?OK)), - assert_list([#{name => <<"test3">>}, - #{name => <<"test2">>}, - #{name => <<"test1">>}, - #{name => <<"test0">>}], http_get(Config, "/queues?sort=name&sort_reverse=true", ?OK)), - assert_list([#{name => <<"test3">>}, - #{name => <<"test1">>}, - #{name => <<"test2">>}, - #{name => <<"test0">>}], http_get(Config, "/queues?sort=vhost&sort_reverse=true", ?OK)), + ?AWAIT( + begin + assert_list([#{name => <<"test0">>}, + #{name => <<"test2">>}, + #{name => <<"test1">>}, + #{name => <<"test3">>}], http_get(Config, "/queues", ?OK)), + assert_list([#{name => <<"test0">>}, + #{name => <<"test1">>}, + #{name => <<"test2">>}, + #{name => <<"test3">>}], http_get(Config, "/queues?sort=name", ?OK)), + assert_list([#{name => <<"test0">>}, + #{name => <<"test2">>}, + #{name => <<"test1">>}, + #{name => <<"test3">>}], http_get(Config, "/queues?sort=vhost", ?OK)), + assert_list([#{name => <<"test3">>}, + #{name => <<"test1">>}, + #{name => <<"test2">>}, + #{name => <<"test0">>}], http_get(Config, "/queues?sort_reverse=true", ?OK)), + assert_list([#{name => <<"test3">>}, + #{name => <<"test2">>}, + #{name => <<"test1">>}, + #{name => <<"test0">>}], http_get(Config, "/queues?sort=name&sort_reverse=true", ?OK)), + assert_list([#{name => <<"test3">>}, + #{name => <<"test1">>}, + #{name => <<"test2">>}, + #{name => <<"test0">>}], http_get(Config, "/queues?sort=vhost&sort_reverse=true", ?OK)), + true + end), %% Rather poor but at least test it doesn't blow up with dots http_get(Config, "/queues?sort=owner_pid_details.name", ?OK), http_delete(Config, "/queues/%2F/test0", {group, '2xx'}), @@ -2504,12 +2788,15 @@ format_output_test(Config) -> http_put(Config, "/vhosts/vh129", none, {group, '2xx'}), http_put(Config, "/permissions/vh129/guest", PermArgs, {group, '2xx'}), http_put(Config, "/queues/%2F/test0", QArgs, {group, '2xx'}), - timer:sleep(2000), - assert_list([#{name => <<"test0">>, - consumer_capacity => 0, - consumer_utilisation => 0, - exclusive_consumer_tag => null, - recoverable_slaves => null}], http_get(Config, "/queues", ?OK)), + + ?AWAIT( + begin + assert_list([#{name => <<"test0">>, + consumer_capacity => 0, + consumer_utilisation => 0, + exclusive_consumer_tag => null}], http_get(Config, "/queues", ?OK)), + true + end), http_delete(Config, "/queues/%2F/test0", {group, '2xx'}), http_delete(Config, "/vhosts/vh129", {group, '2xx'}), passed. 
@@ -2521,9 +2808,13 @@ columns_test(Config) -> http_put(Config, Path, [{arguments, [{<<"x-message-ttl">>, TTL}]}], {group, '2xx'}), Item = #{arguments => #{'x-message-ttl' => TTL}, name => <<"columns.test">>}, - timer:sleep(2000), - [Item] = http_get(Config, "/queues?columns=arguments.x-message-ttl,name", ?OK), - Item = http_get(Config, "/queues/%2F/columns.test?columns=arguments.x-message-ttl,name", ?OK), + + ?AWAIT( + begin + [Item] = http_get(Config, "/queues?columns=arguments.x-message-ttl,name", ?OK), + Item = http_get(Config, "/queues/%2F/columns.test?columns=arguments.x-message-ttl,name", ?OK), + true + end), http_delete(Config, Path, {group, '2xx'}), passed. @@ -2599,7 +2890,7 @@ get_encoding_test(Config) -> http_put(Config, "/queues/%2F/get_encoding_test", #{}, {group, '2xx'}), http_post(Config, "/exchanges/%2F/amq.default/publish", Utf8Msg, ?OK), http_post(Config, "/exchanges/%2F/amq.default/publish", BinMsg, ?OK), - timer:sleep(250), + [RecvUtf8Msg1, RecvBinMsg1] = http_post(Config, "/queues/%2F/get_encoding_test/get", [{ackmode, ack_requeue_false}, {count, 2}, @@ -2638,7 +2929,7 @@ get_fail_test(Config) -> passed. --define(LARGE_BODY_BYTES, 25000000). +-define(LARGE_BODY_BYTES, 5000000). publish_test(Config) -> Headers = #{'x-forwarding' => [#{uri => <<"amqp://localhost/%2F/upstream">>}]}, @@ -2681,6 +2972,19 @@ publish_large_message_test(Config) -> http_delete(Config, "/queues/%2F/publish_accept_json_test", {group, '2xx'}), passed. +-define(EXCESSIVELY_LARGE_BODY_BYTES, 35000000). + +publish_large_message_exceeding_http_request_body_size_test(Config) -> + Headers = #{'x-forwarding' => [#{uri => <<"amqp://localhost/%2F/upstream">>}]}, + Body = binary:copy(<<"a">>, ?EXCESSIVELY_LARGE_BODY_BYTES), + Msg = msg(<<"large_message_exceeding_http_request_body_size_test">>, Headers, Body), + http_put(Config, "/queues/%2F/large_message_exceeding_http_request_body_size_test", #{}, {group, '2xx'}), + %% exceeds the default HTTP API request body size limit + http_post_accept_json(Config, "/exchanges/%2F/amq.default/publish", + Msg, ?BAD_REQUEST), + http_delete(Config, "/queues/%2F/large_message_exceeding_http_request_body_size_test", {group, '2xx'}), + passed. + publish_accept_json_test(Config) -> Headers = #{'x-forwarding' => [#{uri => <<"amqp://localhost/%2F/upstream">>}]}, Msg = msg(<<"publish_accept_json_test">>, Headers, <<"Hello world">>), @@ -2746,7 +3050,7 @@ publish_base64_test(Config) -> http_post(Config, "/exchanges/%2F/amq.default/publish", Msg, ?OK), http_post(Config, "/exchanges/%2F/amq.default/publish", BadMsg1, ?BAD_REQUEST), http_post(Config, "/exchanges/%2F/amq.default/publish", BadMsg2, ?BAD_REQUEST), - timer:sleep(250), + [Msg2] = http_post(Config, "/queues/%2F/publish_base64_test/get", [{ackmode, ack_requeue_false}, {count, 1}, {encoding, auto}], ?OK), @@ -2943,7 +3247,7 @@ policy_permissions_test(Config) -> http_put(Config, "/permissions/v/mgmt", Perms, {group, '2xx'}), Policy = [{pattern, <<".*">>}, - {definition, [{<<"ha-mode">>, <<"all">>}]}], + {definition, [{<<"max-length-bytes">>, 3000000}]}], Param = [{value, <<"">>}], http_put(Config, "/policies/%2F/HA", Policy, {group, '2xx'}), @@ -3032,7 +3336,7 @@ cors_test(Config) -> %% The Vary header should include "Origin" regardless of CORS configuration. {_, "accept, accept-encoding, origin"} = lists:keyfind("vary", 1, HdNoCORS), %% Enable CORS. 
- rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, cors_allow_origins, ["https://rabbitmq.com"]]), + rpc(Config, application, set_env, [rabbitmq_management, cors_allow_origins, ["https://rabbitmq.com"]]), %% We should only receive allow-origin and allow-credentials from GET. {ok, {_, HdGetCORS, _}} = req(Config, get, "/overview", [{"origin", "https://rabbitmq.com"}, auth_header("guest", "guest")]), @@ -3058,7 +3362,7 @@ cors_test(Config) -> {"access-control-request-headers", "x-piggy-bank"}]), {_, "x-piggy-bank"} = lists:keyfind("access-control-allow-headers", 1, HdAllowHeadersCORS), %% Disable preflight request caching. - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, cors_max_age, undefined]), + rpc(Config, application, set_env, [rabbitmq_management, cors_max_age, undefined]), %% We shouldn't receive max-age anymore. {ok, {_, HdNoMaxAgeCORS, _}} = req(Config, options, "/overview", [{"origin", "https://rabbitmq.com"}, auth_header("guest", "guest")]), @@ -3067,7 +3371,7 @@ cors_test(Config) -> %% Check OPTIONS method in all paths check_cors_all_endpoints(Config), %% Disable CORS again. - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, cors_allow_origins, []]), + rpc(Config, application, set_env, [rabbitmq_management, cors_allow_origins, []]), passed. check_cors_all_endpoints(Config) -> @@ -3402,21 +3706,16 @@ oauth_test(Config) -> ?assertEqual(false, maps:get(oauth_enabled, Map1)), %% Misconfiguration - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_management, oauth_enabled, true]), + rpc(Config, application, set_env, [rabbitmq_management, oauth_enabled, true]), Map2 = http_get(Config, "/auth", ?OK), ?assertEqual(false, maps:get(oauth_enabled, Map2)), ?assertEqual(<<>>, maps:get(oauth_client_id, Map2)), ?assertEqual(<<>>, maps:get(oauth_provider_url, Map2)), %% Valid config requires non empty OAuthClientId, OAuthClientSecret, OAuthResourceId, OAuthProviderUrl - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_management, oauth_client_id, "rabbit_user"]), - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_management, oauth_client_secret, "rabbit_secret"]), - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_management, oauth_provider_url, "http://localhost:8080/uaa"]), - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_auth_backend_oauth2, resource_server_id, "rabbitmq"]), + rpc(Config, application, set_env, [rabbitmq_management, oauth_client_id, "rabbit_user"]), + rpc(Config, application, set_env, [rabbitmq_management, oauth_client_secret, "rabbit_secret"]), + rpc(Config, application, set_env, [rabbitmq_management, oauth_provider_url, "http://localhost:8080/uaa"]), + rpc(Config, application, set_env, [rabbitmq_auth_backend_oauth2, resource_server_id, "rabbitmq"]), Map3 = http_get(Config, "/auth", ?OK), println(Map3), ?assertEqual(true, maps:get(oauth_enabled, Map3)), @@ -3425,8 +3724,7 @@ oauth_test(Config) -> ?assertEqual(<<"rabbitmq">>, maps:get(resource_server_id, Map3)), ?assertEqual(<<"http://localhost:8080/uaa">>, maps:get(oauth_provider_url, Map3)), %% cleanup - rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, - [rabbitmq_management, oauth_enabled]). + rpc(Config, application, unset_env, [rabbitmq_management, oauth_enabled]). 
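The rpc(Config, Mod, Fun, Args) shorthand that replaces the longer rabbit_ct_broker_helpers:rpc/5 calls in these hunks is defined elsewhere in the suite; given that every replaced call targeted node 0, it is presumably just:

    %% Presumed definition of the shorthand; it pins every RPC to the first broker node.
    rpc(Config, Mod, Fun, Args) ->
        rabbit_ct_broker_helpers:rpc(Config, 0, Mod, Fun, Args).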
login_test(Config) -> http_put(Config, "/users/myuser", [{password, <<"myuser">>}, @@ -3467,8 +3765,7 @@ csp_headers_test(Config) -> ?assert(lists:keymember("content-security-policy", 1, HdGetCsp1)). disable_basic_auth_test(Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_management, disable_basic_auth, true]), + rpc(Config, application, set_env, [rabbitmq_management, disable_basic_auth, true]), http_get(Config, "/overview", ?NOT_AUTHORISED), %% Ensure that a request without auth header does not return a basic auth prompt @@ -3485,13 +3782,12 @@ disable_basic_auth_test(Config) -> http_delete(Config, "/queues/%2F/myqueue", ?NOT_AUTHORISED), http_get(Config, "/definitions", ?NOT_AUTHORISED), http_post(Config, "/definitions", [], ?NOT_AUTHORISED), - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_management, disable_basic_auth, 50]), + rpc(Config, application, set_env, [rabbitmq_management, disable_basic_auth, 50]), %% Defaults to 'false' when config is invalid http_get(Config, "/overview", ?OK). auth_attempts_test(Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, reset_auth_attempt_metrics, []), + rpc(Config, rabbit_core_metrics, reset_auth_attempt_metrics, []), {Conn, _Ch} = open_connection_and_channel(Config), close_connection(Conn), [NodeData] = http_get(Config, "/nodes"), @@ -3510,8 +3806,7 @@ auth_attempts_test(Config) -> ?assertEqual(2, maps:get(auth_attempts_succeeded, Http)), ?assertEqual(0, maps:get(auth_attempts_failed, Http)), - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbit, track_auth_attempt_source, true]), + rpc(Config, application, set_env, [rabbit, track_auth_attempt_source, true]), {Conn2, _Ch2} = open_connection_and_channel(Config), close_connection(Conn2), Map2 = http_get(Config, "/auth/attempts/" ++ atom_to_list(Node) ++ "/source", ?OK), @@ -3548,10 +3843,9 @@ auth_attempts_test(Config) -> config_environment_test(Config) -> - rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, - [rabbitmq_management, - config_environment_test_env, - config_environment_test_value]), + rpc(Config, application, set_env, [rabbitmq_management, + config_environment_test_env, + config_environment_test_value]), ResultString = http_get_no_decode(Config, "/config/effective", "guest", "guest", ?OK), CleanString = re:replace(ResultString, "\\s+", "", [global,{return,list}]), @@ -3572,6 +3866,47 @@ disabled_qq_replica_opers_test(Config) -> http_delete(Config, "/queues/quorum/replicas/on/" ++ Nodename ++ "/shrink", ?METHOD_NOT_ALLOWED), passed. +list_deprecated_features_test(Config) -> + Desc = "This is a deprecated feature", + DocUrl = "https://rabbitmq.com/", + FeatureFlags = #{?FUNCTION_NAME => + #{provided_by => ?MODULE, + deprecation_phase => permitted_by_default, + desc => Desc, + doc_url => DocUrl}}, + ok = rpc(Config, rabbit_feature_flags, inject_test_feature_flags, [FeatureFlags]), + Result = http_get(Config, "/deprecated-features", ?OK), + Features = lists:filter(fun(Map) -> + maps:get(name, Map) == atom_to_binary(?FUNCTION_NAME) + end, Result), + ?assertMatch([_], Features), + [Feature] = Features, + ?assertEqual(<<"permitted_by_default">>, maps:get(deprecation_phase, Feature)), + ?assertEqual(atom_to_binary(?MODULE), maps:get(provided_by, Feature)), + ?assertEqual(list_to_binary(Desc), maps:get(desc, Feature)), + ?assertEqual(list_to_binary(DocUrl), maps:get(doc_url, Feature)). 
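list_deprecated_features_test above injects its fixture through rabbit_feature_flags:inject_test_feature_flags/1. For comparison, production code normally declares the same metadata with a module attribute, roughly like this (an assumption based on how RabbitMQ's deprecated-features subsystem is used elsewhere; the key names simply mirror the injected map):

    -rabbit_deprecated_feature(
       {my_deprecated_thing,
        #{deprecation_phase => permitted_by_default,
          desc => "This is a deprecated feature",
          doc_url => "https://rabbitmq.com/"}}).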
+ +list_used_deprecated_features_test(Config) -> + Desc = "This is a deprecated feature in use", + DocUrl = "https://rabbitmq.com/", + FeatureFlags = #{?FUNCTION_NAME => + #{provided_by => ?MODULE, + deprecation_phase => removed, + desc => Desc, + doc_url => DocUrl, + callbacks => #{is_feature_used => {rabbit_mgmt_wm_deprecated_features, feature_is_used}}}}, + ok = rpc(Config, rabbit_feature_flags, inject_test_feature_flags, [FeatureFlags]), + Result = http_get(Config, "/deprecated-features/used", ?OK), + Features = lists:filter(fun(Map) -> + maps:get(name, Map) == atom_to_binary(?FUNCTION_NAME) + end, Result), + ?assertMatch([_], Features), + [Feature] = Features, + ?assertEqual(<<"removed">>, maps:get(deprecation_phase, Feature)), + ?assertEqual(atom_to_binary(?MODULE), maps:get(provided_by, Feature)), + ?assertEqual(list_to_binary(Desc), maps:get(desc, Feature)), + ?assertEqual(list_to_binary(DocUrl), maps:get(doc_url, Feature)). + %% ------------------------------------------------------------------- %% Helpers. %% ------------------------------------------------------------------- @@ -3634,17 +3969,6 @@ publish(Ch) -> publish(Ch) end. -wait_until(_Fun, 0) -> - ?assert(wait_failed); -wait_until(Fun, N) -> - case Fun() of - true -> - timer:sleep(1500); - false -> - timer:sleep(?COLLECT_INTERVAL + 100), - wait_until(Fun, N - 1) - end. - http_post_json(Config, Path, Body, Assertion) -> http_upload_raw(Config, post, Path, Body, "guest", "guest", Assertion, [{"content-type", "application/json"}]). @@ -3669,3 +3993,13 @@ get_auth_attempts(Protocol, Map) -> P == Protocol end, Map), A. + +await_condition(Fun) -> + rabbit_ct_helpers:await_condition( + fun () -> + try + Fun() + catch _:_ -> + false + end + end, ?COLLECT_INTERVAL * 100). diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl index a66f94fb31c3..71c532ead6f5 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_http_health_checks_SUITE). -include("rabbit_mgmt.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). @@ -17,31 +16,30 @@ req/4, auth_header/2]). --define(COLLECT_INTERVAL, 1000). -define(PATH_PREFIX, "/custom-prefix"). +-compile(nowarn_export_all). -compile(export_all). all() -> [ - {group, all_tests}, + {group, cluster_size_3}, + {group, cluster_size_5}, {group, single_node} ]. groups() -> [ - {all_tests, [], all_tests()}, + {cluster_size_3, [], all_tests()}, + {cluster_size_5, [], [is_quorum_critical_test]}, {single_node, [], [ alarms_test, local_alarms_test, - is_quorum_critical_single_node_test, - is_mirror_sync_critical_single_node_test]} + is_quorum_critical_single_node_test]} ]. 
all_tests() -> [ health_checks_test, - is_quorum_critical_test, - is_mirror_sync_critical_test, virtual_hosts_test, protocol_listener_test, port_listener_test, @@ -58,7 +56,8 @@ init_per_group(Group, Config0) -> rabbit_ct_helpers:log_environment(), inets:start(), ClusterSize = case Group of - all_tests -> 3; + cluster_size_3 -> 3; + cluster_size_5 -> 5; single_node -> 1 end, NodeConf = [{rmq_nodename_suffix, Group}, @@ -77,9 +76,7 @@ end_per_group(_, Config) -> Steps = Teardown0 ++ Teardown1, rabbit_ct_helpers:run_teardown_steps(Config, Steps). -init_per_testcase(Testcase, Config) - when Testcase == is_quorum_critical_test - orelse Testcase == is_mirror_sync_critical_test -> +init_per_testcase(Testcase, Config) when Testcase == is_quorum_critical_test -> case rabbit_ct_helpers:is_mixed_versions() of true -> {skip, "not mixed versions compatible"}; @@ -89,19 +86,6 @@ init_per_testcase(Testcase, Config) init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). -end_per_testcase(is_quorum_critical_test = Testcase, Config) -> - [_, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - _ = rabbit_ct_broker_helpers:start_node(Config, Server2), - _ = rabbit_ct_broker_helpers:start_node(Config, Server3), - rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []), - rabbit_ct_helpers:testcase_finished(Config, Testcase); -end_per_testcase(is_mirror_sync_critical_test = Testcase, Config) -> - [_, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - _ = rabbit_ct_broker_helpers:start_node(Config, Server2), - _ = rabbit_ct_broker_helpers:start_node(Config, Server3), - ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>), - rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []), - rabbit_ct_helpers:testcase_finished(Config, Testcase); end_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_finished(Config, Testcase). @@ -115,7 +99,6 @@ health_checks_test(Config) -> http_get(Config, io_lib:format("/health/checks/port-listener/~tp", [Port]), ?OK), http_get(Config, "/health/checks/protocol-listener/http", ?OK), http_get(Config, "/health/checks/virtual-hosts", ?OK), - http_get(Config, "/health/checks/node-is-mirror-sync-critical", ?OK), http_get(Config, "/health/checks/node-is-quorum-critical", ?OK), passed. 
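Several checks below rely on an http_get_failed/2 helper whose definition sits outside this excerpt. A plausible sketch, assuming it performs the request with the default guest credentials and hands back the decoded JSON error body (the real helper may pin an exact status code such as 503 rather than accepting any error):

    %% Plausible sketch only.
    http_get_failed(Config, Path) ->
        {ok, {{_, Code, _}, _, ResBody}} =
            req(Config, get, Path, [auth_header("guest", "guest")]),
        ?assert(Code >= 400),
        rabbit_json:decode(rabbit_data_coercion:to_binary(ResBody)).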
@@ -193,9 +176,10 @@ is_quorum_critical_test(Config) -> ?assertEqual(false, maps:is_key(reason, Check0)), ?assertEqual(<<"ok">>, maps:get(status, Check0)), - [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), - Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}], + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-quorum-initial-group-size">>, long, 3}], QName = <<"is_quorum_critical_test">>, ?assertEqual({'queue.declare_ok', QName, 0, 0}, amqp_channel:call(Ch, #'queue.declare'{queue = QName, @@ -205,67 +189,13 @@ is_quorum_critical_test(Config) -> Check1 = http_get(Config, "/health/checks/node-is-quorum-critical", ?OK), ?assertEqual(false, maps:is_key(reason, Check1)), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), - ok = rabbit_ct_broker_helpers:stop_node(Config, Server3), - - Body = http_get_failed(Config, "/health/checks/node-is-quorum-critical"), - ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body)), - ?assertEqual(true, maps:is_key(<<"reason">>, Body)), - [Queue] = maps:get(<<"queues">>, Body), - ?assertEqual(QName, maps:get(<<"name">>, Queue)), - - passed. - -is_mirror_sync_critical_single_node_test(Config) -> - Check0 = http_get(Config, "/health/checks/node-is-mirror-sync-critical", ?OK), - ?assertEqual(<<"single node cluster">>, maps:get(reason, Check0)), - ?assertEqual(<<"ok">>, maps:get(status, Check0)), - - ok = rabbit_ct_broker_helpers:set_policy( - Config, 0, <<"ha">>, <<"is_mirror_sync.*">>, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}]), - Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server), - QName = <<"is_mirror_sync_critical_single_node_test">>, - ?assertEqual({'queue.declare_ok', QName, 0, 0}, - amqp_channel:call(Ch, #'queue.declare'{queue = QName, - durable = true, - auto_delete = false, - arguments = []})), - Check1 = http_get(Config, "/health/checks/node-is-mirror-sync-critical", ?OK), - ?assertEqual(<<"single node cluster">>, maps:get(reason, Check1)), - - passed. 
- -is_mirror_sync_critical_test(Config) -> - Path = "/health/checks/node-is-mirror-sync-critical", - Check0 = http_get(Config, Path, ?OK), - ?assertEqual(false, maps:is_key(reason, Check0)), - ?assertEqual(<<"ok">>, maps:get(status, Check0)), - - ok = rabbit_ct_broker_helpers:set_policy( - Config, 0, <<"ha">>, <<"is_mirror_sync.*">>, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}]), - [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Ch = rabbit_ct_client_helpers:open_channel(Config, Server1), - QName = <<"is_mirror_sync_critical_test">>, - ?assertEqual({'queue.declare_ok', QName, 0, 0}, - amqp_channel:call(Ch, #'queue.declare'{queue = QName, - durable = true, - auto_delete = false, - arguments = []})), - rabbit_ct_helpers:await_condition( - fun() -> - {ok, {{_, Code, _}, _, _}} = req(Config, get, Path, [auth_header("guest", "guest")]), - Code == ?OK - end), - Check1 = http_get(Config, Path, ?OK), - ?assertEqual(false, maps:is_key(reason, Check1)), + RaName = binary_to_atom(<<"%2F_", QName/binary>>, utf8), + {ok, [_, {_, Server2}, {_, Server3}], _} = ra:members({RaName, Server}), ok = rabbit_ct_broker_helpers:stop_node(Config, Server2), ok = rabbit_ct_broker_helpers:stop_node(Config, Server3), - Body = http_get_failed(Config, Path), + Body = http_get_failed(Config, "/health/checks/node-is-quorum-critical"), ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body)), ?assertEqual(true, maps:is_key(<<"reason">>, Body)), [Queue] = maps:get(<<"queues">>, Body), diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl index bc08cb48ef4b..7fe227d8f357 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_only_http_SUITE). -include_lib("amqp_client/include/amqp_client.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). @@ -29,9 +28,10 @@ -import(rabbit_misc, [pget/2]). --define(COLLECT_INTERVAL, 1000). +-define(COLLECT_INTERVAL, 256). -define(PATH_PREFIX, "/custom-prefix"). +-compile(nowarn_export_all). -compile(export_all). all() -> @@ -43,20 +43,23 @@ all() -> groups() -> [ - {all_tests_with_prefix, [], all_tests()}, - {all_tests_without_prefix, [], all_tests()}, + {all_tests_with_prefix, [], some_tests() ++ all_tests()}, + {all_tests_without_prefix, [], some_tests()}, {stats_disabled_on_request, [], [disable_with_disable_stats_parameter_test]}, {invalid_config, [], [invalid_config_test]} ]. -all_tests() -> [ +some_tests() -> + [ overview_test, nodes_test, vhosts_test, connections_test, exchanges_test, - queues_test, - mirrored_queues_test, + queues_test + ]. 
+ +all_tests() -> [ quorum_queues_test, permissions_vhost_test, permissions_connection_channel_consumer_test, @@ -84,9 +87,13 @@ all_tests() -> [ %% ------------------------------------------------------------------- merge_app_env(Config, DisableStats) -> Config1 = rabbit_ct_helpers:merge_app_env(Config, - {rabbit, [ - {collect_statistics_interval, ?COLLECT_INTERVAL} - ]}), + {rabbit, + [ + {collect_statistics_interval, + ?COLLECT_INTERVAL}, + {quorum_tick_interval, 256}, + {stream_tick_interval, 256} + ]}), rabbit_ct_helpers:merge_app_env(Config1, {rabbitmq_management, [ {disable_management_stats, DisableStats}, @@ -122,12 +129,10 @@ init_per_group(all_tests_with_prefix = Group, Config0) -> PathConfig = {rabbitmq_management, [{path_prefix, ?PATH_PREFIX}]}, Config1 = rabbit_ct_helpers:merge_app_env(Config0, PathConfig), Config2 = finish_init(Group, Config1), - Config3 = start_broker(Config2), - Config3; + start_broker(Config2); init_per_group(Group, Config0) -> Config1 = finish_init(Group, Config0), - Config2 = start_broker(Config1), - Config2. + start_broker(Config1). end_per_group(_, Config) -> inets:stop(), @@ -309,7 +314,7 @@ connections_test(Config) -> false end end, - wait_until(Fun, 60), + await_condition(Fun), close_connection(Conn), passed. @@ -401,8 +406,8 @@ queues_test(Config) -> ?BAD_REQUEST), Policy = [{pattern, <<"baz">>}, - {definition, [{<<"ha-mode">>, <<"all">>}]}], - http_put(Config, "/policies/%2F/HA", Policy, {group, '2xx'}), + {definition, [{<<"max-length">>, 100}]}], + http_put(Config, "/policies/%2F/length", Policy, {group, '2xx'}), http_put(Config, "/queues/%2F/baz", Good, {group, '2xx'}), Queues = http_get(Config, "/queues/%2F"), Queue = http_get(Config, "/queues/%2F/foo"), @@ -414,9 +419,7 @@ queues_test(Config) -> auto_delete => false, exclusive => false, arguments => #{}, - node => NodeBin, - slave_nodes => [], - synchronised_slave_nodes => []}, + node => NodeBin}, #{name => <<"foo">>, vhost => <<"/">>, durable => true, @@ -461,8 +464,8 @@ queues_enable_totals_test(Config) -> http_put(Config, "/queues/%2F/foo", GoodQQ, {group, '2xx'}), Policy = [{pattern, <<"baz">>}, - {definition, [{<<"ha-mode">>, <<"all">>}]}], - http_put(Config, "/policies/%2F/HA", Policy, {group, '2xx'}), + {definition, [{<<"max-length">>, 100}]}], + http_put(Config, "/policies/%2F/length", Policy, {group, '2xx'}), http_put(Config, "/queues/%2F/baz", Good, {group, '2xx'}), {Conn, Ch} = open_connection_and_channel(Config), @@ -480,7 +483,7 @@ queues_enable_totals_test(Config) -> length(rabbit_ct_broker_helpers:rpc(Config, 0, ets, tab2list, [queue_coarse_metrics])) == 2 end, - wait_until(Fun, 60), + await_condition(Fun), Queues = http_get(Config, "/queues/%2F"), Queue = http_get(Config, "/queues/%2F/foo"), @@ -494,11 +497,9 @@ queues_enable_totals_test(Config) -> exclusive => false, arguments => #{}, node => NodeBin, - slave_nodes => [], messages => 1, messages_ready => 1, - messages_unacknowledged => 0, - synchronised_slave_nodes => []}, + messages_unacknowledged => 0}, #{name => <<"foo">>, vhost => <<"/">>, durable => true, @@ -514,14 +515,14 @@ queues_enable_totals_test(Config) -> vhost => <<"/">>, durable => true, auto_delete => false, - exclusive => false, + exclusive => null, arguments => #{'x-queue-type' => <<"quorum">>}, leader => NodeBin, + messages => 2, + messages_ready => 2, + messages_unacknowledged => 0, members => [NodeBin]}, Queue), - ?assert(not maps:is_key(messages, Queue)), - ?assert(not maps:is_key(messages_ready, Queue)), - ?assert(not maps:is_key(messages_unacknowledged, 
Queue)), ?assert(not maps:is_key(message_stats, Queue)), ?assert(not maps:is_key(messages_details, Queue)), ?assert(not maps:is_key(reductions_details, Queue)), @@ -532,41 +533,6 @@ queues_enable_totals_test(Config) -> passed. -mirrored_queues_test(Config) -> - Policy = [{pattern, <<".*">>}, - {definition, [{<<"ha-mode">>, <<"all">>}]}], - http_put(Config, "/policies/%2F/HA", Policy, {group, '2xx'}), - - Good = [{durable, true}, {arguments, []}], - http_get(Config, "/queues/%2f/ha", ?NOT_FOUND), - http_put(Config, "/queues/%2f/ha", Good, {group, '2xx'}), - - {Conn, Ch} = open_connection_and_channel(Config), - Publish = fun() -> - amqp_channel:call( - Ch, #'basic.publish'{exchange = <<"">>, - routing_key = <<"ha">>}, - #amqp_msg{payload = <<"message">>}) - end, - Publish(), - Publish(), - - Queue = http_get(Config, "/queues/%2f/ha?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"), - - %% It's really only one node, but the only thing that matters in this test is to verify the - %% key exists - Nodes = lists:sort(rabbit_ct_broker_helpers:get_node_configs(Config, nodename)), - - ?assert(not maps:is_key(messages, Queue)), - ?assert(not maps:is_key(messages_details, Queue)), - ?assert(not maps:is_key(reductions_details, Queue)), - ?assert(true, lists:member(maps:get(node, Queue), Nodes)), - ?assertEqual([], get_nodes(slave_nodes, Queue)), - ?assertEqual([], get_nodes(synchronised_slave_nodes, Queue)), - - http_delete(Config, "/queues/%2f/ha", {group, '2xx'}), - close_connection(Conn). - quorum_queues_test(Config) -> Good = [{durable, true}, {arguments, [{'x-queue-type', 'quorum'}]}], http_get(Config, "/queues/%2f/qq", ?NOT_FOUND), @@ -894,8 +860,6 @@ table_hash(Table) -> queue_actions_test(Config) -> http_put(Config, "/queues/%2F/q", #{}, {group, '2xx'}), - http_post(Config, "/queues/%2F/q/actions", [{action, sync}], {group, '2xx'}), - http_post(Config, "/queues/%2F/q/actions", [{action, cancel_sync}], {group, '2xx'}), http_post(Config, "/queues/%2F/q/actions", [{action, change_colour}], ?BAD_REQUEST), http_delete(Config, "/queues/%2F/q", {group, '2xx'}), passed. @@ -1678,17 +1642,16 @@ publish(Ch) -> publish(Ch) end. -wait_until(_Fun, 0) -> - ?assert(wait_failed); -wait_until(Fun, N) -> - case Fun() of - true -> - timer:sleep(1500); - false -> - timer:sleep(?COLLECT_INTERVAL + 100), - wait_until(Fun, N - 1) - end. - http_post_json(Config, Path, Body, Assertion) -> http_upload_raw(Config, post, Path, Body, "guest", "guest", Assertion, [{"Content-Type", "application/json"}]). + +await_condition(Fun) -> + rabbit_ct_helpers:await_condition( + fun () -> + try + Fun() + catch _:_ -> + false + end + end, ?COLLECT_INTERVAL * 100). diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl index 4c7852a34c6f..3b2247d69c88 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_rabbitmqadmin_SUITE). 
@@ -297,12 +297,12 @@ bindings(Config) -> {ok, _} = run(Config, ["delete", "queue", "name=foo"]). policies(Config) -> - {ok, _} = run(Config, ["declare", "policy", "name=ha", - "pattern=.*", "definition={\"ha-mode\":\"all\"}"]), - {ok, [["ha", "/", ".*", "{\"ha-mode\": \"all\"}"]]} = + {ok, _} = run(Config, ["declare", "policy", "name=max-length-bytes", + "pattern=.*", "definition={\"max-length-bytes\":10000}"]), + {ok, [["max-length-bytes", "/", ".*", "{\"max-length-bytes\": 10000}"]]} = run_table(Config, ["list", "policies", "name", "vhost", "pattern", "definition"]), - {ok, _} = run(Config, ["delete", "policy", "name=ha"]). + {ok, _} = run(Config, ["delete", "policy", "name=max-length-bytes"]). operator_policies(Config) -> {ok, _} = run(Config, ["declare", "operator_policy", "name=len", diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl b/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl index cef8b442c32e..76c83ec2214f 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_runtime_parameters_util). diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_stats_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_stats_SUITE.erl index b141be018366..99d67fb8b4c6 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_stats_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_stats_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_stats_SUITE). diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl index 46d95090df7f..a9c3006be886 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_test_db_SUITE). @@ -10,17 +10,20 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("rabbit_common/include/rabbit_core_metrics.hrl"). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl"). --include("rabbit_mgmt.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). -import(rabbit_mgmt_test_util, [assert_list/2, reset_management_settings/1]). -import(rabbit_misc, [pget/2]). --compile(export_all). 
+-compile([export_all, nowarn_export_all]). -compile({no_auto_import, [ceil/1]}). +-define(AWAIT(Body), + await_condition(fun () -> + Body, + true + end)). all() -> [ {group, non_parallel_tests} @@ -49,14 +52,23 @@ init_per_suite(Config) -> end_per_suite(Config) -> Config. -init_per_group(_, Config) -> - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, ?MODULE} - ]), - Config2 = rabbit_ct_helpers:merge_app_env( - rabbit_mgmt_test_util:merge_stats_app_env(Config1, 1000, 1), - {rabbitmq_management_agent, [{rates_mode, detailed}]}), - rabbit_ct_helpers:run_setup_steps(Config2, +init_per_group(_, Config0) -> + Config1 = rabbit_ct_helpers:set_config(Config0, + [ + {rmq_nodename_suffix, ?MODULE}, + {collect_statistics_interval, 256} + ]), + %% it isn't possible to configure a sample interval lower than 1s which + %% means there are a lot of 1s+ sleeps in this suite. + SampleInterval = 1, + Config = rabbit_ct_helpers:merge_app_env( + Config1, {rabbitmq_management_agent, + [{rates_mode, detailed}, + {sample_retention_policies, + [{global, [{605, SampleInterval}]}, + {basic, [{605, SampleInterval}]}, + {detailed, [{10, SampleInterval}]}]}]}), + rabbit_ct_helpers:run_setup_steps(Config, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). @@ -66,11 +78,9 @@ end_per_group(_, Config) -> rabbit_ct_broker_helpers:teardown_steps()). init_per_testcase(Testcase, Config) -> - reset_management_settings(Config), rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(Testcase, Config) -> - reset_management_settings(Config), rabbit_ct_helpers:testcase_finished(Config, Testcase). %% ------------------------------------------------------------------- @@ -96,25 +106,32 @@ queue_coarse_test1(_Config) -> || {T, _} <- ?CORE_TABLES], First = exometer_slide:timestamp(), stats_series(fun stats_q/2, [[{test, 1}, {test2, 1}], [{test, 10}], [{test, 20}]]), - timer:sleep(1150 * 2), %% The x2 factor is arbitrary: it makes CI happy. - Last = exometer_slide:timestamp(), Interval = 1, - R = range(First, Last, Interval), - simple_details(get_q(test, R), messages, 20, R), - simple_details(get_vhost(R), messages, 21, R), - simple_details(get_overview_q(R), messages, 21, R), + ?AWAIT( + begin + Last = exometer_slide:timestamp(), + R = range(First, Last, Interval), + simple_details(get_q(test, R), messages, 20, R), + simple_details(get_vhost(R), messages, 21, R), + simple_details(get_overview_q(R), messages, 21, R) + end), delete_q(test), - timer:sleep(1150), - Next = last_ts(First, Interval), - R1 = range(First, Next, Interval), - simple_details(get_vhost(R1), messages, 1, R1), - simple_details(get_overview_q(R1), messages, 1, R1), + + ?AWAIT( + begin + Next = last_ts(First, Interval), + R1 = range(First, Next, Interval), + simple_details(get_vhost(R1), messages, 1, R1), + simple_details(get_overview_q(R1), messages, 1, R1) + end), delete_q(test2), - timer:sleep(1150), - Next2 = last_ts(First, Interval), - R2 = range(First, Next2, Interval), - simple_details(get_vhost(R2), messages, 0, R2), - simple_details(get_overview_q(R2), messages, 0, R2), + ?AWAIT( + begin + Next2 = last_ts(First, Interval), + R2 = range(First, Next2, Interval), + simple_details(get_vhost(R2), messages, 0, R2), + simple_details(get_overview_q(R2), messages, 0, R2) + end), [rabbit_mgmt_metrics_collector:reset_lookups(T) || {T, _} <- ?CORE_TABLES], ok. 
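The sample_retention_policies value set in init_per_group above uses the management agent's usual {RetentionPeriodSeconds, SampleIntervalSeconds} pairs (this reading follows the standard plugin configuration and is stated here as an assumption), so the suite keeps roughly ten minutes of global and basic samples at the finest supported one-second granularity. As plain application config it would look like:

    {rabbitmq_management_agent,
     [{rates_mode, detailed},
      {sample_retention_policies,
       [{global,   [{605, 1}]},
        {basic,    [{605, 1}]},
        {detailed, [{10,  1}]}]}]}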
@@ -139,16 +156,23 @@ connection_coarse_test1(_Config) -> First = exometer_slide:timestamp(), create_conn(test), create_conn(test2), - stats_series(fun stats_conn/2, [[{test, 2}, {test2, 5}], [{test, 5}, {test2, 1}], + stats_series(fun stats_conn/2, [[{test, 2}, {test2, 5}], + [{test, 5}, {test2, 1}], [{test, 10}]]), Last = last_ts(First, 5), R = range(First, Last, 5), - simple_details(get_conn(test, R), recv_oct, 10, R), - simple_details(get_conn(test2, R), recv_oct, 1, R), + ?AWAIT( + begin + simple_details(get_conn(test, R), recv_oct, 10, R), + simple_details(get_conn(test2, R), recv_oct, 1, R) + end), delete_conn(test), delete_conn(test2), - timer:sleep(1150), - assert_list([], rabbit_mgmt_db:get_all_connections(R)), + + ?AWAIT( + begin + assert_list([], rabbit_mgmt_db:get_all_connections(R)) + end), ok. fine_stats_aggregation_test(Config) -> @@ -170,12 +194,16 @@ fine_stats_aggregation_test1(_Config) -> channel_series(ch2, [{[{x, 5}], [{q1, x, 15}, {q2, x, 1}], []}, {[{x, 2}], [{q1, x, 10}, {q2, x, 2}], []}, {[{x, 3}], [{q1, x, 25}, {q2, x, 2}], []}]), - timer:sleep(1150), - fine_stats_aggregation_test0(true, First), + ?AWAIT( + begin + fine_stats_aggregation_test0(true, First) + end), delete_q(q2), - timer:sleep(1150), - fine_stats_aggregation_test0(false, First), + ?AWAIT( + begin + fine_stats_aggregation_test0(false, First) + end), delete_ch(ch1), delete_ch(ch2), delete_conn(test), @@ -229,7 +257,8 @@ fine_stats_aggregation_test0(Q2Exists, First) -> fine_stats_aggregation_time_test(Config) -> %% trace_fun(Config, [{rabbit_mgmt_db, get_data_from_nodes}]), - ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, fine_stats_aggregation_time_test1, [Config]). + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, + fine_stats_aggregation_time_test1, [Config]). 
fine_stats_aggregation_time_test1(_Config) -> [rabbit_mgmt_metrics_collector:override_lookups(T, [{exchange, fun dummy_lookup/1}, @@ -240,23 +269,27 @@ fine_stats_aggregation_time_test1(_Config) -> channel_series(ch, [{[{x, 50}], [{q, x, 15}], [{q, 5}]}, {[{x, 25}], [{q, x, 10}], [{q, 5}]}, {[{x, 25}], [{q, x, 25}], [{q, 10}]}]), - timer:sleep(1150), + Last = exometer_slide:timestamp(), channel_series(ch, [{[{x, 10}], [{q, x, 5}], [{q, 2}]}]), Next = exometer_slide:timestamp(), - R1 = range(First, Last, 1), - assert_fine_stats(m, publish, 100, get_ch(ch, R1), R1), - assert_fine_stats(m, publish, 50, get_q(q, R1), R1), - assert_fine_stats(m, deliver_get, 20, get_q(q, R1), R1), + ct:pal("ch ~p", [get_ch(ch, R1)]), + ?AWAIT( + begin + assert_fine_stats(m, publish, 100, get_ch(ch, R1), R1), + assert_fine_stats(m, publish, 50, get_q(q, R1), R1), + assert_fine_stats(m, deliver_get, 20, get_q(q, R1), R1), - R2 = range(Last, Next, 1), - assert_fine_stats(m, publish, 110, get_ch(ch, R2), R2), - assert_fine_stats(m, publish, 55, get_q(q, R2), R2), - assert_fine_stats(m, deliver_get, 22, get_q(q, R2), R2), + + R2 = range(Last, Next, 1), + assert_fine_stats(m, publish, 110, get_ch(ch, R2), R2), + assert_fine_stats(m, publish, 55, get_q(q, R2), R2), + assert_fine_stats(m, deliver_get, 22, get_q(q, R2), R2) + end), delete_q(q), delete_ch(ch), @@ -291,9 +324,11 @@ all_consumers_test1(_Config) -> {queue, fun dummy_lookup/1}]) || {T, _} <- ?CORE_TABLES], create_cons(q1, ch1, <<"ctag">>, false, true, 0, []), - timer:sleep(1150), - [Consumer] = rabbit_mgmt_db:get_all_consumers(), - [] = proplists:get_value(channel_details, Consumer), + ?AWAIT( + begin + [Consumer] = rabbit_mgmt_db:get_all_consumers(), + [] = proplists:get_value(channel_details, Consumer) + end), %% delete_cons(co), [rabbit_mgmt_metrics_collector:reset_lookups(T) || {T, _} <- ?CORE_TABLES], ok. @@ -318,8 +353,7 @@ create_cons(QName, ChName, Tag, Exclusive, AckRequired, PrefetchCount, Args) -> stats_series(Fun, ListsOfPairs) -> [begin - [Fun(Name, Msgs) || {Name, Msgs} <- List], - timer:sleep(1150) + [Fun(Name, Msgs) || {Name, Msgs} <- List] end || List <- ListsOfPairs]. stats_q(Name, Msgs) -> @@ -330,8 +364,9 @@ stats_conn(Name, Oct) -> channel_series(Name, ListOfStats) -> [begin - stats_ch(Name, XStats, QXStats, QStats), - timer:sleep(1150) + stats_ch(Name, XStats, QXStats, QStats), + timer:sleep(1000), + force_collect_all() end || {XStats, QXStats, QStats} <- ListOfStats]. stats_ch(Name, XStats, QXStats, QStats) -> @@ -467,3 +502,19 @@ pid_del(Name) -> a2b(A) -> list_to_binary(atom_to_list(A)). dummy_lookup(_Thing) -> true. + +await_condition(Fun) -> + rabbit_ct_helpers:await_condition( + fun () -> + try + Fun() + catch _:_ -> + false + end + end, 256 * 100). + +force_collect_all() -> + [begin + gen_server:call(N, force_collect) + end || N <- rabbit_mgmt_metrics_collector:all_names()], + ok. diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl index b2303b5b93e6..7813662e0bb8 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_test_unit_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). @@ -37,6 +36,22 @@ init_per_group(_, Config) -> end_per_group(_, Config) -> Config. +init_per_testcase(_, Config) -> + case application:get_all_env(rabbitmq_management) of + {error, _} = Error -> Error; + Env -> + lists:foreach(fun({Key,_Value})-> + application:unset_env(rabbitmq_management, Key) end, Env), + case application:get_all_env(rabbitmq_auth_backend_oauth2) of + {error, _} = Error -> Error; + Env2 -> lists:foreach(fun({Key,_Value})-> + application:unset_env(rabbitmq_auth_backend_oauth2, Key) end, Env2) + end + end, + Config. + + + %% ------------------------------------------------------------------- %% Test cases. %% ------------------------------------------------------------------- diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_wm_auth_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_wm_auth_SUITE.erl new file mode 100644 index 000000000000..d47350d2b926 --- /dev/null +++ b/deps/rabbitmq_management/test/rabbit_mgmt_wm_auth_SUITE.erl @@ -0,0 +1,725 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_mgmt_wm_auth_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-compile(export_all). + +all() -> + [ + {group, without_any_settings}, + {group, with_oauth_disabled}, + {group, verify_client_id_and_secret}, + {group, verify_mgt_oauth_provider_url_with_single_resource}, + {group, verify_mgt_oauth_provider_url_with_single_resource_and_another_resource}, + {group, verify_end_session_endpoint_with_single_resource}, + {group, verify_end_session_endpoint_with_single_resource_and_another_resource}, + {group, verify_oauth_initiated_logon_type_for_sp_initiated}, + {group, verify_oauth_initiated_logon_type_for_idp_initiated}, + {group, verify_oauth_disable_basic_auth}, + {group, verify_oauth_scopes} + ]. 
+ +groups() -> + [ + {without_any_settings, [], [ + should_return_disabled_auth_settings + ]}, + {with_oauth_disabled, [], [ + should_return_disabled_auth_settings + ]}, + {verify_client_id_and_secret, [], [ + {with_oauth_enabled, [], [ + should_return_disabled_auth_settings, + {with_root_issuer_url1, [], [ + {with_resource_server_id_rabbit, [], [ + should_return_disabled_auth_settings, + {with_mgt_oauth_client_id_z, [], [ + should_return_oauth_enabled, + should_return_oauth_client_id_z, + should_not_return_oauth_client_secret, + {with_mgt_oauth_client_secret_q, [], [ + should_return_oauth_enabled, + should_return_oauth_client_secret_q + ]} + ]} + ]}, + {with_resource_server_a, [], [ + should_return_disabled_auth_settings, + {with_mgt_oauth_client_id_z, [], [ + should_return_oauth_enabled, + should_return_oauth_client_id_z, + {with_mgt_resource_server_a_with_client_id_x, [], [ + should_return_oauth_resource_server_a_with_client_id_x + ]}, + {with_mgt_resource_server_a_with_client_secret_w, [], [ + should_return_oauth_resource_server_a_with_client_secret_w + ]} + ]} + ]} + ]} + ]} + ]}, + {verify_mgt_oauth_provider_url_with_single_resource, [], [ + {with_resource_server_id_rabbit, [], [ + {with_root_issuer_url1, [], [ + {with_oauth_enabled, [], [ + should_return_disabled_auth_settings, + {with_mgt_oauth_client_id_z, [], [ + should_return_mgt_oauth_provider_url_url1, + {with_mgt_oauth_provider_url_url0, [], [ + should_return_mgt_oauth_provider_url_url0 + ]} + ]} + ]} + ]}, + {with_oauth_providers_idp1_idp2, [], [ + {with_default_oauth_provider_idp1, [], [ + {with_oauth_enabled, [], [ + should_return_disabled_auth_settings, + {with_mgt_oauth_client_id_z, [], [ + should_return_mgt_oauth_provider_url_idp1_url, + {with_root_issuer_url1, [], [ + should_return_mgt_oauth_provider_url_idp1_url + ]}, + {with_mgt_oauth_provider_url_url0, [], [ + should_return_mgt_oauth_provider_url_url0 + ]} + ]} + ]} + ]} + ]} + ]} + ]}, + {verify_end_session_endpoint_with_single_resource, [], [ + {with_resource_server_id_rabbit, [], [ + {with_root_issuer_url1, [], [ + {with_oauth_enabled, [], [ + {with_mgt_oauth_client_id_z, [], [ + should_not_return_end_session_endpoint, + {with_root_end_session_endpoint_0, [], [ + should_return_end_session_endpoint_0 + ]} + ]} + ]} + ]}, + {with_oauth_providers_idp1_idp2, [], [ + {with_default_oauth_provider_idp1, [], [ + {with_oauth_enabled, [], [ + {with_mgt_oauth_client_id_z, [], [ + should_not_return_end_session_endpoint, + {with_end_session_endpoint_for_idp1_1, [], [ + should_return_end_session_endpoint_1 + ]}, + {with_root_end_session_endpoint_0, [], [ + should_not_return_end_session_endpoint, + {with_end_session_endpoint_for_idp1_1, [], [ + should_return_end_session_endpoint_1 + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]}, + {verify_end_session_endpoint_with_single_resource_and_another_resource, [], [ + {with_resource_server_id_rabbit, [], [ + {with_resource_server_a, [], [ + {with_root_issuer_url1, [], [ + {with_oauth_enabled, [], [ + should_return_disabled_auth_settings, + {with_mgt_oauth_client_id_z, [], [ + should_not_return_end_session_endpoint, + should_return_oauth_resource_server_a_without_end_session_endpoint, + {with_root_end_session_endpoint_0, [], [ + should_return_end_session_endpoint_0, + should_return_oauth_resource_server_a_with_end_session_endpoint_0 + ]}, + {with_oauth_providers_idp1_idp2, [], [ + {with_default_oauth_provider_idp1, [], [ + {with_end_session_endpoint_for_idp1_1, [], [ + should_return_end_session_endpoint_1, + 
should_return_oauth_resource_server_a_with_end_session_endpoint_1, + {with_oauth_provider_idp2_for_resource_server_a, [], [ + {with_end_session_endpoint_for_idp2_2, [], [ + should_return_oauth_resource_server_a_with_end_session_endpoint_2 + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]}, + {verify_mgt_oauth_provider_url_with_single_resource_and_another_resource, [], [ + {with_resource_server_id_rabbit, [], [ + {with_resource_server_a, [], [ + {with_root_issuer_url1, [], [ + {with_oauth_enabled, [], [ + should_return_disabled_auth_settings, + {with_mgt_oauth_client_id_z, [], [ + should_return_oauth_resource_server_rabbit_with_oauth_provider_url_url1, + should_return_oauth_resource_server_a_with_oauth_provider_url_url1, + {with_mgt_oauth_provider_url_url0, [], [ + should_return_oauth_resource_server_rabbit_with_oauth_provider_url_url0, + should_return_oauth_resource_server_a_with_oauth_provider_url_url0, + {with_mgt_oauth_resource_server_a_with_oauth_provider_url_url1, [], [ + should_return_oauth_resource_server_rabbit_with_oauth_provider_url_url0, + should_return_oauth_resource_server_a_with_oauth_provider_url_url1 + ]} + ]} + ]} + ]} + ]}, + {with_oauth_providers_idp1_idp2, [], [ + {with_default_oauth_provider_idp1, [], [ + {with_oauth_enabled, [], [ + should_return_disabled_auth_settings, + {with_mgt_oauth_client_id_z, [], [ + should_return_oauth_resource_server_rabbit_with_oauth_provider_url_idp1_url, + {with_root_issuer_url1, [], [ + should_return_oauth_resource_server_rabbit_with_oauth_provider_url_idp1_url + ]}, + {with_mgt_oauth_provider_url_url0, [], [ + should_return_oauth_resource_server_rabbit_with_oauth_provider_url_url0, + {with_mgt_oauth_resource_server_a_with_oauth_provider_url_url1, [], [ + should_return_oauth_resource_server_rabbit_with_oauth_provider_url_url0, + should_return_oauth_resource_server_a_with_oauth_provider_url_url1 + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]}, + {verify_oauth_initiated_logon_type_for_sp_initiated, [], [ + should_return_disabled_auth_settings, + {with_resource_server_id_rabbit, [], [ + {with_root_issuer_url1, [], [ + should_return_disabled_auth_settings, + {with_oauth_enabled, [], [ + should_return_disabled_auth_settings, + {with_mgt_oauth_client_id_z, [], [ + should_return_oauth_enabled, + should_not_return_oauth_initiated_logon_type, + {with_oauth_initiated_logon_type_sp_initiated, [], [ + should_not_return_oauth_initiated_logon_type + ]}, + {with_resource_server_a, [], [ + {with_mgt_resource_server_a_with_oauth_initiated_logon_type_sp_initiated, [], [ + should_return_oauth_resource_server_a_with_oauth_initiated_logon_type_sp_initiated + ]} + ]} + ]} + ]} + ]} + ]} + ]}, + {verify_oauth_initiated_logon_type_for_idp_initiated, [], [ + should_return_disabled_auth_settings, + {with_root_issuer_url1, [], [ + should_return_disabled_auth_settings, + {with_oauth_initiated_logon_type_idp_initiated, [], [ + should_return_disabled_auth_settings, + {with_resource_server_id_rabbit, [], [ + should_return_disabled_auth_settings, + {with_oauth_enabled, [], [ + should_return_oauth_enabled, + should_return_oauth_initiated_logon_type_idp_initiated, + {with_resource_server_a, [], [ + {with_mgt_resource_server_a_with_oauth_initiated_logon_type_idp_initiated, [], [ + should_return_oauth_resource_server_a_with_oauth_initiated_logon_type_idp_initiated + ]}, + {with_mgt_resource_server_a_with_oauth_initiated_logon_type_sp_initiated, [], [ + should_not_return_oauth_resource_server_a, + {with_mgt_resource_server_a_with_client_id_x, [], [ + 
should_return_oauth_resource_server_a_with_client_id_x + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]}, + {verify_oauth_disable_basic_auth, [], [ + {with_resource_server_id_rabbit, [], [ + {with_root_issuer_url1, [], [ + {with_oauth_enabled, [], [ + {with_mgt_oauth_client_id_z, [], [ + should_return_oauth_disable_basic_auth_true, + {with_oauth_disable_basic_auth_false, [], [ + should_return_oauth_disable_basic_auth_false + ]} + ]} + ]} + ]} + ]} + ]}, + {verify_oauth_scopes, [], [ + {with_resource_server_id_rabbit, [], [ + {with_root_issuer_url1, [], [ + {with_oauth_enabled, [], [ + {with_mgt_oauth_client_id_z, [], [ + should_not_return_oauth_scopes, + {with_oauth_scopes_admin_mgt, [], [ + should_return_oauth_scopes_admin_mgt, + {with_resource_server_a, [], [ + {with_mgt_resource_server_a_with_scopes_read_write, [], [ + should_return_mgt_oauth_resource_server_a_with_scopes_read_write + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]. + +%% ------------------------------------------------------------------- +%% Setup/teardown. +%% ------------------------------------------------------------------- +init_per_suite(Config) -> + [ {rabbit, <<"rabbit">>}, + {idp1, <<"idp1">>}, + {idp2, <<"idp2">>}, + {idp3, <<"idp3">>}, + {idp1_url, <<"https://idp1">>}, + {idp2_url, <<"https://idp2">>}, + {idp3_url, <<"https://idp3">>}, + {url0, <<"https://url0">>}, + {url1, <<"https://url1">>}, + {logout_url_0, <<"https://logout_0">>}, + {logout_url_1, <<"https://logout_1">>}, + {logout_url_2, <<"https://logout_2">>}, + {a, <<"a">>}, + {b, <<"b">>}, + {q, <<"q">>}, + {w, <<"w">>}, + {z, <<"z">>}, + {x, <<"x">>}, + {admin_mgt, <<"admin mgt">>}, + {read_write, <<"read write">>} | Config]. + +end_per_suite(_Config) -> + ok. + +init_per_group(with_oauth_disabled, Config) -> + application:set_env(rabbitmq_management, oauth_enabled, false), + Config; +init_per_group(with_oauth_enabled, Config) -> + application:set_env(rabbitmq_management, oauth_enabled, true), + Config; +init_per_group(with_resource_server_id_rabbit, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, ?config(rabbit, Config)), + Config; +init_per_group(with_mgt_oauth_client_id_z, Config) -> + application:set_env(rabbitmq_management, oauth_client_id, ?config(z, Config)), + Config; +init_per_group(with_mgt_resource_server_a_with_client_secret_w, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_client_secret, ?config(w, Config)), + Config; +init_per_group(with_mgt_oauth_client_secret_q, Config) -> + application:set_env(rabbitmq_management, oauth_client_secret, ?config(q, Config)), + Config; +init_per_group(with_mgt_oauth_provider_url_url0, Config) -> + application:set_env(rabbitmq_management, oauth_provider_url, ?config(url0, Config)), + Config; +init_per_group(with_root_issuer_url1, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, issuer, ?config(url1, Config)), + Config; +init_per_group(with_oauth_scopes_admin_mgt, Config) -> + application:set_env(rabbitmq_management, oauth_scopes, ?config(admin_mgt, Config)), + Config; +init_per_group(with_oauth_scopes_write_read, Config) -> + application:set_env(rabbitmq_management, oauth_scopes, ?config(write_read, Config)), + Config; +init_per_group(with_oauth_initiated_logon_type_idp_initiated, Config) -> + application:set_env(rabbitmq_management, oauth_initiated_logon_type, idp_initiated), + Config; +init_per_group(with_oauth_initiated_logon_type_sp_initiated, Config) -> + 
application:set_env(rabbitmq_management, oauth_initiated_logon_type, sp_initiated), + Config; +init_per_group(with_mgt_resource_server_a_with_oauth_initiated_logon_type_sp_initiated, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_initiated_logon_type, sp_initiated), + Config; +init_per_group(with_mgt_resource_server_a_with_oauth_initiated_logon_type_idp_initiated, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_initiated_logon_type, idp_initiated), + Config; +init_per_group(with_oauth_disable_basic_auth_false, Config) -> + application:set_env(rabbitmq_management, oauth_disable_basic_auth, false), + Config; +init_per_group(with_oauth_providers_idp1_idp2, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{ + ?config(idp1, Config) => [ { issuer, ?config(idp1_url, Config)} ], + ?config(idp2, Config) => [ { issuer, ?config(idp2_url, Config)} ] + }), + Config; +init_per_group(with_resource_server_a, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_auth_backend_oauth2, resource_servers, + ?config(a, Config), id, ?config(a, Config)), + Config; +init_per_group(with_resource_server_a_with_oauth_provider_idp1, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_auth_backend_oauth2, resource_servers, + ?config(a, Config), oauth_provider_id, ?config(idp1, Config)), + Config; +init_per_group(with_mgt_resource_server_a_with_scopes_read_write, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), scopes, ?config(read_write, Config)), + Config; +init_per_group(with_mgt_oauth_resource_server_a_with_oauth_provider_url_url1, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_provider_url, ?config(url1, Config)), + Config; +init_per_group(with_mgt_resource_server_a_with_client_id_x, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_client_id, ?config(x, Config)), + Config; +init_per_group(with_default_oauth_provider_idp1, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, ?config(idp1, Config)), + Config; +init_per_group(with_default_oauth_provider_idp3, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, ?config(idp3, Config)), + Config; +init_per_group(with_root_end_session_endpoint_0, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, end_session_endpoint, ?config(logout_url_0, Config)), + Config; +init_per_group(with_end_session_endpoint_for_idp1_1, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_auth_backend_oauth2, oauth_providers, + ?config(idp1, Config), end_session_endpoint, ?config(logout_url_1, Config)), + Config; +init_per_group(with_end_session_endpoint_for_idp2_2, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_auth_backend_oauth2, oauth_providers, + ?config(idp2, Config), end_session_endpoint, ?config(logout_url_2, Config)), + Config; + +init_per_group(with_oauth_provider_idp2_for_resource_server_a, Config) -> + set_attribute_in_entry_for_env_variable(rabbitmq_auth_backend_oauth2, resource_servers, + ?config(a, Config), oauth_provider_id, ?config(idp2, Config)), + Config; + +init_per_group(_, Config) -> + Config. 
+ +end_per_group(with_oauth_providers_idp1_idp2, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; +end_per_group(with_mgt_oauth_client_secret_q, Config) -> + application:unset_env(rabbitmq_management, oauth_client_secret), + Config; +end_per_group(with_oauth_scopes_admin_mgt, Config) -> + application:unset_env(rabbitmq_management, oauth_scopes), + Config; +end_per_group(with_oauth_scopes_write_read, Config) -> + application:unset_env(rabbitmq_management, oauth_scopes), + Config; +end_per_group(with_oauth_disabled, Config) -> + application:unset_env(rabbitmq_management, oauth_enabled), + Config; +end_per_group(with_oauth_enabled, Config) -> + application:unset_env(rabbitmq_management, oauth_enabled), + Config; +end_per_group(with_oauth_disable_basic_auth_false, Config) -> + application:unset_env(rabbitmq_management, oauth_disable_basic_auth), + Config; +end_per_group(with_resource_server_id_rabbit, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + Config; +end_per_group(with_mgt_oauth_provider_url_url0, Config) -> + application:unset_env(rabbitmq_management, oauth_provider_url), + Config; +end_per_group(with_root_issuer_url1, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, issuer), + Config; +end_per_group(with_mgt_oauth_client_id_z, Config) -> + application:unset_env(rabbitmq_management, oauth_client_id), + Config; +end_per_group(with_oauth_initiated_logon_type_idp_initiated, Config) -> + application:unset_env(rabbitmq_management, oauth_initiated_logon_type), + Config; +end_per_group(with_oauth_initiated_logon_type_sp_initiated, Config) -> + application:unset_env(rabbitmq_management, oauth_initiated_logon_type), + Config; +end_per_group(with_mgt_resource_server_a_with_client_secret_w, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_client_secret), + Config; +end_per_group(with_resource_server_a, Config) -> + remove_entry_from_env_variable(rabbitmq_auth_backend_oauth2, resource_servers, + ?config(a, Config)), + Config; +end_per_group(with_resource_server_a_with_oauth_provider_idp1, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_auth_backend_oauth2, resource_servers, + ?config(a, Config), oauth_provider_id), + Config; +end_per_group(with_mgt_resource_server_a_with_scopes_read_write, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), scopes), + Config; +end_per_group(with_mgt_oauth_resource_server_a_with_oauth_provider_url_url1, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_provider_url), + Config; +end_per_group(with_mgt_resource_server_a_with_oauth_initiated_logon_type_sp_initiated, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_initiated_logon_type), + Config; +end_per_group(with_mgt_resource_server_a_with_oauth_initiated_logon_type_idp_initiated, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_initiated_logon_type), + Config; +end_per_group(with_mgt_resource_server_a_with_client_id_x, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_management, oauth_resource_servers, + ?config(a, Config), oauth_client_id), + Config; 
+end_per_group(with_default_oauth_provider_idp1, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, default_oauth_provider), + Config; +end_per_group(with_default_oauth_provider_idp3, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, default_oauth_provider), + Config; +end_per_group(with_root_end_session_endpoint_0, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, end_session_endpoint), + Config; +end_per_group(with_end_session_endpoint_for_idp1_1, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_auth_backend_oauth2, oauth_providers, + ?config(idp1, Config), end_session_endpoint), + Config; +end_per_group(with_end_session_endpoint_for_idp2_2, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_auth_backend_oauth2, oauth_providers, + ?config(idp2, Config), end_session_endpoint), + Config; +end_per_group(with_oauth_provider_idp2_for_resource_server_a, Config) -> + remove_attribute_from_entry_from_env_variable(rabbitmq_auth_backend_oauth2, resource_servers, + ?config(a, Config), oauth_provider_id), + Config; + +end_per_group(_, Config) -> + Config. + + +%% ------------------------------------------------------------------- +%% Test cases. +%% ------------------------------------------------------------------- +should_not_return_oauth_client_secret(_Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(false, proplists:is_defined(oauth_client_secret, Actual)). +should_return_oauth_client_secret_q(Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(?config(q, Config), proplists:get_value(oauth_client_secret, Actual)). +should_return_oauth_resource_server_a_with_client_id_x(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_client_id, x). +should_return_oauth_resource_server_a_with_client_secret_w(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_client_secret, w). +should_not_return_oauth_resource_server_a_with_client_secret(Config) -> + assert_attribute_not_defined_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_client_secret). + +should_return_mgt_oauth_provider_url_idp1_url(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, rabbit, oauth_provider_url, idp1_url). + +should_return_mgt_oauth_provider_url_url1(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, rabbit, oauth_provider_url, url1). + +should_return_mgt_oauth_provider_url_url0(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, rabbit, oauth_provider_url, url0). + +should_return_oauth_scopes_admin_mgt(Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(?config(admin_mgt, Config), proplists:get_value(oauth_scopes, Actual)). + +should_return_mgt_oauth_resource_server_a_with_scopes_read_write(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, scopes, read_write). + +should_return_disabled_auth_settings(_Config) -> + [{oauth_enabled, false}] = rabbit_mgmt_wm_auth:authSettings(). + +should_return_mgt_resource_server_a_oauth_provider_url_url0(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_provider_url, url0). 
+ +should_return_mgt_oauth_resource_server_a_with_client_id_x(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_client_id, x). + +should_return_oauth_resource_server_a_with_oauth_provider_url_idp1_url(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_provider_url, idp1_url). + +should_return_oauth_resource_server_a_with_oauth_provider_url_url1(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_provider_url, url1). + +should_return_oauth_resource_server_a_with_oauth_provider_url_url0(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_provider_url, url0). + +should_return_oauth_resource_server_rabbit_with_oauth_provider_url_idp1_url(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, rabbit, oauth_provider_url, idp1_url). + +should_return_oauth_resource_server_rabbit_with_oauth_provider_url_url1(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, rabbit, oauth_provider_url, url1). + +should_return_oauth_resource_server_rabbit_with_oauth_provider_url_url0(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, rabbit, oauth_provider_url, url0). + +should_not_return_oauth_initiated_logon_type(_Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(false, proplists:is_defined(oauth_initiated_logon_type, Actual)). +should_return_oauth_initiated_logon_type_idp_initiated(_Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(<<"idp_initiated">>, proplists:get_value(oauth_initiated_logon_type, Actual)). + +should_not_return_oauth_resource_server_a(Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + assert_not_defined_oauth_resource_server(Actual, Config, a). + +should_not_return_oauth_resource_server_a_with_oauth_initiated_logon_type(Config) -> + assert_attribute_not_defined_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_initiated_logon_type). + +should_return_oauth_resource_server_a_with_oauth_initiated_logon_type_idp_initiated(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_initiated_logon_type, <<"idp_initiated">>). +should_return_oauth_resource_server_a_with_oauth_initiated_logon_type_sp_initiated(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, oauth_initiated_logon_type, <<"sp_initiated">>). + +should_not_return_oauth_scopes(_Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(false, proplists:is_defined(scopes, Actual)). + +should_return_oauth_enabled(_Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + log(Actual), + ?assertEqual(true, proplists:get_value(oauth_enabled, Actual)). + +should_return_oauth_idp_initiated_logon(_Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(<<"idp_initiated">>, proplists:get_value(oauth_initiated_logon_type, Actual)). + +should_return_oauth_disable_basic_auth_true(_Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(true, proplists:get_value(oauth_disable_basic_auth, Actual)). 
+ +should_return_oauth_disable_basic_auth_false(_Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(false, proplists:get_value(oauth_disable_basic_auth, Actual)). + +should_return_oauth_client_id_z(Config) -> + Actual = rabbit_mgmt_wm_auth:authSettings(), + ?assertEqual(?config(z, Config), proplists:get_value(oauth_client_id, Actual)). + +should_not_return_end_session_endpoint(Config) -> + assert_attribute_not_defined_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, rabbit, end_session_endpoint). + +should_return_end_session_endpoint_0(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, rabbit, end_session_endpoint, ?config(logout_url_0, Config)). + +should_return_end_session_endpoint_1(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, rabbit, end_session_endpoint, ?config(logout_url_1, Config)). + +should_return_oauth_resource_server_a_without_end_session_endpoint(Config) -> + assert_attribute_not_defined_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, end_session_endpoint). + +should_return_oauth_resource_server_a_with_end_session_endpoint_0(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, end_session_endpoint, ?config(logout_url_0, Config)). + +should_return_oauth_resource_server_a_with_end_session_endpoint_1(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, end_session_endpoint, ?config(logout_url_1, Config)). + +should_return_oauth_resource_server_a_with_end_session_endpoint_2(Config) -> + assertEqual_on_attribute_for_oauth_resource_server(rabbit_mgmt_wm_auth:authSettings(), + Config, a, end_session_endpoint, ?config(logout_url_2, Config)). + +%% ------------------------------------------------------------------- +%% Utility/helper functions +%% ------------------------------------------------------------------- + +delete_key_with_empty_proplist(Key, Map) -> + case maps:get(Key, Map) of + [] -> maps:remove(Key, Map); + _ -> Map + end. +remove_entry_from_env_variable(Application, EnvVar, Key) -> + Map = application:get_env(Application, EnvVar, #{}), + NewMap = maps:remove(Key, Map), + case maps:size(NewMap) of + 0 -> application:unset_env(Application, EnvVar); + _ -> application:set_env(Application, EnvVar, NewMap) + end. +remove_attribute_from_entry_from_env_variable(Application, EnvVar, Key, Attribute) -> + Map = application:get_env(Application, EnvVar, #{}), + Proplist = proplists:delete(Attribute, maps:get(Key, Map, [])), + NewMap = delete_key_with_empty_proplist(Key, maps:put(Key, Proplist, Map)), + case maps:size(NewMap) of + 0 -> application:unset_env(Application, EnvVar); + _ -> application:set_env(Application, EnvVar, NewMap) + end. + +assertEqual_on_attribute_for_oauth_resource_server(Actual, Config, ConfigKey, Attribute, ConfigValue) -> + log(Actual), + OAuthResourceServers = proplists:get_value(oauth_resource_servers, Actual), + OauthResource = maps:get(?config(ConfigKey, Config), OAuthResourceServers), + Value = case ConfigValue of + Binary when is_binary(Binary) -> Binary; + _ -> ?config(ConfigValue, Config) + end, + ?assertEqual(Value, proplists:get_value(Attribute, OauthResource)). 
+ +assert_attribute_not_defined_for_oauth_resource_server(Actual, Config, ConfigKey, Attribute) -> + log(Actual), + OAuthResourceServers = proplists:get_value(oauth_resource_servers, Actual), + OauthResource = maps:get(?config(ConfigKey, Config), OAuthResourceServers), + ?assertEqual(false, proplists:is_defined(Attribute, OauthResource)). + +assert_not_defined_oauth_resource_server(Actual, Config, ConfigKey) -> + log(Actual), + OAuthResourceServers = proplists:get_value(oauth_resource_servers, Actual), + ?assertEqual(false, maps:is_key(?config(ConfigKey, Config), OAuthResourceServers)). + +set_attribute_in_entry_for_env_variable(Application, EnvVar, Key, Attribute, Value) -> + Map = application:get_env(Application, EnvVar, #{}), + ct:log("set_attribute_in_entry_for_env_variable before ~p", [Map]), + Map1 = maps:put(Key, [ { Attribute, Value} | maps:get(Key, Map, []) ], Map), + ct:log("set_attribute_in_entry_for_env_variable after ~p", [Map1]), + application:set_env(Application, EnvVar, Map1). + +log(AuthSettings) -> + logEnvVars(), + ct:log("authSettings: ~p ", [AuthSettings]). +logEnvVars() -> + ct:log("rabbitmq_management: ~p ", [application:get_all_env(rabbitmq_management)]), + ct:log("rabbitmq_auth_backend_oauth2: ~p ", [application:get_all_env(rabbitmq_auth_backend_oauth2)]). diff --git a/deps/rabbitmq_management/test/stats_SUITE.erl b/deps/rabbitmq_management/test/stats_SUITE.erl index 703f49e6e5a5..01e84b3acdce 100644 --- a/deps/rabbitmq_management/test/stats_SUITE.erl +++ b/deps/rabbitmq_management/test/stats_SUITE.erl @@ -2,12 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(stats_SUITE). -include_lib("proper/include/proper.hrl"). +-include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). -compile(export_all). @@ -175,4 +176,4 @@ format_range_constant(_Config) -> SamplesFun), 5 = proplists:get_value(publish, Got), PD = proplists:get_value(publish_details, Got), - 0.0 = proplists:get_value(rate, PD). + ?assertEqual(0.0, proplists:get_value(rate, PD)). diff --git a/deps/rabbitmq_management_agent/.gitignore b/deps/rabbitmq_management_agent/.gitignore deleted file mode 100644 index 7b45202588ad..000000000000 --- a/deps/rabbitmq_management_agent/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -.sw? -.*.sw? 
-*.beam -*.plt -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -rabbitmq_management_agent.d diff --git a/deps/rabbitmq_management_agent/BUILD.bazel b/deps/rabbitmq_management_agent/BUILD.bazel index 32711733d9e6..5bdbd9fe7b3f 100644 --- a/deps/rabbitmq_management_agent/BUILD.bazel +++ b/deps/rabbitmq_management_agent/BUILD.bazel @@ -43,7 +43,6 @@ all_srcs(name = "all_srcs") test_suite_beam_files(name = "test_suite_beam_files") # gazelle:erlang_app_extra_app xmerl -# gazelle:erlang_app_extra_app mnesia # gazelle:erlang_app_extra_app ssl # gazelle:erlang_app_extra_app crypto # gazelle:erlang_app_extra_app public_key @@ -62,7 +61,6 @@ rabbitmq_app( beam_files = [":beam_files"], extra_apps = [ "crypto", - "mnesia", "public_key", "ssl", "xmerl", @@ -89,7 +87,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) diff --git a/deps/rabbitmq_management_agent/Makefile b/deps/rabbitmq_management_agent/Makefile index 545880b17469..13531dd7da93 100644 --- a/deps/rabbitmq_management_agent/Makefile +++ b/deps/rabbitmq_management_agent/Makefile @@ -19,7 +19,10 @@ endef DEPS = rabbit_common rabbit rabbitmq_web_dispatch TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers -LOCAL_DEPS += xmerl mnesia ranch ssl crypto public_key +LOCAL_DEPS += xmerl ranch ssl crypto public_key + +PLT_APPS += rabbitmqctl + DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_management_agent/include/rabbit_mgmt_agent.hrl b/deps/rabbitmq_management_agent/include/rabbit_mgmt_agent.hrl index 77222e04374d..1458207aee61 100644 --- a/deps/rabbitmq_management_agent/include/rabbit_mgmt_agent.hrl +++ b/deps/rabbitmq_management_agent/include/rabbit_mgmt_agent.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(MANAGEMENT_PG_SCOPE, rabbitmq_management). diff --git a/deps/rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl b/deps/rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl index 92d20fdb6b8e..4e4bf858551c 100644 --- a/deps/rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl +++ b/deps/rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -type(event_type() :: queue_stats | queue_exchange_stats | vhost_stats @@ -35,6 +35,7 @@ {exchange_stats_publish_in, set}, {consumer_stats, set}, {queue_stats, set}, + {queue_basic_stats, set}, {queue_msg_stats, set}, {vhost_msg_stats, set}, {queue_process_stats, set}, diff --git a/deps/rabbitmq_management_agent/include/rabbit_mgmt_records.hrl b/deps/rabbitmq_management_agent/include/rabbit_mgmt_records.hrl index b89910151fc5..3bcfc2db2b50 100644 --- a/deps/rabbitmq_management_agent/include/rabbit_mgmt_records.hrl +++ b/deps/rabbitmq_management_agent/include/rabbit_mgmt_records.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -include_lib("rabbitmq_web_dispatch/include/rabbitmq_web_dispatch_records.hrl"). diff --git a/deps/rabbitmq_management_agent/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand.erl b/deps/rabbitmq_management_agent/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand.erl index 9896542ed9fa..8ccabba375cf 100644 --- a/deps/rabbitmq_management_agent/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand.erl +++ b/deps/rabbitmq_management_agent/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module('Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand'). diff --git a/deps/rabbitmq_management_agent/src/exometer_slide.erl b/deps/rabbitmq_management_agent/src/exometer_slide.erl index b4c6d3e8a19e..3f77c83d84a3 100644 --- a/deps/rabbitmq_management_agent/src/exometer_slide.erl +++ b/deps/rabbitmq_management_agent/src/exometer_slide.erl @@ -44,7 +44,7 @@ %% @end %% %% -%% All modifications are (C) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% All modifications are (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% The Initial Developer of the Original Code is Basho Technologies, Inc. -module(exometer_slide). diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_app.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_app.erl index 8de60f9a0dfc..eb84b06e7a59 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_app.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_agent_app). 
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_config.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_config.erl index b86eb4236f56..08f0b7f928f0 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_config.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_config.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_agent_config). diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup.erl index 71b72e73be76..04504a1e847c 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_agent_sup). diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup_sup.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup_sup.erl index 5a167d724716..a446c0710b3f 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup_sup.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_agent_sup_sup). diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_data.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_data.erl index 9cf1d0f78f6f..53b56104a0f8 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_data.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_data.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_data). @@ -10,14 +10,13 @@ -include("rabbit_mgmt_records.hrl"). -include("rabbit_mgmt_metrics.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbit_common/include/rabbit_core_metrics.hrl"). - -export([empty/2, pick_range/2]). % delegate api -export([overview_data/4, consumer_data/2, all_list_queue_data/3, + all_list_basic_queue_data/3, all_detail_queue_data/3, all_exchange_data/3, all_connection_data/3, @@ -66,6 +65,12 @@ all_list_queue_data(_Pid, Ids, Ranges) -> maps:put(Id, Data, Acc) end, #{}, Ids). 
+all_list_basic_queue_data(_Pid, Ids, Ranges) -> + lists:foldl(fun (Id, Acc) -> + Data = list_basic_queue_data(Ranges, Id), + maps:put(Id, Data, Acc) + end, #{}, Ids). + all_detail_channel_data(_Pid, Ids, Ranges) -> lists:foldl(fun (Id, Acc) -> Data = detail_channel_data(Ranges, Id), @@ -204,6 +209,11 @@ list_queue_data(Ranges, Id) -> queue_raw_deliver_stats_data(Ranges, Id) ++ [{queue_stats, lookup_element(queue_stats, Id)}]). +list_basic_queue_data(Ranges, Id) -> + maps:from_list(queue_raw_message_data(Ranges, Id) ++ + queue_raw_deliver_stats_data(Ranges, Id) ++ + [{queue_stats, lookup_element(queue_basic_stats, Id)}]). + detail_channel_data(Ranges, Id) -> maps:from_list(channel_raw_message_data(Ranges, Id) ++ channel_raw_detail_stats_data(Ranges, Id) ++ @@ -367,12 +377,11 @@ match_consumer_spec(Id) -> match_queue_consumer_spec(Id) -> [{{{'$1', '_', '_'}, '_'}, [{'==', {Id}, '$1'}], ['$_']}]. -lookup_element(Table, Key) -> lookup_element(Table, Key, 2). +lookup_element(Table, Key) -> + lookup_element(Table, Key, 2). lookup_element(Table, Key, Pos) -> - try ets:lookup_element(Table, Key, Pos) - catch error:badarg -> [] - end. + ets:lookup_element(Table, Key, Pos, []). -spec lookup_smaller_sample(atom(), any()) -> maybe_slide(). lookup_smaller_sample(Table, Id) -> diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_data_compat.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_data_compat.erl index 1de6cdb1d044..0295fa930018 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_data_compat.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_data_compat.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_data_compat). diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_db_handler.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_db_handler.erl index 7fe9072bdb46..09183e1875e7 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_db_handler.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_db_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_db_handler). @@ -92,6 +92,10 @@ handle_event(_, State) -> handle_info(_Info, State) -> {ok, State}. +terminate(stop, _State) -> + %% if the node is stopping, we don't want to wait + %% 5 seconds for the statistics to get disabled + ok; terminate(_Arg, _State) -> ensure_statistics_disabled(), ok. diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_external_stats.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_external_stats.erl index 4b969553fdc9..7a78f8bb30f2 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_external_stats.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_external_stats.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_external_stats). @@ -15,8 +15,6 @@ -export([list_registry_plugins/1]). --include_lib("rabbit_common/include/rabbit.hrl"). - -define(METRICS_KEYS, [fd_used, sockets_used, mem_used, disk_free, proc_used, gc_num, gc_bytes_reclaimed, context_switches]). @@ -197,10 +195,11 @@ i(fd_used, State) -> get_used_fd(State); i(fd_total, #state{fd_total = FdTotal}=State) -> {State, FdTotal}; +%% sockets_used and sockets_total are unused since RabbitMQ 4.0. i(sockets_used, State) -> - {State, proplists:get_value(sockets_used, file_handle_cache:info([sockets_used]))}; + {State, 0}; i(sockets_total, State) -> - {State, proplists:get_value(sockets_limit, file_handle_cache:info([sockets_limit]))}; + {State, 0}; i(os_pid, State) -> {State, rabbit_data_coercion:to_utf8_binary(os:getpid())}; i(mem_used, State) -> @@ -245,7 +244,7 @@ i(net_ticktime, State) -> i(persister_stats, State) -> {State, persister_stats(State)}; i(enabled_plugins, State) -> - {ok, Dir} = application:get_env(rabbit, enabled_plugins_file), + Dir = rabbit_plugins:enabled_plugins_file(), {State, rabbit_plugins:read_enabled(Dir)}; i(auth_mechanisms, State) -> {ok, Mechanisms} = application:get_env(rabbit, auth_mechanisms), @@ -415,10 +414,23 @@ update_state(State0) -> FHC = get_fhc_stats(), State0#state{fhc_stats = FHC}. +%% @todo All these stats are zeroes. Remove eventually. get_fhc_stats() -> dict:to_list(dict:merge(fun(_, V1, V2) -> V1 + V2 end, - dict:from_list(file_handle_cache_stats:get()), + dict:from_list(zero_fhc_stats()), dict:from_list(get_ra_io_metrics()))). +zero_fhc_stats() -> + [{{Op, Counter}, 0} || Op <- [io_read, io_write], + Counter <- [count, bytes, time]] + ++ + [{{Op, Counter}, 0} || Op <- [io_sync, io_seek], + Counter <- [count, time]] + ++ + [{{Op, Counter}, 0} || Op <- [io_reopen, mnesia_ram_tx, mnesia_disk_tx, + msg_store_read, msg_store_write, + queue_index_write, queue_index_read], + Counter <- [count]]. + get_ra_io_metrics() -> lists:sort(ets:tab2list(ra_io_metrics)). diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_ff.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_ff.erl index 9be869bcbdd3..5022adc020b3 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_ff.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_ff.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_ff). diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_format.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_format.erl index 5a333801ea24..2f748796f627 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_format.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_format.erl @@ -2,14 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_format). -export([format/2, ip/1, ipb/1, amqp_table/1, tuple/1]). -export([parameter/1, now_to_str/0, now_to_str/1, strip_pids/1]). --export([protocol/1, resource/1, queue/1, queue_state/1, queue_info/1]). +-export([protocol/1, resource/1, queue/1, queue/2, queue_state/1, queue_info/1]). -export([exchange/1, user/1, internal_user/1, binding/1, url/2]). -export([pack_binding_props/2, tokenise/1]). -export([to_amqp_table/1, listener/1, web_context/1, properties/1, basic_properties/1]). @@ -18,7 +18,8 @@ -export([format_nulls/1, escape_html_tags/1]). -export([print/2, print/1]). --export([format_queue_stats/1, format_channel_stats/1, +-export([format_queue_stats/1, format_queue_basic_stats/1, + format_channel_stats/1, format_consumer_arguments/1, format_connection_created/1, format_accept_content/1, format_args/1]). @@ -52,20 +53,15 @@ format_queue_stats({exclusive_consumer_pid, _}) -> []; format_queue_stats({single_active_consumer_pid, _}) -> []; -format_queue_stats({slave_pids, ''}) -> - []; -format_queue_stats({slave_pids, Pids}) -> - [{slave_nodes, [node(Pid) || Pid <- Pids]}]; format_queue_stats({leader, Leader}) -> [{node, Leader}]; -format_queue_stats({synchronised_slave_pids, ''}) -> - []; format_queue_stats({effective_policy_definition, []}) -> [{effective_policy_definition, #{}}]; -format_queue_stats({synchronised_slave_pids, Pids}) -> - [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]}]; format_queue_stats({backing_queue_status, Value}) -> - [{backing_queue_status, properties(Value)}]; + case proplists:get_value(version, Value, undefined) of + undefined -> []; + Version -> [{storage_version, Version}] + end; format_queue_stats({idle_since, Value}) -> [{idle_since, now_to_str(Value)}]; format_queue_stats({state, Value}) -> @@ -74,9 +70,55 @@ format_queue_stats({disk_reads, _}) -> []; format_queue_stats({disk_writes, _}) -> []; +format_queue_stats({members, Value}) -> + [{members, lists:sort(Value)}]; +format_queue_stats({online, Value}) -> + [{online, lists:sort(Value)}]; +format_queue_stats({open_files, Value}) -> + [{open_files, lists:sort(Value)}]; format_queue_stats(Stat) -> [Stat]. 
+format_queue_basic_stats({_, ''}) -> + []; +format_queue_basic_stats({reductions, _}) -> + []; +format_queue_basic_stats({exclusive_consumer_pid, _}) -> + []; +format_queue_basic_stats({single_active_consumer_pid, _}) -> + []; +format_queue_basic_stats({slave_pids, Pids}) -> + [{slave_nodes, [node(Pid) || Pid <- Pids]}]; +format_queue_basic_stats({leader, Leader}) -> + [{node, Leader}]; +format_queue_basic_stats({effective_policy_definition, []}) -> + [{effective_policy_definition, #{}}]; +format_queue_basic_stats({synchronised_slave_pids, Pids}) -> + [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]}]; +format_queue_basic_stats({backing_queue_status, Value}) -> + case proplists:get_value(version, Value, undefined) of + undefined -> []; + Version -> [{storage_version, Version}] + end; +format_queue_basic_stats({garbage_collection, _}) -> + []; +format_queue_basic_stats({idle_since, _Value}) -> + []; +format_queue_basic_stats({state, Value}) -> + queue_state(Value); +format_queue_basic_stats({disk_reads, _}) -> + []; +format_queue_basic_stats({disk_writes, _}) -> + []; +format_queue_basic_stats({members, Value}) -> + [{members, lists:sort(Value)}]; +format_queue_basic_stats({online, Value}) -> + [{online, lists:sort(Value)}]; +format_queue_basic_stats({open_files, Value}) -> + [{open_files, lists:sort(Value)}]; +format_queue_basic_stats(Stat) -> + [Stat]. + format_channel_stats([{idle_since, Value} | Rest]) -> [{idle_since, now_to_str(Value)} | Rest]; format_channel_stats(Stats) -> @@ -176,6 +218,8 @@ protocol(unknown) -> unknown; protocol(Version = {_Major, _Minor, _Revision}) -> protocol({'AMQP', Version}); +protocol(Version = {1, 0}) -> + protocol({'AMQP', Version}); protocol({Family, Version}) -> print("~ts ~ts", [Family, protocol_version(Version)]); protocol(Protocol) when is_binary(Protocol) -> @@ -219,9 +263,11 @@ internal_user(User) -> {tags, tags_as_binaries(internal_user:get_tags(User))}, {limits, internal_user:get_limits(User)}]. -user(User) -> +user(User) -> [{name, User#user.username}, - {tags, tags_as_binaries(User#user.tags)}]. + {tags, tags_as_binaries(User#user.tags)}, + {is_internal_user, lists:any(fun({Module,_}) -> Module == rabbit_auth_backend_internal end, + User#user.authz_backends)}]. tags_as_binaries(Tags) -> [to_binary(T) || T <- Tags]. @@ -329,34 +375,26 @@ exchange(X) -> %% We get queues using rabbit_amqqueue:list/1 rather than :info_all/1 since %% the latter wakes up each queue. Therefore we have a record rather than a %% proplist to deal with. -queue(Q) when ?is_amqqueue(Q) -> - Name = amqqueue:get_name(Q), +queue(Q) -> + queue(Q, #{}). 
+ +queue(Q, Ctx) when ?is_amqqueue(Q) -> + #resource{name = Name, virtual_host = VHost} = amqqueue:get_name(Q), Durable = amqqueue:is_durable(Q), AutoDelete = amqqueue:is_auto_delete(Q), ExclusiveOwner = amqqueue:get_exclusive_owner(Q), Arguments = amqqueue:get_arguments(Q), Pid = amqqueue:get_pid(Q), - State = amqqueue:get_state(Q), - %% TODO: in the future queue types should be registered with their - %% full and short names and this hard-coded translation should not be - %% necessary - Type = case amqqueue:get_type(Q) of - rabbit_classic_queue -> classic; - rabbit_quorum_queue -> quorum; - rabbit_stream_queue -> stream; - T -> T - end, - format( - [{name, Name}, - {durable, Durable}, - {auto_delete, AutoDelete}, - {exclusive, is_pid(ExclusiveOwner)}, - {owner_pid, ExclusiveOwner}, - {arguments, Arguments}, - {pid, Pid}, - {type, Type}, - {state, State}] ++ rabbit_amqqueue:format(Q), - {fun format_exchange_and_queue/1, false}). + [{name, Name}, + {vhost, VHost}, + {durable, Durable}, + {auto_delete, AutoDelete}, + {exclusive, is_pid(ExclusiveOwner)}, + {owner_pid, ExclusiveOwner}, + {arguments, amqp_table(Arguments)}, + {pid, Pid} + %% type specific stuff like, state, type, members etc is returned here + | rabbit_queue_type:format(Q, Ctx)]. queue_info(List) -> format(List, {fun format_exchange_and_queue/1, false}). @@ -469,14 +507,6 @@ strip_pids([{channel_pid, _} | T], Acc) -> strip_pids(T, Acc); strip_pids([{exclusive_consumer_pid, _} | T], Acc) -> strip_pids(T, Acc); -strip_pids([{slave_pids, ''} | T], Acc) -> - strip_pids(T, Acc); -strip_pids([{slave_pids, Pids} | T], Acc) -> - strip_pids(T, [{slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]); -strip_pids([{synchronised_slave_pids, ''} | T], Acc) -> - strip_pids(T, Acc); -strip_pids([{synchronised_slave_pids, Pids} | T], Acc) -> - strip_pids(T, [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]); strip_pids([{K, [P|_] = Nested} | T], Acc) when is_tuple(P) -> % recurse strip_pids(T, [{K, strip_pids(Nested)} | Acc]); strip_pids([{K, [L|_] = Nested} | T], Acc) when is_list(L) -> % recurse diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_gc.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_gc.erl index b03aab448ef7..55ebf40b099a 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_gc.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_gc.erl @@ -2,12 +2,10 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_gc). --include_lib("rabbit_common/include/rabbit.hrl"). - -record(state, {timer, interval }). 
@@ -80,6 +78,7 @@ gc_queues() -> LocalGbSet = gb_sets:from_list(LocalQueues), gc_entity(queue_stats_publish, GbSet), gc_entity(queue_stats, LocalGbSet), + gc_entity(queue_basic_stats, LocalGbSet), gc_entity(queue_msg_stats, LocalGbSet), gc_entity(queue_process_stats, LocalGbSet), gc_entity(queue_msg_rates, LocalGbSet), diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_collector.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_collector.erl index 4e2d6d2496a5..776da4a41f40 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_collector.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_collector.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_metrics_collector). @@ -15,7 +15,7 @@ -spec start_link(atom()) -> rabbit_types:ok_pid_or_error(). --export([name/1]). +-export([name/1, all_names/0]). -export([start_link/1]). -export([override_lookups/2, reset_lookups/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, @@ -49,6 +49,9 @@ reset(Table) -> name(Table) -> list_to_atom((atom_to_list(Table) ++ "_metrics_collector")). +all_names() -> + [name(Table) || {Table, _} <- ?CORE_TABLES]. + start_link(Table) -> gen_server:start_link({local, name(Table)}, ?MODULE, [Table], []). @@ -82,6 +85,13 @@ handle_call({override_lookups, Lookups}, _From, State) -> lookup_exchange = pget(exchange, Lookups)}}; handle_call({submit, Fun}, _From, State) -> {reply, Fun(), State}; +handle_call(wait, _From, State) -> + {reply, ok, State}; +handle_call(force_collect, _From, State0) -> + Timestamp = exometer_slide:timestamp(), + State = aggregate_metrics(Timestamp, State0), + %% used for testing + {reply, ok, State}; handle_call(_Request, _From, State) -> {noreply, State}. @@ -150,6 +160,7 @@ handle_deleted_queues(queue_coarse_metrics, Remainders, QNegStats, Size, Interval, false) || {Size, Interval} <- BPolicies], ets:delete(queue_stats, Queue), + ets:delete(queue_basic_stats, Queue), ets:delete(queue_process_stats, Queue) end, maps:to_list(Remainders)); handle_deleted_queues(_T, _R, _P) -> ok. @@ -441,13 +452,17 @@ aggregate_entry({Id, Metrics, 0}, NextStats, Ops0, GPolicies), Ops2 = case QueueFun(Id) of true -> - O = insert_entry_ops(queue_msg_rates, Id, false, Stats, Ops1, - BPolicies), + O_1 = insert_entry_ops(queue_msg_rates, Id, false, Stats, Ops1, + BPolicies), Fmt = rabbit_mgmt_format:format( Metrics, {fun rabbit_mgmt_format:format_queue_stats/1, false}), - insert_op(queue_stats, Id, ?queue_stats(Id, Fmt), O); - false -> + FmtBasic = rabbit_mgmt_format:format( + Metrics, + {fun rabbit_mgmt_format:format_queue_basic_stats/1, false}), + O_2 = insert_op(queue_basic_stats, Id, ?queue_stats(Id, FmtBasic), O_1), + insert_op(queue_stats, Id, ?queue_stats(Id, Fmt), O_2); + _ -> Ops1 end, {insert_old_aggr_stats(NextStats, Id, Stats), Ops2, State}; @@ -649,6 +664,8 @@ vhost(#resource{virtual_host = VHost}) -> VHost; vhost({queue_stats, #resource{virtual_host = VHost}}) -> VHost; +vhost({queue_basic_stats, #resource{virtual_host = VHost}}) -> + VHost; vhost({TName, Pid}) -> pget(vhost, lookup_element(TName, Pid, 2)). 
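As a reference for the new `queue_basic_stats` table populated above: a minimal sketch of how the `format_queue_basic_stats/1` clauses introduced in the `rabbit_mgmt_format` hunk reduce one queue's stats proplist. Each clause either drops a pair, rewrites it (for example `leader` becomes `node`), or keeps it through the final catch-all; the collector runs the same per-pair formatter via `rabbit_mgmt_format:format/2` before writing the result. The sample values below are made up.

    Sample = [{reductions, 98765},                             %% dropped
              {leader, 'rabbit@node-1'},                       %% rewritten to {node, 'rabbit@node-1'}
              {members, ['rabbit@node-2', 'rabbit@node-1']},   %% kept, sorted
              {messages, 10}],                                 %% kept as-is by the catch-all clause
    Reduced = lists:flatmap(fun rabbit_mgmt_format:format_queue_basic_stats/1, Sample).
    %% Reduced =:= [{node,'rabbit@node-1'},
    %%              {members,['rabbit@node-1','rabbit@node-2']},
    %%              {messages,10}]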
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_gc.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_gc.erl index 12365e8d07a7..7e48e6672422 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_gc.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_gc.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_metrics_gc). @@ -19,6 +19,8 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). +-define(LARGE_CONSUMER_COUNT, 1000). + name(EventType) -> list_to_atom((atom_to_list(EventType) ++ "_metrics_gc")). @@ -42,7 +44,8 @@ handle_cast({event, #event{type = connection_closed, props = Props}}, handle_cast({event, #event{type = channel_closed, props = Props}}, State = #state{basic_i = BIntervals}) -> Pid = pget(pid, Props), - remove_channel(Pid, BIntervals), + ConsumerCount = pget(consumer_count, Props), + remove_channel(Pid, ConsumerCount, BIntervals), {noreply, State}; handle_cast({event, #event{type = consumer_deleted, props = Props}}, State) -> remove_consumer(Props), @@ -82,13 +85,13 @@ remove_connection(Id, BIntervals) -> delete_samples(connection_stats_coarse_conn_stats, Id, BIntervals), ok. -remove_channel(Id, BIntervals) -> +remove_channel(Id, ConsumerCount, BIntervals) -> ets:delete(channel_created_stats, Id), ets:delete(channel_stats, Id), delete_samples(channel_process_stats, Id, BIntervals), delete_samples(channel_stats_fine_stats, Id, BIntervals), delete_samples(channel_stats_deliver_stats, Id, BIntervals), - index_delete(consumer_stats, channel, Id), + index_delete(consumer_stats, {channel, ConsumerCount}, Id), index_delete(channel_exchange_stats_fine_stats, channel, Id), index_delete(channel_queue_stats_deliver_stats, channel, Id), ok. @@ -108,6 +111,7 @@ remove_exchange(Name, BIntervals) -> remove_queue(Name, BIntervals) -> ets:delete(queue_stats, Name), + ets:delete(queue_basic_stats, Name), delete_samples(queue_stats_publish, Name, BIntervals), delete_samples(queue_stats_deliver_stats, Name, BIntervals), delete_samples(queue_process_stats, Name, BIntervals), @@ -136,18 +140,32 @@ delete_samples(Table, Id, Intervals) -> [ets:delete(Table, {Id, I}) || I <- Intervals], ok. -index_delete(consumer_stats = Table, channel = Type, Id) -> - IndexTable = rabbit_mgmt_metrics_collector:index_table(Table, Type), - MatchPattern = {'_', Id, '_'}, - %% Delete consumer_stats_queue_index - ets:match_delete(consumer_stats_queue_index, - {'_', MatchPattern}), - %% Delete consumer_stats - ets:match_delete(consumer_stats, - {MatchPattern,'_'}), - %% Delete consumer_stats_channel_index - ets:delete(IndexTable, Id), - ok; +index_delete(consumer_stats = Table, {channel = Type, ConsumerCount}, Id) -> + %% This uses two different deletion strategies depending on how many + %% consumers a channel had. Most of the time there are many channels + %% with a few (or even just one) consumers. For this common case, `ets:delete/2` is optimal + %% since it avoids table scans. + %% + %% In the rather extreme scenario where only a handful of channels have a very large + %% (e.g. tens of thousands) of consumers, `ets:match_delete/2` becomes a more efficient option. 
+ %% + %% See rabbitmq-server/rabbitmq#10451, rabbitmq-server/rabbitmq#9356. + case ConsumerCount > ?LARGE_CONSUMER_COUNT of + true -> + IndexTable = rabbit_mgmt_metrics_collector:index_table(Table, Type), + MatchPattern = {'_', Id, '_'}, + %% Delete consumer_stats_queue_index + ets:match_delete(consumer_stats_queue_index, + {'_', MatchPattern}), + %% Delete consumer_stats + ets:match_delete(consumer_stats, + {MatchPattern,'_'}), + %% Delete consumer_stats_channel_index + ets:delete(IndexTable, Id), + ok; + false -> + index_delete(Table, Type, Id) + end; index_delete(Table, Type, Id) -> IndexTable = rabbit_mgmt_metrics_collector:index_table(Table, Type), Keys = ets:lookup(IndexTable, Id), diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_storage.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_storage.erl index 3551f087fc6a..05b136b90c47 100644 --- a/deps/rabbitmq_management_agent/src/rabbit_mgmt_storage.erl +++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_storage.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_storage). -behaviour(gen_server2). diff --git a/deps/rabbitmq_management_agent/test/exometer_slide_SUITE.erl b/deps/rabbitmq_management_agent/test/exometer_slide_SUITE.erl index 58252309f19b..02ad874aa533 100644 --- a/deps/rabbitmq_management_agent/test/exometer_slide_SUITE.erl +++ b/deps/rabbitmq_management_agent/test/exometer_slide_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(exometer_slide_SUITE). diff --git a/deps/rabbitmq_management_agent/test/metrics_SUITE.erl b/deps/rabbitmq_management_agent/test/metrics_SUITE.erl index 36729aa236c8..c5165efff670 100644 --- a/deps/rabbitmq_management_agent/test/metrics_SUITE.erl +++ b/deps/rabbitmq_management_agent/test/metrics_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(metrics_SUITE). -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). all() -> diff --git a/deps/rabbitmq_management_agent/test/rabbit_mgmt_gc_SUITE.erl b/deps/rabbitmq_management_agent/test/rabbit_mgmt_gc_SUITE.erl index a306002f308f..06ad8d8c954b 100644 --- a/deps/rabbitmq_management_agent/test/rabbit_mgmt_gc_SUITE.erl +++ b/deps/rabbitmq_management_agent/test/rabbit_mgmt_gc_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
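To make the trade-off described in the comment above concrete, here is a minimal standalone sketch (a hypothetical helper, not part of this change) of the threshold dispatch. It assumes `consumer_stats` rows are keyed by `{Queue, ChannelPid, ConsumerTag}` tuples, which is what the `{'_', Id, '_'}` match pattern implies, and that the channel index table maps a channel pid to those keys.

    cleanup_channel_consumers(ChPid, ConsumerCount, IndexTab) ->
        case ConsumerCount > 1000 of                    %% cf. ?LARGE_CONSUMER_COUNT
            true ->
                %% A handful of channels with tens of thousands of consumers:
                %% one scan of the whole table beats that many single deletes.
                ets:match_delete(consumer_stats, {{'_', ChPid, '_'}, '_'});
            false ->
                %% Common case, one or a few consumers per channel: delete each
                %% indexed key individually and avoid any table scan.
                [ets:delete(consumer_stats, Key)
                 || {_, Key} <- ets:lookup(IndexTab, ChPid)]
        end,
        ets:delete(IndexTab, ChPid),
        ok.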
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_gc_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_mgmt_metrics.hrl"). @@ -23,6 +22,7 @@ groups() -> [ {non_parallel_tests, [], [ queue_stats, + basic_queue_stats, quorum_queue_stats, connection_stats, channel_stats, @@ -77,6 +77,13 @@ init_per_group(_, Config) -> end_per_group(_, Config) -> Config. +init_per_testcase(basic_queue_stats, Config) -> + IsEnabled = rabbit_ct_broker_helpers:is_feature_flag_enabled( + Config, reduced_queues_endpoint), + case IsEnabled of + true -> Config; + false -> {skip, "The detailed queues endpoint is not available."} + end; init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase), rabbit_ct_helpers:run_steps(Config, @@ -176,6 +183,40 @@ queue_stats(Config) -> ok. +basic_queue_stats(Config) -> + A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, A), + + amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_stats">>}), + amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_stats">>}, + #amqp_msg{payload = <<"hello">>}), + {#'basic.get_ok'{}, _} = amqp_channel:call(Ch, #'basic.get'{queue = <<"queue_stats">>, + no_ack = true}), + timer:sleep(1150), + + Q = q(<<"myqueue">>), + + rabbit_ct_broker_helpers:rpc(Config, A, ets, insert, + [queue_basic_stats, {Q, infos}]), + + [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup, + [queue_basic_stats, Q]), + + %% Trigger gc. When the gen_server:call returns, the gc has already finished. + rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_mgmt_gc, start_gc]), + rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_mgmt_gc, test]), + + [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, + [queue_basic_stats]), + + [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup, + [queue_basic_stats, Q]), + + amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_stats">>}), + rabbit_ct_client_helpers:close_channel(Ch), + + ok. + quorum_queue_stats(Config) -> A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename), diff --git a/deps/rabbitmq_management_agent/test/rabbit_mgmt_slide_SUITE.erl b/deps/rabbitmq_management_agent/test/rabbit_mgmt_slide_SUITE.erl index 121e8dc60225..da13710f7305 100644 --- a/deps/rabbitmq_management_agent/test/rabbit_mgmt_slide_SUITE.erl +++ b/deps/rabbitmq_management_agent/test/rabbit_mgmt_slide_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mgmt_slide_SUITE). diff --git a/deps/rabbitmq_mqtt/.gitignore b/deps/rabbitmq_mqtt/.gitignore index 42376fc0ad6f..548353cc0ed2 100644 --- a/deps/rabbitmq_mqtt/.gitignore +++ b/deps/rabbitmq_mqtt/.gitignore @@ -1,27 +1,4 @@ -.sw? -.*.sw? 
-*.beam -*.coverdata -.idea/* -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock /log/ -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/test/ct.cover.spec -/xrefr debug/* -*.plt test/config_schema_SUITE_data/schema/ -test/.idea/* - -rabbitmq_mqtt.d diff --git a/deps/rabbitmq_mqtt/BUILD.bazel b/deps/rabbitmq_mqtt/BUILD.bazel index 7c2272a82621..b9280b4dbbd4 100644 --- a/deps/rabbitmq_mqtt/BUILD.bazel +++ b/deps/rabbitmq_mqtt/BUILD.bazel @@ -4,7 +4,6 @@ load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") load( "//:rabbitmq.bzl", "BROKER_VERSION_REQUIREMENTS_ANY", - "ENABLE_FEATURE_MAYBE_EXPR", "RABBITMQ_DIALYZER_OPTS", "assert_suites", "broker_for_integration_suites", @@ -27,10 +26,7 @@ APP_DESCRIPTION = "RabbitMQ MQTT Adapter" APP_MODULE = "rabbit_mqtt" APP_ENV = """[ - {default_user, <<"guest">>}, - {default_pass, <<"guest">>}, {ssl_cert_login,false}, - %% To satisfy an unfortunate expectation from popular MQTT clients. {allow_anonymous, true}, {vhost, <<"/">>}, {exchange, <<"amq.topic">>}, @@ -85,7 +81,6 @@ rabbitmq_app( "//deps/amqp10_common:erlang_app", "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", - "@ra//:erlang_app", "@ranch//:erlang_app", ], ) @@ -102,7 +97,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) @@ -121,7 +116,6 @@ eunit( ":test_event_recorder_beam", ":test_util_beam", ], - erl_extra_args = [ENABLE_FEATURE_MAYBE_EXPR], target = ":test_erlang_app", ) @@ -130,7 +124,6 @@ broker_for_integration_suites( "//deps/rabbitmq_management:erlang_app", "//deps/rabbitmq_web_mqtt:erlang_app", "//deps/rabbitmq_consistent_hash_exchange:erlang_app", - "//deps/rabbitmq_amqp1_0:erlang_app", "//deps/rabbitmq_stomp:erlang_app", "//deps/rabbitmq_stream:erlang_app", ], @@ -156,7 +149,7 @@ rabbitmq_integration_suite( ":test_util_beam", ], flaky = True, - shard_count = 6, + shard_count = 4, sharding_method = "case", runtime_deps = [ "@emqtt//:erlang_app", @@ -181,16 +174,6 @@ rabbitmq_integration_suite( name = "config_schema_SUITE", ) -rabbitmq_integration_suite( - name = "ff_SUITE", - additional_beam = [ - ":test_util_beam", - ], - runtime_deps = [ - "@emqtt//:erlang_app", - ], -) - rabbitmq_integration_suite( name = "java_SUITE", additional_beam = [ @@ -200,14 +183,12 @@ rabbitmq_integration_suite( sharding_method = "group", ) -rabbitmq_suite( - name = "mqtt_machine_SUITE", - size = "small", -) - rabbitmq_suite( name = "processor_SUITE", size = "small", + runtime_deps = [ + "@meck//:erlang_app", + ], deps = [ "//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", @@ -250,7 +231,7 @@ rabbitmq_integration_suite( ":test_util_beam", ":test_event_recorder_beam", ], - shard_count = 12, + shard_count = 10, runtime_deps = [ "//deps/rabbitmq_management_agent:erlang_app", "@emqtt//:erlang_app", @@ -278,8 +259,9 @@ rabbitmq_integration_suite( additional_beam = [ ":test_util_beam", ], + shard_count = 2, runtime_deps = [ - "//deps/amqp10_client:erlang_app", + "//deps/rabbitmq_amqp_client:erlang_app", "//deps/rabbitmq_stomp:erlang_app", "//deps/rabbitmq_stream_common:erlang_app", "@emqtt//:erlang_app", @@ -314,6 +296,7 @@ rabbitmq_suite( size = "small", deps = [ "//deps/amqp10_common:erlang_app", + "//deps/rabbit:erlang_app", ], ) diff --git a/deps/rabbitmq_mqtt/Makefile b/deps/rabbitmq_mqtt/Makefile index 05a2e045320f..64bfb24e5116 100644 --- 
a/deps/rabbitmq_mqtt/Makefile +++ b/deps/rabbitmq_mqtt/Makefile @@ -4,10 +4,7 @@ PROJECT_MOD = rabbit_mqtt define PROJECT_ENV [ - {default_user, <<"guest">>}, - {default_pass, <<"guest">>}, {ssl_cert_login,false}, - %% To satisfy an unfortunate expectation from popular MQTT clients. {allow_anonymous, true}, {vhost, <<"/">>}, {exchange, <<"amq.topic">>}, @@ -44,11 +41,13 @@ BUILD_WITHOUT_QUIC=1 export BUILD_WITHOUT_QUIC LOCAL_DEPS = ssl -DEPS = ranch rabbit_common rabbit ra -TEST_DEPS = emqtt ct_helper rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management rabbitmq_web_mqtt amqp_client rabbitmq_consistent_hash_exchange rabbitmq_amqp1_0 amqp10_client rabbitmq_stomp rabbitmq_stream +DEPS = ranch rabbit_common rabbit amqp10_common +TEST_DEPS = emqtt ct_helper rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management rabbitmq_web_mqtt amqp_client rabbitmq_consistent_hash_exchange rabbitmq_amqp_client rabbitmq_stomp rabbitmq_stream + +PLT_APPS += rabbitmqctl elixir dep_ct_helper = git https://github.com/extend/ct_helper.git master -dep_emqtt = git https://github.com/rabbitmq/emqtt.git master +dep_emqtt = git https://github.com/emqx/emqtt.git 1.11.0 DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_mqtt/README.md b/deps/rabbitmq_mqtt/README.md index f0ba5d6b0382..9afbdba051a7 100644 --- a/deps/rabbitmq_mqtt/README.md +++ b/deps/rabbitmq_mqtt/README.md @@ -32,7 +32,7 @@ against it. Note that there must be no other MQTT server running on ports `1883` ## Copyright and License -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Released under the [Mozilla Public License](https://www.rabbitmq.com/mpl.html), the same as RabbitMQ. 
diff --git a/deps/rabbitmq_mqtt/app.bzl b/deps/rabbitmq_mqtt/app.bzl index 05074b8c3d69..87d17a12e46d 100644 --- a/deps/rabbitmq_mqtt/app.bzl +++ b/deps/rabbitmq_mqtt/app.bzl @@ -17,14 +17,9 @@ def all_beam_files(name = "all_beam_files"): erlang_bytecode( name = "other_beam", srcs = [ - "src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl", "src/mc_mqtt.erl", - "src/mqtt_machine.erl", - "src/mqtt_machine_v0.erl", - "src/mqtt_node.erl", "src/rabbit_mqtt.erl", - "src/rabbit_mqtt_collector.erl", "src/rabbit_mqtt_confirms.erl", "src/rabbit_mqtt_ff.erl", "src/rabbit_mqtt_internal_event_handler.erl", @@ -46,7 +41,7 @@ def all_beam_files(name = "all_beam_files"): beam = [":behaviours"], dest = "ebin", erlc_opts = "//:erlc_opts", - deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_cli:erlang_app", "@ra//:erlang_app", "@ranch//:erlang_app"], + deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_cli:erlang_app", "@ranch//:erlang_app"], ) def all_test_beam_files(name = "all_test_beam_files"): @@ -68,14 +63,9 @@ def all_test_beam_files(name = "all_test_beam_files"): name = "test_other_beam", testonly = True, srcs = [ - "src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl", "src/mc_mqtt.erl", - "src/mqtt_machine.erl", - "src/mqtt_machine_v0.erl", - "src/mqtt_node.erl", "src/rabbit_mqtt.erl", - "src/rabbit_mqtt_collector.erl", "src/rabbit_mqtt_confirms.erl", "src/rabbit_mqtt_ff.erl", "src/rabbit_mqtt_internal_event_handler.erl", @@ -102,7 +92,6 @@ def all_test_beam_files(name = "all_test_beam_files"): "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_cli:erlang_app", - "@ra//:erlang_app", "@ranch//:erlang_app", ], ) @@ -127,14 +116,9 @@ def all_srcs(name = "all_srcs"): filegroup( name = "srcs", srcs = [ - "src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl", "src/mc_mqtt.erl", - "src/mqtt_machine.erl", - "src/mqtt_machine_v0.erl", - "src/mqtt_node.erl", "src/rabbit_mqtt.erl", - "src/rabbit_mqtt_collector.erl", "src/rabbit_mqtt_confirms.erl", "src/rabbit_mqtt_ff.erl", "src/rabbit_mqtt_internal_event_handler.erl", @@ -156,8 +140,6 @@ def all_srcs(name = "all_srcs"): filegroup( name = "public_hdrs", srcs = [ - "include/mqtt_machine.hrl", - "include/mqtt_machine_v0.hrl", "include/rabbit_mqtt.hrl", "include/rabbit_mqtt_packet.hrl", ], @@ -213,15 +195,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbitmq_mqtt", erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "mqtt_machine_SUITE_beam_files", - testonly = True, - srcs = ["test/mqtt_machine_SUITE.erl"], - outs = ["test/mqtt_machine_SUITE.beam"], - hdrs = ["include/mqtt_machine.hrl"], - app_name = "rabbitmq_mqtt", - erlc_opts = "//:test_erlc_opts", - ) + erlang_bytecode( name = "processor_SUITE_beam_files", testonly = True, @@ -280,14 +254,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbitmq_mqtt", erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "ff_SUITE_beam_files", - testonly = True, - srcs = ["test/ff_SUITE.erl"], - outs = ["test/ff_SUITE.beam"], - app_name = "rabbitmq_mqtt", - erlc_opts = "//:test_erlc_opts", - ) + erlang_bytecode( name 
= "shared_SUITE_beam_files", testonly = True, @@ -351,7 +318,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): hdrs = ["include/rabbit_mqtt_packet.hrl"], app_name = "rabbitmq_mqtt", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp10_common:erlang_app"], + deps = ["//deps/amqp10_common:erlang_app", "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app"], ) erlang_bytecode( name = "protocol_interop_SUITE_beam_files", @@ -360,5 +327,5 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/protocol_interop_SUITE.beam"], app_name = "rabbitmq_mqtt", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_stomp:erlang_app"], + deps = ["//deps/amqp10_common:erlang_app", "//deps/amqp_client:erlang_app", "//deps/rabbitmq_stomp:erlang_app"], ) diff --git a/deps/rabbitmq_mqtt/include/mqtt_machine.hrl b/deps/rabbitmq_mqtt/include/mqtt_machine.hrl deleted file mode 100644 index 53b84c03ebe3..000000000000 --- a/deps/rabbitmq_mqtt/include/mqtt_machine.hrl +++ /dev/null @@ -1,25 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - -%% A client ID that is tracked in Ra is a list of bytes -%% as returned by binary_to_list/1 in -%% https://github.com/rabbitmq/rabbitmq-server/blob/48467d6e1283b8d81e52cfd49c06ea4eaa31617d/deps/rabbitmq_mqtt/src/rabbit_mqtt_frame.erl#L137 -%% prior to 3.12.0. -%% This has two downsides: -%% 1. Lists consume more memory than binaries (when tracking many clients). -%% 2. This violates the MQTT spec which states -%% "The ClientId MUST be a UTF-8 encoded string as defined in Section 1.5.3 [MQTT-3.1.3-4]." [v4 3.1.3.1] -%% However, for backwards compatibility, we leave the client ID as a list of bytes in the Ra machine state because -%% feature flag delete_ra_cluster_mqtt_node introduced in 3.12.0 will delete the Ra cluster anyway. --type client_id_ra() :: [byte()]. - --record(machine_state, { - client_ids = #{} :: #{client_id_ra() => Connection :: pid()}, - pids = #{} :: #{Connection :: pid() => [client_id_ra(), ...]}, - %% add acouple of fields for future extensibility - reserved_1, - reserved_2}). diff --git a/deps/rabbitmq_mqtt/include/mqtt_machine_v0.hrl b/deps/rabbitmq_mqtt/include/mqtt_machine_v0.hrl deleted file mode 100644 index 53c8af3ed7a1..000000000000 --- a/deps/rabbitmq_mqtt/include/mqtt_machine_v0.hrl +++ /dev/null @@ -1,8 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --record(machine_state, {client_ids = #{}}). diff --git a/deps/rabbitmq_mqtt/include/rabbit_mqtt.hrl b/deps/rabbitmq_mqtt/include/rabbit_mqtt.hrl index 17c90aa6aaf8..88bc8e43c19b 100644 --- a/deps/rabbitmq_mqtt/include/rabbit_mqtt.hrl +++ b/deps/rabbitmq_mqtt/include/rabbit_mqtt.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -define(APP_NAME, rabbitmq_mqtt). @@ -10,8 +10,11 @@ -define(QUEUE_TYPE_QOS_0, rabbit_mqtt_qos0_queue). -define(PERSISTENT_TERM_MAILBOX_SOFT_LIMIT, mqtt_mailbox_soft_limit). -define(PERSISTENT_TERM_EXCHANGE, mqtt_exchange). --define(MQTT_GUIDE_URL, <<"https://rabbitmq.com/mqtt.html">>). +-define(DEFAULT_MQTT_EXCHANGE, <<"amq.topic">>). +-define(MQTT_GUIDE_URL, <<"https://rabbitmq.com/docs/mqtt/">>). +-define(MQTT_TCP_PROTOCOL, 'mqtt'). +-define(MQTT_TLS_PROTOCOL, 'mqtt/ssl'). -define(MQTT_PROTO_V3, mqtt310). -define(MQTT_PROTO_V4, mqtt311). -define(MQTT_PROTO_V5, mqtt50). diff --git a/deps/rabbitmq_mqtt/include/rabbit_mqtt_packet.hrl b/deps/rabbitmq_mqtt/include/rabbit_mqtt_packet.hrl index f62dbcd31b7b..b5e44fd9866d 100644 --- a/deps/rabbitmq_mqtt/include/rabbit_mqtt_packet.hrl +++ b/deps/rabbitmq_mqtt/include/rabbit_mqtt_packet.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(PERSISTENT_TERM_MAX_PACKET_SIZE_UNAUTHENTICATED, mqtt_max_packet_size_unauthenticated). diff --git a/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema b/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema index cef29eeb4eaf..80f1d83295f9 100644 --- a/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema +++ b/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema @@ -6,35 +6,8 @@ %% ---------------------------------------------------------------------------- % {rabbitmq_mqtt, -% [%% Set the default user name and password. Will be used as the default login -%% if a connecting client provides no other login details. -%% -%% Please note that setting this will allow clients to connect without -%% authenticating! -%% -%% {default_user, <<"guest">>}, -%% {default_pass, <<"guest">>}, - -{mapping, "mqtt.default_user", "rabbitmq_mqtt.default_user", [ - {datatype, string} -]}. - -{mapping, "mqtt.default_pass", "rabbitmq_mqtt.default_pass", [ - {datatype, string} -]}. - -{translation, "rabbitmq_mqtt.default_user", -fun(Conf) -> - list_to_binary(cuttlefish:conf_get("mqtt.default_user", Conf)) -end}. - -{translation, "rabbitmq_mqtt.default_pass", -fun(Conf) -> - list_to_binary(cuttlefish:conf_get("mqtt.default_pass", Conf)) -end}. - -%% Enable anonymous access. If this is set to false, clients MUST provide -%% login information in order to connect. See the default_user/default_pass +% [%% Enable anonymous access. If this is set to false, clients MUST provide +%% login information in order to connect. See the anonymous_login_user/anonymous_login_pass %% configuration elements for managing logins without authentication. %% %% {allow_anonymous, true}, diff --git a/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl b/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl deleted file mode 100644 index fa8e09341c6b..000000000000 --- a/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl +++ /dev/null @@ -1,67 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. - --module('Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand'). - --include("rabbit_mqtt.hrl"). - --behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour'). - --export([scopes/0, - switches/0, - aliases/0, - usage/0, - usage_doc_guides/0, - banner/2, - validate/2, - merge_defaults/2, - run/2, - output/2, - description/0, - help_section/0]). - -scopes() -> [ctl]. -switches() -> []. -aliases() -> []. - -description() -> <<"Removes cluster member and permanently deletes its cluster-wide MQTT state">>. - -help_section() -> - {plugin, mqtt}. - -validate([], _Opts) -> - {validation_failure, not_enough_args}; -validate([_, _ | _], _Opts) -> - {validation_failure, too_many_args}; -validate([_], _) -> - ok. - -merge_defaults(Args, Opts) -> - {Args, Opts}. - -usage() -> - <<"decommission_mqtt_node ">>. - -usage_doc_guides() -> - [?MQTT_GUIDE_URL]. - -run([Node], #{node := NodeName, - timeout := Timeout}) -> - case rabbit_misc:rpc_call(NodeName, rabbit_mqtt_collector, leave, [Node], Timeout) of - {badrpc, _} = Error -> - Error; - nodedown -> - {ok, list_to_binary(io_lib:format("Node ~ts is down but has been successfully removed" - " from the cluster", [Node]))}; - Result -> - %% 'ok' or 'timeout' - Result - end. - -banner([Node], _) -> list_to_binary(io_lib:format("Removing node ~ts from the list of MQTT nodes...", [Node])). - -output(Result, _Opts) -> - 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Result). diff --git a/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl b/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl index 07265b56e109..98fe6968122d 100644 --- a/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl +++ b/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand'). diff --git a/deps/rabbitmq_mqtt/src/mc_mqtt.erl b/deps/rabbitmq_mqtt/src/mc_mqtt.erl index 2d183518cf6a..b6cae214c8c3 100644 --- a/deps/rabbitmq_mqtt/src/mc_mqtt.erl +++ b/deps/rabbitmq_mqtt/src/mc_mqtt.erl @@ -1,15 +1,14 @@ -module(mc_mqtt). -behaviour(mc). --include("rabbit_mqtt_packet.hrl"). -include("rabbit_mqtt.hrl"). +-include("rabbit_mqtt_packet.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("amqp10_common/include/amqp10_framing.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit/include/mc.hrl"). -define(CONTENT_TYPE_AMQP, <<"message/vnd.rabbitmq.amqp">>). --define(DEFAULT_MQTT_EXCHANGE, <<"amq.topic">>). -export([ init/1, @@ -17,25 +16,19 @@ x_header/2, property/2, routing_headers/2, - convert_to/2, - convert_from/2, + convert_to/3, + convert_from/3, protocol_state/2, prepare/2 ]). 
init(Msg = #mqtt_msg{qos = Qos, - props = Props}) - when is_integer(Qos) -> - Anns0 = case Qos > 0 of - true -> - #{durable => true}; - false -> - #{} - end, + props = Props}) -> + Anns0 = #{?ANN_DURABLE => durable(Qos)}, Anns1 = case Props of #{'Message-Expiry-Interval' := Seconds} -> Anns0#{ttl => timer:seconds(Seconds), - timestamp => os:system_time(millisecond)}; + ?ANN_TIMESTAMP => os:system_time(millisecond)}; _ -> Anns0 end, @@ -52,76 +45,81 @@ init(Msg = #mqtt_msg{qos = Qos, end, {Msg, Anns}. -convert_from(mc_amqp, Sections) -> - {Header, MsgAnns, AmqpProps, AppProps, PayloadRev, - PayloadFormatIndicator, ContentType} = +convert_from(mc_amqp, Sections, Env) -> + {Header, MsgAnns, AmqpProps, AppProps, PayloadRev, ContentType} = lists:foldl( fun(#'v1_0.header'{} = S, Acc) -> setelement(1, Acc, S); + (_Ignore = #'v1_0.delivery_annotations'{}, Acc) -> + Acc; (#'v1_0.message_annotations'{content = List}, Acc) -> setelement(2, Acc, List); (#'v1_0.properties'{} = S, Acc) -> setelement(3, Acc, S); (#'v1_0.application_properties'{content = List}, Acc) -> setelement(4, Acc, List); - (#'v1_0.footer'{}, Acc) -> - Acc; + ({amqp_encoded_body_and_footer, Body}, Acc0) -> + %% assertions + [] = element(5, Acc0), + undefined = element(6, Acc0), + Acc = setelement(5, Acc0, [Body]), + setelement(6, Acc, ?CONTENT_TYPE_AMQP); (#'v1_0.data'{content = C}, Acc) -> + %% assertion + undefined = element(6, Acc), setelement(5, Acc, [C | element(5, Acc)]); - (#'v1_0.amqp_value'{content = {binary, Bin}}, Acc) -> - setelement(5, Acc, [Bin]); - (#'v1_0.amqp_value'{content = C} = Val, Acc) -> - case amqp_to_utf8_string(C) of - cannot_convert -> - amqp_encode(Val, Acc); - String -> - Acc1 = setelement(5, Acc, [String]), - setelement(6, Acc1, true) - end; - (#'v1_0.amqp_sequence'{} = Seq, Acc) -> - amqp_encode(Seq, Acc) - end, {undefined, [], undefined, [], [], false, undefined}, Sections), + (Val, Acc0) + when is_record(Val, 'v1_0.amqp_value') orelse + is_record(Val, 'v1_0.amqp_sequence') -> + IoData = amqp10_framing:encode_bin(Val), + Acc = setelement(5, Acc0, [IoData | element(5, Acc0)]), + setelement(6, Acc, ?CONTENT_TYPE_AMQP); + (_Ignore = #'v1_0.footer'{}, Acc) -> + Acc + end, {undefined, [], undefined, [], [], undefined}, Sections), Qos = case Header of - #'v1_0.header'{durable = true} -> - ?QOS_1; + #'v1_0.header'{durable = false} -> + ?QOS_0; _ -> - ?QOS_0 + ?QOS_1 end, - Props0 = case PayloadFormatIndicator of - true -> #{'Payload-Format-Indicator' => 1}; - false -> #{} - end, - Props1 = case AmqpProps of + Props0 = case AmqpProps of #'v1_0.properties'{reply_to = {utf8, Address}} -> - MqttX = persistent_term:get(?PERSISTENT_TERM_EXCHANGE), + MqttX = maps:get(mqtt_x, Env, ?DEFAULT_MQTT_EXCHANGE), case Address of - <<"/topic/", Topic/binary>> - when MqttX =:= ?DEFAULT_MQTT_EXCHANGE -> - add_response_topic(Topic, Props0); - <<"/exchange/", MqttX:(byte_size(MqttX))/binary, "/", RoutingKey/binary>> -> - add_response_topic(RoutingKey, Props0); + <<"/exchanges/", + MqttX:(byte_size(MqttX))/binary, + "/", + RoutingKeyQuoted/binary>> -> + try rabbit_uri:urldecode(RoutingKeyQuoted) of + RoutingKey -> + MqttTopic = rabbit_mqtt_util:amqp_to_mqtt(RoutingKey), + #{'Response-Topic' => MqttTopic} + catch error:_ -> + #{} + end; _ -> - Props0 + #{} end; _ -> - Props0 + #{} end, - Props2 = case AmqpProps of + Props1 = case AmqpProps of #'v1_0.properties'{correlation_id = {_Type, _Val} = Corr} -> - Props1#{'Correlation-Data' => correlation_id(Corr)}; + Props0#{'Correlation-Data' => correlation_id(Corr)}; _ -> - Props1 + 
Props0 end, - Props3 = case ContentType of + Props2 = case ContentType of undefined -> case AmqpProps of #'v1_0.properties'{content_type = {symbol, ContentType1}} -> - Props2#{'Content-Type' => rabbit_data_coercion:to_binary(ContentType1)}; + Props1#{'Content-Type' => rabbit_data_coercion:to_binary(ContentType1)}; _ -> - Props2 + Props1 end; _ -> - Props2#{'Content-Type' => ContentType} + Props1#{'Content-Type' => ContentType} end, UserProp0 = lists:filtermap(fun({{symbol, <<"x-", _/binary>> = Key}, Val}) -> filter_map_amqp_to_utf8_string(Key, Val); @@ -133,17 +131,18 @@ convert_from(mc_amqp, Sections) -> filter_map_amqp_to_utf8_string(Key, Val) end, AppProps), Props = case UserProp0 ++ UserProp1 of - [] -> Props3; - UserProp -> Props3#{'User-Property' => UserProp} + [] -> Props2; + UserProp -> Props2#{'User-Property' => UserProp} end, - Payload = lists:flatten(lists:reverse(PayloadRev)), + Payload = lists:reverse(PayloadRev), #mqtt_msg{retain = false, qos = Qos, dup = false, props = Props, payload = Payload}; convert_from(mc_amqpl, #content{properties = PBasic, - payload_fragments_rev = Payload}) -> + payload_fragments_rev = PFR}, + _Env) -> #'P_basic'{expiration = Expiration, delivery_mode = DelMode, headers = H0, @@ -194,25 +193,17 @@ convert_from(mc_amqpl, #content{properties = PBasic, #mqtt_msg{retain = false, qos = Qos, dup = false, - payload = lists:reverse(Payload), + payload = lists:reverse(PFR), props = P}; -convert_from(_SourceProto, _) -> +convert_from(_SourceProto, _, _) -> not_implemented. -convert_to(?MODULE, Msg) -> +convert_to(?MODULE, Msg, _Env) -> Msg; convert_to(mc_amqp, #mqtt_msg{qos = Qos, props = Props, - payload = Payload}) -> - Body = case Props of - #{'Payload-Format-Indicator' := 1} - when is_binary(Payload) -> - #'v1_0.amqp_value'{content = {utf8, Payload}}; - _ -> - #'v1_0.data'{content = Payload} - end, - S0 = [Body], - + payload = Payload}, Env) -> + S0 = [#'v1_0.data'{content = Payload}], %% x- prefixed MQTT User Properties go into Message Annotations. %% All other MQTT User Properties go into Application Properties. %% MQTT User Property allows duplicate keys, while AMQP maps don't. @@ -233,7 +224,8 @@ convert_to(mc_amqp, #mqtt_msg{qos = Qos, Acc end; ({Name, Val}, {MAnns, AProps, M}) -> - {MAnns, [{{utf8, Name}, {utf8, Val}} | AProps], M#{Name => true}} + {MAnns, [{{utf8, Name}, {utf8, Val}} | AProps], + M#{Name => true}} end, {[], [], #{}}, UserProps), {lists:reverse(MsgAnnsRev), lists:reverse(AppPropsRev)}; _ -> @@ -257,19 +249,24 @@ convert_to(mc_amqp, #mqtt_msg{qos = Qos, end, CorrId = case Props of #{'Correlation-Data' := Corr} -> - {binary, Corr}; + case mc_util:urn_string_to_uuid(Corr) of + {ok, MsgUUID} -> + {uuid, MsgUUID}; + _ -> + {binary, Corr} + end; _ -> undefined end, ReplyTo = case Props of #{'Response-Topic' := MqttTopic} -> + Exchange = maps:get(mqtt_x, Env, ?DEFAULT_MQTT_EXCHANGE), Topic = rabbit_mqtt_util:mqtt_to_amqp(MqttTopic), - Address = case persistent_term:get(?PERSISTENT_TERM_EXCHANGE) of - ?DEFAULT_MQTT_EXCHANGE -> - <<"/topic/", Topic/binary>>; - Exchange -> - <<"/exchange/", Exchange/binary, "/", Topic/binary>> - end, + TopicQuoted = uri_string:quote(Topic), + %% We assume here that Exchange doesn't contain characters + %% that need to be quoted. This is a reasonable assumption + %% given that amq.topic is the default MQTT topic exchange. 
+ Address = <<"/exchanges/", Exchange/binary, "/", TopicQuoted/binary>>, {utf8, Address}; _ -> undefined @@ -287,24 +284,32 @@ convert_to(mc_amqp, #mqtt_msg{qos = Qos, [] -> S2; _ -> [#'v1_0.message_annotations'{content = MsgAnns} | S2] end, - S = [#'v1_0.header'{durable = Qos > 0} | S3], - mc_amqp:convert_from(mc_amqp, S); + S = [#'v1_0.header'{durable = durable(Qos)} | S3], + mc_amqp:convert_from(mc_amqp, S, Env); convert_to(mc_amqpl, #mqtt_msg{qos = Qos, props = Props, - payload = Payload}) -> + payload = Payload}, _Env) -> DelMode = case Qos of ?QOS_0 -> 1; ?QOS_1 -> 2 end, ContentType = case Props of - #{'Content-Type' := ContType} -> ContType; - _ -> undefined + #{'Content-Type' := ContType} + when ?IS_SHORTSTR_LEN(ContType)-> + case mc_util:utf8_string_is_ascii(ContType) of + true -> + ContType; + false -> + undefined + end; + _ -> + undefined end, Hs0 = case Props of #{'User-Property' := UserProperty} -> lists:filtermap( fun({Name, Value}) - when byte_size(Name) =< ?AMQP_LEGACY_FIELD_NAME_MAX_LEN -> + when ?IS_SHORTSTR_LEN(Name) -> {true, {Name, longstr, Value}}; (_) -> false @@ -348,15 +353,14 @@ convert_to(mc_amqpl, #mqtt_msg{qos = Qos, delivery_mode = DelMode, correlation_id = CorrId, expiration = Expiration}, - PFR = case is_binary(Payload) of - true -> [Payload]; - false -> lists:reverse(Payload) - end, + %% In practice, when converting from mc_mqtt to mc_amqpl, Payload will + %% be a single binary, in which case iolist_to_binary/1 is cheap. + PFR = [iolist_to_binary(Payload)], #content{class_id = 60, properties = BP, properties_bin = none, payload_fragments_rev = PFR}; -convert_to(_TargetProto, #mqtt_msg{}) -> +convert_to(_TargetProto, #mqtt_msg{}, _Env) -> not_implemented. size(#mqtt_msg{payload = Payload, @@ -387,7 +391,12 @@ x_header(_Key, #mqtt_msg{}) -> undefined. property(correlation_id, #mqtt_msg{props = #{'Correlation-Data' := Corr}}) -> - {binary, Corr}; + case mc_util:urn_string_to_uuid(Corr) of + {ok, UUId} -> + {uuid, UUId}; + _ -> + {binary, Corr} + end; property(_Key, #mqtt_msg{}) -> undefined. @@ -405,7 +414,8 @@ routing_headers(#mqtt_msg{}, _Opts) -> #{}. protocol_state(Msg = #mqtt_msg{props = Props0, - topic = Topic}, Anns) -> + topic = Topic, + qos = Qos0}, Anns) -> %% Remove any PUBLISH or Will Properties that are not forwarded unaltered. Props1 = maps:remove('Message-Expiry-Interval', Props0), {WillDelay, Props2} = case maps:take('Will-Delay-Interval', Props1) of @@ -416,37 +426,56 @@ protocol_state(Msg = #mqtt_msg{props = Props0, undefined -> Props2; Ttl -> - case maps:get(timestamp, Anns) of + case maps:get(?ANN_TIMESTAMP, Anns) of undefined -> Props2; Timestamp -> SourceProtocolIsMqtt = Topic =/= undefined, - %% Only if source protocol is MQTT we know that timestamp was set by the server. + %% Only if source protocol is MQTT we know that + %% timestamp was set by the server. case SourceProtocolIsMqtt of false -> Props2; true -> - %% "The PUBLISH packet sent to a Client by the Server MUST contain a - %% Message Expiry Interval set to the received value minus the time that - %% the Application Message has been waiting in the Server" [MQTT-3.3.2-6] + %% "The PUBLISH packet sent to a Client by + %% the Server MUST contain a + %% Message Expiry Interval set to the received + %% value minus the time that + %% the Application Message has been waiting + %% in the Server" [MQTT-3.3.2-6] WaitingMillis0 = os:system_time(millisecond) - Timestamp, - %% For a delayed Will Message, the waiting time starts - %% when the Will Message was published. 
+ %% For a delayed Will Message, the waiting + %% time starts when the Will Message was published. WaitingMillis = WaitingMillis0 - WillDelay * 1000, MEIMillis = max(0, Ttl - WaitingMillis), Props2#{'Message-Expiry-Interval' => MEIMillis div 1000} end end end, - [RoutingKey | _] = maps:get(routing_keys, Anns), - Msg#mqtt_msg{topic = rabbit_mqtt_util:amqp_to_mqtt(RoutingKey), + [RoutingKey | _] = maps:get(?ANN_ROUTING_KEYS, Anns), + %% We rely on the mc annotation to tell whether the message is durable because if + %% the message was originally sent with AMQP, the AMQP header isn't stored on disk. + Qos = case Anns of + #{?ANN_DURABLE := false} -> + ?QOS_0; + #{?ANN_DURABLE := true} -> + ?QOS_1; + _ -> + %% If the mc durable annotation isn't set, the message might be durable + %% or not depending on whether the message was sent before or after + %% https://github.com/rabbitmq/rabbitmq-server/pull/11012 (3.13.2) + %% Hence, we rely on the QoS from the mqtt_msg. + Qos0 + end, + Msg#mqtt_msg{qos = Qos, + topic = rabbit_mqtt_util:amqp_to_mqtt(RoutingKey), props = Props}. prepare(_For, #mqtt_msg{} = Msg) -> Msg. correlation_id({uuid, UUID}) -> - mc_util:uuid_to_string(UUID); + mc_util:uuid_to_urn_string(UUID); correlation_id({_T, Corr}) -> rabbit_data_coercion:to_binary(Corr). @@ -498,6 +527,9 @@ filter_map_amqp_to_utf8_string(Key, TypeVal) -> amqp_to_utf8_string({utf8, Val}) when is_binary(Val) -> Val; +amqp_to_utf8_string({symbol, Val}) + when is_binary(Val) -> + Val; amqp_to_utf8_string(Val) when Val =:= null; Val =:= undefined -> @@ -520,13 +552,9 @@ amqp_to_utf8_string({T, Val}) when T =:= double; T =:= float -> float_to_binary(Val); -amqp_to_utf8_string(Val) - when Val =:= true; - Val =:= {boolean, true} -> +amqp_to_utf8_string(true) -> <<"true">>; -amqp_to_utf8_string(Val) - when Val =:= false; - Val =:= {boolean, false} -> +amqp_to_utf8_string(false) -> <<"false">>; amqp_to_utf8_string({T, _Val}) when T =:= map; @@ -536,11 +564,5 @@ amqp_to_utf8_string({T, _Val}) T =:= binary -> cannot_convert. -amqp_encode(Data, Acc0) -> - Bin = amqp10_framing:encode_bin(Data), - Acc = setelement(5, Acc0, [Bin | element(5, Acc0)]), - setelement(7, Acc, ?CONTENT_TYPE_AMQP). - -add_response_topic(AmqpTopic, PublishProperties) -> - MqttTopic = rabbit_mqtt_util:amqp_to_mqtt(AmqpTopic), - PublishProperties#{'Response-Topic' => MqttTopic}. +durable(?QOS_0) -> false; +durable(?QOS_1) -> true. diff --git a/deps/rabbitmq_mqtt/src/mqtt_machine.erl b/deps/rabbitmq_mqtt/src/mqtt_machine.erl deleted file mode 100644 index 457eb1b7b7a3..000000000000 --- a/deps/rabbitmq_mqtt/src/mqtt_machine.erl +++ /dev/null @@ -1,201 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% --module(mqtt_machine). --behaviour(ra_machine). - --include("mqtt_machine.hrl"). - --export([version/0, - which_module/1, - init/1, - apply/3, - state_enter/2, - notify_connection/2, - overview/1]). - --type state() :: #machine_state{}. - --type config() :: map(). - --type reply() :: {ok, term()} | {error, term()}. - --type command() :: {register, client_id_ra(), pid()} | - {unregister, client_id_ra(), pid()} | - list. -version() -> 1. - -which_module(1) -> ?MODULE; -which_module(0) -> mqtt_machine_v0. - --spec init(config()) -> state(). -init(_Conf) -> - #machine_state{}. 
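Two of the computations in the mc_mqtt hunks above are easier to follow with concrete values. First, a minimal sketch (hypothetical topic, default `amq.topic` exchange, i.e. the `mqtt_x` default) of how an MQTT `'Response-Topic'` becomes an AMQP reply-to address: `rabbit_mqtt_util:mqtt_to_amqp/1` rewrites the MQTT separators into routing-key form before the address is built, and `convert_from/3` reverses the steps by stripping the `/exchanges/<exchange>/` prefix, URL-decoding the remainder and applying `amqp_to_mqtt/1`.

    example_reply_to_address() ->
        MqttTopic   = <<"replies/device-1">>,
        Topic       = rabbit_mqtt_util:mqtt_to_amqp(MqttTopic),  %% e.g. <<"replies.device-1">>
        TopicQuoted = uri_string:quote(Topic),                    %% unchanged here: only unreserved characters
        <<"/exchanges/amq.topic/", TopicQuoted/binary>>.
        %% => <<"/exchanges/amq.topic/replies.device-1">> with that translation

Second, a worked instance (made-up numbers) of the [MQTT-3.3.2-6] calculation in `protocol_state/2`: a message published with a 60 second `'Message-Expiry-Interval'` that has waited 10 seconds in the server, 5 of which were the Will delay, is delivered with 55 seconds remaining.

    example_remaining_expiry() ->
        Ttl            = 60_000,                       %% 'Message-Expiry-Interval' as ttl, in ms
        WaitingMillis0 = 10_000,                       %% os:system_time(millisecond) - Timestamp
        WillDelay      = 5,                            %% 'Will-Delay-Interval' in seconds
        WaitingMillis  = WaitingMillis0 - WillDelay * 1000,
        MEIMillis      = max(0, Ttl - WaitingMillis),
        MEIMillis div 1000.                            %% => 55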
- --spec apply(map(), command(), state()) -> - {state(), reply(), ra_machine:effects()}. -apply(_Meta, {register, ClientId, Pid}, - #machine_state{client_ids = Ids, - pids = Pids0} = State0) -> - {Effects, Ids1, Pids} = - case maps:find(ClientId, Ids) of - {ok, OldPid} when Pid =/= OldPid -> - Effects0 = [{demonitor, process, OldPid}, - {monitor, process, Pid}, - {mod_call, ?MODULE, notify_connection, - [OldPid, duplicate_id]}], - Pids2 = case maps:take(OldPid, Pids0) of - error -> - Pids0; - {[ClientId], Pids1} -> - Pids1; - {ClientIds, Pids1} -> - Pids1#{ClientId => lists:delete(ClientId, ClientIds)} - end, - Pids3 = maps:update_with(Pid, fun(CIds) -> [ClientId | CIds] end, - [ClientId], Pids2), - {Effects0, maps:remove(ClientId, Ids), Pids3}; - - {ok, Pid} -> - {[], Ids, Pids0}; - error -> - Pids1 = maps:update_with(Pid, fun(CIds) -> [ClientId | CIds] end, - [ClientId], Pids0), - Effects0 = [{monitor, process, Pid}], - {Effects0, Ids, Pids1} - end, - State = State0#machine_state{client_ids = maps:put(ClientId, Pid, Ids1), - pids = Pids}, - {State, ok, Effects}; - -apply(Meta, {unregister, ClientId, Pid}, #machine_state{client_ids = Ids, - pids = Pids0} = State0) -> - State = case maps:find(ClientId, Ids) of - {ok, Pid} -> - Pids = case maps:get(Pid, Pids0, undefined) of - undefined -> - Pids0; - [ClientId] -> - maps:remove(Pid, Pids0); - Cids -> - Pids0#{Pid => lists:delete(ClientId, Cids)} - end, - - State0#machine_state{client_ids = maps:remove(ClientId, Ids), - pids = Pids}; - %% don't delete client id that might belong to a newer connection - %% that kicked the one with Pid out - {ok, _AnotherPid} -> - State0; - error -> - State0 - end, - Effects0 = [{demonitor, process, Pid}], - %% snapshot only when the map has changed - Effects = case State of - State0 -> Effects0; - _ -> Effects0 ++ snapshot_effects(Meta, State) - end, - {State, ok, Effects}; - -apply(_Meta, {down, DownPid, noconnection}, State) -> - %% Monitor the node the pid is on (see {nodeup, Node} below) - %% so that we can detect when the node is re-connected and discover the - %% actual fate of the connection processes on it - Effect = {monitor, node, node(DownPid)}, - {State, ok, Effect}; - -apply(Meta, {down, DownPid, _}, #machine_state{client_ids = Ids, - pids = Pids0} = State0) -> - case maps:get(DownPid, Pids0, undefined) of - undefined -> - {State0, ok, []}; - ClientIds -> - Ids1 = maps:without(ClientIds, Ids), - State = State0#machine_state{client_ids = Ids1, - pids = maps:remove(DownPid, Pids0)}, - Effects = lists:map(fun(Id) -> - [{mod_call, rabbit_log, debug, - ["MQTT connection with client id '~ts' failed", [Id]]}] - end, ClientIds), - {State, ok, Effects ++ snapshot_effects(Meta, State)} - end; - -apply(_Meta, {nodeup, Node}, State) -> - %% Work out if any pids that were disconnected are still - %% alive. - %% Re-request the monitor for the pids on the now-back node. 
- Effects = [{monitor, process, Pid} || Pid <- all_pids(State), node(Pid) == Node], - {State, ok, Effects}; -apply(_Meta, {nodedown, _Node}, State) -> - {State, ok}; - -apply(Meta, {leave, Node}, #machine_state{client_ids = Ids, - pids = Pids0} = State0) -> - {Keep, Remove} = maps:fold( - fun (ClientId, Pid, {In, Out}) -> - case node(Pid) =/= Node of - true -> - {In#{ClientId => Pid}, Out}; - false -> - {In, Out#{ClientId => Pid}} - end - end, {#{}, #{}}, Ids), - Effects = maps:fold(fun (ClientId, _Pid, Acc) -> - Pid = maps:get(ClientId, Ids), - [ - {demonitor, process, Pid}, - {mod_call, ?MODULE, notify_connection, [Pid, decommission_node]}, - {mod_call, rabbit_log, debug, - ["MQTT will remove client ID '~ts' from known " - "as its node has been decommissioned", [ClientId]]} - ] ++ Acc - end, [], Remove), - - State = State0#machine_state{client_ids = Keep, - pids = maps:without(maps:values(Remove), Pids0)}, - {State, ok, Effects ++ snapshot_effects(Meta, State)}; -apply(_Meta, {machine_version, 0, 1}, {machine_state, Ids}) -> - Pids = maps:fold( - fun(Id, Pid, Acc) -> - maps:update_with(Pid, - fun(CIds) -> [Id | CIds] end, - [Id], Acc) - end, #{}, Ids), - {#machine_state{client_ids = Ids, - pids = Pids}, ok, []}; -apply(_Meta, Unknown, State) -> - logger:error("MQTT Raft state machine v1 received unknown command ~tp", [Unknown]), - {State, {error, {unknown_command, Unknown}}, []}. - --spec state_enter(ra_server:ra_state() | eol, state()) -> - ra_machine:effects(). -state_enter(leader, State) -> - %% re-request monitors for all known pids, this would clean up - %% records for all connections are no longer around, e.g. right after node restart - [{monitor, process, Pid} || Pid <- all_pids(State)]; -state_enter(_, _) -> - []. - --spec overview(state()) -> map(). -overview(#machine_state{client_ids = ClientIds, - pids = Pids}) -> - #{num_client_ids => maps:size(ClientIds), - num_pids => maps:size(Pids)}. - -%% ========================== - -%% Avoids blocking the Raft leader. --spec notify_connection(pid(), duplicate_id | decommission_node) -> pid(). -notify_connection(Pid, Reason) -> - spawn(fun() -> gen_server2:cast(Pid, Reason) end). - --spec snapshot_effects(map(), state()) -> ra_machine:effects(). -snapshot_effects(#{index := RaftIdx}, State) -> - [{release_cursor, RaftIdx, State}]. - -all_pids(#machine_state{client_ids = Ids}) -> - maps:values(Ids). diff --git a/deps/rabbitmq_mqtt/src/mqtt_machine_v0.erl b/deps/rabbitmq_mqtt/src/mqtt_machine_v0.erl deleted file mode 100644 index 45184d620b4e..000000000000 --- a/deps/rabbitmq_mqtt/src/mqtt_machine_v0.erl +++ /dev/null @@ -1,137 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% --module(mqtt_machine_v0). --behaviour(ra_machine). - --include("mqtt_machine_v0.hrl"). - --export([init/1, - apply/3, - state_enter/2, - notify_connection/2]). - --type state() :: #machine_state{}. - --type config() :: map(). - --type reply() :: {ok, term()} | {error, term()}. --type client_id_ra() :: term(). - --type command() :: {register, client_id_ra(), pid()} | - {unregister, client_id_ra(), pid()} | - list. - --spec init(config()) -> state(). -init(_Conf) -> - #machine_state{}. - --spec apply(map(), command(), state()) -> - {state(), reply(), ra_machine:effects()}. 
-apply(_Meta, {register, ClientId, Pid}, #machine_state{client_ids = Ids} = State0) -> - {Effects, Ids1} = - case maps:find(ClientId, Ids) of - {ok, OldPid} when Pid =/= OldPid -> - Effects0 = [{demonitor, process, OldPid}, - {monitor, process, Pid}, - {mod_call, ?MODULE, notify_connection, [OldPid, duplicate_id]}], - {Effects0, maps:remove(ClientId, Ids)}; - _ -> - Effects0 = [{monitor, process, Pid}], - {Effects0, Ids} - end, - State = State0#machine_state{client_ids = maps:put(ClientId, Pid, Ids1)}, - {State, ok, Effects}; - -apply(Meta, {unregister, ClientId, Pid}, #machine_state{client_ids = Ids} = State0) -> - State = case maps:find(ClientId, Ids) of - {ok, Pid} -> State0#machine_state{client_ids = maps:remove(ClientId, Ids)}; - %% don't delete client id that might belong to a newer connection - %% that kicked the one with Pid out - {ok, _AnotherPid} -> State0; - error -> State0 - end, - Effects0 = [{demonitor, process, Pid}], - %% snapshot only when the map has changed - Effects = case State of - State0 -> Effects0; - _ -> Effects0 ++ snapshot_effects(Meta, State) - end, - {State, ok, Effects}; - -apply(_Meta, {down, DownPid, noconnection}, State) -> - %% Monitor the node the pid is on (see {nodeup, Node} below) - %% so that we can detect when the node is re-connected and discover the - %% actual fate of the connection processes on it - Effect = {monitor, node, node(DownPid)}, - {State, ok, Effect}; - -apply(Meta, {down, DownPid, _}, #machine_state{client_ids = Ids} = State0) -> - Ids1 = maps:filter(fun (_ClientId, Pid) when Pid =:= DownPid -> - false; - (_, _) -> - true - end, Ids), - State = State0#machine_state{client_ids = Ids1}, - Delta = maps:keys(Ids) -- maps:keys(Ids1), - Effects = lists:map(fun(Id) -> - [{mod_call, rabbit_log, debug, - ["MQTT connection with client id '~ts' failed", [Id]]}] end, Delta), - {State, ok, Effects ++ snapshot_effects(Meta, State)}; - -apply(_Meta, {nodeup, Node}, State) -> - %% Work out if any pids that were disconnected are still - %% alive. - %% Re-request the monitor for the pids on the now-back node. - Effects = [{monitor, process, Pid} || Pid <- all_pids(State), node(Pid) == Node], - {State, ok, Effects}; -apply(_Meta, {nodedown, _Node}, State) -> - {State, ok}; - -apply(Meta, {leave, Node}, #machine_state{client_ids = Ids} = State0) -> - Ids1 = maps:filter(fun (_ClientId, Pid) -> node(Pid) =/= Node end, Ids), - Delta = maps:keys(Ids) -- maps:keys(Ids1), - - Effects = lists:foldl(fun (ClientId, Acc) -> - Pid = maps:get(ClientId, Ids), - [ - {demonitor, process, Pid}, - {mod_call, ?MODULE, notify_connection, [Pid, decommission_node]}, - {mod_call, rabbit_log, debug, - ["MQTT will remove client ID '~ts' from known " - "as its node has been decommissioned", [ClientId]]} - ] ++ Acc - end, [], Delta), - - State = State0#machine_state{client_ids = Ids1}, - {State, ok, Effects ++ snapshot_effects(Meta, State)}; - -apply(_Meta, Unknown, State) -> - logger:error("MQTT Raft state machine received an unknown command ~tp", [Unknown]), - {State, {error, {unknown_command, Unknown}}, []}. - --spec state_enter(ra_server:ra_state(), state()) -> - ra_machine:effects(). -state_enter(leader, State) -> - %% re-request monitors for all known pids, this would clean up - %% records for all connections are no longer around, e.g. right after node restart - [{monitor, process, Pid} || Pid <- all_pids(State)]; -state_enter(_, _) -> - []. - -%% ========================== - -%% Avoids blocking the Raft leader. 
--spec notify_connection(pid(), duplicate_id | decommission_node) -> pid(). -notify_connection(Pid, Reason) -> - spawn(fun() -> gen_server2:cast(Pid, Reason) end). - --spec snapshot_effects(map(), state()) -> ra_machine:effects(). -snapshot_effects(#{index := RaftIdx}, State) -> - [{release_cursor, RaftIdx, State}]. - -all_pids(#machine_state{client_ids = Ids}) -> - maps:values(Ids). diff --git a/deps/rabbitmq_mqtt/src/mqtt_node.erl b/deps/rabbitmq_mqtt/src/mqtt_node.erl deleted file mode 100644 index bd3eca1a7f77..000000000000 --- a/deps/rabbitmq_mqtt/src/mqtt_node.erl +++ /dev/null @@ -1,174 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% --module(mqtt_node). - --export([start/0, node_id/0, server_id/0, all_node_ids/0, leave/1, trigger_election/0, - delete/1]). - --define(ID_NAME, mqtt_node). --define(START_TIMEOUT, 100_000). --define(RETRY_INTERVAL, 5000). --define(RA_OPERATION_TIMEOUT, 60_000). --define(RA_SYSTEM, coordination). - -node_id() -> - server_id(node()). - -server_id() -> - server_id(node()). - -server_id(Node) -> - {?ID_NAME, Node}. - -all_node_ids() -> - [server_id(N) || N <- rabbit_nodes:list_members(), - can_participate_in_clientid_tracking(N)]. - -start() -> - %% 3s to 6s randomized - Repetitions = rand:uniform(10) + 10, - start(300, Repetitions). - -start(_Delay, AttemptsLeft) when AttemptsLeft =< 0 -> - ok = start_server(), - trigger_election(); -start(Delay, AttemptsLeft) -> - NodeId = server_id(), - Nodes = compatible_peer_servers(), - case ra_directory:uid_of(?RA_SYSTEM, ?ID_NAME) of - undefined -> - case Nodes of - [] -> - %% Since cluster members are not known ahead of time and initial boot can be happening in parallel, - %% we wait and check a few times (up to a few seconds) to see if we can discover any peers to - %% join before forming a cluster. This reduces the probability of N independent clusters being - %% formed in the common scenario of N nodes booting in parallel e.g. because they were started - %% at the same time by a deployment tool. - %% - %% This scenario does not guarantee single cluster formation but without knowing the list of members - %% ahead of time, this is a best effort workaround. Multi-node consensus is apparently hard - %% to achieve without having consensus around expected cluster members. - rabbit_log:info("MQTT: will wait for ~tp more ms for cluster members to join before triggering a Raft leader election", [Delay]), - timer:sleep(Delay), - start(Delay, AttemptsLeft - 1); - Peers -> - %% Trigger an election. - %% This is required when we start a node for the first time. - %% Using default timeout because it supposed to reply fast. - rabbit_log:info("MQTT: discovered cluster peers that support client ID tracking: ~p", [Peers]), - ok = start_server(), - _ = join_peers(NodeId, Peers), - ra:trigger_election(NodeId, ?RA_OPERATION_TIMEOUT) - end; - _ -> - _ = join_peers(NodeId, Nodes), - ok = ra:restart_server(?RA_SYSTEM, NodeId), - ra:trigger_election(NodeId, ?RA_OPERATION_TIMEOUT) - end. - -compatible_peer_servers() -> - all_node_ids() -- [(node_id())]. 
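%% Note on the deleted start/0 above: the "3s to 6s randomized" comment matches
%% the arithmetic in the code. rand:uniform(10) + 10 yields 11..20 attempts and
%% each attempt sleeps Delay = 300 ms, so the wait before forcing a Raft leader
%% election when no peers are discovered works out to roughly:
%%
%%   MinWaitMs = (1 + 10) * 300.   %% 3300 ms
%%   MaxWaitMs = (10 + 10) * 300.  %% 6000 ms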
- -start_server() -> - NodeId = node_id(), - Nodes = compatible_peer_servers(), - UId = ra:new_uid(ra_lib:to_binary(?ID_NAME)), - Timeout = application:get_env(kernel, net_ticktime, 60) + 5, - Conf = #{cluster_name => ?ID_NAME, - id => NodeId, - uid => UId, - friendly_name => atom_to_list(?ID_NAME), - initial_members => Nodes, - log_init_args => #{uid => UId}, - tick_timeout => Timeout, - machine => {module, mqtt_machine, #{}} - }, - rabbit_log:info("MQTT: starting Ra server with initial members: ~p", [Nodes]), - ra:start_server(?RA_SYSTEM, Conf). - -trigger_election() -> - ra:trigger_election(server_id(), ?RA_OPERATION_TIMEOUT). - -join_peers(_NodeId, []) -> - ok; -join_peers(NodeId, Nodes) -> - join_peers(NodeId, Nodes, 100). - -join_peers(_NodeId, _Nodes, RetriesLeft) when RetriesLeft =:= 0 -> - rabbit_log:error("MQTT: exhausted all attempts while trying to rejoin cluster peers"); -join_peers(NodeId, Nodes, RetriesLeft) -> - case ra:members(Nodes, ?START_TIMEOUT) of - {ok, Members, _} -> - case lists:member(NodeId, Members) of - true -> ok; - false -> ra:add_member(Members, NodeId) - end; - {timeout, _} -> - rabbit_log:debug("MQTT: timed out contacting cluster peers, %s retries left", [RetriesLeft]), - timer:sleep(?RETRY_INTERVAL), - join_peers(NodeId, Nodes, RetriesLeft - 1); - Err -> - Err - end. - --spec leave(node()) -> 'ok' | 'timeout' | 'nodedown'. -leave(Node) -> - NodeId = server_id(), - ToLeave = server_id(Node), - try - ra:leave_and_delete_server(?RA_SYSTEM, NodeId, ToLeave) - catch - exit:{{nodedown, Node}, _} -> - nodedown - end. - -can_participate_in_clientid_tracking(Node) -> - case rpc:call(Node, mqtt_machine, module_info, []) of - {badrpc, _} -> false; - _ -> true - end. - --spec delete(Args) -> Ret when - Args :: rabbit_feature_flags:enable_callback_args(), - Ret :: rabbit_feature_flags:enable_callback_ret(). -delete(_) -> - RaNodes = all_node_ids(), - Nodes = lists:map(fun({_, N}) -> N end, RaNodes), - LockId = {?ID_NAME, node_id()}, - rabbit_log:info("Trying to acquire lock ~p on nodes ~p ...", [LockId, Nodes]), - true = global:set_lock(LockId, Nodes), - rabbit_log:info("Acquired lock ~p", [LockId]), - try whereis(?ID_NAME) of - undefined -> - rabbit_log:info("Local Ra process ~s does not exist", [?ID_NAME]), - ok; - _ -> - rabbit_log:info("Deleting Ra cluster ~s ...", [?ID_NAME]), - try ra:delete_cluster(RaNodes, 15_000) of - {ok, _Leader} -> - rabbit_log:info("Successfully deleted Ra cluster ~s", [?ID_NAME]), - ok; - {error, Reason} -> - rabbit_log:info("Failed to delete Ra cluster ~s: ~p", [?ID_NAME, Reason]), - ServerId = server_id(), - case ra:force_delete_server(?RA_SYSTEM, ServerId) of - ok -> - rabbit_log:info("Successfully force deleted Ra server ~p", [ServerId]), - ok; - Error -> - rabbit_log:error("Failed to force delete Ra server ~p: ~p", - [ServerId, Error]), - {error, Error} - end - catch exit:{{shutdown, delete}, _StackTrace} -> - rabbit_log:info("Ra cluster ~s already being deleted", [?ID_NAME]), - ok - end - after - true = global:del_lock(LockId, Nodes), - rabbit_log:info("Released lock ~p", [LockId]) - end. diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl index 785ec0d334e3..c5ea59abedea 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. 
All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt). @@ -26,12 +26,6 @@ start(normal, []) -> persist_static_configuration(), {ok, Listeners} = application:get_env(tcp_listeners), {ok, SslListeners} = application:get_env(ssl_listeners), - case rabbit_mqtt_ff:track_client_id_in_ra() of - true -> - ok = mqtt_node:start(); - false -> - ok - end, Result = rabbit_mqtt_sup:start_link({Listeners, SslListeners}, []), EMPid = case rabbit_event:start_link() of {ok, Pid} -> Pid; @@ -45,32 +39,19 @@ stop(_) -> -spec emit_connection_info_all([node()], rabbit_types:info_keys(), reference(), pid()) -> term(). emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) -> - case rabbit_mqtt_ff:track_client_id_in_ra() of - true -> - %% Ra tracks connections cluster-wide. - AllPids = rabbit_mqtt_collector:list_pids(), - emit_connection_info(Items, Ref, AggregatorPid, AllPids), - %% Our node already emitted infos for all connections. Therefore, for the - %% remaining nodes, we send back 'finished' so that the CLI does not time out. - [AggregatorPid ! {Ref, finished} || _ <- lists:seq(1, length(Nodes) - 1)]; - false -> - Pids = [spawn_link(Node, ?MODULE, emit_connection_info_local, - [Items, Ref, AggregatorPid]) - || Node <- Nodes], - rabbit_control_misc:await_emitters_termination(Pids) - end. + Pids = [spawn_link(Node, ?MODULE, emit_connection_info_local, + [Items, Ref, AggregatorPid]) + || Node <- Nodes], + rabbit_control_misc:await_emitters_termination(Pids). -spec emit_connection_info_local(rabbit_types:info_keys(), reference(), pid()) -> ok. emit_connection_info_local(Items, Ref, AggregatorPid) -> - LocalPids = local_connection_pids(), - emit_connection_info(Items, Ref, AggregatorPid, LocalPids). - -emit_connection_info(Items, Ref, AggregatorPid, Pids) -> + LocalPids = list_local_mqtt_connections(), rabbit_control_misc:emitting_map_with_exit_handler( AggregatorPid, Ref, fun(Pid) -> rabbit_mqtt_reader:info(Pid, Items) - end, Pids). + end, LocalPids). -spec close_local_client_connections(atom()) -> {'ok', non_neg_integer()}. close_local_client_connections(Reason) -> @@ -82,16 +63,17 @@ close_local_client_connections(Reason) -> -spec local_connection_pids() -> [pid()]. local_connection_pids() -> - case rabbit_mqtt_ff:track_client_id_in_ra() of - true -> - AllPids = rabbit_mqtt_collector:list_pids(), - lists:filter(fun(Pid) -> node(Pid) =:= node() end, AllPids); - false -> - PgScope = persistent_term:get(?PG_SCOPE), - lists:flatmap(fun(Group) -> - pg:get_local_members(PgScope, Group) - end, pg:which_groups(PgScope)) - end. + PgScope = persistent_term:get(?PG_SCOPE), + lists:flatmap(fun(Group) -> + pg:get_local_members(PgScope, Group) + end, pg:which_groups(PgScope)). + +%% This function excludes Web MQTT connections +list_local_mqtt_connections() -> + PlainPids = rabbit_networking:list_local_connections_of_protocol(?MQTT_TCP_PROTOCOL), + TLSPids = rabbit_networking:list_local_connections_of_protocol(?MQTT_TLS_PROTOCOL), + PlainPids ++ TLSPids. + init_global_counters() -> lists:foreach(fun init_global_counters/1, [?MQTT_PROTO_V3, diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_collector.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_collector.erl deleted file mode 100644 index 250486ce452f..000000000000 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_collector.erl +++ /dev/null @@ -1,98 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 
2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. -%% - --module(rabbit_mqtt_collector). - --include("mqtt_machine.hrl"). - --export([register/2, register/3, unregister/2, - list/0, list_pids/0, leave/1]). - -%%---------------------------------------------------------------------------- --spec register(client_id_ra(), pid()) -> {ok, reference()} | {error, term()}. -register(ClientId, Pid) -> - {ClusterName, _} = NodeId = mqtt_node:server_id(), - case ra_leaderboard:lookup_leader(ClusterName) of - undefined -> - case ra:members(NodeId) of - {ok, _, Leader} -> - register(Leader, ClientId, Pid); - _ = Error -> - Error - end; - Leader -> - register(Leader, ClientId, Pid) - end. - --spec register(ra:server_id(), client_id_ra(), pid()) -> - {ok, reference()} | {error, term()}. -register(ServerId, ClientId, Pid) -> - Corr = make_ref(), - send_ra_command(ServerId, {register, ClientId, Pid}, Corr), - erlang:send_after(5000, self(), {ra_event, undefined, register_timeout}), - {ok, Corr}. - --spec unregister(client_id_ra(), pid()) -> ok. -unregister(ClientId, Pid) -> - {ClusterName, _} = mqtt_node:server_id(), - case ra_leaderboard:lookup_leader(ClusterName) of - undefined -> - ok; - Leader -> - send_ra_command(Leader, {unregister, ClientId, Pid}, no_correlation) - end. - --spec list_pids() -> [pid()]. -list_pids() -> - list(fun(#machine_state{pids = Pids}) -> maps:keys(Pids) end). - --spec list() -> term(). -list() -> - list(fun(#machine_state{client_ids = Ids}) -> maps:to_list(Ids) end). - -list(QF) -> - {ClusterName, _} = mqtt_node:server_id(), - case ra_leaderboard:lookup_leader(ClusterName) of - undefined -> - NodeIds = mqtt_node:all_node_ids(), - case ra:leader_query(NodeIds, QF) of - {ok, {_, Result}, _} -> Result; - {timeout, _} -> - rabbit_log:debug("~ts:list/1 leader query timed out", - [?MODULE]), - [] - end; - Leader -> - case ra:leader_query(Leader, QF) of - {ok, {_, Result}, _} -> Result; - {error, _} -> - []; - {timeout, _} -> - rabbit_log:debug("~ts:list/1 leader query timed out", - [?MODULE]), - [] - end - end. - --spec leave(binary()) -> ok | timeout | nodedown. -leave(NodeBin) -> - Node = binary_to_atom(NodeBin, utf8), - ServerId = mqtt_node:server_id(), - run_ra_command(ServerId, {leave, Node}), - mqtt_node:leave(Node). - -%%---------------------------------------------------------------------------- --spec run_ra_command(term(), term()) -> term() | {error, term()}. -run_ra_command(ServerId, RaCommand) -> - case ra:process_command(ServerId, RaCommand) of - {ok, Result, _} -> Result; - _ = Error -> Error - end. - --spec send_ra_command(term(), term(), term()) -> ok. -send_ra_command(ServerId, RaCommand, Correlation) -> - ok = ra:pipeline_command(ServerId, RaCommand, Correlation, normal). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_confirms.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_confirms.erl index 3090fe7f6879..cbcf6159d079 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_confirms.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_confirms.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -module(rabbit_mqtt_confirms). @@ -18,12 +18,8 @@ size/1, contains/2]). -%% As done in OTP's sets module: -%% Empty list is cheaper to serialize than atom. --define(VALUE, []). - -type queue_name() :: rabbit_amqqueue:name(). --opaque state() :: #{packet_id() => #{queue_name() => ?VALUE}}. +-opaque state() :: #{packet_id() => #{queue_name() => ok}}. -export_type([state/0]). -spec init() -> state(). @@ -43,7 +39,7 @@ insert(PktId, QNames, State) when is_integer(PktId) andalso PktId > 0 andalso not is_map_key(PktId, State) -> - QMap = maps:from_keys(QNames, ?VALUE), + QMap = maps:from_keys(QNames, ok), maps:put(PktId, QMap, State). -spec confirm([packet_id()], queue_name(), state()) -> diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_ff.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_ff.erl index d1307c72127c..3b35c794af39 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_ff.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_ff.erl @@ -2,15 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_ff). -include("rabbit_mqtt.hrl"). --export([track_client_id_in_ra/0]). - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Feature flags introduced in 3.12.0 %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -18,14 +16,13 @@ -rabbit_feature_flag( {?QUEUE_TYPE_QOS_0, #{desc => "Support pseudo queue type for MQTT QoS 0 subscribers omitting a queue process", - stability => stable + stability => required }}). -rabbit_feature_flag( {delete_ra_cluster_mqtt_node, #{desc => "Delete Ra cluster 'mqtt_node' since MQTT client IDs are tracked locally", - stability => stable, - callbacks => #{enable => {mqtt_node, delete}} + stability => required }}). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -40,14 +37,10 @@ -rabbit_feature_flag( {mqtt_v5, #{desc => "Support MQTT 5.0", - stability => stable, + stability => required, depends_on => [ %% MQTT 5.0 feature Will Delay Interval depends on client ID tracking in pg local. delete_ra_cluster_mqtt_node, message_containers ] }}). - --spec track_client_id_in_ra() -> boolean(). -track_client_id_in_ra() -> - rabbit_feature_flags:is_disabled(delete_ra_cluster_mqtt_node). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_internal_event_handler.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_internal_event_handler.erl index 17f7cc2f39b8..a394693b26b1 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_internal_event_handler.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_internal_event_handler.erl @@ -2,14 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_internal_event_handler). -behaviour(gen_event). --export([init/1, handle_event/2, handle_call/2]). +-export([init/1, handle_event/2, handle_call/2, handle_info/2]). -import(rabbit_misc, [pget/2]). @@ -35,3 +35,6 @@ handle_event(_Event, ?STATE) -> handle_call(_Request, ?STATE) -> {ok, ok, ?STATE}. + +handle_info(_Info, State) -> + {ok, State}. 
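%% With delete_ra_cluster_mqtt_node (and mqtt_v5) declared as required feature
%% flags above, the Ra-based client ID tracking deleted earlier in this diff is
%% gone for good: client IDs are tracked only in node-local pg process groups,
%% as register_client_id/4 in rabbit_mqtt_processor.erl and
%% local_connection_pids/0 in rabbit_mqtt.erl do later in this diff. A minimal,
%% illustrative sketch of that pattern follows; the module and function names
%% are hypothetical, and the real code keeps the per-node scope name in
%% persistent_term (see rabbit:pg_local_scope/1 in rabbit_mqtt_sup.erl below).
-module(mqtt_pg_tracking_sketch).
-export([start_scope/1, register_client/3, local_pids/1]).

%% One pg scope per node; the membership queries below stay local to this node.
start_scope(Scope) ->
    pg:start_link(Scope).

%% The group key is {VHost, ClientId}; the connection process joins its group.
register_client(Scope, {VHost, ClientId}, ConnPid) ->
    ok = pg:join(Scope, {VHost, ClientId}, ConnPid).

%% All MQTT connection processes running on this node.
local_pids(Scope) ->
    lists:flatmap(fun(Group) ->
                          pg:get_local_members(Scope, Group)
                  end,
                  pg:which_groups(Scope)).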
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_packet.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_packet.erl index b4afbd4dd354..36650a6b358a 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_packet.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_packet.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_packet). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl index 5933bc9ee9e0..f9983f47c0df 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_processor). @@ -10,7 +10,7 @@ -export([info/2, init/4, process_packet/2, terminate/3, handle_pre_hibernate/0, - handle_ra_event/2, handle_down/2, handle_queue_event/2, + handle_down/2, handle_queue_event/2, proto_version_tuple/1, throttle/2, format_status/1, remove_duplicate_client_id_connections/2, remove_duplicate_client_id_connections/3, @@ -33,6 +33,7 @@ -include_lib("kernel/include/logger.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit/include/amqqueue.hrl"). +-include_lib("rabbit/include/mc.hrl"). -include("rabbit_mqtt.hrl"). -include("rabbit_mqtt_packet.hrl"). @@ -41,10 +42,16 @@ -define(QUEUE_TTL_KEY, <<"x-expires">>). -define(DEFAULT_EXCHANGE_NAME, <<>>). +-ifdef(TEST). +-define(SILENT_CLOSE_DELAY, 10). +-else. +-define(SILENT_CLOSE_DELAY, 3_000). +-endif. + -type send_fun() :: fun((iodata()) -> ok). -type session_expiry_interval() :: non_neg_integer() | infinity. -type subscriptions() :: #{topic_filter() => #mqtt_subscription_opts{}}. --type topic_aliases() :: {Inbound :: #{topic() => pos_integer()}, +-type topic_aliases() :: {Inbound :: #{pos_integer() => topic()}, Outbound :: #{topic() => pos_integer()}}. 
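%% After the topic_aliases() change above, the Inbound map is keyed by the
%% numeric alias received from the client and resolves it to a topic name,
%% while the Outbound map goes the other way, from topic to the alias used for
%% packets sent to the client. Illustrative values only:
%%
%%   Inbound  = #{1 => <<"sensors/temp">>},   %% alias => topic
%%   Outbound = #{<<"sensors/temp">> => 1},   %% topic => alias
%%   <<"sensors/temp">> = maps:get(1, Inbound),
%%   1 = maps:get(<<"sensors/temp">>, Outbound).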
-record(auth_state, @@ -65,7 +72,6 @@ published = false :: boolean(), ssl_login_name :: none | binary(), retainer_pid :: pid(), - delivery_flow :: flow | noflow, trace_state :: rabbit_trace:state(), prefetch :: non_neg_integer(), vhost :: rabbit_types:vhost(), @@ -100,7 +106,6 @@ %% [v5 4.8.1] subscriptions = #{} :: subscriptions(), auth_state = #auth_state{}, - ra_register_state :: option(registered | {pending, reference()}), %% quorum queues and streams whose soft limit has been exceeded queues_soft_limit_exceeded = sets:new([{version, 2}]) :: sets:set(), qos0_messages_dropped = 0 :: non_neg_integer(), @@ -143,10 +148,6 @@ process_connect( "protocol version: ~p, keepalive: ~p, property names: ~p", [ClientId0, Username0, CleanStart, ProtoVer, KeepaliveSecs, maps:keys(ConnectProps)]), SslLoginName = ssl_login_name(Socket), - Flow = case rabbit_misc:get_env(rabbit, mirroring_flow_control, true) of - true -> flow; - false -> noflow - end, MaxPacketSize = maps:get('Maximum-Packet-Size', ConnectProps, ?MAX_PACKET_SIZE), TopicAliasMax = persistent_term:get(?PERSISTENT_TERM_TOPIC_ALIAS_MAXIMUM), TopicAliasMaxOutbound = min(maps:get('Topic-Alias-Maximum', ConnectProps, 0), TopicAliasMax), @@ -180,10 +181,9 @@ process_connect( end, Result0 = maybe - ok ?= check_protocol_version(ProtoVer), ok ?= check_extended_auth(ConnectProps), {ok, ClientId} ?= ensure_client_id(ClientId0, CleanStart, ProtoVer), - {ok, {Username1, Password}} ?= check_credentials(Username0, Password0, SslLoginName, PeerIp), + {ok, Username1, Password} ?= check_credentials(Username0, Password0, SslLoginName, PeerIp), {VHostPickedUsing, {VHost, Username2}} = get_vhost(Username1, SslLoginName, Port), ?LOG_DEBUG("MQTT connection ~s picked vhost using ~s", [ConnName0, VHostPickedUsing]), @@ -195,8 +195,9 @@ process_connect( ok ?= check_user_connection_limit(Username), {ok, AuthzCtx} ?= check_vhost_access(VHost, User, ClientId, PeerIp), ok ?= check_user_loopback(Username, PeerIp), + ok ?= ensure_credential_expiry_timer(User, PeerIp), rabbit_core_metrics:auth_attempt_succeeded(PeerIp, Username, mqtt), - {ok, RaRegisterState} ?= register_client_id(VHost, ClientId, CleanStart, WillProps), + ok = register_client_id(VHost, ClientId, CleanStart, WillProps), {ok, WillMsg} ?= make_will_msg(Packet), {TraceState, ConnName} = init_trace(VHost, ConnName0), ok = rabbit_mqtt_keepalive:start(KeepaliveSecs, Socket), @@ -207,7 +208,6 @@ process_connect( clean_start = CleanStart, session_expiry_interval_secs = SessionExpiry, ssl_login_name = SslLoginName, - delivery_flow = Flow, trace_state = TraceState, prefetch = prefetch(ConnectProps), conn_name = ConnName, @@ -226,8 +226,7 @@ process_connect( topic_alias_maximum_outbound = TopicAliasMaxOutbound}, auth_state = #auth_state{ user = User, - authz_ctx = AuthzCtx}, - ra_register_state = RaRegisterState}, + authz_ctx = AuthzCtx}}, ok ?= clear_will_msg(S), {ok, S} end, @@ -322,7 +321,6 @@ process_connect(State0) -> {ok, SessPresent, State} else {error, _} = Error -> - unregister_client(State0), Error end. @@ -618,18 +616,6 @@ update_session_expiry_interval(QName, Expiry) -> ok = rabbit_queue_type:policy_changed(Q) % respects queue args end. -check_protocol_version(V) - when V =:= 3 orelse V =:= 4 -> - ok; -check_protocol_version(5) -> - case rabbit_feature_flags:is_enabled(mqtt_v5) of - true -> - ok; - false -> - ?LOG_ERROR("Rejecting MQTT 5.0 connection because feature flag mqtt_v5 is disabled"), - {error, ?RC_UNSUPPORTED_PROTOCOL_VERSION} - end. 
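%% process_connect above chains its checks with an OTP 'maybe' expression: each
%% "Pattern ?= Expr" either matches and continues, or makes the whole
%% expression evaluate to the non-matching value, which the 'else' clauses then
%% map to a connection error. A minimal, self-contained sketch of that control
%% flow; the module and helper names are hypothetical, and the -feature line is
%% needed on OTP 25/26 (the construct is enabled by default from OTP 27).
-module(maybe_flow_sketch).
-feature(maybe_expr, enable).
-export([connect/2]).

connect(Username, Password) ->
    maybe
        ok ?= check_not_empty(Username),
        {ok, Creds} ?= check_password(Username, Password),
        {ok, {connected, Creds}}
    else
        {error, _} = Err -> Err
    end.

check_not_empty(<<>>) -> {error, empty_username};
check_not_empty(U) when is_binary(U) -> ok.

check_password(U, P) when is_binary(P), P =/= <<>> -> {ok, {U, P}};
check_password(_, _) -> {error, bad_password}.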
- check_extended_auth(#{'Authentication-Method' := Method}) -> %% In future, we could support SASL via rabbit_auth_mechanism %% as done by rabbit_reader and rabbit_stream_reader. @@ -640,20 +626,20 @@ check_extended_auth(_) -> check_credentials(Username, Password, SslLoginName, PeerIp) -> case creds(Username, Password, SslLoginName) of + {ok, _, _} = Ok -> + Ok; nocreds -> - auth_attempt_failed(PeerIp, <<>>), ?LOG_ERROR("MQTT login failed: no credentials provided"), + auth_attempt_failed(PeerIp, <<>>), {error, ?RC_BAD_USER_NAME_OR_PASSWORD}; {invalid_creds, {undefined, Pass}} when is_binary(Pass) -> - auth_attempt_failed(PeerIp, <<>>), ?LOG_ERROR("MQTT login failed: no username is provided"), + auth_attempt_failed(PeerIp, <<>>), {error, ?RC_BAD_USER_NAME_OR_PASSWORD}; {invalid_creds, {User, _Pass}} when is_binary(User) -> - auth_attempt_failed(PeerIp, User), ?LOG_ERROR("MQTT login failed for user '~s': no password provided", [User]), - {error, ?RC_BAD_USER_NAME_OR_PASSWORD}; - {UserBin, PassBin} -> - {ok, {UserBin, PassBin}} + auth_attempt_failed(PeerIp, User), + {error, ?RC_BAD_USER_NAME_OR_PASSWORD} end. -spec ensure_client_id(client_id(), boolean(), protocol_version()) -> @@ -670,47 +656,29 @@ ensure_client_id(ClientId, _, _) when is_binary(ClientId) -> {ok, ClientId}. --spec register_client_id(rabbit_types:vhost(), client_id(), boolean(), properties()) -> - {ok, RaRegisterState :: undefined | {pending, reference()}} | - {error, ConnectErrorCode :: pos_integer()}. +-spec register_client_id(rabbit_types:vhost(), client_id(), boolean(), properties()) -> ok. register_client_id(VHost, ClientId, CleanStart, WillProps) when is_binary(VHost), is_binary(ClientId) -> - %% Always register client ID in pg. PgGroup = {VHost, ClientId}, ok = pg:join(persistent_term:get(?PG_SCOPE), PgGroup, self()), - - case rabbit_mqtt_ff:track_client_id_in_ra() of - true -> - case collector_register(ClientId) of - {ok, Corr} -> - %% Ra node takes care of removing duplicate client ID connections. - {ok, {pending, Corr}}; - {error, _} = Err -> - %% e.g. this node was removed from the MQTT cluster members - ?LOG_ERROR("MQTT connection failed to register client ID ~s in vhost ~s in Ra: ~p", - [ClientId, VHost, Err]), - {error, ?RC_IMPLEMENTATION_SPECIFIC_ERROR} - end; - false -> - %% "If a Network Connection uses a Client Identifier of an existing Network Connection to - %% the Server, the Will Message for the exiting connection is sent unless the new - %% connection specifies Clean Start of 0 and the Will Delay is greater than zero." - %% [v5 3.1.3.2.2] - Args = case {CleanStart, WillProps} of - {false, #{'Will-Delay-Interval' := I}} when I > 0 -> - [PgGroup, self(), false]; - _ -> - [PgGroup, self()] - end, - ok = erpc:multicast([node() | nodes()], - ?MODULE, - remove_duplicate_client_id_connections, - Args), - {ok, undefined} - end. - -%% Once feature flag mqtt_v5 becomes required, the caller should always pass SendWill to this -%% function (remove_duplicate_client_id_connections/2) so that we can delete this function. + %% "If a Network Connection uses a Client Identifier of an existing Network Connection to + %% the Server, the Will Message for the exiting connection is sent unless the new + %% connection specifies Clean Start of 0 and the Will Delay is greater than zero." 
+ %% [v5 3.1.3.2.2] + SendWill = case {CleanStart, WillProps} of + {false, #{'Will-Delay-Interval' := I}} when I > 0 -> + false; + _ -> + true + end, + ok = erpc:multicast([node() | nodes()], + ?MODULE, + remove_duplicate_client_id_connections, + [PgGroup, self(), SendWill]). + +%% remove_duplicate_client_id_connections/2 is only called from 3.13 nodes. +%% Hence, this function can be deleted when mixed version clusters between +%% this version and 3.13 are disallowed. -spec remove_duplicate_client_id_connections( {rabbit_types:vhost(), client_id()}, pid()) -> ok. remove_duplicate_client_id_connections(PgGroup, PidToKeep) -> @@ -1036,8 +1004,8 @@ check_vhost_exists(VHost, Username, PeerIp) -> true -> ok; false -> - auth_attempt_failed(PeerIp, Username), ?LOG_ERROR("MQTT connection failed: virtual host '~s' does not exist", [VHost]), + auth_attempt_failed(PeerIp, Username), {error, ?RC_BAD_USER_NAME_OR_PASSWORD} end. @@ -1053,7 +1021,7 @@ check_vhost_connection_limit(VHost) -> check_vhost_alive(VHost) -> case rabbit_vhost_sup_sup:is_vhost_alive(VHost) of - true -> + true -> ok; false -> ?LOG_ERROR("MQTT connection failed: vhost '~s' is down", [VHost]), @@ -1076,10 +1044,10 @@ check_user_login(VHost, Username, Password, ClientId, PeerIp, ConnName) -> notify_auth_result(user_authentication_success, Username1, ConnName), {ok, User}; {refused, Username, Msg, Args} -> - auth_attempt_failed(PeerIp, Username), ?LOG_ERROR("MQTT connection failed: access refused for user '~s':" ++ Msg, [Username | Args]), notify_auth_result(user_authentication_failure, Username, ConnName), + auth_attempt_failed(PeerIp, Username), {error, ?RC_BAD_USER_NAME_OR_PASSWORD} end. @@ -1108,9 +1076,9 @@ check_vhost_access(VHost, User = #user{username = Username}, ClientId, PeerIp) - ok -> {ok, AuthzCtx} catch exit:#amqp_error{name = not_allowed} -> - auth_attempt_failed(PeerIp, Username), ?LOG_ERROR("MQTT connection failed: access refused for user '~s' to vhost '~s'", [Username, VHost]), + auth_attempt_failed(PeerIp, Username), {error, ?RC_NOT_AUTHORIZED} end. @@ -1119,12 +1087,33 @@ check_user_loopback(Username, PeerIp) -> ok -> ok; not_allowed -> + ?LOG_WARNING("MQTT login failed: user '~s' can only connect via localhost", + [Username]), auth_attempt_failed(PeerIp, Username), - ?LOG_WARNING( - "MQTT login failed: user '~s' can only connect via localhost", [Username]), {error, ?RC_NOT_AUTHORIZED} end. + +ensure_credential_expiry_timer(User = #user{username = Username}, PeerIp) -> + case rabbit_access_control:expiry_timestamp(User) of + never -> + ok; + Ts when is_integer(Ts) -> + Time = (Ts - os:system_time(second)) * 1000, + ?LOG_DEBUG("Credential expires in ~b ms frow now " + "(absolute timestamp = ~b seconds since epoch)", + [Time, Ts]), + case Time > 0 of + true -> + _TimerRef = erlang:send_after(Time, self(), credential_expired), + ok; + false -> + ?LOG_WARNING("Credential expired ~b ms ago", [abs(Time)]), + auth_attempt_failed(PeerIp, Username), + {error, ?RC_NOT_AUTHORIZED} + end + end. + get_vhost(UserBin, none, Port) -> get_vhost_no_ssl(UserBin, Port); get_vhost(UserBin, SslLogin, Port) -> @@ -1212,34 +1201,43 @@ get_vhost_from_port_mapping(Port, Mapping) -> Res. 
creds(User, Pass, SSLLoginName) -> - DefaultUser = rabbit_mqtt_util:env(default_user), - DefaultPass = rabbit_mqtt_util:env(default_pass), - {ok, Anon} = application:get_env(?APP_NAME, allow_anonymous), - {ok, TLSAuth} = application:get_env(?APP_NAME, ssl_cert_login), - HaveDefaultCreds = Anon =:= true andalso - is_binary(DefaultUser) andalso - is_binary(DefaultPass), - CredentialsProvided = User =/= undefined orelse Pass =/= undefined, - CorrectCredentials = is_binary(User) andalso is_binary(Pass) andalso Pass =/= <<>>, + ValidCredentials = is_binary(User) andalso is_binary(Pass) andalso Pass =/= <<>>, + {ok, TLSAuth} = application:get_env(?APP_NAME, ssl_cert_login), SSLLoginProvided = TLSAuth =:= true andalso SSLLoginName =/= none, - case {CredentialsProvided, CorrectCredentials, SSLLoginProvided, HaveDefaultCreds} of - %% Username and password take priority - {true, true, _, _} -> {User, Pass}; - %% Either username or password is provided - {true, false, _, _} -> {invalid_creds, {User, Pass}}; - %% rabbitmq_mqtt.ssl_cert_login is true. SSL user name provided. - %% Authenticating using username only. - {false, false, true, _} -> {SSLLoginName, none}; - %% Anonymous connection uses default credentials - {false, false, false, true} -> {DefaultUser, DefaultPass}; - _ -> nocreds + case {CredentialsProvided, ValidCredentials, SSLLoginProvided} of + {true, true, _} -> + %% Username and password take priority + {ok, User, Pass}; + {true, false, _} -> + %% Either username or password is provided + {invalid_creds, {User, Pass}}; + {false, false, true} -> + %% rabbitmq_mqtt.ssl_cert_login is true. SSL user name provided. + %% Authenticating using username only. + {ok, SSLLoginName, none}; + {false, false, false} -> + {ok, AllowAnon} = application:get_env(?APP_NAME, allow_anonymous), + case AllowAnon of + true -> + case rabbit_auth_mechanism_anonymous:credentials() of + {ok, _, _} = Ok -> + Ok; + error -> + nocreds + end; + false -> + nocreds + end; + _ -> + nocreds end. -spec auth_attempt_failed(inet:ip_address(), binary()) -> ok. auth_attempt_failed(PeerIp, Username) -> - rabbit_core_metrics:auth_attempt_failed(PeerIp, Username, mqtt). + rabbit_core_metrics:auth_attempt_failed(PeerIp, Username, mqtt), + timer:sleep(?SILENT_CLOSE_DELAY). 
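%% ensure_credential_expiry_timer/2 (added further up in this file) and the new
%% credential_expired clause in rabbit_mqtt_reader.erl (later in this diff)
%% work as a pair: the processor arms a timer for the credential's remaining
%% lifetime, and the reader disconnects the client with ?RC_MAXIMUM_CONNECT_TIME
%% when that timer fires. A condensed, illustrative sketch of the flow with
%% hypothetical module and function names:
-module(credential_expiry_sketch).
-export([arm_timer/1, handle_info/2]).

%% ExpiryTs is an absolute timestamp in seconds since the epoch, as returned by
%% rabbit_access_control:expiry_timestamp/1 in the real code.
arm_timer(never) ->
    ok;
arm_timer(ExpiryTs) when is_integer(ExpiryTs) ->
    MsLeft = (ExpiryTs - os:system_time(second)) * 1000,
    case MsLeft > 0 of
        true ->
            _Ref = erlang:send_after(MsLeft, self(), credential_expired),
            ok;
        false ->
            {error, already_expired}
    end.

%% The real reader also sends a DISCONNECT packet with reason code
%% ?RC_MAXIMUM_CONNECT_TIME before stopping the connection process.
handle_info(credential_expired, State) ->
    {stop, {shutdown, {disconnect, server_initiated}}, State}.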
maybe_downgrade_qos(?QOS_0) -> ?QOS_0; maybe_downgrade_qos(?QOS_1) -> ?QOS_1; @@ -1353,32 +1351,27 @@ create_queue(QNamePart, QOwner, QArgs, QType, Err0 -> Err0 end end, - case rabbit_vhost_limit:is_over_queue_limit(VHost) of - false -> - rabbit_core_metrics:queue_declared(QName), - Q0 = amqqueue:new(QName, - none, - _Durable = true, - _AutoDelete = false, - QOwner, - QArgs, - VHost, - #{user => Username}, - QType), - case rabbit_queue_type:declare(Q0, node()) of - {new, Q} when ?is_amqqueue(Q) -> - rabbit_core_metrics:queue_created(QName), - {ok, Q}; - Other -> - ?LOG_ERROR("Failed to declare ~s: ~p", - [rabbit_misc:rs(QName), Other]), - {error, queue_declare} - end; - {true, Limit} -> - ?LOG_ERROR("cannot declare ~s because " - "queue limit ~p in vhost '~s' is reached", - [rabbit_misc:rs(QName), Limit, VHost]), - {error, queue_limit_exceeded} + rabbit_core_metrics:queue_declared(QName), + Q0 = amqqueue:new(QName, + none, + _Durable = true, + _AutoDelete = false, + QOwner, + QArgs, + VHost, + #{user => Username}, + QType), + case rabbit_queue_type:declare(Q0, node()) of + {new, Q} when ?is_amqqueue(Q) -> + rabbit_core_metrics:queue_created(QName), + {ok, Q}; + {error, queue_limit_exceeded, Reason, ReasonArgs} -> + ?LOG_ERROR(Reason, ReasonArgs), + {error, queue_limit_exceeded}; + Other -> + ?LOG_ERROR("Failed to declare ~s: ~p", + [rabbit_misc:rs(QName), Other]), + {error, queue_declare} end else {error, access_refused} = Err -> @@ -1413,13 +1406,8 @@ queue_ttl_args(SessionExpirySecs) when is_integer(SessionExpirySecs) andalso SessionExpirySecs > 0 -> [{?QUEUE_TTL_KEY, long, timer:seconds(SessionExpirySecs)}]. -queue_type(?QOS_0, 0, QArgs) -> - case rabbit_queue_type:is_enabled(?QUEUE_TYPE_QOS_0) of - true -> - ?QUEUE_TYPE_QOS_0; - false -> - rabbit_amqqueue:get_queue_type(QArgs) - end; +queue_type(?QOS_0, 0, _QArgs) -> + ?QUEUE_TYPE_QOS_0; queue_type(_, _, QArgs) -> rabbit_amqqueue:get_queue_type(QArgs). 
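%% With ?QUEUE_TYPE_QOS_0 now a required feature flag (rabbit_mqtt_ff.erl
%% earlier in this diff), queue_type/3 above drops its runtime is_enabled
%% check. Reading the remaining clauses, and assuming the usual macro values
%% from this plugin's headers (?QOS_0 = 0, ?QOS_1 = 1, ?QUEUE_TYPE_QOS_0 being
%% the rabbit_mqtt_qos0_queue pseudo queue type patched further down):
%%
%%   queue_type(?QOS_0, 0, _QArgs)  -> ?QUEUE_TYPE_QOS_0
%%   queue_type(?QOS_0, 300, QArgs) -> rabbit_amqqueue:get_queue_type(QArgs)
%%   queue_type(?QOS_1, _, QArgs)   -> rabbit_amqqueue:get_queue_type(QArgs)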
@@ -1444,7 +1432,7 @@ consume(Q, QoS, #state{ channel_pid => self(), limiter_pid => none, limiter_active => false, - prefetch_count => Prefetch, + mode => {simple_prefetch, Prefetch}, consumer_tag => ?CONSUMER_TAG, exclusive_consume => false, args => [], @@ -1545,21 +1533,19 @@ publish_to_queues( #mqtt_msg{topic = Topic, packet_id = PacketId} = MqttMsg, #state{cfg = #cfg{exchange = ExchangeName = #resource{name = ExchangeNameBin}, - delivery_flow = Flow, conn_name = ConnName, trace_state = TraceState}, auth_state = #auth_state{user = #user{username = Username}}} = State) -> - - Anns = #{exchange => ExchangeNameBin, - routing_keys => [mqtt_to_amqp(Topic)]}, - Msg0 = mc:init(mc_mqtt, MqttMsg, Anns), + Anns = #{?ANN_EXCHANGE => ExchangeNameBin, + ?ANN_ROUTING_KEYS => [mqtt_to_amqp(Topic)]}, + Msg0 = mc:init(mc_mqtt, MqttMsg, Anns, mc_env()), Msg = rabbit_message_interceptor:intercept(Msg0), case rabbit_exchange:lookup(ExchangeName) of {ok, Exchange} -> QNames0 = rabbit_exchange:route(Exchange, Msg, #{return_binding_keys => true}), QNames = drop_local(QNames0, State), rabbit_trace:tap_in(Msg, QNames, ConnName, Username, TraceState), - Opts = maps_put_truthy(flow, Flow, maps_put_truthy(correlation, PacketId, #{})), + Opts = maps_put_truthy(correlation, PacketId, #{}), deliver_to_queues(Msg, Opts, QNames, State); {error, not_found} -> ?LOG_ERROR("~s not found", [rabbit_misc:rs(ExchangeName)]), @@ -1581,7 +1567,8 @@ drop_local(QNames, #state{subscriptions = Subs, "qos", _:1/binary >>}, #{binding_keys := BindingKeys}}) when Vhost0 =:= Vhost andalso - ClientId0 =:= ClientId -> + ClientId0 =:= ClientId andalso + map_size(BindingKeys) > 0 -> rabbit_misc:maps_any( fun(BKey, true) -> TopicFilter = amqp_to_mqtt(BKey), @@ -1599,14 +1586,13 @@ drop_local(QNames, #state{subscriptions = Subs, drop_local(QNames, _) -> QNames. -deliver_to_queues(Message0, +deliver_to_queues(Message, Options, RoutedToQNames, State0 = #state{queue_states = QStates0, cfg = #cfg{proto_ver = ProtoVer}}) -> Qs0 = rabbit_amqqueue:lookup_many(RoutedToQNames), Qs = rabbit_amqqueue:prepend_extra_bcc(Qs0), - Message = compat(Message0, State0), case rabbit_queue_type:deliver(Qs, Message, Options, QStates0) of {ok, QStates, Actions} -> rabbit_global_counters:messages_routed(ProtoVer, length(Qs)), @@ -1652,12 +1638,17 @@ process_routing_confirm(#{}, _, State) -> -spec send_puback(packet_id() | list(packet_id()), reason_code(), state()) -> ok. send_puback(PktIds0, ReasonCode, State) when is_list(PktIds0) -> - %% Classic queues confirm messages unordered. - %% Let's sort them here assuming most MQTT clients send with an increasing packet identifier. - PktIds = lists:usort(PktIds0), - lists:foreach(fun(Id) -> - send_puback(Id, ReasonCode, State) - end, PktIds); + case rabbit_node_monitor:pause_partition_guard() of + ok -> + %% Classic queues confirm messages unordered. + %% Let's sort them here assuming most MQTT clients send with an increasing packet identifier. + PktIds = lists:usort(PktIds0), + lists:foreach(fun(Id) -> + send_puback(Id, ReasonCode, State) + end, PktIds); + pausing -> + ok + end; send_puback(PktId, ReasonCode, State = #state{cfg = #cfg{proto_ver = ProtoVer}}) -> rabbit_global_counters:messages_confirmed(ProtoVer, 1), Packet = #mqtt_packet{fixed = #mqtt_packet_fixed{type = ?PUBACK}, @@ -1705,11 +1696,11 @@ send_disconnect(_, _) -> ok. -spec terminate(boolean(), rabbit_event:event_props(), state()) -> ok. 
-terminate(SendWill, Infos, State) -> +terminate(SendWill, Infos, State = #state{queue_states = QStates}) -> + rabbit_queue_type:close(QStates), rabbit_core_metrics:connection_closed(self()), rabbit_event:notify(connection_closed, Infos), rabbit_networking:unregister_non_amqp_connection(self()), - unregister_client(State), maybe_decrement_consumer(State), maybe_decrement_publisher(State), _ = maybe_delete_mqtt_qos0_queue(State), @@ -1762,20 +1753,20 @@ maybe_send_will( kind = exchange, name = ?DEFAULT_EXCHANGE_NAME}, #resource{name = QNameBin} = amqqueue:get_name(Q), - Anns0 = #{exchange => ?DEFAULT_EXCHANGE_NAME, - routing_keys => [QNameBin], + Anns0 = #{?ANN_EXCHANGE => ?DEFAULT_EXCHANGE_NAME, + ?ANN_ROUTING_KEYS => [QNameBin], ttl => Ttl, %% Persist message regardless of Will QoS since there is no noticable %% performance benefit if that single message is transient. This ensures that %% delayed Will Messages are not lost after a broker restart. - durable => true}, + ?ANN_DURABLE => true}, Anns = case Props of #{'Message-Expiry-Interval' := MEI} -> Anns0#{dead_letter_ttl => timer:seconds(MEI)}; _ -> Anns0 end, - Msg = mc:init(mc_mqtt, MqttMsg, Anns), + Msg = mc:init(mc_mqtt, MqttMsg, Anns, mc_env()), case check_publish_permitted(DefaultX, Topic, State) of ok -> ok = rabbit_queue_type:publish_at_most_once(DefaultX, Msg), @@ -1805,15 +1796,6 @@ log_delayed_will_failure(Topic, ClientId, Reason) -> ?LOG_DEBUG("failed to schedule delayed Will Message to topic ~s for MQTT client ID ~s: ~p", [Topic, ClientId, Reason]). -unregister_client(#state{cfg = #cfg{client_id = ClientIdBin}}) -> - case rabbit_mqtt_ff:track_client_id_in_ra() of - true -> - ClientId = rabbit_data_coercion:to_list(ClientIdBin), - rabbit_mqtt_collector:unregister(ClientId, self()); - false -> - ok - end. - maybe_delete_mqtt_qos0_queue( State = #state{cfg = #cfg{clean_start = true}, auth_state = #auth_state{user = #user{username = Username}}}) -> @@ -1840,30 +1822,32 @@ delete_queue(QName, user = User = #user{username = Username}, authz_ctx = AuthzCtx}}) -> %% configure access to queue required for queue.delete - case check_resource_access(User, QName, configure, AuthzCtx) of - ok -> - case rabbit_amqqueue:with( - QName, - fun (Q) -> - rabbit_queue_type:delete(Q, false, false, Username) - end, - fun (not_found) -> - ok; - ({absent, Q, crashed}) -> - rabbit_classic_queue:delete_crashed(Q, Username); - ({absent, Q, stopped}) -> - rabbit_classic_queue:delete_crashed(Q, Username); - ({absent, _Q, _Reason}) -> - ok - end) of - {ok, _N} -> - ok; - ok -> - ok - end; - {error, access_refused} = E -> - E - end. + %% We only check access if the queue actually exists. + rabbit_amqqueue:with( + QName, + fun (Q) -> + case check_resource_access(User, QName, configure, AuthzCtx) of + ok -> + {ok, _N} = rabbit_queue_type:delete(Q, false, false, Username), + ok; + Err -> + Err + end + end, + fun (not_found) -> + ok; + ({absent, Q, State}) + when State =:= crashed orelse + State =:= stopped -> + case check_resource_access(User, QName, configure, AuthzCtx) of + ok -> + rabbit_classic_queue:delete_crashed(Q, Username); + Err -> + Err + end; + ({absent, _Q, _State}) -> + ok + end). -spec handle_pre_hibernate() -> ok. handle_pre_hibernate() -> @@ -1871,41 +1855,6 @@ handle_pre_hibernate() -> erase(topic_permission_cache), ok. --spec handle_ra_event(register_timeout -| {applied, [{reference(), ok}]} -| {not_leader, term(), reference()}, state()) -> state(). 
-handle_ra_event({applied, [{Corr, ok}]}, - State = #state{ra_register_state = {pending, Corr}}) -> - %% success case - command was applied transition into registered state - State#state{ra_register_state = registered}; -handle_ra_event({not_leader, Leader, Corr}, - State = #state{ra_register_state = {pending, Corr}, - cfg = #cfg{client_id = ClientIdBin}}) -> - case rabbit_mqtt_ff:track_client_id_in_ra() of - true -> - ClientId = rabbit_data_coercion:to_list(ClientIdBin), - %% retry command against actual leader - {ok, NewCorr} = rabbit_mqtt_collector:register(Leader, ClientId, self()), - State#state{ra_register_state = {pending, NewCorr}}; - false -> - State - end; -handle_ra_event(register_timeout, - State = #state{ra_register_state = {pending, _Corr}, - cfg = #cfg{client_id = ClientId}}) -> - case rabbit_mqtt_ff:track_client_id_in_ra() of - true -> - {ok, NewCorr} = collector_register(ClientId), - State#state{ra_register_state = {pending, NewCorr}}; - false -> - State - end; -handle_ra_event(register_timeout, State) -> - State; -handle_ra_event(Evt, State) -> - ?LOG_DEBUG("unhandled ra_event: ~w ", [Evt]), - State. - -spec handle_down(term(), state()) -> {ok, state()} | {error, Reason :: any()}. handle_down({{'DOWN', QName}, _MRef, process, QPid, Reason}, @@ -1995,25 +1944,14 @@ handle_queue_actions(Actions, #state{} = State0) -> handle_queue_down(QName, State0 = #state{cfg = #cfg{client_id = ClientId}}) -> %% Classic queue is down. - case rabbit_amqqueue:lookup(QName) of - {ok, Q} -> - case rabbit_mqtt_util:qos_from_queue_name(QName, ClientId) of - no_consuming_queue -> - State0; - QoS -> - %% Consuming classic queue is down. - %% Let's try to re-consume: HA failover for classic mirrored queues. - case consume(Q, QoS, State0) of - {ok, State} -> - State; - {error, _Reason} -> - ?LOG_INFO("Terminating MQTT connection because consuming ~s is down.", - [rabbit_misc:rs(QName)]), - throw(consuming_queue_down) - end - end; - {error, not_found} -> - State0 + case rabbit_mqtt_util:qos_from_queue_name(QName, ClientId) of + no_consuming_queue -> + State0; + _QoS -> + %% Consuming classic queue is down. + ?LOG_INFO("Terminating MQTT connection because consuming ~s is down.", + [rabbit_misc:rs(QName)]), + throw(consuming_queue_down) end. deliver_to_client(Msgs, Ack, State) -> @@ -2027,7 +1965,7 @@ deliver_one_to_client({QNameOrType, QPid, QMsgId, _Redelivered, Mc} = Delivery, true -> ?QOS_1; false -> ?QOS_0 end, - McMqtt = mc:convert(mc_mqtt, Mc), + McMqtt = mc:convert(mc_mqtt, Mc, mc_env()), MqttMsg = #mqtt_msg{qos = PublisherQos} = mc:protocol_state(McMqtt), QoS = effective_qos(PublisherQos, SubscriberQoS), {SettleOp, State1} = maybe_publish_to_client(MqttMsg, Delivery, QoS, State0), @@ -2456,10 +2394,6 @@ message_redelivered(true, ProtoVer, QType) -> message_redelivered(_, _, _) -> ok. -collector_register(ClientIdBin) -> - ClientId = rabbit_data_coercion:to_list(ClientIdBin), - rabbit_mqtt_collector:register(ClientId, self()). - %% "Reason Codes less than 0x80 indicate successful completion of an operation. %% Reason Code values of 0x80 or greater indicate failure." -spec is_success(reason_code()) -> boolean(). 
@@ -2474,7 +2408,6 @@ format_status( packet_id = PackID, subscriptions = Subscriptions, auth_state = AuthState, - ra_register_state = RaRegisterState, queues_soft_limit_exceeded = QSLE, qos0_messages_dropped = Qos0MsgsDropped, cfg = #cfg{ @@ -2488,7 +2421,6 @@ format_status( published = Published, ssl_login_name = SSLLoginName, retainer_pid = RetainerPid, - delivery_flow = DeliveryFlow, trace_state = TraceState, prefetch = Prefetch, client_id = ClientID, @@ -2510,7 +2442,6 @@ format_status( ssl_login_name => SSLLoginName, retainer_pid => RetainerPid, - delivery_flow => DeliveryFlow, trace_state => TraceState, prefetch => Prefetch, client_id => ClientID, @@ -2527,20 +2458,13 @@ format_status( packet_id => PackID, subscriptions => Subscriptions, auth_state => AuthState, - ra_register_state => RaRegisterState, queues_soft_limit_exceeded => QSLE, qos0_messages_dropped => Qos0MsgsDropped}. --spec compat(mc:state(), state()) -> mc:state(). -compat(McMqtt, #state{cfg = #cfg{exchange = XName}}) -> - case rabbit_feature_flags:is_enabled(message_containers) of - true -> - McMqtt; - false = FFState -> - #mqtt_msg{qos = Qos} = mc:protocol_state(McMqtt), - [RoutingKey] = mc:get_annotation(routing_keys, McMqtt), - McLegacy = mc:convert(mc_amqpl, McMqtt), - Content = mc:protocol_state(McLegacy), - BasicMsg = mc_amqpl:message(XName, RoutingKey, Content, #{}, FFState), - rabbit_basic:add_header(<<"x-mqtt-publish-qos">>, byte, Qos, BasicMsg) +mc_env() -> + case persistent_term:get(?PERSISTENT_TERM_EXCHANGE) of + ?DEFAULT_MQTT_EXCHANGE -> + #{}; + MqttX -> + #{mqtt_x => MqttX} end. diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_qos0_queue.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_qos0_queue.erl index 4e2dc6b46388..47cf18e976a2 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_qos0_queue.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_qos0_queue.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2018-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module is a pseudo queue type. @@ -36,6 +36,7 @@ policy_changed/1, info/2, stat/1, + format/2, capabilities/0, notify_decorators/1 ]). @@ -47,10 +48,11 @@ close/1, update/2, consume/3, - cancel/5, + cancel/3, handle_event/3, settle/5, - credit/5, + credit_v1/5, + credit/6, dequeue/5, state_info/1 ]). @@ -68,8 +70,10 @@ is_stateful() -> -spec declare(amqqueue:amqqueue(), node()) -> {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} | - {'absent', amqqueue:amqqueue(), rabbit_amqqueue:absent_reason()}. + {'absent', amqqueue:amqqueue(), rabbit_amqqueue:absent_reason()} | + {protocol_error, internal_error, string(), [string()]}. 
declare(Q0, _Node) -> + QName = amqqueue:get_name(Q0), Q1 = case amqqueue:get_pid(Q0) of none -> %% declaring process becomes the queue @@ -84,7 +88,7 @@ declare(Q0, _Node) -> Opts = amqqueue:get_options(Q), ActingUser = maps:get(user, Opts, ?UNKNOWN_USER), rabbit_event:notify(queue_created, - [{name, amqqueue:get_name(Q)}, + [{name, QName}, {durable, true}, {auto_delete, false}, {exclusive, true}, @@ -92,6 +96,11 @@ declare(Q0, _Node) -> {arguments, amqqueue:get_arguments(Q)}, {user_who_performed_action, ActingUser}]), {new, Q}; + {error, timeout} -> + {protocol_error, internal_error, + "Could not declare ~ts because the metadata store operation " + "timed out", + [rabbit_misc:rs(QName)]}; Other -> Other end. @@ -143,7 +152,7 @@ deliver(Qs, Msg, Options) -> {[], Actions}. -spec is_enabled() -> boolean(). -is_enabled() -> rabbit_feature_flags:is_enabled(?MODULE). +is_enabled() -> true. -spec is_compatible(boolean(), boolean(), boolean()) -> boolean(). @@ -202,6 +211,12 @@ notify_decorators(_) -> stat(_Q) -> {ok, 0, 0}. +-spec format(amqqueue:amqqueue(), map()) -> + [{atom(), term()}]. +format(Q, _Ctx) -> + [{type, ?MODULE}, + {state, amqqueue:get_state(Q)}]. + -spec capabilities() -> #{atom() := term()}. capabilities() -> @@ -261,8 +276,8 @@ update(A1,A2) -> consume(A1,A2,A3) -> ?UNSUPPORTED([A1,A2,A3]). -cancel(A1,A2,A3,A4,A5) -> - ?UNSUPPORTED([A1,A2,A3,A4,A5]). +cancel(A1,A2,A3) -> + ?UNSUPPORTED([A1,A2,A3]). handle_event(A1,A2,A3) -> ?UNSUPPORTED([A1,A2,A3]). @@ -270,9 +285,12 @@ handle_event(A1,A2,A3) -> settle(A1,A2,A3,A4,A5) -> ?UNSUPPORTED([A1,A2,A3,A4,A5]). -credit(A1,A2,A3,A4,A5) -> +credit_v1(A1,A2,A3,A4,A5) -> ?UNSUPPORTED([A1,A2,A3,A4,A5]). +credit(A1,A2,A3,A4,A5,A6) -> + ?UNSUPPORTED([A1,A2,A3,A4,A5,A6]). + dequeue(A1,A2,A3,A4,A5) -> ?UNSUPPORTED([A1,A2,A3,A4,A5]). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl index cc9ef98e3bee..c37a6e0ef64e 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_reader). 
@@ -71,34 +71,39 @@ close_connection(Pid, Reason) -> init(Ref) -> process_flag(trap_exit, true), logger:set_process_metadata(#{domain => ?RMQLOG_DOMAIN_CONN ++ [mqtt]}), - {ok, Sock} = rabbit_networking:handshake(Ref, - application:get_env(?APP_NAME, proxy_protocol, false)), - RealSocket = rabbit_net:unwrap_socket(Sock), - case rabbit_net:connection_string(Sock, inbound) of - {ok, ConnStr} -> - ConnName = rabbit_data_coercion:to_binary(ConnStr), - ?LOG_DEBUG("MQTT accepting TCP connection ~tp (~ts)", [self(), ConnName]), - _ = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), - LoginTimeout = application:get_env(?APP_NAME, login_timeout, 10_000), - erlang:send_after(LoginTimeout, self(), login_timeout), - State0 = #state{socket = RealSocket, - proxy_socket = rabbit_net:maybe_get_proxy_socket(Sock), - conn_name = ConnName, - await_recv = false, - connection_state = running, - conserve = false, - parse_state = rabbit_mqtt_packet:init_state()}, - State1 = control_throttle(State0), - State = rabbit_event:init_stats_timer(State1, #state.stats_timer), - gen_server:enter_loop(?MODULE, [], State); - {error, Reason = enotconn} -> - ?LOG_INFO("MQTT could not get connection string: ~s", [Reason]), - rabbit_net:fast_close(RealSocket), - ignore; + ProxyProtocolEnabled = application:get_env(?APP_NAME, proxy_protocol, false), + case rabbit_networking:handshake(Ref, ProxyProtocolEnabled) of {error, Reason} -> - ?LOG_ERROR("MQTT could not get connection string: ~p", [Reason]), - rabbit_net:fast_close(RealSocket), - {stop, Reason} + ?LOG_ERROR("MQTT could not establish connection: ~s", [Reason]), + {stop, Reason}; + {ok, Sock} -> + RealSocket = rabbit_net:unwrap_socket(Sock), + case rabbit_net:connection_string(Sock, inbound) of + {ok, ConnStr} -> + ConnName = rabbit_data_coercion:to_binary(ConnStr), + ?LOG_DEBUG("MQTT accepting TCP connection ~tp (~ts)", [self(), ConnName]), + _ = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), + LoginTimeout = application:get_env(?APP_NAME, login_timeout, 10_000), + erlang:send_after(LoginTimeout, self(), login_timeout), + State0 = #state{socket = RealSocket, + proxy_socket = rabbit_net:maybe_get_proxy_socket(Sock), + conn_name = ConnName, + await_recv = false, + connection_state = running, + conserve = false, + parse_state = rabbit_mqtt_packet:init_state()}, + State1 = control_throttle(State0), + State = rabbit_event:init_stats_timer(State1, #state.stats_timer), + gen_server:enter_loop(?MODULE, [], State); + {error, Reason = enotconn} -> + ?LOG_INFO("MQTT could not get connection string: ~s", [Reason]), + rabbit_net:fast_close(RealSocket), + ignore; + {error, Reason} -> + ?LOG_ERROR("MQTT could not get connection string: ~p", [Reason]), + rabbit_net:fast_close(RealSocket), + {stop, Reason} + end end. handle_call({info, InfoItems}, _From, State) -> @@ -107,11 +112,6 @@ handle_call({info, InfoItems}, _From, State) -> handle_call(Msg, From, State) -> {stop, {mqtt_unexpected_call, Msg, From}, State}. -%% Delete this backward compatibility clause when feature flag -%% delete_ra_cluster_mqtt_node becomes required. 
-handle_cast(duplicate_id, State) -> - handle_cast({duplicate_id, true}, State); - handle_cast({duplicate_id, SendWill}, State = #state{proc_state = PState, conn_name = ConnName}) -> @@ -120,16 +120,9 @@ handle_cast({duplicate_id, SendWill}, rabbit_mqtt_processor:send_disconnect(?RC_SESSION_TAKEN_OVER, PState), {stop, {shutdown, duplicate_id}, {SendWill, State}}; -handle_cast(decommission_node, - State = #state{ proc_state = PState, - conn_name = ConnName }) -> - ?LOG_WARNING("MQTT disconnecting client ~tp with client ID '~ts' as its node is about" - " to be decommissioned", - [ConnName, rabbit_mqtt_processor:info(client_id, PState)]), - {stop, {shutdown, decommission_node}, State}; - handle_cast({close_connection, Reason}, - State = #state{conn_name = ConnName, proc_state = PState}) -> + State = #state{conn_name = ConnName, + proc_state = PState}) -> ?LOG_WARNING("MQTT disconnecting client ~tp with client ID '~ts', reason: ~ts", [ConnName, rabbit_mqtt_processor:info(client_id, PState), Reason]), case Reason of @@ -140,11 +133,13 @@ handle_cast({close_connection, Reason}, handle_cast(QueueEvent = {queue_event, _, _}, State = #state{proc_state = PState0}) -> - case rabbit_mqtt_processor:handle_queue_event(QueueEvent, PState0) of + try rabbit_mqtt_processor:handle_queue_event(QueueEvent, PState0) of {ok, PState} -> maybe_process_deferred_recv(control_throttle(pstate(State, PState))); - {error, Reason, PState} -> - {stop, Reason, pstate(State, PState)} + {error, Reason0, PState} -> + {stop, Reason0, pstate(State, PState)} + catch throw:{send_failed, Reason1} -> + network_error(Reason1, State) end; handle_cast({force_event_refresh, Ref}, State0) -> @@ -193,12 +188,6 @@ handle_info({Tag, Sock, Reason}, State = #state{socket = Sock}) when Tag =:= tcp_error; Tag =:= ssl_error -> network_error(Reason, State); -handle_info({inet_reply, Sock, ok}, State = #state{socket = Sock}) -> - {noreply, State, ?HIBERNATE_AFTER}; - -handle_info({inet_reply, Sock, {error, Reason}}, State = #state{socket = Sock}) -> - network_error(Reason, State); - handle_info({conserve_resources, Conserve}, State) -> maybe_process_deferred_recv( control_throttle(State #state{ conserve = Conserve })); @@ -221,6 +210,14 @@ handle_info({keepalive, Req}, State = #state{proc_state = PState, {stop, Reason, State} end; +handle_info(credential_expired, + State = #state{conn_name = ConnName, + proc_state = PState}) -> + ?LOG_WARNING("MQTT disconnecting client ~tp with client ID '~ts' because credential expired", + [ConnName, rabbit_mqtt_processor:info(client_id, PState)]), + rabbit_mqtt_processor:send_disconnect(?RC_MAXIMUM_CONNECT_TIME, PState), + {stop, {shutdown, {disconnect, server_initiated}}, State}; + handle_info(login_timeout, State = #state{proc_state = connect_packet_unprocessed, conn_name = ConnName}) -> %% The connection is also closed if the CONNECT packet happens to @@ -236,13 +233,6 @@ handle_info(login_timeout, State) -> handle_info(emit_stats, State) -> {noreply, emit_stats(State), ?HIBERNATE_AFTER}; -handle_info({ra_event, _From, Evt}, - #state{proc_state = PState0} = State) -> - %% handle applied event to ensure registration command actually got applied - %% handle not_leader notification in case we send the command to a non-leader - PState = rabbit_mqtt_processor:handle_ra_event(Evt, PState0), - {noreply, pstate(State, PState), ?HIBERNATE_AFTER}; - handle_info({{'DOWN', _QName}, _MRef, process, _Pid, _Reason} = Evt, #state{proc_state = PState0} = State) -> case rabbit_mqtt_processor:handle_down(Evt, PState0) of @@ 
-334,16 +324,17 @@ process_received_bytes(Bytes, State = #state{socket = Socket, {ok, Packet, Rest, ParseState1} -> case ProcState of connect_packet_unprocessed -> - Send = fun(Data) -> - try rabbit_net:port_command(Socket, Data) - catch error:Reason -> - ?LOG_ERROR("writing to MQTT socket ~p failed: ~p", - [Socket, Reason]), - exit({send_failed, Reason}) - end, - ok - end, - try rabbit_mqtt_processor:init(Packet, Socket, ConnName, Send) of + SendFun = fun(Data) -> + case rabbit_net:send(Socket, Data) of + ok -> + ok; + {error, Reason} -> + ?LOG_ERROR("writing to MQTT socket ~p failed: ~p", + [Socket, Reason]), + throw({send_failed, Reason}) + end + end, + try rabbit_mqtt_processor:init(Packet, Socket, ConnName, SendFun) of {ok, ProcState1} -> ?LOG_INFO("Accepted MQTT connection ~ts for client ID ~ts", [ConnName, rabbit_mqtt_processor:info(client_id, ProcState1)]), @@ -359,7 +350,7 @@ process_received_bytes(Bytes, State = #state{socket = Socket, ?LOG_ERROR("Rejected MQTT connection ~ts with Connect Reason Code ~p", [ConnName, ConnectReasonCode]), {stop, shutdown, {_SendWill = false, State}} - catch exit:{send_failed, Reason} -> + catch throw:{send_failed, Reason} -> network_error(Reason, State) end; _ -> @@ -380,7 +371,7 @@ process_received_bytes(Bytes, State = #state{socket = Socket, {stop, {shutdown, Reason}, pstate(State, ProcState1)}; {stop, {disconnect, {client_initiated, SendWill}}, ProcState1} -> {stop, normal, {SendWill, pstate(State, ProcState1)}} - catch exit:{send_failed, Reason} -> + catch throw:{send_failed, Reason} -> network_error(Reason, State) end end; diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store.erl index cfee52422356..838104b5f9a1 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_retained_msg_store). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_dets.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_dets.erl index bca4da566902..bbae75f1829b 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_dets.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_dets.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_retained_msg_store_dets). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_ets.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_ets.erl index 913ae955409a..70a770ff526a 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_ets.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_ets.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_retained_msg_store_ets). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_noop.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_noop.erl index 9ba6db5d6c28..bd798df5e923 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_noop.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_noop.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_retained_msg_store_noop). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer.erl index 460435cb28ce..7ba569bb82b7 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_retainer). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer_sup.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer_sup.erl index 930b4d3adf26..94e035dc9e25 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer_sup.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_retainer_sup). diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl index 8c983c396dd0..943960ccffd5 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_sup). @@ -13,9 +13,6 @@ -export([start_link/2, init/1, stop_listeners/0]). --define(TCP_PROTOCOL, 'mqtt'). --define(TLS_PROTOCOL, 'mqtt/ssl'). - start_link(Listeners, []) -> supervisor:start_link({local, ?MODULE}, ?MODULE, [Listeners]). 
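Note on the process_received_bytes hunk above: the send helper passed to rabbit_mqtt_processor:init/4 now uses rabbit_net:send/2 and signals a failed write with throw({send_failed, Reason}), which the reader catches (catch throw:{send_failed, Reason}) and turns into an ordinary network_error shutdown, replacing the earlier port_command/exit pair. A minimal sketch of the same pattern, using gen_tcp:send/2 as a stand-in for rabbit_net:send/2; the module and function names here are illustrative and not part of the patch:

    -module(send_fun_sketch).
    -export([make_send_fun/1, send_or_stop/2]).

    %% Wrap a socket so that callers only ever see ok; a failed write
    %% escapes as a throw that the connection process catches in one place.
    make_send_fun(Sock) ->
        fun(Data) ->
                case gen_tcp:send(Sock, Data) of
                    ok -> ok;
                    {error, Reason} -> throw({send_failed, Reason})
                end
        end.

    %% The caller converts the throw into a controlled shutdown rather than
    %% letting the process exit abruptly.
    send_or_stop(Sock, Data) ->
        Send = make_send_fun(Sock),
        try Send(Data) of
            ok -> ok
        catch throw:{send_failed, Reason} ->
            {stop, {shutdown, Reason}}
        end.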
@@ -28,14 +25,11 @@ init([{Listeners, SslListeners0}]) -> [] -> {none, 0, []}; _ -> {rabbit_networking:ensure_ssl(), application:get_env(?APP_NAME, num_ssl_acceptors, 10), - case rabbit_networking:poodle_check('MQTT') of - ok -> SslListeners0; - danger -> [] - end} + SslListeners0} end, %% Use separate process group scope per RabbitMQ node. This achieves a local-only %% process group which requires less memory with millions of connections. - PgScope = list_to_atom(io_lib:format("~s_~s", [?PG_SCOPE, node()])), + PgScope = rabbit:pg_local_scope(?PG_SCOPE), persistent_term:put(?PG_SCOPE, PgScope), {ok, {#{strategy => one_for_all, @@ -69,8 +63,8 @@ init([{Listeners, SslListeners0}]) -> -spec stop_listeners() -> ok. stop_listeners() -> - _ = rabbit_networking:stop_ranch_listener_of_protocol(?TCP_PROTOCOL), - _ = rabbit_networking:stop_ranch_listener_of_protocol(?TLS_PROTOCOL), + _ = rabbit_networking:stop_ranch_listener_of_protocol(?MQTT_TCP_PROTOCOL), + _ = rabbit_networking:stop_ranch_listener_of_protocol(?MQTT_TLS_PROTOCOL), ok. %% @@ -89,7 +83,7 @@ tcp_listener_spec([Address, SocketOpts, NumAcceptors, ConcurrentConnsSups]) -> rabbit_mqtt_listener_sup, Address, SocketOpts, - transport(?TCP_PROTOCOL), + transport(?MQTT_TCP_PROTOCOL), rabbit_mqtt_reader, [], mqtt, @@ -104,7 +98,7 @@ ssl_listener_spec([Address, SocketOpts, SslOpts, NumAcceptors, ConcurrentConnsSu rabbit_mqtt_listener_sup, Address, SocketOpts ++ SslOpts, - transport(?TLS_PROTOCOL), + transport(?MQTT_TLS_PROTOCOL), rabbit_mqtt_reader, [], 'mqtt/ssl', @@ -114,7 +108,7 @@ ssl_listener_spec([Address, SocketOpts, SslOpts, NumAcceptors, ConcurrentConnsSu "MQTT TLS listener" ). -transport(?TCP_PROTOCOL) -> +transport(?MQTT_TCP_PROTOCOL) -> ranch_tcp; -transport(?TLS_PROTOCOL) -> +transport(?MQTT_TLS_PROTOCOL) -> ranch_ssl. diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl index 236af0e0899a..b8c65cb7e54c 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_mqtt_util). @@ -141,10 +141,10 @@ env(Key) -> undefined -> undefined end. -coerce_env_value(default_pass, Val) -> rabbit_data_coercion:to_binary(Val); -coerce_env_value(default_user, Val) -> rabbit_data_coercion:to_binary(Val); -coerce_env_value(vhost, Val) -> rabbit_data_coercion:to_binary(Val); -coerce_env_value(_, Val) -> Val. +coerce_env_value(vhost, Val) -> + rabbit_data_coercion:to_binary(Val); +coerce_env_value(_, Val) -> + Val. -spec table_lookup(rabbit_framing:amqp_table() | undefined, binary()) -> tuple() | undefined. diff --git a/deps/rabbitmq_mqtt/test/auth_SUITE.erl b/deps/rabbitmq_mqtt/test/auth_SUITE.erl index 68cdeb0b1872..a1434b336ff6 100644 --- a/deps/rabbitmq_mqtt/test/auth_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/auth_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -module(auth_SUITE). -compile([export_all, @@ -97,7 +97,8 @@ sub_groups() -> topic_read_permission, topic_write_permission, topic_write_permission_variable_expansion, - loopback_user_connects_from_remote_host + loopback_user_connects_from_remote_host, + connect_permission ] }, {limit, [shuffle], @@ -122,24 +123,27 @@ init_per_group(authz, Config0) -> User = <<"mqtt-user">>, Password = <<"mqtt-password">>, VHost = <<"mqtt-vhost">>, - MqttConfig = {rabbitmq_mqtt, [{default_user, User} - ,{default_pass, Password} - ,{allow_anonymous, true} - ,{vhost, VHost} - ,{exchange, <<"amq.topic">>} - ]}, - Config1 = rabbit_ct_helpers:run_setup_steps(rabbit_ct_helpers:merge_app_env(Config0, MqttConfig), - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - rabbit_ct_broker_helpers:add_user(Config1, User, Password), - rabbit_ct_broker_helpers:add_vhost(Config1, VHost), - [Log|_] = rpc(Config1, 0, rabbit, log_locations, []), - Config2 = [{mqtt_user, User}, - {mqtt_vhost, VHost}, - {mqtt_password, Password}, - {log_location, Log} | Config1], - ok = rabbit_ct_broker_helpers:enable_feature_flag(Config2, mqtt_v5), - Config2; + Env = [{rabbitmq_mqtt, + [{allow_anonymous, true}, + {vhost, VHost}, + {exchange, <<"amq.topic">>} + ]}, + {rabbit, + [{anonymous_login_user, User}, + {anonymous_login_pass, Password} + ]}], + Config1 = rabbit_ct_helpers:merge_app_env(Config0, Env), + Config = rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + rabbit_ct_broker_helpers:add_user(Config, User, Password), + rabbit_ct_broker_helpers:add_vhost(Config, VHost), + [Log|_] = rpc(Config, 0, rabbit, log_locations, []), + [{mqtt_user, User}, + {mqtt_vhost, VHost}, + {mqtt_password, Password}, + {log_location, Log} | Config]; init_per_group(Group, Config) -> Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), Config1 = rabbit_ct_helpers:set_config(Config, [ @@ -148,22 +152,20 @@ init_per_group(Group, Config) -> ]), MqttConfig = mqtt_config(Group), AuthConfig = auth_config(Group), - Config2 = rabbit_ct_helpers:run_setup_steps( - Config1, - [fun(Conf) -> case MqttConfig of - undefined -> Conf; - _ -> merge_app_env(MqttConfig, Conf) - end - end] ++ - [fun(Conf) -> case AuthConfig of - undefined -> Conf; - _ -> merge_app_env(AuthConfig, Conf) - end - end] ++ - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - ok = rabbit_ct_broker_helpers:enable_feature_flag(Config2, mqtt_v5), - Config2. + rabbit_ct_helpers:run_setup_steps( + Config1, + [fun(Conf) -> case MqttConfig of + undefined -> Conf; + _ -> merge_app_env(MqttConfig, Conf) + end + end] ++ + [fun(Conf) -> case AuthConfig of + undefined -> Conf; + _ -> merge_app_env(AuthConfig, Conf) + end + end] ++ + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). 
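The auth_SUITE hunk above moves the anonymous-login setup from the MQTT plugin's default_user/default_pass keys to the core rabbit application's anonymous_login_user/anonymous_login_pass settings, alongside allow_anonymous. An illustrative advanced.config-style fragment mirroring the test environment above; the values are taken from the suite and are not recommended production settings:

    %% advanced.config sketch mirroring the test setup above.
    [
     {rabbit,
      [{anonymous_login_user, <<"mqtt-user">>},
       {anonymous_login_pass, <<"mqtt-password">>}]},
     {rabbitmq_mqtt,
      [{allow_anonymous, true},
       {vhost, <<"mqtt-vhost">>},
       {exchange, <<"amq.topic">>}]}
    ].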
end_per_group(G, Config) when G =:= v4; @@ -284,7 +286,8 @@ init_per_testcase(T, Config) when T =:= will_queue_create_permission_queue_read; T =:= will_queue_create_permission_exchange_write; T =:= will_queue_publish_permission_exchange_write; - T =:= will_queue_publish_permission_topic_write -> + T =:= will_queue_publish_permission_topic_write; + T =:= will_queue_delete_permission -> case ?config(mqtt_version, Config) of v4 -> {skip, "Will Delay Interval is an MQTT 5.0 feature"}; v5 -> testcase_started(Config, T) @@ -414,7 +417,6 @@ anonymous_auth_success(Config) -> anonymous_auth_failure(Config) -> expect_authentication_failure(fun connect_anonymous/1, Config). - ssl_user_auth_success(Config) -> expect_successful_connection(fun connect_ssl/1, Config). @@ -507,7 +509,8 @@ connect_ssl(Config) -> CertsDir = ?config(rmq_certsdir, Config), SSLConfig = [{cacertfile, filename:join([CertsDir, "testca", "cacert.pem"])}, {certfile, filename:join([CertsDir, "client", "cert.pem"])}, - {keyfile, filename:join([CertsDir, "client", "key.pem"])}], + {keyfile, filename:join([CertsDir, "client", "key.pem"])}, + {server_name_indication, "localhost"}], P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt_tls), emqtt:start_link([{host, "localhost"}, {port, P}, @@ -527,8 +530,8 @@ client_id_propagation(Config) -> rpc(Config, 0, rabbit_auth_backend_mqtt_mock, setup, [Self]) end), %% the setup process will notify us - receive - ok -> ok + SetupProcess = receive + {ok, SP} -> SP after 3000 -> ct:fail("timeout waiting for rabbit_auth_backend_mqtt_mock:setup/1") end, @@ -562,7 +565,11 @@ client_id_propagation(Config) -> VariableMap = maps:get(variable_map, TopicContext), ?assertEqual(ClientId, maps:get(<<"client_id">>, VariableMap)), - ok = emqtt:disconnect(C). + ok = emqtt:disconnect(C), + + SetupProcess ! stop, + + ok. %% These tests try to cover all operations that are listed in the %% table in https://www.rabbitmq.com/access-control.html#authorisation @@ -982,6 +989,12 @@ loopback_user_connects_from_remote_host(Config) -> true = rpc(Config, 0, meck, validate, [Mod]), ok = rpc(Config, 0, meck, unload, [Mod]). +%% No specific configure, write, or read permissions should be required for only connecting. +connect_permission(Config) -> + set_permissions("", "", "", Config), + C = open_mqtt_connection(Config), + ok = emqtt:disconnect(C). + set_topic_permissions(WritePat, ReadPat, Config) -> rpc(Config, 0, rabbit_auth_backend_internal, set_topic_permissions, @@ -1092,7 +1105,8 @@ vhost_connection_limit(Config) -> unlink(C3), ?assertMatch({error, {ExpectedError, _}}, emqtt:connect(C3)), ok = emqtt:disconnect(C1), - ok = emqtt:disconnect(C2). + ok = emqtt:disconnect(C2), + ok = rabbit_ct_broker_helpers:clear_vhost_limit(Config, 0, <<"/">>). vhost_queue_limit(Config) -> ok = rabbit_ct_broker_helpers:set_vhost_limit(Config, 0, <<"/">>, max_queues, 1), @@ -1105,7 +1119,8 @@ vhost_queue_limit(Config) -> emqtt:subscribe(C, [{<<"topic1">>, qos0}, {<<"topic2">>, qos1}, {<<"topic3">>, qos1}])), - ok = assert_connection_closed(C). + ok = assert_connection_closed(C), + ok = rabbit_ct_broker_helpers:clear_vhost_limit(Config, 0, <<"/">>). user_connection_limit(Config) -> DefaultUser = <<"guest">>, diff --git a/deps/rabbitmq_mqtt/test/cluster_SUITE.erl b/deps/rabbitmq_mqtt/test/cluster_SUITE.erl index e6e8f76618cc..bb404499d6f5 100644 --- a/deps/rabbitmq_mqtt/test/cluster_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/cluster_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(cluster_SUITE). -compile([export_all, nowarn_export_all]). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -import(util, [expect_publishes/3, connect/3, @@ -43,8 +42,7 @@ groups() -> cluster_size_5() -> [ connection_id_tracking, - connection_id_tracking_on_nodedown, - connection_id_tracking_with_decommissioned_node + connection_id_tracking_on_nodedown ]. %% ------------------------------------------------------------------- @@ -81,11 +79,11 @@ init_per_testcase(Testcase, Config) -> {rmq_nodename_suffix, Testcase}, {rmq_nodes_clustered, true} ]), - Config2 = rabbit_ct_helpers:run_setup_steps(Config1, - [ fun merge_app_env/1 ] ++ + rabbit_ct_helpers:run_setup_steps( + Config1, + [fun merge_app_env/1] ++ setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - util:maybe_skip_v5(Config2). + rabbit_ct_client_helpers:setup_steps()). end_per_testcase(Testcase, Config) -> rabbit_ct_helpers:run_steps(Config, @@ -141,26 +139,7 @@ connection_id_tracking_on_nodedown(Config) -> process_flag(trap_exit, true), ok = stop_node(Config, 0), await_exit(C), - ok = eventually(?_assertEqual([], util:all_connection_pids(1, Config)), 500, 4). - -connection_id_tracking_with_decommissioned_node(Config) -> - case rpc(Config, rabbit_mqtt_ff, track_client_id_in_ra, []) of - false -> - {skip, "This test requires client ID tracking in Ra"}; - true -> - Server = get_node_config(Config, 0, nodename), - C = connect(<<"simpleClient">>, Config, ?OPTS), - {ok, _, _} = emqtt:subscribe(C, <<"TopicA">>, qos0), - ok = emqtt:publish(C, <<"TopicA">>, <<"Payload">>), - ok = expect_publishes(C, <<"TopicA">>, [<<"Payload">>]), - - assert_connection_count(Config, 4, 1), - process_flag(trap_exit, true), - {ok, _} = rabbitmqctl(Config, 0, ["decommission_mqtt_node", Server]), - await_exit(C), - assert_connection_count(Config, 4, 0), - ok - end. + ok = eventually(?_assertEqual([], util:all_connection_pids(Config)), 500, 4). %% %% Helpers diff --git a/deps/rabbitmq_mqtt/test/command_SUITE.erl b/deps/rabbitmq_mqtt/test/command_SUITE.erl index 6548eb118531..528c4b0b1b97 100644 --- a/deps/rabbitmq_mqtt/test/command_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/command_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(command_SUITE). @@ -56,8 +56,7 @@ end_per_suite(Config) -> init_per_group(unit, Config) -> Config; init_per_group(Group, Config) -> - Config1 = rabbit_ct_helpers:set_config(Config, {mqtt_version, Group}), - util:maybe_skip_v5(Config1). + rabbit_ct_helpers:set_config(Config, {mqtt_version, Group}). end_per_group(_, Config) -> Config. 
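Returning to client_id_propagation in auth_SUITE above: the mock auth backend's setup helper now replies with {ok, Pid} instead of a bare ok, and the test keeps that pid so it can tell the helper to stop once the assertions are done. A compact sketch of that spawn/acknowledge/stop handshake; names and the timeout value are illustrative:

    -module(setup_handshake_sketch).
    -export([run/0]).

    run() ->
        Self = self(),
        %% The helper announces its own pid once its setup work is done,
        %% then waits to be told to stop.
        _ = spawn(fun() ->
                          %% ... perform setup work here ...
                          Self ! {ok, self()},
                          receive stop -> ok end
                  end),
        SetupProcess = receive
                           {ok, SP} -> SP
                       after 3000 ->
                               error(setup_timeout)
                       end,
        %% ... exercise the system under test ...
        SetupProcess ! stop,
        ok.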
@@ -86,6 +85,13 @@ run(Config) -> %% No connections [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)), + %% Open a WebMQTT connection, command won't list it + WebMqttConfig = [{websocket, true} | Config], + _C0 = connect(<<"simpleWebMqttClient">>, WebMqttConfig, [{ack_timeout, 1}]), + + [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)), + + %% Open a connection C1 = connect(<<"simpleClient">>, Config, [{ack_timeout, 1}]), timer:sleep(100), diff --git a/deps/rabbitmq_mqtt/test/config_SUITE.erl b/deps/rabbitmq_mqtt/test/config_SUITE.erl index f4faf7bc4d73..768a53c0e5df 100644 --- a/deps/rabbitmq_mqtt/test/config_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/config_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(config_SUITE). -compile([export_all, diff --git a/deps/rabbitmq_mqtt/test/config_schema_SUITE.erl b/deps/rabbitmq_mqtt/test/config_schema_SUITE.erl index c3f3d867c48b..b8349253f0e1 100644 --- a/deps/rabbitmq_mqtt/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets index df1a3f3a57f5..7feb71a6b92e 100644 --- a/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets +++ b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets @@ -1,7 +1,5 @@ [{defaults, "listeners.tcp.default = 5672 - mqtt.default_user = guest - mqtt.default_pass = guest mqtt.allow_anonymous = true mqtt.vhost = / mqtt.exchange = amq.topic @@ -20,9 +18,7 @@ mqtt.topic_alias_maximum = 16", [{rabbit,[{tcp_listeners,[5672]}]}, {rabbitmq_mqtt, - [{default_user,<<"guest">>}, - {default_pass,<<"guest">>}, - {allow_anonymous,true}, + [{allow_anonymous,true}, {vhost,<<"/">>}, {exchange,<<"amq.topic">>}, {max_session_expiry_interval_seconds,86400}, @@ -101,8 +97,6 @@ [rabbitmq_mqtt]}, {proxy_protocol, "listeners.tcp.default = 5672 - mqtt.default_user = guest - mqtt.default_pass = guest mqtt.allow_anonymous = true mqtt.vhost = / mqtt.exchange = amq.topic @@ -111,9 +105,7 @@ mqtt.proxy_protocol = true", [{rabbit,[{tcp_listeners,[5672]}]}, {rabbitmq_mqtt, - [{default_user,<<"guest">>}, - {default_pass,<<"guest">>}, - {allow_anonymous,true}, + [{allow_anonymous,true}, {vhost,<<"/">>}, {exchange,<<"amq.topic">>}, {max_session_expiry_interval_seconds,infinity}, @@ -121,9 +113,7 @@ {proxy_protocol,true}]}], [rabbitmq_mqtt]}, {prefetch_retained_msg_store, - "mqtt.default_user = guest - mqtt.default_pass = guest - mqtt.allow_anonymous = true + "mqtt.allow_anonymous = true mqtt.vhost = / mqtt.exchange = amq.topic mqtt.max_session_expiry_interval_seconds = 1800 @@ -136,9 +126,7 @@ mqtt.listeners.ssl = none mqtt.listeners.tcp.default = 1883", [{rabbitmq_mqtt, - 
[{default_user,<<"guest">>}, - {default_pass,<<"guest">>}, - {allow_anonymous,true}, + [{allow_anonymous,true}, {vhost,<<"/">>}, {exchange,<<"amq.topic">>}, {max_session_expiry_interval_seconds,1800}, diff --git a/deps/rabbitmq_mqtt/test/event_recorder.erl b/deps/rabbitmq_mqtt/test/event_recorder.erl index cd495f9427a5..0aacfd789a4d 100644 --- a/deps/rabbitmq_mqtt/test/event_recorder.erl +++ b/deps/rabbitmq_mqtt/test/event_recorder.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(event_recorder). diff --git a/deps/rabbitmq_mqtt/test/ff_SUITE.erl b/deps/rabbitmq_mqtt/test/ff_SUITE.erl deleted file mode 100644 index 35b641f3c2a0..000000000000 --- a/deps/rabbitmq_mqtt/test/ff_SUITE.erl +++ /dev/null @@ -1,169 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. - --module(ff_SUITE). - --compile([export_all, nowarn_export_all]). - --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). - --import(rabbit_ct_broker_helpers, [rpc/5]). --import(rabbit_ct_helpers, [eventually/1]). --import(util, [expect_publishes/3, - get_global_counters/4, - connect/2, - connect/4]). - --define(PROTO_VER, v4). - -all() -> - [ - {group, cluster_size_3} - ]. - -groups() -> - [ - {cluster_size_3, [], - [rabbit_mqtt_qos0_queue, - %% delete_ra_cluster_mqtt_node must run before mqtt_v5 - %% because the latter depends on (i.e. auto-enables) the former. - delete_ra_cluster_mqtt_node, - mqtt_v5]} - ]. - -suite() -> - [ - {timetrap, {minutes, 10}} - ]. - -init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config, []). - -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). - -init_per_group(Group = cluster_size_3, Config0) -> - Config1 = rabbit_ct_helpers:set_config(Config0, [{rmq_nodes_count, 3}, - {rmq_nodename_suffix, Group}]), - Config = rabbit_ct_helpers:merge_app_env( - Config1, {rabbit, [{forced_feature_flags_on_init, []}]}), - rabbit_ct_helpers:run_steps(Config, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). - -end_per_group(_Group, Config) -> - rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()). - -init_per_testcase(TestCase, Config) -> - case rabbit_ct_broker_helpers:is_feature_flag_supported(Config, TestCase) of - true -> - ?assertNot(rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, TestCase)), - Config; - false -> - {skip, io_lib:format("feature flag ~s is unsupported", [TestCase])} - end. - -end_per_testcase(_TestCase, Config) -> - Config. 
- -delete_ra_cluster_mqtt_node(Config) -> - FeatureFlag = ?FUNCTION_NAME, - C = connect(<<"my-client">>, Config, 1, []), - timer:sleep(500), - %% old client ID tracking works - ?assertEqual(1, length(util:all_connection_pids(Config))), - %% Ra processes are alive - ?assert(lists:all(fun erlang:is_pid/1, - rabbit_ct_broker_helpers:rpc_all(Config, erlang, whereis, [mqtt_node]))), - - ?assertEqual(ok, - rabbit_ct_broker_helpers:enable_feature_flag(Config, FeatureFlag)), - - %% Ra processes should be gone - eventually( - ?_assert(lists:all(fun(Pid) -> Pid =:= undefined end, - rabbit_ct_broker_helpers:rpc_all(Config, erlang, whereis, [mqtt_node])))), - %% new client ID tracking works - ?assertEqual(1, length(util:all_connection_pids(Config))), - ok = emqtt:disconnect(C), - eventually(?_assertEqual(0, length(util:all_connection_pids(Config)))). - -rabbit_mqtt_qos0_queue(Config) -> - FeatureFlag = ?FUNCTION_NAME, - Msg = Topic = ClientId = atom_to_binary(?FUNCTION_NAME), - - C1 = connect(ClientId, Config), - {ok, _, [0]} = emqtt:subscribe(C1, Topic, qos0), - ok = emqtt:publish(C1, Topic, Msg, qos0), - ok = expect_publishes(C1, Topic, [Msg]), - ?assertEqual(1, - length(rpc(Config, 0, rabbit_amqqueue, list_by_type, [rabbit_classic_queue]))), - - ?assertEqual(ok, - rabbit_ct_broker_helpers:enable_feature_flag(Config, FeatureFlag)), - - %% Queue type does not chanage for existing connection. - ?assertEqual(1, - length(rpc(Config, 0, rabbit_amqqueue, list_by_type, [rabbit_classic_queue]))), - ok = emqtt:publish(C1, Topic, Msg, qos0), - ok = expect_publishes(C1, Topic, [Msg]), - ?assertMatch(#{messages_delivered_total := 2, - messages_delivered_consume_auto_ack_total := 2}, - get_global_counters(Config, ?PROTO_VER, 0, [{queue_type, rabbit_classic_queue}])), - - %% Reconnecting with the same client ID will terminate the old connection. - true = unlink(C1), - C2 = connect(ClientId, Config), - {ok, _, [0]} = emqtt:subscribe(C2, Topic, qos0), - %% This time, we get the new queue type. - eventually( - ?_assertEqual(0, - length(rpc(Config, 0, rabbit_amqqueue, list_by_type, [rabbit_classic_queue])))), - ?assertEqual(1, - length(rpc(Config, 0, rabbit_amqqueue, list_by_type, [FeatureFlag]))), - ok = emqtt:publish(C2, Topic, Msg, qos0), - ok = expect_publishes(C2, Topic, [Msg]), - ?assertMatch(#{messages_delivered_total := 1, - messages_delivered_consume_auto_ack_total := 1}, - get_global_counters(Config, ?PROTO_VER, 0, [{queue_type, FeatureFlag}])), - ok = emqtt:disconnect(C2). - -mqtt_v5(Config) -> - FeatureFlag = ?FUNCTION_NAME, - - %% MQTT 5.0 is not yet supported. - {C1, Connect} = util:start_client(?FUNCTION_NAME, Config, 0, [{proto_ver, v5}]), - unlink(C1), - ?assertEqual({error, {unsupported_protocol_version, #{}}}, Connect(C1)), - - %% Send message from node 0. - %% Message is stored in old AMQP 0.9.1 format on node 1. - Topic = <<"my/topic">>, - C2 = connect(<<"sub-v4">>, Config, 1, util:non_clean_sess_opts()), - {ok, _, [1]} = emqtt:subscribe(C2, Topic, qos1), - ok = emqtt:disconnect(C2), - C3 = connect(<<"pub-v4">>, Config), - {ok, _} = emqtt:publish(C3, Topic, <<"msg">>, qos1), - ok = emqtt:disconnect(C3), - - DependantFF = message_containers, - ?assertNot(rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, DependantFF)), - ?assertEqual(ok, rabbit_ct_broker_helpers:enable_feature_flag(Config, FeatureFlag)), - ?assert(rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, DependantFF)), - - %% Translate from old AMQP 0.9.1 message format consuming from node 2. 
- C4 = connect(<<"sub-v4">>, Config, 2, [{clean_start, false}]), - ok = expect_publishes(C4, Topic, [<<"msg">>]), - ok = emqtt:disconnect(C4), - - %% MQTT 5.0 is now supported. - {C5, Connect} = util:start_client(?FUNCTION_NAME, Config, 0, [{proto_ver, v5}]), - ?assertMatch({ok, _}, Connect(C5)), - ok = emqtt:disconnect(C5). diff --git a/deps/rabbitmq_mqtt/test/java_SUITE.erl b/deps/rabbitmq_mqtt/test/java_SUITE.erl index 56a4028e2320..eb4f6ac48622 100644 --- a/deps/rabbitmq_mqtt/test/java_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/java_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(java_SUITE). @@ -54,18 +54,17 @@ end_per_suite(Config) -> init_per_group(Group, Config0) -> Suffix = rabbit_ct_helpers:testcase_absname(Config0, "", "-"), - Config1 = rabbit_ct_helpers:set_config(Config0, [ - {rmq_nodename_suffix, Suffix}, - {rmq_certspwd, "bunnychow"}, - {rmq_nodes_clustered, true}, - {rmq_nodes_count, 3}, - {mqtt_version, Group} - ]), - Config = rabbit_ct_helpers:run_setup_steps(Config1, - [ fun merge_app_env/1 ] ++ + Config = rabbit_ct_helpers:set_config( + Config0, [{rmq_nodename_suffix, Suffix}, + {rmq_certspwd, "bunnychow"}, + {rmq_nodes_clustered, true}, + {rmq_nodes_count, 3}, + {mqtt_version, Group}]), + rabbit_ct_helpers:run_setup_steps( + Config, + [fun merge_app_env/1] ++ rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - util:maybe_skip_v5(Config). + rabbit_ct_client_helpers:setup_steps()). end_per_group(_, Config) -> rabbit_ct_helpers:run_teardown_steps(Config, diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java deleted file mode 100755 index 2e394d5b347b..000000000000 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java +++ /dev/null @@ -1,110 +0,0 @@ -/* -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -*/ - -import java.net.*; -import java.io.*; -import java.nio.channels.*; -import java.util.Properties; - -public class MavenWrapperDownloader { - - /** - * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. - */ - private static final String DEFAULT_DOWNLOAD_URL = - "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"; - - /** - * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to - * use instead of the default one. 
- */ - private static final String MAVEN_WRAPPER_PROPERTIES_PATH = - ".mvn/wrapper/maven-wrapper.properties"; - - /** - * Path where the maven-wrapper.jar will be saved to. - */ - private static final String MAVEN_WRAPPER_JAR_PATH = - ".mvn/wrapper/maven-wrapper.jar"; - - /** - * Name of the property which should be used to override the default download url for the wrapper. - */ - private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; - - public static void main(String args[]) { - System.out.println("- Downloader started"); - File baseDirectory = new File(args[0]); - System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); - - // If the maven-wrapper.properties exists, read it and check if it contains a custom - // wrapperUrl parameter. - File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); - String url = DEFAULT_DOWNLOAD_URL; - if(mavenWrapperPropertyFile.exists()) { - FileInputStream mavenWrapperPropertyFileInputStream = null; - try { - mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); - Properties mavenWrapperProperties = new Properties(); - mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); - url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); - } catch (IOException e) { - System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); - } finally { - try { - if(mavenWrapperPropertyFileInputStream != null) { - mavenWrapperPropertyFileInputStream.close(); - } - } catch (IOException e) { - // Ignore ... - } - } - } - System.out.println("- Downloading from: : " + url); - - File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); - if(!outputFile.getParentFile().exists()) { - if(!outputFile.getParentFile().mkdirs()) { - System.out.println( - "- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath() + "'"); - } - } - System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); - try { - downloadFileFromURL(url, outputFile); - System.out.println("Done"); - System.exit(0); - } catch (Throwable e) { - System.out.println("- Error downloading"); - e.printStackTrace(); - System.exit(1); - } - } - - private static void downloadFileFromURL(String urlString, File destination) throws Exception { - URL website = new URL(urlString); - ReadableByteChannel rbc; - rbc = Channels.newChannel(website.openStream()); - FileOutputStream fos = new FileOutputStream(destination); - fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); - fos.close(); - rbc.close(); - } - -} diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.jar old mode 100755 new mode 100644 index 01e67997377a..cb28b0e37c7d Binary files a/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.jar and b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.jar differ diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.properties index 00d32aab1d44..10cbcb5e307b 100755 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.properties +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.properties @@ -1 +1,18 @@ -distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip \ No newline at end of file +# 
Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.8/apache-maven-3.9.8-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw b/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw index 88203bbfeccf..8d937f4c14f1 100755 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw @@ -8,7 +8,7 @@ # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an @@ -19,7 +19,7 @@ # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- -# Maven2 Start Up Batch script +# Apache Maven Wrapper startup batch script, version 3.2.0 # # Required ENV vars: # ------------------ @@ -27,7 +27,6 @@ # # Optional ENV vars # ----------------- -# M2_HOME - location of maven2's installed home dir # MAVEN_OPTS - parameters passed to the Java VM when running Maven # e.g. to debug Maven itself, use # set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 @@ -36,6 +35,10 @@ if [ -z "$MAVEN_SKIP_RC" ] ; then + if [ -f /usr/local/etc/mavenrc ] ; then + . /usr/local/etc/mavenrc + fi + if [ -f /etc/mavenrc ] ; then . /etc/mavenrc fi @@ -50,7 +53,7 @@ fi cygwin=false; darwin=false; mingw=false -case "`uname`" in +case "$(uname)" in CYGWIN*) cygwin=true ;; MINGW*) mingw=true;; Darwin*) darwin=true @@ -58,9 +61,9 @@ case "`uname`" in # See https://developer.apple.com/library/mac/qa/qa1170/_index.html if [ -z "$JAVA_HOME" ]; then if [ -x "/usr/libexec/java_home" ]; then - export JAVA_HOME="`/usr/libexec/java_home`" + JAVA_HOME="$(/usr/libexec/java_home)"; export JAVA_HOME else - export JAVA_HOME="/Library/Java/Home" + JAVA_HOME="/Library/Java/Home"; export JAVA_HOME fi fi ;; @@ -68,68 +71,38 @@ esac if [ -z "$JAVA_HOME" ] ; then if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=`java-config --jre-home` + JAVA_HOME=$(java-config --jre-home) fi fi -if [ -z "$M2_HOME" ] ; then - ## resolve links - $0 may be a link to maven's home - PRG="$0" - - # need this for relative symlinks - while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG="`dirname "$PRG"`/$link" - fi - done - - saveddir=`pwd` - - M2_HOME=`dirname "$PRG"`/.. 
- - # make it fully qualified - M2_HOME=`cd "$M2_HOME" && pwd` - - cd "$saveddir" - # echo Using m2 at $M2_HOME -fi - # For Cygwin, ensure paths are in UNIX format before anything is touched if $cygwin ; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --unix "$M2_HOME"` [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + JAVA_HOME=$(cygpath --unix "$JAVA_HOME") [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --unix "$CLASSPATH"` + CLASSPATH=$(cygpath --path --unix "$CLASSPATH") fi # For Mingw, ensure paths are in UNIX format before anything is touched if $mingw ; then - [ -n "$M2_HOME" ] && - M2_HOME="`(cd "$M2_HOME"; pwd)`" - [ -n "$JAVA_HOME" ] && - JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && [ -d "$JAVA_HOME" ] && + JAVA_HOME="$(cd "$JAVA_HOME" || (echo "cannot cd into $JAVA_HOME."; exit 1); pwd)" fi if [ -z "$JAVA_HOME" ]; then - javaExecutable="`which javac`" - if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + javaExecutable="$(which javac)" + if [ -n "$javaExecutable" ] && ! [ "$(expr "\"$javaExecutable\"" : '\([^ ]*\)')" = "no" ]; then # readlink(1) is not available as standard on Solaris 10. - readLink=`which readlink` - if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + readLink=$(which readlink) + if [ ! "$(expr "$readLink" : '\([^ ]*\)')" = "no" ]; then if $darwin ; then - javaHome="`dirname \"$javaExecutable\"`" - javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + javaHome="$(dirname "\"$javaExecutable\"")" + javaExecutable="$(cd "\"$javaHome\"" && pwd -P)/javac" else - javaExecutable="`readlink -f \"$javaExecutable\"`" + javaExecutable="$(readlink -f "\"$javaExecutable\"")" fi - javaHome="`dirname \"$javaExecutable\"`" - javaHome=`expr "$javaHome" : '\(.*\)/bin'` + javaHome="$(dirname "\"$javaExecutable\"")" + javaHome=$(expr "$javaHome" : '\(.*\)/bin') JAVA_HOME="$javaHome" export JAVA_HOME fi @@ -145,7 +118,7 @@ if [ -z "$JAVACMD" ] ; then JAVACMD="$JAVA_HOME/bin/java" fi else - JAVACMD="`which java`" + JAVACMD="$(\unset -f command 2>/dev/null; \command -v java)" fi fi @@ -159,12 +132,9 @@ if [ -z "$JAVA_HOME" ] ; then echo "Warning: JAVA_HOME environment variable is not set." fi -CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher - # traverses directory structure from process work directory to filesystem root # first directory with .mvn subdirectory is considered project base directory find_maven_basedir() { - if [ -z "$1" ] then echo "Path not specified to find_maven_basedir" @@ -180,76 +150,99 @@ find_maven_basedir() { fi # workaround for JBEAP-8937 (on Solaris 10/Sparc) if [ -d "${wdir}" ]; then - wdir=`cd "$wdir/.."; pwd` + wdir=$(cd "$wdir/.." || exit 1; pwd) fi # end of workaround done - echo "${basedir}" + printf '%s' "$(cd "$basedir" || exit 1; pwd)" } # concatenates all lines of a file concat_lines() { if [ -f "$1" ]; then - echo "$(tr -s '\n' ' ' < "$1")" + # Remove \r in case we run on Windows within Git Bash + # and check out the repository with auto CRLF management + # enabled. Otherwise, we may read lines that are delimited with + # \r\n and produce $'-Xarg\r' rather than -Xarg due to word + # splitting rules. 
+ tr -s '\r\n' ' ' < "$1" fi } -BASE_DIR=`find_maven_basedir "$(pwd)"` +log() { + if [ "$MVNW_VERBOSE" = true ]; then + printf '%s\n' "$1" + fi +} + +BASE_DIR=$(find_maven_basedir "$(dirname "$0")") if [ -z "$BASE_DIR" ]; then exit 1; fi +MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}; export MAVEN_PROJECTBASEDIR +log "$MAVEN_PROJECTBASEDIR" + ########################################################################################## # Extension to allow automatically downloading the maven-wrapper.jar from Maven-central # This allows using the maven wrapper in projects that prohibit checking in binary data. ########################################################################################## -if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found .mvn/wrapper/maven-wrapper.jar" - fi +wrapperJarPath="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" +if [ -r "$wrapperJarPath" ]; then + log "Found $wrapperJarPath" else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + log "Couldn't find $wrapperJarPath, downloading it ..." + + if [ -n "$MVNW_REPOURL" ]; then + wrapperUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + else + wrapperUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" fi - jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" - while IFS="=" read key value; do - case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + while IFS="=" read -r key value; do + # Remove '\r' from value to allow usage on windows as IFS does not consider '\r' as a separator ( considers space, tab, new line ('\n'), and custom '=' ) + safeValue=$(echo "$value" | tr -d '\r') + case "$key" in (wrapperUrl) wrapperUrl="$safeValue"; break ;; esac - done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" - if [ "$MVNW_VERBOSE" = true ]; then - echo "Downloading from: $jarUrl" + done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" + log "Downloading from: $wrapperUrl" + + if $cygwin; then + wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") fi - wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" if command -v wget > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found wget ... using wget" + log "Found wget ... using wget" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--quiet" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget $QUIET "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + else + wget $QUIET --http-user="$MVNW_USERNAME" --http-password="$MVNW_PASSWORD" "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" fi - wget "$jarUrl" -O "$wrapperJarPath" elif command -v curl > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found curl ... using curl" + log "Found curl ... 
using curl" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--silent" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl $QUIET -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" + else + curl $QUIET --user "$MVNW_USERNAME:$MVNW_PASSWORD" -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" fi - curl -o "$wrapperJarPath" "$jarUrl" else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Falling back to using Java to download" + log "Falling back to using Java to download" + javaSource="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.java" + javaClass="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.class" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaSource=$(cygpath --path --windows "$javaSource") + javaClass=$(cygpath --path --windows "$javaClass") fi - javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" - if [ -e "$javaClass" ]; then - if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Compiling MavenWrapperDownloader.java ..." - fi - # Compiling the Java class - ("$JAVA_HOME/bin/javac" "$javaClass") + if [ -e "$javaSource" ]; then + if [ ! -e "$javaClass" ]; then + log " - Compiling MavenWrapperDownloader.java ..." + ("$JAVA_HOME/bin/javac" "$javaSource") fi - if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - # Running the downloader - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Running MavenWrapperDownloader.java ..." - fi - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + if [ -e "$javaClass" ]; then + log " - Running MavenWrapperDownloader.java ..." + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$wrapperUrl" "$wrapperJarPath") || rm -f "$wrapperJarPath" fi fi fi @@ -258,28 +251,58 @@ fi # End of extension ########################################################################################## -export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} -if [ "$MVNW_VERBOSE" = true ]; then - echo $MAVEN_PROJECTBASEDIR +# If specified, validate the SHA-256 sum of the Maven wrapper jar file +wrapperSha256Sum="" +while IFS="=" read -r key value; do + case "$key" in (wrapperSha256Sum) wrapperSha256Sum=$value; break ;; + esac +done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" +if [ -n "$wrapperSha256Sum" ]; then + wrapperSha256Result=false + if command -v sha256sum > /dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | sha256sum -c > /dev/null 2>&1; then + wrapperSha256Result=true + fi + elif command -v shasum > /dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | shasum -a 256 -c > /dev/null 2>&1; then + wrapperSha256Result=true + fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." + echo "Please install either command, or disable validation by removing 'wrapperSha256Sum' from your maven-wrapper.properties." + exit 1 + fi + if [ $wrapperSha256Result = false ]; then + echo "Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised." >&2 + echo "Investigate or delete $wrapperJarPath to attempt a clean download." >&2 + echo "If you updated your Maven version, you need to update the specified wrapperSha256Sum property." 
>&2 + exit 1 + fi fi + MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" # For Cygwin, switch paths to Windows format before running java if $cygwin; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --path --windows "$M2_HOME"` [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + CLASSPATH=$(cygpath --path --windows "$CLASSPATH") [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` + MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") fi +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $*" +export MAVEN_CMD_LINE_ARGS + WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain +# shellcheck disable=SC2086 # safe args exec "$JAVACMD" \ $MAVEN_OPTS \ + $MAVEN_DEBUG_OPTS \ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw.cmd b/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw.cmd old mode 100755 new mode 100644 index a5284c79395d..f80fbad3e766 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw.cmd +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw.cmd @@ -7,7 +7,7 @@ @REM "License"); you may not use this file except in compliance @REM with the License. You may obtain a copy of the License at @REM -@REM https://www.apache.org/licenses/LICENSE-2.0 +@REM http://www.apache.org/licenses/LICENSE-2.0 @REM @REM Unless required by applicable law or agreed to in writing, @REM software distributed under the License is distributed on an @@ -18,15 +18,14 @@ @REM ---------------------------------------------------------------------------- @REM ---------------------------------------------------------------------------- -@REM Maven2 Start Up Batch script +@REM Apache Maven Wrapper startup batch script, version 3.2.0 @REM @REM Required ENV vars: @REM JAVA_HOME - location of a JDK home dir @REM @REM Optional ENV vars -@REM M2_HOME - location of maven2's installed home dir @REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending @REM MAVEN_OPTS - parameters passed to the Java VM when running Maven @REM e.g. 
to debug Maven itself, use @REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 @@ -37,7 +36,7 @@ @echo off @REM set title of command window title %0 -@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' @if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% @REM set %HOME% to equivalent of $HOME @@ -46,8 +45,8 @@ if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") @REM Execute a user defined script before this one if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre @REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" -if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* +if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* :skipRcPre @setlocal @@ -120,24 +119,69 @@ SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain -set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" -FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO ( - IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +set WRAPPER_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET WRAPPER_URL=%%B ) @REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central @REM This allows using the maven wrapper in projects that prohibit checking in binary data. if exist %WRAPPER_JAR% ( - echo Found %WRAPPER_JAR% + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) ) else ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... - echo Downloading from: %DOWNLOAD_URL% - powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')" - echo Finished downloading %WRAPPER_JAR% + if not "%MVNW_REPOURL%" == "" ( + SET WRAPPER_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... 
+ echo Downloading from: %WRAPPER_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%WRAPPER_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) ) @REM End of extension -%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +@REM If specified, validate the SHA-256 sum of the Maven wrapper jar file +SET WRAPPER_SHA_256_SUM="" +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperSha256Sum" SET WRAPPER_SHA_256_SUM=%%B +) +IF NOT %WRAPPER_SHA_256_SUM%=="" ( + powershell -Command "&{"^ + "$hash = (Get-FileHash \"%WRAPPER_JAR%\" -Algorithm SHA256).Hash.ToLower();"^ + "If('%WRAPPER_SHA_256_SUM%' -ne $hash){"^ + " Write-Output 'Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised.';"^ + " Write-Output 'Investigate or delete %WRAPPER_JAR% to attempt a clean download.';"^ + " Write-Output 'If you updated your Maven version, you need to update the specified wrapperSha256Sum property.';"^ + " exit 1;"^ + "}"^ + "}" + if ERRORLEVEL 1 goto error +) + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. +set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% ^ + %JVM_CONFIG_MAVEN_PROPS% ^ + %MAVEN_OPTS% ^ + %MAVEN_DEBUG_OPTS% ^ + -classpath %WRAPPER_JAR% ^ + "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ + %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* if ERRORLEVEL 1 goto error goto end @@ -147,15 +191,15 @@ set ERROR_CODE=1 :end @endlocal & set ERROR_CODE=%ERROR_CODE% -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost @REM check for post script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" -if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" +if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" :skipRcPost @REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' -if "%MAVEN_BATCH_PAUSE%" == "on" pause +if "%MAVEN_BATCH_PAUSE%"=="on" pause -if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% +if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% -exit /B %ERROR_CODE% +cmd /C exit /B %ERROR_CODE% diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml b/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml index 32f4c250770b..30fd99353226 100644 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml @@ -15,15 +15,15 @@ [1.2.5,) [1.2.5,) - 5.18.0 - 5.10.0 - 3.24.2 - 1.2.12 - 3.1.2 + 5.21.0 + 5.11.0 + 3.26.3 + 1.2.13 + 3.4.0 2.1.1 2.4.21 - 3.11.0 - 2.39.0 + 3.12.1 + 2.43.0 1.17.0 ${project.build.directory}/ca.keystore bunnychow @@ -180,7 +180,7 @@ // License, v. 2.0. 
If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. // - // Copyright (c) $YEAR VMware, Inc. or its affiliates. All rights reserved. + // Copyright (c) 2007-2024, Inc. or its affiliates. All rights reserved. // diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttTest.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttTest.java index 3d50a400295d..d67aeade2f2a 100644 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttTest.java +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttTest.java @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. // -// Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.mqtt.test; diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttV5Test.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttV5Test.java index d785638ec096..1b611e6a432e 100644 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttV5Test.java +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttV5Test.java @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. // -// Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.mqtt.test; diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java index 608c3fd3c36e..6d4fc6a25966 100644 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. // -// Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.mqtt.test.tls; diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttV5SSLTest.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttV5SSLTest.java index abd01634280b..68b7c484878f 100644 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttV5SSLTest.java +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttV5SSLTest.java @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. // -// Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+// Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.mqtt.test.tls; diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MutualAuth.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MutualAuth.java index 3d83df5efe13..60cf08e3f2b6 100644 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MutualAuth.java +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MutualAuth.java @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. // -// Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.mqtt.test.tls; diff --git a/deps/rabbitmq_mqtt/test/mc_mqtt_SUITE.erl b/deps/rabbitmq_mqtt/test/mc_mqtt_SUITE.erl index 7720974b5394..9a0d9de6447a 100644 --- a/deps/rabbitmq_mqtt/test/mc_mqtt_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/mc_mqtt_SUITE.erl @@ -4,38 +4,37 @@ nowarn_export_all]). -include_lib("rabbitmq_mqtt/include/rabbit_mqtt_packet.hrl"). +-include_lib("rabbit/include/mc.hrl"). +-include_lib("rabbit_common/include/rabbit_framing.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("amqp10_common/include/amqp10_framing.hrl"). -include_lib("eunit/include/eunit.hrl"). all() -> [ - {group, lossless}, - {group, lossy} + {group, tests} ]. groups() -> [ - {lossless, [shuffle], + {tests, [shuffle], [roundtrip_amqp, - roundtrip_amqp_payload_format_indicator, roundtrip_amqp_response_topic, roundtrip_amqpl, roundtrip_amqpl_correlation, - amqp_to_mqtt_amqp_value_section_binary, - amqp_to_mqtt_amqp_value_section_list, - amqp_to_mqtt_amqp_value_section_null, - amqp_to_mqtt_amqp_value_section_int, - amqp_to_mqtt_amqp_value_section_boolean - ] - }, - {lossy, [shuffle], - [roundtrip_amqp_user_property, + amqp_to_mqtt_body_sections, + roundtrip_amqp_user_property, roundtrip_amqpl_user_property, roundtrip_amqp_content_type, amqp_to_mqtt_reply_to, - amqp_to_mqtt_footer - ] - } + amqp_to_mqtt_footer, + mqtt_amqpl, + mqtt_amqpl_alt, + mqtt_amqp, + mqtt_amqp_alt, + amqp_mqtt, + is_persistent + ]} ]. roundtrip_amqp(_Config) -> @@ -51,7 +50,7 @@ roundtrip_amqp(_Config) -> {<<"key-2">>, <<"val-2">>}, {<<"key-3">>, <<"val-3">>}, {<<"key-1">>, <<"val-1">>}]}}, - Anns = #{routing_keys => [rabbit_mqtt_util:mqtt_to_amqp(Msg#mqtt_msg.topic)]}, + Anns = #{?ANN_ROUTING_KEYS => [rabbit_mqtt_util:mqtt_to_amqp(Msg#mqtt_msg.topic)]}, Mc0 = mc:init(mc_mqtt, Msg, Anns), BytesTopic = 9, @@ -63,8 +62,9 @@ roundtrip_amqp(_Config) -> ExpectedSize = {MetaDataSize, PayloadSize}, ?assertEqual(ExpectedSize, mc:size(Mc0)), - ?assertEqual(Msg, mc_mqtt:convert_to(mc_mqtt, Msg)), - ?assertEqual(not_implemented, mc_mqtt:convert_to(mc_stomp, Msg)), + Env = #{}, + ?assertEqual(Msg, mc_mqtt:convert_to(mc_mqtt, Msg, Env)), + ?assertEqual(not_implemented, mc_mqtt:convert_to(mc_stomp, Msg, Env)), ?assertEqual(Mc0, mc:convert(mc_mqtt, Mc0)), %% roundtrip @@ -103,30 +103,18 @@ roundtrip_amqp(_Config) -> %% We expect order to be maintained. ?assertMatch(#{'User-Property' := ExpectedUserProperty}, Props). 
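%% ---------------------------------------------------------------------------
%% Editor's illustrative sketch, not part of the change above: it condenses the
%% conversion round trip that roundtrip_amqp and the other tests in this suite
%% exercise, using only calls that appear in the suite itself (mc:init/3,
%% mc:convert/2, mc:protocol_state/1, rabbit_mqtt_util:mqtt_to_amqp/1). The
%% topic and payload values are made up for the example.
%% ---------------------------------------------------------------------------
example_mqtt_amqp_roundtrip() ->
    Msg = #mqtt_msg{qos = 1, topic = <<"my/topic">>, payload = <<"hello">>},
    Anns = #{?ANN_ROUTING_KEYS => [rabbit_mqtt_util:mqtt_to_amqp(<<"my/topic">>)]},
    Mc0 = mc:init(mc_mqtt, Msg, Anns),
    McAmqp = mc:convert(mc_amqp, Mc0),    %% MQTT -> AMQP 1.0
    McBack = mc:convert(mc_mqtt, McAmqp), %% ...and back to MQTT
    %% protocol_state/1 yields the protocol-specific message again
    #mqtt_msg{} = mc:protocol_state(McBack).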
-%% The indicator that the Payload is UTF-8 encoded should not be lost when translating -%% from MQTT 5.0 to AMQP 1.0 or vice versa. -roundtrip_amqp_payload_format_indicator(_Config) -> - Msg0 = mqtt_msg(), - Msg = Msg0#mqtt_msg{payload = <<"🐇"/utf8>>, - props = #{'Payload-Format-Indicator' => 1}}, - #mqtt_msg{payload = Payload, - props = Props} = roundtrip(mc_amqp, Msg), - ?assertEqual(unicode:characters_to_binary("🐇"), - iolist_to_binary(Payload)), - ?assertMatch(#{'Payload-Format-Indicator' := 1}, Props). - roundtrip_amqp_response_topic(_Config) -> Topic = <<"/rabbit/🐇"/utf8>>, Msg0 = mqtt_msg(), - Key = mqtt_exchange, + Key = mqtt_x, MqttExchanges = [<<"amq.topic">>, <<"some-other-topic-exchange">>], [begin - ok = persistent_term:put(Key, X), + Env = #{Key => X}, Msg = Msg0#mqtt_msg{props = #{'Response-Topic' => Topic}}, ?assertMatch(#mqtt_msg{props = #{'Response-Topic' := Topic}}, - roundtrip(mc_amqp, Msg)), - true = persistent_term:erase(Key) + roundtrip(mc_amqp, Msg, Env)), + ok end || X <- MqttExchanges]. roundtrip_amqpl(_Config) -> @@ -143,7 +131,7 @@ roundtrip_amqpl(_Config) -> {<<"key-2">>, <<"val-2">>}, {<<"key-3">>, <<"val-3">>}, {<<"key-1">>, <<"val-1">>}]}}, - Anns = #{routing_keys => [rabbit_mqtt_util:mqtt_to_amqp(Msg#mqtt_msg.topic)]}, + Anns = #{?ANN_ROUTING_KEYS => [rabbit_mqtt_util:mqtt_to_amqp(Msg#mqtt_msg.topic)]}, Mc0 = mc:init(mc_mqtt, Msg, Anns), Mc1 = mc:convert(mc_amqpl, Mc0), Mc = mc:convert(mc_mqtt, Mc1), @@ -177,49 +165,28 @@ roundtrip_amqpl_correlation(_Config) -> ?assertMatch(#mqtt_msg{props = #{'Correlation-Data' := Correlation}}, roundtrip(mc_amqpl, Msg)). -%% Binaries should be sent unmodified. -amqp_to_mqtt_amqp_value_section_binary(_Config) -> - Val = amqp_value({binary, <<0, 255>>}), - #mqtt_msg{props = Props, - payload = Payload} = amqp_to_mqtt([Val]), - ?assertEqual(<<0, 255>>, iolist_to_binary(Payload)), - ?assertEqual(#{}, Props). - -%% Lists cannot be converted to a text representation. -%% They should be encoded using the AMQP 1.0 type system. -amqp_to_mqtt_amqp_value_section_list(_Config) -> - Val = amqp_value({list, [{uint, 3}]}), - #mqtt_msg{props = Props, - payload = Payload} = amqp_to_mqtt([Val]), - ?assertEqual(#{'Content-Type' => <<"message/vnd.rabbitmq.amqp">>}, Props), - ?assert(iolist_size(Payload) > 0). - -amqp_to_mqtt_amqp_value_section_null(_Config) -> - Val = amqp_value(null), +amqp_to_mqtt_body_sections(_Config) -> + %% An amqp-value section should get AMQP encoded. + Body1 = [#'v1_0.amqp_value'{content = {list, [{uint, 3}]}}], + #mqtt_msg{props = #{'Content-Type' := <<"message/vnd.rabbitmq.amqp">>}, + payload = Payload1} = amqp_to_mqtt(Body1), + ?assertEqual(Body1, amqp10_framing:decode_bin(iolist_to_binary(Payload1))), + + %% amqp-sequence sections should get AMQP encoded. + Body2 = [#'v1_0.amqp_sequence'{content = [true, false]}, + #'v1_0.amqp_sequence'{content = [{binary, <<0, 255>>}]}], + #mqtt_msg{props = #{'Content-Type' := <<"message/vnd.rabbitmq.amqp">>}, + payload = Payload2} = amqp_to_mqtt(Body2), + ?assertEqual(Body2, amqp10_framing:decode_bin(iolist_to_binary(Payload2))), + + %% Binary data of multiple data sections should get concatenated. + Body3 = [#'v1_0.data'{content = <<0>>}, + #'v1_0.data'{content = <<11, 10>>}, + #'v1_0.data'{content = <<9>>}], #mqtt_msg{props = Props, - payload = Payload} = amqp_to_mqtt([Val]), - ?assertEqual(#{'Payload-Format-Indicator' => 1}, Props), - ?assertEqual(0, iolist_size(Payload)). 
- -amqp_to_mqtt_amqp_value_section_int(_Config) -> - Val = amqp_value({int, -3}), - #mqtt_msg{props = Props, - payload = Payload} = amqp_to_mqtt([Val]), - ?assertEqual(#{'Payload-Format-Indicator' => 1}, Props), - ?assertEqual(<<"-3">>, iolist_to_binary(Payload)). - -amqp_to_mqtt_amqp_value_section_boolean(_Config) -> - Val1 = amqp_value(true), - #mqtt_msg{props = Props1, - payload = Payload1} = amqp_to_mqtt([Val1]), - ?assertEqual(#{'Payload-Format-Indicator' => 1}, Props1), - ?assertEqual(<<"true">>, iolist_to_binary(Payload1)), - - Val2 = amqp_value({boolean, false}), - #mqtt_msg{props = Props2, - payload = Payload2} = amqp_to_mqtt([Val2]), - ?assertEqual(#{'Payload-Format-Indicator' => 1}, Props2), - ?assertEqual(<<"false">>, iolist_to_binary(Payload2)). + payload = Payload3} = amqp_to_mqtt(Body3), + ?assertEqual(0, maps:size(Props)), + ?assertEqual(<<0, 11, 10, 9>>, iolist_to_binary(Payload3)). %% When converting from MQTT 5.0 to AMQP 1.0, we expect to lose some User Property. roundtrip_amqp_user_property(_Config) -> @@ -248,7 +215,7 @@ roundtrip_amqpl_user_property(_Config) -> Msg = Msg0#mqtt_msg{ props = #{'User-Property' => [{<<"key-2">>, <<"val-2">>}, {<<"key-1">>, <<"val-1">>}, - {binary:copy(<<"k">>, 129), <<"val-2">>}, + {binary:copy(<<"k">>, 256), <<"val-2">>}, {<<"key-1">>, <<"val-1">>} ]}}, ?assertMatch(#mqtt_msg{props = #{'User-Property' := [{<<"key-1">>, <<"val-1">>}, @@ -267,45 +234,308 @@ roundtrip_amqp_content_type(_Config) -> amqp_to_mqtt_reply_to(_Config) -> Val = amqp_value({utf8, <<"hey">>}), - Key = mqtt_exchange, - ok = persistent_term:put(Key, <<"mqtt-topic-exchange">>), + Key = mqtt_x, + Env = #{Key => <<"mqtt-topic-exchange">>}, - AmqpProps1 = #'v1_0.properties'{reply_to = {utf8, <<"/exchange/mqtt-topic-exchange/my.routing.key">>}}, - #mqtt_msg{props = Props1} = amqp_to_mqtt([AmqpProps1, Val]), + AmqpProps1 = #'v1_0.properties'{reply_to = {utf8, <<"/exchanges/mqtt-topic-exchange/my.routing.key">>}}, + #mqtt_msg{props = Props1} = amqp_to_mqtt([AmqpProps1, Val], Env), ?assertEqual({ok, <<"my/routing/key">>}, maps:find('Response-Topic', Props1)), - AmqpProps2 = #'v1_0.properties'{reply_to = {utf8, <<"/exchange/NON-mqtt-topic-exchange/my.routing.key">>}}, + AmqpProps2 = #'v1_0.properties'{reply_to = {utf8, <<"/exchanges/NON-mqtt-topic-exchange/my.routing.key">>}}, #mqtt_msg{props = Props2} = amqp_to_mqtt([AmqpProps2, Val]), ?assertEqual(error, maps:find('Response-Topic', Props2)), - true = persistent_term:erase(Key). + RoutingKey = <<"my.sp%$@cial.routing.key">>, + %% The AMQP client must percent encode the AMQP reply_to address URI. We expect the + %% AMQP -> MQTT conversion to percent decode because an MQTT response topic is not percent encoded. + RoutingKeyQuoted = uri_string:quote(RoutingKey), + AmqpProps3 = #'v1_0.properties'{reply_to = {utf8, <<"/exchanges/mqtt-topic-exchange/", RoutingKeyQuoted/binary>>}}, + #mqtt_msg{props = Props3} = amqp_to_mqtt([AmqpProps3, Val], Env), + ?assertEqual({ok, <<"my/sp%$@cial/routing/key">>}, + maps:find('Response-Topic', Props3)), + + %% If the AMQP client did not percent encode the AMQP reply_to address URI as required, + %% then the reply_to should be ignored by the conversion. + AmqpProps4 = #'v1_0.properties'{reply_to = {utf8, <<"/exchanges/mqtt-topic-exchange/", RoutingKey/binary>>}}, + #mqtt_msg{props = Props4} = amqp_to_mqtt([AmqpProps4, Val], Env), + ?assertEqual(error, + maps:find('Response-Topic', Props4)). 
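%% ---------------------------------------------------------------------------
%% Editor's illustrative sketch, not part of the change above: the reply_to
%% assertions rely on the routing key inside the AMQP address URI being
%% percent-encoded by the publishing client and percent-decoded during the
%% AMQP -> MQTT conversion. This snippet only demonstrates that encoding round
%% trip with the OTP uri_string module; the routing key value is made up.
%% ---------------------------------------------------------------------------
example_percent_encoding() ->
    RoutingKey = <<"my.sp%$@cial.routing.key">>,
    %% quote/1 leaves unreserved characters (letters, digits, "-._~") alone and
    %% percent-encodes the rest, e.g. "%" -> "%25", "$" -> "%24", "@" -> "%40"
    Quoted = uri_string:quote(RoutingKey),
    %% decoding restores the original key, which is what the conversion relies on
    RoutingKey = uri_string:percent_decode(Quoted),
    Quoted.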
amqp_to_mqtt_footer(_Config) -> - Val = amqp_value({utf8, <<"hey">>}), - Footer = #'v1_0.footer'{content = [{symbol, <<"key">>}, {utf8, <<"value">>}]}, + Body = <<"hey">>, + Footer = #'v1_0.footer'{content = [{{symbol, <<"key">>}, {utf8, <<"value">>}}]}, %% We can translate, but lose the footer. - #mqtt_msg{payload = Payload} = amqp_to_mqtt([Val, Footer]), + #mqtt_msg{payload = Payload} = amqp_to_mqtt([#'v1_0.data'{content = Body}, Footer]), ?assertEqual(<<"hey">>, iolist_to_binary(Payload)). +mqtt_amqpl(_Config) -> + Msg0 = mqtt_msg(), + Msg = Msg0#mqtt_msg{qos = 1, + props = #{'Content-Type' => <<"text/plain">>, + 'User-Property' => [{<<"key-2">>, <<"val-2">>}, + {<<"key-1">>, <<"val-1">>}], + 'Correlation-Data' => <<"banana">>, + 'Message-Expiry-Interval' => 1001, + 'Response-Topic' => <<"tmp/blah/responses">> + } + }, + Anns = #{?ANN_ROUTING_KEYS => [rabbit_mqtt_util:mqtt_to_amqp(Msg#mqtt_msg.topic)]}, + Mc = mc:init(mc_mqtt, Msg, Anns), + MsgL = mc:convert(mc_amqpl, Mc), + + #content{properties = #'P_basic'{headers = HL} = Props} = + mc:protocol_state(MsgL), + + ?assertMatch(#'P_basic'{delivery_mode = 2, + correlation_id = <<"banana">>, + expiration = <<"1001000">>, + content_type = <<"text/plain">>}, Props), + ?assertMatch({_, longstr, <<"val-2">>}, amqpl_header(<<"key-2">>, HL)), + ?assertMatch({_, longstr, <<"val-1">>}, amqpl_header(<<"key-1">>, HL)), + ?assertMatch({_, longstr, <<"tmp.blah.responses">>}, + amqpl_header(<<"x-reply-to-topic">>, HL)), + ok. + +mqtt_amqpl_alt(_Config) -> + InvalidUtf8 = <<14,23,97,23,144,149,12,108,140,66,151,2>>, + Msg0 = mqtt_msg(), + Msg = Msg0#mqtt_msg{qos = 0, + props = #{'Content-Type' => <<"no-ascii🐇"/utf8>>, + % 'User-Property' => [{<<"key-2">>, <<"val-2">>}, + % {<<"key-1">>, <<"val-1">>}], + 'Correlation-Data' => InvalidUtf8 + } + }, + Anns = #{?ANN_ROUTING_KEYS => [rabbit_mqtt_util:mqtt_to_amqp(Msg#mqtt_msg.topic)]}, + Mc = mc:init(mc_mqtt, Msg, Anns), + MsgL = mc:convert(mc_amqpl, Mc), + + #content{properties = #'P_basic'{headers = HL} = Props} = + mc:protocol_state(MsgL), + + ?assertMatch(#'P_basic'{delivery_mode = 1, + correlation_id = undefined, + content_type = undefined}, Props), + + ?assertMatch({_, longstr, InvalidUtf8}, + amqpl_header(<<"x-correlation-id">>, HL)), + ok. 
+ +mqtt_amqp(_Config) -> + Key = mqtt_x, + Ex = <<"mqtt-topic-exchange">>, + Env = #{Key => <<"mqtt-topic-exchange">>}, + Mqtt0 = mqtt_msg(), + Mqtt = Mqtt0#mqtt_msg{qos = 1, + props = #{'Content-Type' => <<"text/plain">>, + 'User-Property' => + [{<<"key-2">>, <<"val-2">>}, + {<<"key-1">>, <<"val-1">>}, + {<<"x-stream-filter">>, <<"apple">>}], + 'Correlation-Data' => <<"banana">>, + 'Message-Expiry-Interval' => 1001, + 'Response-Topic' => <<"tmp/blah/responses">> + } + }, + Anns = #{?ANN_EXCHANGE => Ex, + ?ANN_ROUTING_KEYS => [rabbit_mqtt_util:mqtt_to_amqp(Mqtt#mqtt_msg.topic)]}, + Mc = mc:init(mc_mqtt, Mqtt, Anns, Env), + %% no target env + Msg = mc:convert(mc_amqp, Mc), + + [H, + #'v1_0.message_annotations'{content = MA}, + P, + #'v1_0.application_properties'{content = AP}, + D] = amqp10_framing:decode_bin(iolist_to_binary(mc:protocol_state(Msg))), + + ?assertMatch(#'v1_0.header'{durable = true}, H), + ?assertEqual({utf8, <<"apple">>}, amqp_map_get(symbol(<<"x-stream-filter">>), MA)), + ?assertMatch(#'v1_0.properties'{content_type = {symbol, <<"text/plain">>}, + correlation_id = {binary, <<"banana">>}}, P), + ?assertEqual({utf8, <<"val-1">>}, amqp_map_get(utf8(<<"key-1">>), AP)), + ?assertEqual({utf8, <<"val-2">>}, amqp_map_get(utf8(<<"key-2">>), AP)), + ?assertMatch(#'v1_0.data'{content = _}, D), + ok. + +mqtt_amqp_alt(_Config) -> + Key = mqtt_x, + Ex = <<"mqtt-topic-exchange">>, + Env = #{Key => <<"mqtt-topic-exchange">>}, + CorrId = <<"urn:uuid:550e8400-e29b-41d4-a716-446655440000">>, + Mqtt0 = mqtt_msg(), + Mqtt = Mqtt0#mqtt_msg{qos = 0, + props = #{'Content-Type' => <<"text/plain">>, + 'User-Property' => + [{<<"key-2">>, <<"val-2">>}, + {<<"key-1">>, <<"val-1">>}, + {<<"x-stream-filter">>, <<"apple">>}], + 'Correlation-Data' => CorrId, + 'Message-Expiry-Interval' => 1001, + 'Response-Topic' => <<"tmp/blah/responses">> + } + }, + Anns = #{?ANN_EXCHANGE => Ex, + ?ANN_ROUTING_KEYS => [rabbit_mqtt_util:mqtt_to_amqp(Mqtt#mqtt_msg.topic)]}, + Mc = mc:init(mc_mqtt, Mqtt, Anns, Env), + Msg = mc:convert(mc_amqp, Mc), + + [H, + #'v1_0.message_annotations'{content = MA}, + P, + #'v1_0.application_properties'{content = AP}, + D] = amqp10_framing:decode_bin(iolist_to_binary(mc:protocol_state(Msg))), + + ?assertMatch(#'v1_0.header'{durable = false}, H), + ?assertEqual({utf8, <<"apple">>}, amqp_map_get(symbol(<<"x-stream-filter">>), MA)), + ?assertMatch(#'v1_0.properties'{content_type = {symbol, <<"text/plain">>}, + correlation_id = {uuid, _}}, P), + ?assertEqual({utf8, <<"val-1">>}, amqp_map_get(utf8(<<"key-1">>), AP)), + ?assertEqual({utf8, <<"val-2">>}, amqp_map_get(utf8(<<"key-2">>), AP)), + ?assertEqual(#'v1_0.data'{content = <<>>}, D), + ok. 
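%% ---------------------------------------------------------------------------
%% Editor's illustrative sketch, not part of the change above: mqtt_amqp and
%% mqtt_amqp_alt differ in how MQTT 'Correlation-Data' surfaces in the AMQP 1.0
%% properties section: an opaque value is carried as a binary, while a
%% "urn:uuid:..." string maps to the uuid type. This assumes
%% mc_util:urn_string_to_uuid/1 as used by the amqp_mqtt test below, with the
%% same example URN.
%% ---------------------------------------------------------------------------
example_correlation_id_mapping() ->
    Urn = <<"urn:uuid:550e8400-e29b-41d4-a716-446655440000">>,
    {ok, UuId} = mc_util:urn_string_to_uuid(Urn),
    %% the two shapes asserted on by the tests above, respectively:
    [{binary, <<"banana">>}, {uuid, UuId}].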
+ +amqp_mqtt(_Config) -> + Env = #{mqtt_x => <<"mqtt-topic-exchange">>}, + H = #'v1_0.header'{priority = {ubyte, 3}, + ttl = {uint, 20000}, + durable = true}, + MAC = [ + {{symbol, <<"x-stream-filter">>}, {utf8, <<"apple">>}}, + thead2(list, [utf8(<<"l">>)]), + thead2(map, [{utf8(<<"k">>), utf8(<<"v">>)}]), + thead2('x-list', list, [utf8(<<"l">>)]), + thead2('x-map', map, [{utf8(<<"k">>), utf8(<<"v">>)}]) + ], + CorrIdOut = <<"urn:uuid:550e8400-e29b-41d4-a716-446655440000">>, + {ok, CorrUUId} = mc_util:urn_string_to_uuid(CorrIdOut), + M = #'v1_0.message_annotations'{content = MAC}, + P = #'v1_0.properties'{content_type = {symbol, <<"text/plain">>}, + correlation_id = {uuid, CorrUUId}, + creation_time = {timestamp, 10000} + }, + AC = [ + thead(long, 5), + thead(ulong, 5), + thead(utf8, <<"a-string">>), + thead(binary, <<"data">>), + thead(symbol, <<"symbol">>), + thead(ubyte, 255), + thead(short, 2), + thead(ushort, 3), + thead(uint, 4), + thead(int, 4), + thead(double, 5.0), + thead(float, 6.0), + thead(timestamp, 7000), + thead(byte, -128), + {{utf8, <<"boolean1">>}, true}, + {{utf8, <<"boolean2">>}, false}, + {utf8(<<"null">>), null} + ], + A = #'v1_0.application_properties'{content = AC}, + D = #'v1_0.data'{content = <<"data">>}, + + Anns = #{?ANN_EXCHANGE => <<"exch">>, + ?ANN_ROUTING_KEYS => [<<"apple">>]}, + Payload = iolist_to_binary([amqp10_framing:encode_bin(Section) || Section <- [H, M, P, A, D]]), + AMsg = mc:init(mc_amqp, Payload, Anns), + Msg = mc:convert(mc_mqtt, AMsg, Env), + ?assertMatch({uuid, CorrUUId}, mc:correlation_id(Msg)), + Mqtt = mc:protocol_state(Msg), + ?assertMatch( + #mqtt_msg{ + qos = 1, + props = #{'Content-Type' := <<"text/plain">>, + 'User-Property' := [{<<"x-stream-filter">>,<<"apple">>}, + {<<"long">>,<<"5">>}, + {<<"ulong">>,<<"5">>}, + {<<"utf8">>,<<"a-string">>}, + {<<"symbol">>,<<"symbol">>}, + {<<"ubyte">>,<<"255">>}, + {<<"short">>,<<"2">>}, + {<<"ushort">>,<<"3">>}, + {<<"uint">>,<<"4">>}, + {<<"int">>,<<"4">>}, + {<<"double">>, + <<"5.00000000000000000000e+00">>}, + {<<"float">>, + <<"6.00000000000000000000e+00">>}, + {<<"timestamp">>,<<"7">>}, + {<<"byte">>,<<"-128">>}, + {<<"boolean1">>,<<"true">>}, + {<<"boolean2">>,<<"false">>}, + {<<"null">>,<<>>}], + 'Correlation-Data' := CorrIdOut}}, + Mqtt). + +is_persistent(_Config) -> + Msg0 = #mqtt_msg{qos = 0, + topic = <<"my/topic">>, + payload = <<>>}, + Mc0 = mc:init(mc_mqtt, Msg0, #{}), + ?assertNot(mc:is_persistent(Mc0)), + + Msg1 = #mqtt_msg{qos = 1, + topic = <<"my/topic">>, + payload = <<>>}, + Mc1 = mc:init(mc_mqtt, Msg1, #{}), + ?assert(mc:is_persistent(Mc1)). + mqtt_msg() -> #mqtt_msg{qos = 0, topic = <<"my/topic">>, payload = <<>>}. roundtrip(Mod, MqttMsg) -> - Anns = #{routing_keys => [rabbit_mqtt_util:mqtt_to_amqp(MqttMsg#mqtt_msg.topic)]}, - Mc0 = mc:init(mc_mqtt, MqttMsg, Anns), + roundtrip(Mod, MqttMsg, #{}). + +roundtrip(Mod, MqttMsg, SrcEnv) -> + Anns = #{?ANN_ROUTING_KEYS => [rabbit_mqtt_util:mqtt_to_amqp(MqttMsg#mqtt_msg.topic)]}, + Mc0 = mc:init(mc_mqtt, MqttMsg, Anns, SrcEnv), Mc1 = mc:convert(Mod, Mc0), Mc = mc:convert(mc_mqtt, Mc1), mc:protocol_state(Mc). amqp_to_mqtt(Sections) -> - Anns = #{routing_keys => [<<"apple">>]}, - Mc0 = mc:init(mc_amqp, Sections, Anns), - Mc = mc:convert(mc_mqtt, Mc0), + amqp_to_mqtt(Sections, #{}). 
+ +amqp_to_mqtt(Sections, Env) -> + Anns = #{?ANN_ROUTING_KEYS => [<<"apple">>]}, + Payload = iolist_to_binary([amqp10_framing:encode_bin(S) || S <- Sections]), + Mc0 = mc:init(mc_amqp, Payload, Anns), + Mc = mc:convert(mc_mqtt, Mc0, Env), mc:protocol_state(Mc). amqp_value(Content) -> #'v1_0.amqp_value'{content = Content}. + +amqpl_header(K, H) -> + rabbit_basic:header(K, H). + +amqp_map_get(_K, []) -> + undefined; +amqp_map_get(K, Tuples) -> + case lists:keyfind(K, 1, Tuples) of + false -> + undefined; + {_, V} -> + V + end. + +symbol(X) -> + {symbol, X}. + +utf8(X) -> + {utf8, X}. + +thead(T, Value) -> + {utf8(atom_to_binary(T)), {T, Value}}. + +thead2(T, Value) -> + {symbol(atom_to_binary(T)), {T, Value}}. + +thead2(K, T, Value) -> + {symbol(atom_to_binary(K)), {T, Value}}. diff --git a/deps/rabbitmq_mqtt/test/mqtt_machine_SUITE.erl b/deps/rabbitmq_mqtt/test/mqtt_machine_SUITE.erl deleted file mode 100644 index f73149a0036e..000000000000 --- a/deps/rabbitmq_mqtt/test/mqtt_machine_SUITE.erl +++ /dev/null @@ -1,96 +0,0 @@ --module(mqtt_machine_SUITE). - --compile([export_all, nowarn_export_all]). - --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("mqtt_machine.hrl"). - -%%%=================================================================== -%%% Common Test callbacks -%%%=================================================================== - -all() -> - [ - {group, tests} - ]. - - -all_tests() -> - [ - basics, - machine_upgrade, - many_downs - ]. - -groups() -> - [ - {tests, [], all_tests()} - ]. - -%%%=================================================================== -%%% Test cases -%%%=================================================================== - -basics(_Config) -> - S0 = mqtt_machine:init(#{}), - ClientId = <<"id1">>, - OthPid = spawn(fun () -> ok end), - {S1, ok, _} = mqtt_machine:apply(meta(1), {register, ClientId, self()}, S0), - ?assertMatch(#machine_state{client_ids = Ids} when map_size(Ids) == 1, S1), - ?assertMatch(#machine_state{pids = Pids} when map_size(Pids) == 1, S1), - {S2, ok, _} = mqtt_machine:apply(meta(2), {register, ClientId, OthPid}, S1), - ?assertMatch(#machine_state{client_ids = #{ClientId := OthPid} = Ids} - when map_size(Ids) == 1, S2), - {S3, ok, _} = mqtt_machine:apply(meta(3), {down, OthPid, noproc}, S2), - ?assertMatch(#machine_state{client_ids = Ids} when map_size(Ids) == 0, S3), - {S4, ok, _} = mqtt_machine:apply(meta(3), {unregister, ClientId, OthPid}, S2), - ?assertMatch(#machine_state{client_ids = Ids} when map_size(Ids) == 0, S4), - - ok. - -machine_upgrade(_Config) -> - S0 = mqtt_machine_v0:init(#{}), - ClientId = <<"id1">>, - Self = self(), - {S1, ok, _} = mqtt_machine_v0:apply(meta(1), {register, ClientId, self()}, S0), - ?assertMatch({machine_state, Ids} when map_size(Ids) == 1, S1), - {S2, ok, _} = mqtt_machine:apply(meta(2), {machine_version, 0, 1}, S1), - ?assertMatch(#machine_state{client_ids = #{ClientId := Self}, - pids = #{Self := [ClientId]} = Pids} - when map_size(Pids) == 1, S2), - {S3, ok, _} = mqtt_machine:apply(meta(3), {down, self(), noproc}, S2), - ?assertMatch(#machine_state{client_ids = Ids, - pids = Pids} - when map_size(Ids) == 0 andalso map_size(Pids) == 0, S3), - - ok. 
- -many_downs(_Config) -> - S0 = mqtt_machine:init(#{}), - Clients = [{list_to_binary(integer_to_list(I)), spawn(fun() -> ok end)} - || I <- lists:seq(1, 10000)], - S1 = lists:foldl( - fun ({ClientId, Pid}, Acc0) -> - {Acc, ok, _} = mqtt_machine:apply(meta(1), {register, ClientId, Pid}, Acc0), - Acc - end, S0, Clients), - _ = lists:foldl( - fun ({_ClientId, Pid}, Acc0) -> - {Acc, ok, _} = mqtt_machine:apply(meta(1), {down, Pid, noproc}, Acc0), - Acc - end, S1, Clients), - _ = lists:foldl( - fun ({ClientId, Pid}, Acc0) -> - {Acc, ok, _} = mqtt_machine:apply(meta(1), {unregister, ClientId, - Pid}, Acc0), - Acc - end, S0, Clients), - - ok. -%% Utility - -meta(Idx) -> - #{index => Idx, - term => 1, - ts => erlang:system_time(millisecond)}. diff --git a/deps/rabbitmq_mqtt/test/packet_prop_SUITE.erl b/deps/rabbitmq_mqtt/test/packet_prop_SUITE.erl index d22da9a8c105..e33afd5965da 100644 --- a/deps/rabbitmq_mqtt/test/packet_prop_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/packet_prop_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(packet_prop_SUITE). -compile([export_all, nowarn_export_all]). diff --git a/deps/rabbitmq_mqtt/test/processor_SUITE.erl b/deps/rabbitmq_mqtt/test/processor_SUITE.erl index 15ae0dd5374c..647d4fca55b4 100644 --- a/deps/rabbitmq_mqtt/test/processor_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/processor_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(processor_SUITE). -compile([export_all, nowarn_export_all]). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). @@ -43,9 +42,17 @@ init_per_testcase(get_vhost, Config) -> mnesia:create_table(rabbit_runtime_parameters, [ {attributes, record_info(fields, runtime_parameters)}, {record_name, runtime_parameters}]), + meck:new(rabbit_feature_flags, [passthrough, no_link]), + meck:expect( + rabbit_feature_flags, is_enabled, + fun + (khepri_db, _) -> false; + (FeatureNames, _) -> meck:passthrough([FeatureNames]) + end), Config; init_per_testcase(_, Config) -> Config. end_per_testcase(get_vhost, Config) -> + meck:unload(rabbit_feature_flags), mnesia:stop(), Config; end_per_testcase(_, Config) -> Config. diff --git a/deps/rabbitmq_mqtt/test/protocol_interop_SUITE.erl b/deps/rabbitmq_mqtt/test/protocol_interop_SUITE.erl index 686ac67e6998..249e335e2afd 100644 --- a/deps/rabbitmq_mqtt/test/protocol_interop_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/protocol_interop_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% This test suite covers protocol interoperability publishing via MQTT 5.0, @@ -15,27 +15,36 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("amqp10_common/include/amqp10_framing.hrl"). -include_lib("rabbitmq_stomp/include/rabbit_stomp_frame.hrl"). -import(util, - [connect/2]). + [connect/2, + connect/4]). -import(rabbit_ct_broker_helpers, [rpc/4]). -import(rabbit_ct_helpers, - [eventually/3]). + [eventually/1, + eventually/3]). all() -> - [{group, tests}]. + [{group, cluster_size_1}, + {group, cluster_size_3}]. groups() -> - [{tests, [shuffle], + [{cluster_size_1, [shuffle], [ - amqpl, - amqp, - stomp, - stream - ] - }]. + mqtt_amqpl_mqtt, + mqtt_amqp_mqtt, + amqp_mqtt_amqp, + mqtt_stomp_mqtt, + mqtt_stream + ]}, + {cluster_size_3, [shuffle], + [ + amqp_mqtt_qos0, + amqp_mqtt_qos1 + ]}]. %% ------------------------------------------------------------------- %% Testsuite setup/teardown. @@ -49,18 +58,21 @@ init_per_suite(Config) -> end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). -init_per_group(_Group, Config0) -> +init_per_group(Group, Config0) -> + Nodes = case Group of + cluster_size_1 -> 1; + cluster_size_3 -> 3 + end, Config1 = rabbit_ct_helpers:set_config( Config0, - {mqtt_version, v5}), + [{rmq_nodes_count, Nodes}, + {mqtt_version, v5}]), Config = rabbit_ct_helpers:run_steps( Config1, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()), - ok = rabbit_ct_broker_helpers:enable_feature_flag(Config, mqtt_v5), - Plugins = [rabbitmq_amqp1_0, - rabbitmq_stomp, + Plugins = [rabbitmq_stomp, rabbitmq_stream], [ok = rabbit_ct_broker_helpers:enable_plugin(Config, 0, Plugin) || Plugin <- Plugins], Config. @@ -84,10 +96,11 @@ end_per_testcase(Testcase, Config) -> %% Testsuite cases %% ------------------------------------------------------------------- -amqpl(Config) -> +mqtt_amqpl_mqtt(Config) -> Q = ClientId = atom_to_binary(?FUNCTION_NAME), Ch = rabbit_ct_client_helpers:open_channel(Config), - #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Q}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = Q, + durable = true}), #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = Q, exchange = <<"amq.topic">>, routing_key = <<"my.topic">>}), @@ -146,9 +159,17 @@ amqpl(Config) -> after 1000 -> ct:fail("did not receive reply") end, + %% Another message MQTT 5.0 to AMQP 0.9.1, this time with QoS 0 + ok = emqtt:publish(C, <<"my/topic">>, RequestPayload, [{qos, 0}]), + eventually( + ?_assertMatch( + {#'basic.get_ok'{}, #amqp_msg{payload = RequestPayload, + props = #'P_basic'{delivery_mode = 1}}}, + amqp_channel:call(Ch, #'basic.get'{queue = Q}))), + ok = emqtt:disconnect(C). 
-amqp(Config) -> +mqtt_amqp_mqtt(Config) -> Host = ?config(rmq_hostname, Config), Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), ClientId = Container = atom_to_binary(?FUNCTION_NAME), @@ -158,20 +179,26 @@ amqp(Config) -> sasl => {plain, <<"guest">>, <<"guest">>}}, {ok, Connection1} = amqp10_client:open_connection(OpnConf), {ok, Session1} = amqp10_client:begin_session(Connection1), - ReceiverLinkName = <<"test-receiver">>, + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session1, <<"pair">>), + QName = <<"queue for AMQP 1.0 client">>, + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, <<"amq.topic">>, <<"topic.1">>, #{}), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), {ok, Receiver} = amqp10_client:attach_receiver_link( - Session1, ReceiverLinkName, <<"/topic/topic.1">>, unsettled), + Session1, <<"test-receiver">>, + rabbitmq_amqp_address:queue(QName), + unsettled, configuration), %% MQTT 5.0 to AMQP 1.0 C = connect(ClientId, Config), - MqttResponseTopic = <<"response/topic">>, + MqttResponseTopic = <<"response/topic/🥕"/utf8>>, {ok, _, [1]} = emqtt:subscribe(C, #{'Subscription-Identifier' => 999}, [{MqttResponseTopic, [{qos, 1}]}]), Correlation = <<"some correlation ID">>, ContentType = <<"text/plain">>, RequestPayload = <<"my request">>, - UserProperty = [{<<"rabbit🐇"/utf8>>, <<"carrot🥕"/utf8>>}, - {<<"x-rabbit🐇"/utf8>>, <<"carrot🥕"/utf8>>}, + UserProperty = [{<<"🐇"/utf8>>, <<"🥕"/utf8>>}, + {<<"x-🐇"/utf8>>, <<"🥕"/utf8>>}, {<<"key">>, <<"val">>}, {<<"key">>, <<"val">>}, {<<"x-key">>, <<"val">>}, @@ -183,18 +210,35 @@ amqp(Config) -> 'User-Property' => UserProperty}, RequestPayload, [{qos, 1}]), - %% As of 3.13, AMQP 1.0 is proxied via AMQP 0.9.1 and therefore the conversion from - %% mc_mqtt to mc_amqpl takes place. We therefore lose MQTT User Property and Response Topic - %% which gets converted to AMQP 0.9.1 headers. In the future, Native AMQP 1.0 will convert - %% from mc_mqtt to mc_amqp allowing us to do many more assertions here. {ok, Msg1} = amqp10_client:get_msg(Receiver), ct:pal("Received AMQP 1.0 message:~n~p", [Msg1]), - ?assertEqual([RequestPayload], amqp10_msg:body(Msg1)), - ?assertMatch(#{correlation_id := Correlation, - content_type := ContentType}, amqp10_msg:properties(Msg1)), + ?assert(amqp10_msg:header(durable, Msg1)), ?assert(amqp10_msg:header(first_acquirer, Msg1)), + %% We expect to receive x-headers in message annotations. + %% However, since annotation keys are symbols and symbols are only valid ASCII, + %% we expect header + %% {<<"x-🐇"/utf8>>, <<"🥕"/utf8>>} + %% to be dropped. + ?assertEqual(#{<<"x-key">> => <<"val">>, + <<"x-exchange">> => <<"amq.topic">>, + <<"x-routing-key">> => <<"topic.1">>}, + amqp10_msg:message_annotations(Msg1)), + %% In contrast, application property keys are of type string, and therefore UTF-8 encoded. 
+ ?assertEqual(#{<<"🐇"/utf8>> => <<"🥕"/utf8>>, + <<"key">> => <<"val">>}, + amqp10_msg:application_properties(Msg1)), + + #{correlation_id := Correlation, + content_type := ContentType, + reply_to := ReplyToAddress} = amqp10_msg:properties(Msg1), + ExpectedReplyToAddress = rabbitmq_amqp_address:exchange( + <<"amq.topic">>, <<"response.topic.🥕"/utf8>>), + ?assertEqual(ExpectedReplyToAddress, ReplyToAddress), + + ?assertEqual(RequestPayload, amqp10_msg:body_bin(Msg1)), + ok = amqp10_client:settle_msg(Receiver, Msg1, accepted), ok = amqp10_client:detach_link(Receiver), ok = amqp10_client:end_session(Session1), @@ -205,20 +249,21 @@ amqp(Config) -> {ok, Session2} = amqp10_client:begin_session(Connection2), SenderLinkName = <<"test-sender">>, {ok, Sender} = amqp10_client:attach_sender_link( - %% With Native AMQP 1.0, address should be read from received reply-to - Session2, SenderLinkName, <<"/topic/response.topic">>, unsettled), + Session2, SenderLinkName, ReplyToAddress, unsettled), receive {amqp10_event, {link, Sender, credited}} -> ok after 1000 -> ct:fail(credited_timeout) end, DTag = <<"my-dtag">>, ReplyPayload = <<"my response">>, - Msg2a = amqp10_msg:new(DTag, ReplyPayload), + Msg2a = amqp10_msg:new(DTag, #'v1_0.data'{content = ReplyPayload}), Msg2b = amqp10_msg:set_properties( #{correlation_id => Correlation, content_type => ContentType}, Msg2a), - Msg2 = amqp10_msg:set_headers(#{durable => true}, Msg2b), + %% Use the 2 byte AMQP boolean encoding, see AMQP §1.6.2 + True = {boolean, true}, + Msg2 = amqp10_msg:set_headers(#{durable => True}, Msg2b), ok = amqp10_client:send_msg(Sender, Msg2), receive {amqp10_disposition, {accepted, DTag}} -> ok after 1000 -> ct:fail(settled_timeout) @@ -237,13 +282,188 @@ amqp(Config) -> payload := ReplyPayload, properties := #{'Content-Type' := ContentType, 'Correlation-Data' := Correlation, - 'Subscription-Identifier' := 999}}, + 'Subscription-Identifier' := 999} + }, MqttMsg) after 1000 -> ct:fail("did not receive reply") end, ok = emqtt:disconnect(C). 
-stomp(Config) -> +amqp_mqtt_amqp(Config) -> + Correlation = QName = ClientId = Container = atom_to_binary(?FUNCTION_NAME), + + C = connect(ClientId, Config), + {ok, _, [1]} = emqtt:subscribe(C, <<"t/1">>, qos1), + + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + OpnConf = #{address => Host, + port => Port, + container_id => Container, + sasl => {plain, <<"guest">>, <<"guest">>}}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"pair">>), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + ok = rabbitmq_amqp_client:bind_queue(LinkPair, QName, <<"amq.topic">>, <<"[.]">>, #{}), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"receiver">>, rabbitmq_amqp_address:queue(QName)), + + %% AMQP 1.0 to MQTT 5.0 + {ok, Sender} = amqp10_client:attach_sender_link( + Session, + <<"sender">>, + rabbitmq_amqp_address:exchange(<<"amq.topic">>, <<"t.1">>)), + receive {amqp10_event, {link, Sender, credited}} -> ok + after 2000 -> ct:fail(credited_timeout) + end, + RequestBody = <<"my request">>, + + Msg1 = amqp10_msg:set_headers( + #{durable => true}, + amqp10_msg:set_properties( + #{correlation_id => Correlation, + reply_to => rabbitmq_amqp_address:exchange(<<"amq.topic">>, <<"[.]">>)}, + amqp10_msg:new(<<>>, RequestBody, true))), + ok = amqp10_client:send_msg(Sender, Msg1), + + ResponseTopic = <<"[/]">>, + receive {publish, MqttMsg} -> + ct:pal("Received MQTT message:~n~p", [MqttMsg]), + #{client_pid := C, + qos := 1, + topic := <<"t/1">>, + payload := RequestBody, + properties := Props = #{'Correlation-Data' := Correlation} + } = MqttMsg, + case rabbit_ct_broker_helpers:is_feature_flag_enabled( + Config, 'rabbitmq_4.0.0') of + true -> + ?assertEqual({ok, ResponseTopic}, + maps:find('Response-Topic', Props)); + false -> + ok + end + after 2000 -> ct:fail("did not receive request") + end, + + %% MQTT 5.0 to AMQP 1.0 + RespBody = <<"my response">>, + {ok, _} = emqtt:publish(C, ResponseTopic, + #{'Correlation-Data' => Correlation}, + RespBody, [{qos, 1}]), + + {ok, Msg2} = amqp10_client:get_msg(Receiver), + ct:pal("Received AMQP 1.0 message:~n~p", [Msg2]), + ?assertEqual(RespBody, amqp10_msg:body_bin(Msg2)), + + ok = emqtt:disconnect(C), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection). + +%% Send messages with different AMQP body sections and +%% consume via MQTT 5.0 with a QoS 0 subscription. +amqp_mqtt_qos0(Config) -> + %% We want to test that the old node can receive from an MQTT QoS 0 queue. + amqp_mqtt(0, Config). + +%% Send messages with different AMQP body sections and +%% consume via MQTT 5.0 with a QoS 1 subscription. +amqp_mqtt_qos1(Config) -> + amqp_mqtt(1, Config). + +amqp_mqtt(Qos, Config) -> + ClientId = Container = atom_to_binary(?FUNCTION_NAME), + + %% Connect MQTT subscriber to the old node. 
+ C = connect(ClientId, Config, 1, []), + {ok, _, [Qos]} = emqtt:subscribe(C, <<"my/topic">>, Qos), + + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + OpnConf = #{address => Host, + port => Port, + container_id => Container, + sasl => {plain, <<"guest">>, <<"guest">>}}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session(Connection), + + {ok, Sender} = amqp10_client:attach_sender_link( + Session, + <<"sender">>, + rabbitmq_amqp_address:exchange(<<"amq.topic">>, <<"my.topic">>)), + receive {amqp10_event, {link, Sender, credited}} -> ok + after 2000 -> ct:fail(credited_timeout) + end, + + %% single amqp-value section + Body1 = #'v1_0.amqp_value'{content = {binary, <<0, 255>>}}, + Body2 = #'v1_0.amqp_value'{content = false}, + %% single amqp-sequene section + Body3 = [#'v1_0.amqp_sequence'{content = [{binary, <<0, 255>>}]}], + %% multiple amqp-sequene sections + Body4 = [#'v1_0.amqp_sequence'{content = [{long, -1}]}, + #'v1_0.amqp_sequence'{content = [true, {utf8, <<"🐇"/utf8>>}]}], + %% single data section + Body5 = [#'v1_0.data'{content = <<0, 255>>}], + %% multiple data sections + Body6 = [#'v1_0.data'{content = <<0, 1>>}, + #'v1_0.data'{content = <<2, 3>>}], + + [ok = amqp10_client:send_msg(Sender, + amqp10_msg:set_headers( + #{durable => true}, + amqp10_msg:new(<<>>, Body, true))) || + Body <- [Body1, Body2, Body3, Body4, Body5, Body6]], + + ok = amqp10_client:detach_link(Sender), + ok = amqp10_client:end_session(Session), + ok = amqp10_client:close_connection(Connection), + + receive {publish, MqttMsg1} -> + #{client_pid := C, + qos := Qos, + topic := <<"my/topic">>, + payload := Payload1, + properties := Props + } = MqttMsg1, + ?assertEqual([Body1], amqp10_framing:decode_bin(Payload1)), + case rabbit_ct_broker_helpers:is_feature_flag_enabled( + Config, 'rabbitmq_4.0.0') of + true -> + ?assertEqual({ok, <<"message/vnd.rabbitmq.amqp">>}, + maps:find('Content-Type', Props)); + false -> + ok + end + after 5000 -> ct:fail({missing_publish, ?LINE}) + end, + receive {publish, #{payload := Payload2}} -> + ?assertEqual([Body2], amqp10_framing:decode_bin(Payload2)) + after 5000 -> ct:fail({missing_publish, ?LINE}) + end, + receive {publish, #{payload := Payload3}} -> + ?assertEqual(Body3, amqp10_framing:decode_bin(Payload3)) + after 5000 -> ct:fail({missing_publish, ?LINE}) + end, + receive {publish, #{payload := Payload4}} -> + ?assertEqual(Body4, amqp10_framing:decode_bin(Payload4)) + after 5000 -> ct:fail({missing_publish, ?LINE}) + end, + receive {publish, #{payload := Payload5}} -> + ?assertEqual(<<0, 255>>, Payload5) + after 5000 -> ct:fail({missing_publish, ?LINE}) + end, + receive {publish, #{payload := Payload6}} -> + %% We expect that RabbitMQ concatenates the binaries of multiple data sections. + ?assertEqual(<<0, 1, 2, 3>>, Payload6) + after 5000 -> ct:fail({missing_publish, ?LINE}) + end, + + ok = emqtt:disconnect(C). + +mqtt_stomp_mqtt(Config) -> {ok, StompC0} = stomp_connect(Config), ok = stomp_send(StompC0, "SUBSCRIBE", [{"destination", "/topic/t.1"}, {"receipt", "my-receipt"}, @@ -328,7 +548,7 @@ stomp(Config) -> %% The stream test case is one-way because an MQTT client can publish to a stream, %% but not consume (directly) from a stream. 
-stream(Config) -> +mqtt_stream(Config) -> Q = ClientId = atom_to_binary(?FUNCTION_NAME), Ch = rabbit_ct_client_helpers:open_channel(Config), @@ -362,8 +582,7 @@ stream(Config) -> #{'Content-Type' => ContentType, 'Correlation-Data' => Correlation, 'Response-Topic' => <<"response/topic">>, - 'User-Property' => UserProperty, - 'Payload-Format-Indicator' => 1}, + 'User-Property' => UserProperty}, Payload, [{qos, 1}]), ok = emqtt:disconnect(C), @@ -426,19 +645,17 @@ stream(Config) -> <<"x-routing-key">> => <<"my.topic">>, <<"x-key">> => <<"val">>}, amqp10_msg:message_annotations(Msg)), - ?assertEqual(#{correlation_id => Correlation, - content_type => ContentType, - %% We expect that reply_to contains a valid address, - %% and that the topic format got translated from MQTT to AMQP 0.9.1. - reply_to => <<"/topic/response.topic">>}, - amqp10_msg:properties(Msg)), + ?assertEqual( + #{correlation_id => Correlation, + content_type => ContentType, + %% We expect that reply_to contains a valid AMQP 1.0 address, + %% and that the topic format got translated from MQTT to AMQP 0.9.1. + reply_to => rabbitmq_amqp_address:exchange(<<"amq.topic">>, <<"response.topic">>)}, + amqp10_msg:properties(Msg)), ?assertEqual(#{<<"rabbit🐇"/utf8>> => <<"carrot🥕"/utf8>>, <<"key">> => <<"val">>}, amqp10_msg:application_properties(Msg)), - %% We excpet the body to be a single AMQP 1.0 value section where the value is a string - %% because we set the MQTT 5.0 Payload-Format-Indicator. - ?assertEqual({'v1_0.amqp_value', {utf8, Payload}}, - amqp10_msg:body(Msg)). + ?assertEqual(Payload, amqp10_msg:body_bin(Msg)). %% ------------------------------------------------------------------- %% Helpers diff --git a/deps/rabbitmq_mqtt/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_mqtt/test/proxy_protocol_SUITE.erl index 57a6f304a48d..3a84150b0b27 100644 --- a/deps/rabbitmq_mqtt/test/proxy_protocol_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/proxy_protocol_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(proxy_protocol_SUITE). -compile([export_all, nowarn_export_all]). @@ -57,9 +57,10 @@ end_per_suite(Config) -> rabbit_ct_broker_helpers:teardown_steps()). init_per_group(Group, Config) -> - Config1 = rabbit_ct_helpers:set_config(Config, {mqtt_version, Group}), - util:maybe_skip_v5(Config1). -end_per_group(_, Config) -> Config. + rabbit_ct_helpers:set_config(Config, {mqtt_version, Group}). + +end_per_group(_Group, Config) -> + Config. init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). diff --git a/deps/rabbitmq_mqtt/test/rabbit_auth_backend_mqtt_mock.erl b/deps/rabbitmq_mqtt/test/rabbit_auth_backend_mqtt_mock.erl index 60093fb566de..98ad0f4ea6f9 100644 --- a/deps/rabbitmq_mqtt/test/rabbit_auth_backend_mqtt_mock.erl +++ b/deps/rabbitmq_mqtt/test/rabbit_auth_backend_mqtt_mock.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% A mock authn/authz that records information during calls. For testing purposes only. @@ -16,12 +16,12 @@ -export([setup/1, user_login_authentication/2, user_login_authorization/2, check_vhost_access/3, check_resource_access/4, check_topic_access/4, - state_can_expire/0, + expiry_timestamp/1, get/1]). setup(CallerPid) -> ets:new(?MODULE, [set, public, named_table]), - CallerPid ! ok, + CallerPid ! {ok, self()}, receive stop -> ok end. @@ -47,7 +47,8 @@ check_topic_access(#auth_user{}, #resource{}, _Permission, TopicContext) -> ets:insert(?MODULE, {topic_access, TopicContext}), true. -state_can_expire() -> false. +expiry_timestamp(_) -> + never. get(K) -> ets:lookup(?MODULE, K). diff --git a/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app b/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app index c4083ec5fc81..287c59cfe230 100644 --- a/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app +++ b/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app @@ -4,9 +4,7 @@ {modules, []}, {registered, []}, {mod, {rabbit_mqtt, []}}, - {env, [{default_user, "guest_user"}, - {default_pass, "guest_pass"}, - {ssl_cert_login,false}, + {env, [{ssl_cert_login,false}, {allow_anonymous, true}, {vhost, "/"}, {exchange, "amq.topic"}, diff --git a/deps/rabbitmq_mqtt/test/reader_SUITE.erl b/deps/rabbitmq_mqtt/test/reader_SUITE.erl index d706faff996d..1c4fa1331980 100644 --- a/deps/rabbitmq_mqtt/test/reader_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/reader_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(reader_SUITE). -compile([export_all, @@ -63,13 +63,11 @@ merge_app_env(Config) -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), Config1 = rabbit_ct_helpers:set_config(Config, {rmq_nodename_suffix, ?MODULE}), - Config2 = rabbit_ct_helpers:run_setup_steps( - Config1, - [fun merge_app_env/1] ++ - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - ok = rabbit_ct_broker_helpers:enable_feature_flag(Config2, mqtt_v5), - Config2. + rabbit_ct_helpers:run_setup_steps( + Config1, + [fun merge_app_env/1] ++ + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config, @@ -94,8 +92,9 @@ end_per_testcase(Testcase, Config) -> block_connack_timeout(Config) -> P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt), - Ports0 = rpc(Config, erlang, ports, []), + Ports = rpc(Config, erlang, ports, []), + DefaultWatermark = rpc(Config, vm_memory_monitor, get_vm_memory_high_watermark, []), ok = rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0]), %% Let connection block. timer:sleep(100), @@ -109,34 +108,35 @@ block_connack_timeout(Config) -> unlink(Client), ClientMRef = monitor(process, Client), {error, connack_timeout} = emqtt:connect(Client), - receive - {'DOWN', ClientMRef, process, Client, connack_timeout} -> - ok - after 200 -> - ct:fail("missing connack_timeout in client") + receive {'DOWN', ClientMRef, process, Client, connack_timeout} -> ok + after 200 -> ct:fail("missing connack_timeout in client") end, - Ports = rpc(Config, erlang, ports, []), - %% Server creates 1 new port to handle our MQTT connection. 
- [NewPort] = Ports -- Ports0, - {connected, MqttReader} = rpc(Config, erlang, port_info, [NewPort, connected]), + MqttReader = rpc(Config, ?MODULE, mqtt_connection_pid, [Ports]), MqttReaderMRef = monitor(process, MqttReader), %% Unblock connection. CONNECT packet will be processed on the server. - rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.4]), + rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [DefaultWatermark]), - receive - {'DOWN', MqttReaderMRef, process, MqttReader, {shutdown, {socket_ends, einval}}} -> - %% We expect that MQTT reader process exits (without crashing) - %% because our client already disconnected. - ok - after 2000 -> - ct:fail("missing peername_not_known from server") + receive {'DOWN', MqttReaderMRef, process, MqttReader, {shutdown, {socket_ends, einval}}} -> + %% We expect that MQTT reader process exits (without crashing) + %% because our client already disconnected. + ok + after 2000 -> ct:fail("missing peername_not_known from server") end, %% Ensure that our client is not registered. ?assertEqual([], all_connection_pids(Config)), ok. +mqtt_connection_pid(ExistingPorts) -> + NewPorts = erlang:ports() -- ExistingPorts, + %% Server creates 1 new TCP port to handle our MQTT connection. + [MqttConnectionPort] = lists:filter(fun(P) -> + erlang:port_info(P, name) =:= {name, "tcp_inet"} + end, NewPorts), + {connected, MqttConnectionPid} = erlang:port_info(MqttConnectionPort, connected), + MqttConnectionPid. + handle_invalid_packets(Config) -> N = rpc(Config, ets, info, [connection_metrics, size]), P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt), @@ -150,15 +150,13 @@ handle_invalid_packets(Config) -> ?assertEqual(N, rpc(Config, ets, info, [connection_metrics, size])). login_timeout(Config) -> - rpc(Config, application, set_env, [rabbitmq_mqtt, login_timeout, 400]), - P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt), - {ok, C} = gen_tcp:connect("localhost", P, [{active, false}]), - - try - {error, closed} = gen_tcp:recv(C, 0, 500) - after - rpc(Config, application, unset_env, [rabbitmq_mqtt, login_timeout]) - end. + App = rabbitmq_mqtt, + Par = ?FUNCTION_NAME, + ok = rpc(Config, application, set_env, [App, Par, 400]), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt), + {ok, Socket} = gen_tcp:connect("localhost", Port, [{active, false}]), + ?assertEqual({error, closed}, gen_tcp:recv(Socket, 0, 500)), + ok = rpc(Config, application, unset_env, [App, Par]). stats(Config) -> C = connect(?FUNCTION_NAME, Config), @@ -265,8 +263,6 @@ rabbit_mqtt_qos0_queue_overflow(Config) -> #{messages_dead_lettered_maxlen_total := NumDeadLettered} } = rabbit_ct_broker_helpers:rpc(Config, rabbit_global_counters, overview, []), - ok = rabbit_ct_broker_helpers:enable_feature_flag(Config, QType), - Topic = atom_to_binary(?FUNCTION_NAME), Msg = binary:copy(<<"x">>, 4000), NumMsgs = 10_000, diff --git a/deps/rabbitmq_mqtt/test/retainer_SUITE.erl b/deps/rabbitmq_mqtt/test/retainer_SUITE.erl index 6ae4bdeef4a2..a69df2e6e2aa 100644 --- a/deps/rabbitmq_mqtt/test/retainer_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/retainer_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. %% -module(retainer_SUITE). -compile([export_all, nowarn_export_all]). @@ -63,7 +63,7 @@ init_per_group(G, Config) rabbit_ct_helpers:set_config(Config, {mqtt_version, G}); init_per_group(Group, Config0) -> Suffix = rabbit_ct_helpers:testcase_absname(Config0, "", "-"), - Config1 = rabbit_ct_helpers:set_config( + Config = rabbit_ct_helpers:set_config( Config0, {rmq_nodename_suffix, Suffix}), Mod = list_to_atom("rabbit_mqtt_retained_msg_store_" ++ atom_to_list(Group)), Env = [{rabbitmq_mqtt, [{retained_message_store, Mod}]}, @@ -73,13 +73,11 @@ init_per_group(Group, Config0) -> {default_vhost, "/"}, {default_permissions, [".*", ".*", ".*"]} ]}], - Config = rabbit_ct_helpers:run_setup_steps( - Config1, - [fun(Conf) -> rabbit_ct_helpers:merge_app_env(Conf, Env) end] ++ - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - ok = rabbit_ct_broker_helpers:enable_feature_flag(Config, mqtt_v5), - Config. + rabbit_ct_helpers:run_setup_steps( + Config, + [fun(Conf) -> rabbit_ct_helpers:merge_app_env(Conf, Env) end] ++ + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). end_per_group(G, Config) when G =:= v4; diff --git a/deps/rabbitmq_mqtt/test/shared_SUITE.erl b/deps/rabbitmq_mqtt/test/shared_SUITE.erl index 70821a27a3a1..a401b664df6a 100644 --- a/deps/rabbitmq_mqtt/test/shared_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/shared_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% This test suite contains test cases that are shared between (i.e. executed across): %% 1. plugins rabbitmq_mqtt and rabbitmq_web_mqtt @@ -28,7 +28,7 @@ get_node_config/3, drain_node/2, revive_node/2, - is_feature_flag_enabled/2 + await_metadata_store_consistent/2 ]). -import(rabbit_ct_helpers, [eventually/3, @@ -56,32 +56,35 @@ all() -> [{group, mqtt}, {group, web_mqtt}]. +%% The code being tested under v3 and v4 is almost identical. +%% To save time in CI, we therefore run only a very small subset of tests in v3. groups() -> [ {mqtt, [], - [{v3, [], - [{cluster_size_1, [], cluster_size_1_tests()}, - {cluster_size_3, [], cluster_size_3_tests()}]}, - {v4, [], - [{cluster_size_1, [], cluster_size_1_tests()}, - {cluster_size_3, [], cluster_size_3_tests()}]}, - {v5, [], - [{cluster_size_1, [], cluster_size_1_tests()}, - {cluster_size_3, [], cluster_size_3_tests()}]} + [{cluster_size_1, [], + [{v3, [], cluster_size_1_tests_v3()}, + {v4, [], cluster_size_1_tests()}, + {v5, [], cluster_size_1_tests()}]}, + {cluster_size_3, [], + [{v4, [], cluster_size_3_tests()}, + {v5, [], cluster_size_3_tests()}]} ]}, {web_mqtt, [], - [{v3, [], - [{cluster_size_1, [], cluster_size_1_tests()}, - {cluster_size_3, [], cluster_size_3_tests()}]}, - {v4, [], - [{cluster_size_1, [], cluster_size_1_tests()}, - {cluster_size_3, [], cluster_size_3_tests()}]}, - {v5, [], - [{cluster_size_1, [], cluster_size_1_tests()}, - {cluster_size_3, [], cluster_size_3_tests()}]} + [{cluster_size_1, [], + [{v3, [], cluster_size_1_tests_v3()}, + {v4, [], cluster_size_1_tests()}, + {v5, [], cluster_size_1_tests()}]}, + {cluster_size_3, [], + [{v4, [], cluster_size_3_tests()}, + {v5, [], cluster_size_3_tests()}]} ]} ]. 
+cluster_size_1_tests_v3() -> + [global_counters, + events + ]. + cluster_size_1_tests() -> [ global_counters %% must be the 1st test case @@ -132,20 +135,19 @@ cluster_size_3_tests() -> [ pubsub, queue_down_qos1, - consuming_classic_mirrored_queue_down, consuming_classic_queue_down, - flow_classic_mirrored_queue, flow_quorum_queue, flow_stream, rabbit_mqtt_qos0_queue, + rabbit_mqtt_qos0_queue_kill_node, cli_list_queues, - maintenance, delete_create_queue, + session_reconnect, + session_takeover, + duplicate_client_id, publish_to_all_queue_types_qos0, publish_to_all_queue_types_qos1, - duplicate_client_id, - session_reconnect, - session_takeover + maintenance ]. suite() -> @@ -157,7 +159,12 @@ suite() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config). + Config1 = rabbit_ct_helpers:merge_app_env( + Config, {rabbit, [ + {quorum_tick_interval, 1000}, + {stream_tick_interval, 1000} + ]}), + rabbit_ct_helpers:run_setup_steps(Config1). end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). @@ -179,18 +186,14 @@ init_per_group(Group, Config0) -> cluster_size_3 -> 3 end, Suffix = rabbit_ct_helpers:testcase_absname(Config0, "", "-"), - Config1 = rabbit_ct_helpers:set_config( - Config0, - [{rmq_nodes_count, Nodes}, - {rmq_nodename_suffix, Suffix}]), - Config2 = rabbit_ct_helpers:merge_app_env( - Config1, - {rabbit, [{classic_queue_default_version, 2}]}), - Config = rabbit_ct_helpers:run_steps( - Config2, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - util:maybe_skip_v5(Config). + Config = rabbit_ct_helpers:set_config( + Config0, + [{rmq_nodes_count, Nodes}, + {rmq_nodename_suffix, Suffix}]), + rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). end_per_group(G, Config) when G =:= cluster_size_1; @@ -205,7 +208,7 @@ end_per_group(_, Config) -> init_per_testcase(T, Config) when T =:= management_plugin_connection; T =:= management_plugin_enable -> - ok = inets:start(), + inets:start(), init_per_testcase0(T, Config); init_per_testcase(Testcase, Config) -> init_per_testcase0(Testcase, Config). @@ -383,7 +386,6 @@ publish_to_all_queue_types(Config, QoS) -> Ch = rabbit_ct_client_helpers:open_channel(Config), CQ = <<"classic-queue">>, - CMQ = <<"classic-mirrored-queue">>, QQ = <<"quorum-queue">>, SQ = <<"stream-queue">>, Topic = <<"mytopic">>, @@ -391,9 +393,66 @@ publish_to_all_queue_types(Config, QoS) -> declare_queue(Ch, CQ, []), bind(Ch, CQ, Topic), - ok = rabbit_ct_broker_helpers:set_ha_policy(Config, 0, CMQ, <<"all">>), - declare_queue(Ch, CMQ, []), - bind(Ch, CMQ, Topic), + declare_queue(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + bind(Ch, QQ, Topic), + + declare_queue(Ch, SQ, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + bind(Ch, SQ, Topic), + + NumMsgs = 1000, + C = connect(?FUNCTION_NAME, Config, [{max_inflight, 200}, + {retry_interval, 2}]), + Self = self(), + lists:foreach( + fun(N) -> + %% Publish async all messages at once to trigger flow control + ok = emqtt:publish_async(C, Topic, integer_to_binary(N), QoS, + {fun(N0, {ok, #{reason_code_name := success}}) -> + Self ! {self(), N0}; + (N0, ok) -> + Self ! 
{self(), N0} + end, [N]}) + end, lists:seq(1, NumMsgs)), + ok = await_confirms_ordered(C, 1, NumMsgs), + eventually(?_assert( + begin + L = rabbitmqctl_list(Config, 0, ["list_queues", "messages", "--no-table-headers"]), + length(L) =:= 3 andalso + lists:all(fun([Bin]) -> + N = binary_to_integer(Bin), + case QoS of + qos0 -> + N =:= NumMsgs; + qos1 -> + %% Allow for some duplicates when client resends + %% a message that gets acked at roughly the same time. + N >= NumMsgs andalso + N < NumMsgs * 2 + end + end, L) + end), 1000, 20), + + delete_queue(Ch, [CQ, QQ, SQ]), + ok = emqtt:disconnect(C), + ?awaitMatch([], + all_connection_pids(Config), 10_000, 1000). + +publish_to_all_non_deprecated_queue_types_qos0(Config) -> + publish_to_all_non_deprecated_queue_types(Config, qos0). + +publish_to_all_non_deprecated_queue_types_qos1(Config) -> + publish_to_all_non_deprecated_queue_types(Config, qos1). + +publish_to_all_non_deprecated_queue_types(Config, QoS) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + + CQ = <<"classic-queue">>, + QQ = <<"quorum-queue">>, + SQ = <<"stream-queue">>, + Topic = <<"mytopic">>, + + declare_queue(Ch, CQ, []), + bind(Ch, CQ, Topic), declare_queue(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), bind(Ch, QQ, Topic), @@ -417,7 +476,7 @@ publish_to_all_queue_types(Config, QoS) -> eventually(?_assert( begin L = rabbitmqctl_list(Config, 0, ["list_queues", "messages", "--no-table-headers"]), - length(L) =:= 4 andalso + length(L) =:= 3 andalso lists:all(fun([Bin]) -> N = binary_to_integer(Bin), case QoS of @@ -432,18 +491,11 @@ publish_to_all_queue_types(Config, QoS) -> end, L) end), 2000, 10), - delete_queue(Ch, [CQ, CMQ, QQ, SQ]), - ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, CMQ), + delete_queue(Ch, [CQ, QQ, SQ]), ok = emqtt:disconnect(C), ?awaitMatch([], all_connection_pids(Config), 10_000, 1000). -flow_classic_mirrored_queue(Config) -> - QueueName = <<"flow">>, - ok = rabbit_ct_broker_helpers:set_ha_policy(Config, 0, QueueName, <<"all">>), - flow(Config, {rabbit, credit_flow_default_credit, {2, 1}}, <<"classic">>), - ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, QueueName). - flow_quorum_queue(Config) -> flow(Config, {rabbit, quorum_commands_soft_limit, 1}, <<"quorum">>). @@ -483,8 +535,8 @@ flow(Config, {App, Par, Val}, QueueType) ok = emqtt:disconnect(C), ?awaitMatch([], all_connection_pids(Config), 10_000, 1000), - Result = rpc_all(Config, application, set_env, [App, Par, DefaultVal]), - ok. + ?assertEqual(Result, + rpc_all(Config, application, set_env, [App, Par, DefaultVal])). 
events(Config) -> ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config, event_recorder), @@ -525,27 +577,13 @@ events(Config) -> QueueNameBin = <<"mqtt-subscription-", ClientId/binary, "qos0">>, QueueName = {resource, <<"/">>, queue, QueueNameBin}, - [E2, E3 | E4] = get_events(Server), - QueueType = case is_feature_flag_enabled(Config, rabbit_mqtt_qos0_queue) of - true -> - ?assertEqual([], E4), - rabbit_mqtt_qos0_queue; - false -> - [ConsumerCreated] = E4, - assert_event_type(consumer_created, ConsumerCreated), - assert_event_prop([{queue, QueueName}, - {ack_required, false}, - {exclusive, false}, - {arguments, []}], - ConsumerCreated), - classic - end, + [E2, E3] = get_events(Server), assert_event_type(queue_created, E2), assert_event_prop([{name, QueueName}, {durable, true}, {auto_delete, false}, {exclusive, true}, - {type, QueueType}, + {type, rabbit_mqtt_qos0_queue}, {arguments, []}], E2), assert_event_type(binding_created, E3), @@ -564,28 +602,18 @@ events(Config) -> {ok, _, _} = emqtt:unsubscribe(C, MqttTopic), - [E5] = get_events(Server), - assert_event_type(binding_deleted, E5), + [E4] = get_events(Server), + assert_event_type(binding_deleted, E4), ok = emqtt:disconnect(C), - [E6, E7 | E8] = get_events(Server), - assert_event_type(connection_closed, E6), - ?assertEqual(E1#event.props, E6#event.props, + [E5, E6] = get_events(Server), + assert_event_type(connection_closed, E5), + ?assertEqual(E1#event.props, E5#event.props, "connection_closed event props should match connection_created event props. " "See https://github.com/rabbitmq/rabbitmq-server/discussions/6331"), - - case is_feature_flag_enabled(Config, rabbit_mqtt_qos0_queue) of - true -> - assert_event_type(queue_deleted, E7), - assert_event_prop({name, QueueName}, E7); - false -> - assert_event_type(consumer_deleted, E7), - assert_event_prop({queue, QueueName}, E7), - [QueueDeleted] = E8, - assert_event_type(queue_deleted, QueueDeleted), - assert_event_prop({name, QueueName}, QueueDeleted) - end, + assert_event_type(queue_deleted, E6), + assert_event_prop({name, QueueName}, E6), ok = gen_event:delete_handler({rabbit_event, Server}, event_recorder, []). 
@@ -628,38 +656,24 @@ global_counters(Config) -> messages_unroutable_dropped_total => 1, messages_unroutable_returned_total => 1}, get_global_counters(Config, ProtoVer)), - - case is_feature_flag_enabled(Config, rabbit_mqtt_qos0_queue) of - true -> - ?assertEqual(#{messages_delivered_total => 2, - messages_acknowledged_total => 1, - messages_delivered_consume_auto_ack_total => 1, - messages_delivered_consume_manual_ack_total => 1, - messages_delivered_get_auto_ack_total => 0, - messages_delivered_get_manual_ack_total => 0, - messages_get_empty_total => 0, - messages_redelivered_total => 0}, - get_global_counters(Config, ProtoVer, 0, [{queue_type, rabbit_classic_queue}])), - ?assertEqual(#{messages_delivered_total => 1, - messages_acknowledged_total => 0, - messages_delivered_consume_auto_ack_total => 1, - messages_delivered_consume_manual_ack_total => 0, - messages_delivered_get_auto_ack_total => 0, - messages_delivered_get_manual_ack_total => 0, - messages_get_empty_total => 0, - messages_redelivered_total => 0}, - get_global_counters(Config, ProtoVer, 0, [{queue_type, rabbit_mqtt_qos0_queue}])); - false -> - ?assertEqual(#{messages_delivered_total => 3, - messages_acknowledged_total => 1, - messages_delivered_consume_auto_ack_total => 2, - messages_delivered_consume_manual_ack_total => 1, - messages_delivered_get_auto_ack_total => 0, - messages_delivered_get_manual_ack_total => 0, - messages_get_empty_total => 0, - messages_redelivered_total => 0}, - get_global_counters(Config, ProtoVer, 0, [{queue_type, rabbit_classic_queue}])) - end, + ?assertEqual(#{messages_delivered_total => 2, + messages_acknowledged_total => 1, + messages_delivered_consume_auto_ack_total => 1, + messages_delivered_consume_manual_ack_total => 1, + messages_delivered_get_auto_ack_total => 0, + messages_delivered_get_manual_ack_total => 0, + messages_get_empty_total => 0, + messages_redelivered_total => 0}, + get_global_counters(Config, ProtoVer, 0, [{queue_type, rabbit_classic_queue}])), + ?assertEqual(#{messages_delivered_total => 1, + messages_acknowledged_total => 0, + messages_delivered_consume_auto_ack_total => 1, + messages_delivered_consume_manual_ack_total => 0, + messages_delivered_get_auto_ack_total => 0, + messages_delivered_get_manual_ack_total => 0, + messages_get_empty_total => 0, + messages_redelivered_total => 0}, + get_global_counters(Config, ProtoVer, 0, [{queue_type, rabbit_mqtt_qos0_queue}])), {ok, _, _} = emqtt:unsubscribe(C, Topic1), ?assertEqual(1, maps:get(consumers, get_global_counters(Config, ProtoVer))), @@ -678,10 +692,10 @@ global_counters(Config) -> pubsub(Config) -> Topic0 = <<"t/0">>, Topic1 = <<"t/1">>, - C0 = connect(<<"c0">>, Config, 0, []), C1 = connect(<<"c1">>, Config, 1, []), - {ok, _, [1]} = emqtt:subscribe(C0, Topic0, qos1), {ok, _, [1]} = emqtt:subscribe(C1, Topic1, qos1), + C0 = connect(<<"c0">>, Config, 0, []), + {ok, _, [1]} = emqtt:subscribe(C0, Topic0, qos1), {ok, _} = emqtt:publish(C0, Topic1, <<"m1">>, qos1), receive {publish, #{client_pid := C1, @@ -742,48 +756,6 @@ queue_down_qos1(Config) -> delete_queue(Ch0, CQ), ok = emqtt:disconnect(C). -%% Even though classic mirrored queues are deprecated, we know that some users have set up -%% a policy to mirror MQTT queues. So, we need to support that use case in RabbitMQ 3.x -%% and failover consumption when the classic mirrored queue leader fails. 
-consuming_classic_mirrored_queue_down(Config) -> - [Server1, Server2, _Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - ClientId = Topic = PolicyName = atom_to_binary(?FUNCTION_NAME), - - ok = rabbit_ct_broker_helpers:set_policy( - Config, Server1, PolicyName, <<".*">>, <<"queues">>, - [{<<"ha-mode">>, <<"all">>}, - {<<"queue-master-locator">>, <<"client-local">>}]), - - %% Declare queue leader on Server1. - C1 = connect(ClientId, Config, Server1, non_clean_sess_opts()), - {ok, _, _} = emqtt:subscribe(C1, Topic, qos1), - ok = emqtt:disconnect(C1), - - %% Consume from Server2. - C2 = connect(ClientId, Config, Server2, non_clean_sess_opts()), - - %% Sanity check that consumption works. - {ok, _} = emqtt:publish(C2, Topic, <<"m1">>, qos1), - ok = expect_publishes(C2, Topic, [<<"m1">>]), - - %% Let's stop the queue leader node. - ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), - - %% Consumption should continue to work. - {ok, _} = emqtt:publish(C2, Topic, <<"m2">>, qos1), - ok = expect_publishes(C2, Topic, [<<"m2">>]), - - %% Cleanup - ok = emqtt:disconnect(C2), - ok = rabbit_ct_broker_helpers:start_node(Config, Server1), - ?assertMatch([_Q], - rpc(Config, Server1, rabbit_amqqueue, list, [])), - C3 = connect(ClientId, Config, Server2, [{clean_start, true}]), - ok = emqtt:disconnect(C3), - ?assertEqual([], - rpc(Config, Server1, rabbit_amqqueue, list, [])), - ok = rabbit_ct_broker_helpers:clear_policy(Config, Server1, PolicyName). - %% Consuming classic queue on a different node goes down. consuming_classic_queue_down(Config) -> [Server1, _Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -805,7 +777,7 @@ consuming_classic_queue_down(Config) -> process_flag(trap_exit, true), ok = rabbit_ct_broker_helpers:stop_node(Config, Server1), - %% When the dedicated MQTT connection (non-mirrored classic) queue goes down, it is reasonable + %% When the dedicated MQTT connection queue goes down, it is reasonable %% that the server closes the MQTT connection because the MQTT client cannot consume anymore. eventually(?_assertMatch(#{consumers := 0}, get_global_counters(Config, ProtoVer, Server3)), @@ -857,7 +829,9 @@ delete_create_queue(Config) -> timer:sleep(2), delete_queue(Ch, [CQ1, QQ]), %% Give queues some time to be fully deleted - timer:sleep(2000), + %% TODO: wait longer for quorum queues in mixed mode as it can take longer + %% for deletion to complete, delete timeout is 5s so we need to exceed that + timer:sleep(6000), %% We expect confirms for all messages. %% Confirm here does not mean that messages made it ever to the deleted queues. @@ -1107,13 +1081,26 @@ amqp_to_mqtt_qos0(Config) -> %% Test that the server wraps around the packet identifier. many_qos1_messages(Config) -> Topic = ClientId = atom_to_binary(?FUNCTION_NAME), - C = connect(ClientId, Config, 0, [{retry_interval, 600}]), - {ok, _, [1]} = emqtt:subscribe(C, {Topic, qos1}), NumMsgs = 16#ffff + 100, + C = connect(ClientId, Config, 0, [{retry_interval, 600}, + {max_inflight, NumMsgs div 8}]), + {ok, _, [1]} = emqtt:subscribe(C, {Topic, qos1}), Payloads = lists:map(fun integer_to_binary/1, lists:seq(1, NumMsgs)), + Self = self(), + Target = lists:last(Payloads), lists:foreach(fun(P) -> - {ok, _} = emqtt:publish(C, Topic, P, qos1) + Cb = {fun(T, _) when T == Target -> + Self ! 
proceed; + (_, _) -> + ok + end, [P]}, + ok = emqtt:publish_async(C, Topic, P, qos1, Cb) end, Payloads), + receive + proceed -> ok + after 30000 -> + ct:fail("message to proceed never received") + end, ok = expect_publishes(C, Topic, Payloads), ok = emqtt:disconnect(C). @@ -1136,24 +1123,74 @@ rabbit_mqtt_qos0_queue(Config) -> ok = emqtt:disconnect(Sub), ok = emqtt:disconnect(Pub). +rabbit_mqtt_qos0_queue_kill_node(Config) -> + Topic1 = <<"t/1">>, + Topic2 = <<"t/2">>, + Pub = connect(<<"publisher">>, Config, 2, []), + + SubscriberId = <<"subscriber">>, + Sub0 = connect(SubscriberId, Config, 0, []), + {ok, _, [0]} = emqtt:subscribe(Sub0, Topic1, qos0), + ok = await_metadata_store_consistent(Config, 2), + ok = emqtt:publish(Pub, Topic1, <<"m0">>, qos0), + ok = expect_publishes(Sub0, Topic1, [<<"m0">>]), + + process_flag(trap_exit, true), + ok = rabbit_ct_broker_helpers:kill_node(Config, 0), + ok = await_exit(Sub0), + %% Wait to run rabbit_amqqueue:on_node_down/1 on both live nodes. + timer:sleep(500), + %% Re-connect to a live node with same MQTT client ID. + Sub1 = connect(SubscriberId, Config, 1, []), + {ok, _, [0]} = emqtt:subscribe(Sub1, Topic2, qos0), + ok = emqtt:publish(Pub, Topic2, <<"m1">>, qos0), + ok = expect_publishes(Sub1, Topic2, [<<"m1">>]), + %% Since we started a new clean session, previous subscription should have been deleted. + ok = emqtt:publish(Pub, Topic1, <<"m2">>, qos0), + receive {publish, _} = Publish -> ct:fail({unexpected, Publish}) + after 300 -> ok + end, + + ok = rabbit_ct_broker_helpers:start_node(Config, 0), + ok = rabbit_ct_broker_helpers:kill_node(Config, 1), + %% This time, do not wait. + %% rabbit_amqqueue:on_node_down/1 may or may not have run. + Sub2 = connect(SubscriberId, Config, 2, []), + {ok, _, [0]} = emqtt:subscribe(Sub2, Topic2, qos0), + ok = emqtt:publish(Pub, Topic2, <<"m3">>, qos0), + ok = expect_publishes(Sub2, Topic2, [<<"m3">>]), + + ok = emqtt:disconnect(Sub2), + ok = emqtt:disconnect(Pub), + ok = rabbit_ct_broker_helpers:start_node(Config, 1), + ?assertEqual([], rpc(Config, rabbit_db_binding, get_all, [])). + %% Test that MQTT connection can be listed and closed via the rabbitmq_management plugin. management_plugin_connection(Config) -> KeepaliveSecs = 99, ClientId = atom_to_binary(?FUNCTION_NAME), Node = atom_to_binary(get_node_config(Config, 0, nodename)), - C = connect(ClientId, Config, [{keepalive, KeepaliveSecs}]), + C1 = connect(ClientId, Config, [{keepalive, KeepaliveSecs}]), eventually(?_assertEqual(1, length(http_get(Config, "/connections"))), 1000, 10), [#{client_properties := #{client_id := ClientId}, timeout := KeepaliveSecs, node := Node, name := ConnectionName}] = http_get(Config, "/connections"), - process_flag(trap_exit, true), http_delete(Config, - "/connections/" ++ binary_to_list(uri_string:quote((ConnectionName))), + "/connections/" ++ binary_to_list(uri_string:quote(ConnectionName)), ?NO_CONTENT), - await_exit(C), + await_exit(C1), + ?assertEqual([], http_get(Config, "/connections")), + eventually(?_assertEqual([], all_connection_pids(Config)), 500, 3), + + C2 = connect(ClientId, Config, [{keepalive, KeepaliveSecs}]), + eventually(?_assertEqual(1, length(http_get(Config, "/connections"))), 1000, 10), + http_delete(Config, + "/connections/username/guest", + ?NO_CONTENT), + await_exit(C2), ?assertEqual([], http_get(Config, "/connections")), eventually(?_assertEqual([], all_connection_pids(Config)), 500, 3). 
@@ -1182,13 +1219,7 @@ cli_list_queues(Config) -> "type", "name", "state", "durable", "auto_delete", "arguments", "pid", "owner_pid", "messages", "exclusive_consumer_tag" ]), - ExpectedQueueType = case is_feature_flag_enabled(Config, rabbit_mqtt_qos0_queue) of - true -> - <<"MQTT QoS 0">>; - false -> - <<"classic">> - end, - ?assertMatch([[ExpectedQueueType, <<"mqtt-subscription-cli_list_queuesqos0">>, + ?assertMatch([[<<"MQTT QoS 0">>, <<"mqtt-subscription-cli_list_queuesqos0">>, <<"running">>, <<"true">>, <<"false">>, <<"[]">>, _, _, <<"0">>, <<"">>]], Qs), @@ -1255,7 +1286,7 @@ keepalive(Config) -> await_exit(C1), assert_v5_disconnect_reason_code(Config, ?RC_KEEP_ALIVE_TIMEOUT), - true = rpc(Config, meck, validate, [Mod]), + ?assert(rpc(Config, meck, validate, [Mod])), ok = rpc(Config, meck, unload, [Mod]), C2 = connect(<<"client2">>, Config), @@ -1284,7 +1315,7 @@ keepalive_turned_off(Config) -> rabbit_ct_helpers:consistently(?_assert(erlang:is_process_alive(C))), - true = rpc(Config, meck, validate, [Mod]), + ?assert(rpc(Config, meck, validate, [Mod])), ok = rpc(Config, meck, unload, [Mod]), ok = emqtt:disconnect(C). @@ -1318,14 +1349,14 @@ session_takeover(Config) -> session_switch(Config, Disconnect) -> Topic = ClientId = atom_to_binary(?FUNCTION_NAME), %% Connect to old node in mixed version cluster. - C1 = connect(ClientId, Config, 1, [non_clean_sess_opts()]), + C1 = connect(ClientId, Config, 1, non_clean_sess_opts()), {ok, _, [1]} = emqtt:subscribe(C1, Topic, qos1), case Disconnect of true -> ok = emqtt:disconnect(C1); false -> unlink(C1) end, %% Connect to new node in mixed version cluster. - C2 = connect(ClientId, Config, 0, [non_clean_sess_opts()]), + C2 = connect(ClientId, Config, 0, non_clean_sess_opts()), case Disconnect of true -> ok; false -> assert_v5_disconnect_reason_code(Config, ?RC_SESSION_TAKEN_OVER) @@ -1375,7 +1406,7 @@ block(Config) -> block_only_publisher(Config) -> Topic = atom_to_binary(?FUNCTION_NAME), - Opts = [{ack_timeout, 2}], + Opts = [{ack_timeout, 1}], Con = connect(<<"background-connection">>, Config, Opts), Sub = connect(<<"subscriber-connection">>, Config, Opts), Pub = connect(<<"publisher-connection">>, Config, Opts), @@ -1426,14 +1457,8 @@ clean_session_disconnect_client(Config) -> {ok, _, _} = emqtt:subscribe(C, <<"topic1">>, qos1), QsQos0 = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_mqtt_qos0_queue]), QsClassic = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_classic_queue]), - case is_feature_flag_enabled(Config, rabbit_mqtt_qos0_queue) of - true -> - ?assertEqual(1, length(QsQos0)), - ?assertEqual(1, length(QsClassic)); - false -> - ?assertEqual(0, length(QsQos0)), - ?assertEqual(2, length(QsClassic)) - end, + ?assertEqual(1, length(QsQos0)), + ?assertEqual(1, length(QsClassic)), ok = emqtt:disconnect(C), %% After terminating a clean session, we expect any session state to be cleaned up on the server. 
@@ -1453,32 +1478,17 @@ clean_session_node_down(NodeDown, Config) -> {ok, _, _} = emqtt:subscribe(C, <<"topic1">>, qos1), QsQos0 = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_mqtt_qos0_queue]), QsClassic = rpc(Config, rabbit_amqqueue, list_by_type, [rabbit_classic_queue]), - case is_feature_flag_enabled(Config, rabbit_mqtt_qos0_queue) of - true -> - ?assertEqual(1, length(QsQos0)), - ?assertEqual(1, length(QsClassic)); - false -> - ?assertEqual(0, length(QsQos0)), - ?assertEqual(2, length(QsClassic)) - end, - Tables = [rabbit_durable_queue, - rabbit_queue, - rabbit_durable_route, - rabbit_semi_durable_route, - rabbit_route, - rabbit_reverse_route, - rabbit_topic_trie_node, - rabbit_topic_trie_edge, - rabbit_topic_trie_binding], - [?assertNotEqual(0, rpc(Config, ets, info, [T, size])) || T <- Tables], + ?assertEqual(1, length(QsQos0)), + ?assertEqual(1, length(QsClassic)), + ?assertEqual(2, rpc(Config, rabbit_amqqueue, count, [])), unlink(C), ok = rabbit_ct_broker_helpers:NodeDown(Config, 0), ok = rabbit_ct_broker_helpers:start_node(Config, 0), - %% After terminating a clean session by either node crash or graceful node shutdown, we - %% expect any session state to be cleaned up on the server once the server finished booting. - [?assertEqual(0, rpc(Config, ets, info, [T, size])) || T <- Tables]. + %% After terminating a clean session by a node crash, we expect any session + %% state to be cleaned up on the server once the server comes back up. + ?assertEqual(0, rpc(Config, rabbit_amqqueue, count, [])). rabbit_status_connection_count(Config) -> _Pid = rabbit_ct_client_helpers:open_connection(Config, 0), @@ -1665,31 +1675,55 @@ default_queue_type(Config) -> ok = rabbit_ct_broker_helpers:delete_vhost(Config, Vhost). incoming_message_interceptors(Config) -> - Key = {rabbit, ?FUNCTION_NAME}, + Key = ?FUNCTION_NAME, ok = rpc(Config, persistent_term, put, [Key, [{set_header_timestamp, false}]]), Ch = rabbit_ct_client_helpers:open_channel(Config), - Payload = ClientId = QName = Topic = atom_to_binary(?FUNCTION_NAME), - declare_queue(Ch, QName, []), - bind(Ch, QName, Topic), + Payload = ClientId = Topic = atom_to_binary(?FUNCTION_NAME), + CQName = <<"my classic queue">>, + Stream = <<"my stream">>, + declare_queue(Ch, CQName, [{<<"x-queue-type">>, longstr, <<"classic">>}]), + declare_queue(Ch, Stream, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + bind(Ch, CQName, Topic), + bind(Ch, Stream, Topic), C = connect(ClientId, Config), - ok = emqtt:publish(C, Topic, Payload), + NowSecs = os:system_time(second), - NowMs = os:system_time(millisecond), - eventually( - ?_assertMatch( - {#'basic.get_ok'{}, - #amqp_msg{payload = Payload, - props = #'P_basic'{ - timestamp = Secs, - headers = [{<<"timestamp_in_ms">>, long, Ms} | _XHeaders] - }}} - when Ms < NowMs + 4000 andalso - Ms > NowMs - 4000 andalso - Secs < NowSecs + 4 andalso - Secs > NowSecs - 4, - amqp_channel:call(Ch, #'basic.get'{queue = QName}))), + NowMillis = os:system_time(millisecond), + {ok, _} = emqtt:publish(C, Topic, Payload, qos1), - delete_queue(Ch, QName), + {#'basic.get_ok'{}, + #amqp_msg{payload = Payload, + props = #'P_basic'{ + timestamp = Secs, + headers = [{<<"timestamp_in_ms">>, long, Millis} | _] + }} + } = amqp_channel:call(Ch, #'basic.get'{queue = CQName}), + + ?assert(Secs < NowSecs + 4), + ?assert(Secs > NowSecs - 4), + ?assert(Millis < NowMillis + 4000), + ?assert(Millis > NowMillis - 4000), + + #'basic.qos_ok'{} = amqp_channel:call(Ch, #'basic.qos'{prefetch_count = 1}), + CTag = <<"my ctag">>, + #'basic.consume_ok'{} 
= amqp_channel:subscribe( + Ch, + #'basic.consume'{ + queue = Stream, + consumer_tag = CTag, + arguments = [{<<"x-stream-offset">>, longstr, <<"first">>}]}, + self()), + receive {#'basic.deliver'{consumer_tag = CTag}, + #amqp_msg{payload = Payload, + props = #'P_basic'{ + headers = [{<<"timestamp_in_ms">>, long, Millis} | _XHeaders] + }}} -> + ok + after 5000 -> ct:fail(missing_deliver) + end, + + delete_queue(Ch, Stream), + delete_queue(Ch, CQName), true = rpc(Config, persistent_term, erase, [Key]), ok = emqtt:disconnect(C). diff --git a/deps/rabbitmq_mqtt/test/util.erl b/deps/rabbitmq_mqtt/test/util.erl index 686be8083c40..160235e158e3 100644 --- a/deps/rabbitmq_mqtt/test/util.erl +++ b/deps/rabbitmq_mqtt/test/util.erl @@ -6,7 +6,6 @@ -include_lib("eunit/include/eunit.hrl"). -export([all_connection_pids/1, - all_connection_pids/2, publish_qos1_timeout/4, sync_publish_result/3, get_global_counters/1, @@ -25,23 +24,13 @@ assert_message_expiry_interval/2, await_exit/1, await_exit/2, - maybe_skip_v5/1, non_clean_sess_opts/0 ]). all_connection_pids(Config) -> - all_connection_pids(0, Config). - -all_connection_pids(Node, Config) -> - case rabbit_ct_broker_helpers:rpc( - Config, Node, rabbit_feature_flags, is_enabled, [delete_ra_cluster_mqtt_node]) of - true -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - Result = erpc:multicall(Nodes, rabbit_mqtt, local_connection_pids, [], 5000), - lists:append([Pids || {ok, Pids} <- Result]); - false -> - rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_mqtt_collector, list_pids, []) - end. + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Result = erpc:multicall(Nodes, rabbit_mqtt, local_connection_pids, [], 5000), + lists:append([Pids || {ok, Pids} <- Result]). publish_qos1_timeout(Client, Topic, Payload, Timeout) -> Mref = erlang:monitor(process, Client), @@ -141,17 +130,6 @@ await_exit(Pid, Reason) -> 20_000 -> ct:fail({missing_exit, Pid}) end. -maybe_skip_v5(Config) -> - case ?config(mqtt_version, Config) of - v5 -> - case rabbit_ct_broker_helpers:enable_feature_flag(Config, mqtt_v5) of - ok -> Config; - {skip, _} = Skip -> Skip - end; - _ -> - Config - end. - %% "CleanStart=0 and SessionExpiry=0xFFFFFFFF (UINT_MAX) for %% MQTT 5.0 would provide the same as CleanSession=0 for 3.1.1." %% https://issues.oasis-open.org/projects/MQTT/issues/MQTT-538 diff --git a/deps/rabbitmq_mqtt/test/util_SUITE.erl b/deps/rabbitmq_mqtt/test/util_SUITE.erl index 2f9e00e738df..3b16c8e68824 100644 --- a/deps/rabbitmq_mqtt/test/util_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/util_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(util_SUITE). -compile([export_all, nowarn_export_all]). @@ -18,8 +18,6 @@ groups() -> [ {tests, [parallel], [ coerce_vhost, - coerce_default_user, - coerce_default_pass, mqtt_amqp_topic_translation ] } @@ -36,12 +34,6 @@ end_per_suite(Config) -> coerce_vhost(_) -> ?assertEqual(<<"/">>, rabbit_mqtt_util:env(vhost)). -coerce_default_user(_) -> - ?assertEqual(<<"guest_user">>, rabbit_mqtt_util:env(default_user)). - -coerce_default_pass(_) -> - ?assertEqual(<<"guest_pass">>, rabbit_mqtt_util:env(default_pass)). 
- mqtt_amqp_topic_translation(_) -> ok = application:set_env(rabbitmq_mqtt, sparkplug, true), ok = rabbit_mqtt_util:init_sparkplug(), diff --git a/deps/rabbitmq_mqtt/test/v5_SUITE.erl b/deps/rabbitmq_mqtt/test/v5_SUITE.erl index d9c4663f837f..475b9450af9a 100644 --- a/deps/rabbitmq_mqtt/test/v5_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/v5_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% This test suite covers MQTT 5.0 features. -module(v5_SUITE). @@ -167,21 +167,13 @@ init_per_group(Group, Config0) -> [{mqtt_version, v5}, {rmq_nodes_count, Nodes}, {rmq_nodename_suffix, Suffix}]), - Config2 = rabbit_ct_helpers:merge_app_env( - Config1, - {rabbit, [{classic_queue_default_version, 2}, - {quorum_tick_interval, 200}]}), - Config = rabbit_ct_helpers:run_steps( - Config2, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - case Group of - cluster_size_1 -> - ok = rabbit_ct_broker_helpers:enable_feature_flag(Config, mqtt_v5), - Config; - cluster_size_3 -> - util:maybe_skip_v5(Config) - end. + Config = rabbit_ct_helpers:merge_app_env( + Config1, + {rabbit, [{quorum_tick_interval, 200}]}), + rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). end_per_group(G, Config) when G =:= cluster_size_1; @@ -244,7 +236,9 @@ client_set_max_packet_size_publish(Config) -> assert_nothing_received(), NumRejected = dead_letter_metric(messages_dead_lettered_rejected_total, Config) - NumRejectedBefore, ?assertEqual(1, NumRejected), - ok = emqtt:disconnect(C). + ok = emqtt:disconnect(C), + ok. + client_set_max_packet_size_connack(Config) -> {C, Connect} = start_client(?FUNCTION_NAME, Config, 0, @@ -1325,22 +1319,21 @@ will_qos2(Config) -> ?assertEqual({error, {qos_not_supported, #{}}}, Connect(C)). will_delay_less_than_session_expiry(Config) -> - will_delay(1, 5, Config). + will_delay(1, 5, ?FUNCTION_NAME, Config). will_delay_equals_session_expiry(Config) -> - will_delay(1, 1, Config). + will_delay(1, 1, ?FUNCTION_NAME, Config). will_delay_greater_than_session_expiry(Config) -> - will_delay(5, 1, Config). + will_delay(5, 1, ?FUNCTION_NAME, Config). %% "The Server delays publishing the Client’s Will Message until the Will Delay %% Interval has passed or the Session ends, whichever happens first." 
[v5 3.1.3.2.2] -will_delay(WillDelay, SessionExpiry, Config) +will_delay(WillDelay, SessionExpiry, ClientId, Config) when WillDelay =:= 1 orelse SessionExpiry =:= 1-> Topic = <<"a/b">>, Msg = <<"msg">>, - ClientId = ?FUNCTION_NAME, Opts = [{properties, #{'Session-Expiry-Interval' => SessionExpiry}}, {will_props, #{'Will-Delay-Interval' => WillDelay}}, {will_topic, Topic}, @@ -1353,8 +1346,9 @@ will_delay(WillDelay, SessionExpiry, Config) receive TooEarly -> ct:fail(TooEarly) after 800 -> ok end, - receive {publish, #{payload := Msg}} -> ok - after 2000 -> ct:fail(will_message_timeout) + receive {publish, #{payload := Msg}} -> ok; + Unexpected -> ct:fail({unexpected_message, Unexpected}) + after 3000 -> ct:fail(will_message_timeout) end, %% Cleanup C2 = connect(ClientId, Config), diff --git a/deps/rabbitmq_peer_discovery_aws/.gitignore b/deps/rabbitmq_peer_discovery_aws/.gitignore index 08630c4a36ec..581bcd1d7477 100644 --- a/deps/rabbitmq_peer_discovery_aws/.gitignore +++ b/deps/rabbitmq_peer_discovery_aws/.gitignore @@ -1,25 +1 @@ -*~ -.sw? -.*.sw? -*.beam -*.coverdata -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/git-revisions.txt -/logs/ -/plugins/ -/plugins.lock -/rebar.config -/rebar.lock -/sbin/ -/sbin.lock /test/config_schema_SUITE_data/schema/ -/test/ct.cover.spec -/xrefr - -/rabbitmq_peer_discovery_aws.d diff --git a/deps/rabbitmq_peer_discovery_aws/README.md b/deps/rabbitmq_peer_discovery_aws/README.md index 744bace156ba..80176daa3f92 100644 --- a/deps/rabbitmq_peer_discovery_aws/README.md +++ b/deps/rabbitmq_peer_discovery_aws/README.md @@ -53,4 +53,4 @@ See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview]( ## Copyright -(c) 2007-2023 VMware, Inc. or its affiliates. +(c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. diff --git a/deps/rabbitmq_peer_discovery_aws/priv/schema/rabbitmq_peer_discovery_aws.schema b/deps/rabbitmq_peer_discovery_aws/priv/schema/rabbitmq_peer_discovery_aws.schema index 5fd3df547a0a..df7e6678aeb0 100644 --- a/deps/rabbitmq_peer_discovery_aws/priv/schema/rabbitmq_peer_discovery_aws.schema +++ b/deps/rabbitmq_peer_discovery_aws/priv/schema/rabbitmq_peer_discovery_aws.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% region @@ -94,3 +94,17 @@ fun(Conf) -> [ {lists:last(K), V} || {K, V} <- Settings ] end end}. + +%% hostname_path + +{mapping, "cluster_formation.aws.hostname_path", "rabbit.cluster_formation.peer_discovery_aws.aws_hostname_path", [ + {datatype, string, + {default, ["privateDnsName"]}} +]}. +{translation, "rabbit.cluster_formation.peer_discovery_aws.aws_hostname_path", +fun(Conf) -> + case cuttlefish:conf_get("cluster_formation.aws.hostname_path", Conf, undefined) of + undefined -> cuttlefish:unset(); + Value -> rabbit_peer_discovery_util:as_list(Value) + end +end}. 
diff --git a/deps/rabbitmq_peer_discovery_aws/src/rabbit_peer_discovery_aws.erl b/deps/rabbitmq_peer_discovery_aws/src/rabbit_peer_discovery_aws.erl index 4548e238c65e..bec7390defad 100644 --- a/deps/rabbitmq_peer_discovery_aws/src/rabbit_peer_discovery_aws.erl +++ b/deps/rabbitmq_peer_discovery_aws/src/rabbit_peer_discovery_aws.erl @@ -4,13 +4,12 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbit_peer_discovery_aws). -behaviour(rabbit_peer_discovery_backend). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl"). -export([init/0, list_nodes/0, supports_registration/0, register/0, unregister/0, @@ -18,6 +17,8 @@ -type tags() :: map(). -type filters() :: [{string(), string()}]. +-type props() :: [{string(), props()}] | string(). +-type path() :: [string() | integer()]. -ifdef(TEST). -compile(export_all). @@ -55,6 +56,11 @@ env_variable = "AWS_EC2_REGION", default_value = "undefined" }, + aws_hostname_path => #peer_discovery_config_entry_meta{ + type = list, + env_variable = "AWS_HOSTNAME_PATH", + default_value = ["privateDnsName"] + }, aws_use_private_ip => #peer_discovery_config_entry_meta{ type = atom, env_variable = "AWS_USE_PRIVATE_IP", @@ -113,34 +119,28 @@ unregister() -> post_registration() -> ok. --spec lock(Node :: node()) -> {ok, {{ResourceId :: string(), LockRequesterId :: node()}, Nodes :: [node()]}} | - {error, Reason :: string()}. +-spec lock(Nodes :: [node()]) -> + {ok, {{ResourceId :: string(), LockRequesterId :: node()}, Nodes :: [node()]}} | + {error, Reason :: string()}. -lock(Node) -> - %% call list_nodes/0 externally such that meck can mock the function - case ?MODULE:list_nodes() of - {ok, {[], disc}} -> - {error, "Cannot lock since no nodes got discovered."}; - {ok, {Nodes, disc}} -> - case lists:member(Node, Nodes) of +lock(Nodes) -> + Node = node(), + case lists:member(Node, Nodes) of true -> - rabbit_log:info("Will try to lock connecting to nodes ~tp", [Nodes]), - LockId = rabbit_nodes:lock_id(Node), - Retries = rabbit_nodes:lock_retries(), - case global:set_lock(LockId, Nodes, Retries) of - true -> - {ok, {LockId, Nodes}}; - false -> - {error, io_lib:format("Acquiring lock taking too long, bailing out after ~b retries", [Retries])} - end; + rabbit_log:info("Will try to lock connecting to nodes ~tp", [Nodes]), + LockId = rabbit_nodes:lock_id(Node), + Retries = rabbit_nodes:lock_retries(), + case global:set_lock(LockId, Nodes, Retries) of + true -> + {ok, {LockId, Nodes}}; + false -> + {error, io_lib:format("Acquiring lock taking too long, bailing out after ~b retries", [Retries])} + end; false -> - %% Don't try to acquire the global lock when our own node is not discoverable by peers. - %% We shouldn't run into this branch because our node is running and should have been discovered. - {error, lists:flatten(io_lib:format("Local node ~ts is not part of discovered nodes ~tp", [Node, Nodes]))} - end; - {error, _} = Error -> - Error - end. + %% Don't try to acquire the global lock when our own node is not discoverable by peers. + %% We shouldn't run into this branch because our node is running and should have been discovered. 
+ {error, lists:flatten(io_lib:format("Local node ~ts is not part of discovered nodes ~tp", [Node, Nodes]))} + end. -spec unlock({{ResourceId :: string(), LockRequestedId :: atom()}, Nodes :: [atom()]}) -> 'ok'. unlock({LockId, Nodes}) -> @@ -312,16 +312,15 @@ get_node_list_from_tags(M) when map_size(M) =:= 0 -> get_node_list_from_tags(Tags) -> {ok, {[?UTIL_MODULE:node_name(N) || N <- get_hostname_by_tags(Tags)], disc}}. +-spec get_hostname_name_from_reservation_set(props(), [string()]) -> [string()]. get_hostname_name_from_reservation_set([], Accum) -> Accum; get_hostname_name_from_reservation_set([{"item", RI}|T], Accum) -> InstancesSet = proplists:get_value("instancesSet", RI), Items = [Item || {"item", Item} <- InstancesSet], - HostnameKey = select_hostname(), - Hostnames = [Hostname || Item <- Items, - {HKey, Hostname} <- Item, - HKey == HostnameKey, - Hostname =/= ""], - get_hostname_name_from_reservation_set(T, Accum ++ Hostnames). + HostnamePath = get_hostname_path(), + Hostnames = [get_hostname(HostnamePath, Item) || Item <- Items], + Hostnames2 = [Name || Name <- Hostnames, Name =/= ""], + get_hostname_name_from_reservation_set(T, Accum ++ Hostnames2). get_hostname_names(Path) -> case rabbitmq_aws:api_get_request("ec2", Path) of @@ -347,14 +346,32 @@ get_hostname_by_tags(Tags) -> Names end. --spec select_hostname() -> string(). -select_hostname() -> - case get_config_key(aws_use_private_ip, ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY)) of - true -> "privateIpAddress"; - false -> "privateDnsName"; - _ -> "privateDnsName" +-spec get_hostname_path() -> path(). +get_hostname_path() -> + UsePrivateIP = get_config_key(aws_use_private_ip, ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY)), + HostnamePath = get_config_key(aws_hostname_path, ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY)), + case HostnamePath of + ["privateDnsName"] when UsePrivateIP -> ["privateIpAddress"]; + P -> P end. +-spec get_hostname(path(), props()) -> string(). +get_hostname(Path, Props) -> + List = lists:foldl(fun get_value/2, Props, Path), + case io_lib:latin1_char_list(List) of + true -> List; + _ -> "" + end. + +-spec get_value(string()|integer(), props()) -> props(). +get_value(_, []) -> + []; +get_value(Key, Props) when is_integer(Key) -> + {"item", Props2} = lists:nth(Key, Props), + Props2; +get_value(Key, Props) -> + proplists:get_value(Key, Props). + -spec get_tags() -> tags(). get_tags() -> Tags = get_config_key(aws_ec2_tags, ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY)), diff --git a/deps/rabbitmq_peer_discovery_aws/src/rabbitmq_peer_discovery_aws.erl b/deps/rabbitmq_peer_discovery_aws/src/rabbitmq_peer_discovery_aws.erl index fc4f1f02671b..2c8f1b69e041 100644 --- a/deps/rabbitmq_peer_discovery_aws/src/rabbitmq_peer_discovery_aws.erl +++ b/deps/rabbitmq_peer_discovery_aws/src/rabbitmq_peer_discovery_aws.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module exists as an alias for rabbit_peer_discovery_aws. @@ -45,7 +45,7 @@ unregister() -> post_registration() -> ?DELEGATE:post_registration(). --spec lock(Node :: node()) -> {ok, {ResourceId :: string(), LockRequesterId :: node()}} | {error, Reason :: string()}. 
+-spec lock(Nodes :: [node()]) -> {ok, {ResourceId :: string(), LockRequesterId :: node()}} | {error, Reason :: string()}. lock(Node) -> ?DELEGATE:lock(Node). diff --git a/deps/rabbitmq_peer_discovery_aws/test/aws_ecs_util.erl b/deps/rabbitmq_peer_discovery_aws/test/aws_ecs_util.erl index 9f6213c1aace..da129893ec13 100644 --- a/deps/rabbitmq_peer_discovery_aws/test/aws_ecs_util.erl +++ b/deps/rabbitmq_peer_discovery_aws/test/aws_ecs_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(aws_ecs_util). diff --git a/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE.erl index 993df18072f0..bcf2b2215036 100644 --- a/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE_data/rabbitmq_peer_discovery_aws.snippets b/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE_data/rabbitmq_peer_discovery_aws.snippets index d1fa86ae0093..c9ac1c9ad316 100644 --- a/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE_data/rabbitmq_peer_discovery_aws.snippets +++ b/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE_data/rabbitmq_peer_discovery_aws.snippets @@ -91,5 +91,17 @@ ]} ]} ], [rabbitmq_peer_discovery_aws] + }, + {aws_hostname_path, + "cluster_formation.aws.hostname_path = banana, 1, apple", + [ + {rabbit, [ + {cluster_formation, [ + {peer_discovery_aws, [ + {aws_hostname_path, ["banana", 1, "apple"]} + ]} + ]} + ]} + ], [rabbitmq_peer_discovery_aws] } ]. diff --git a/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE.erl b/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE.erl index 26309e8f8d97..b19595c099e9 100644 --- a/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(integration_SUITE). 
@@ -128,6 +128,7 @@ register_tagged_task(Config) -> RabbitmqDefaultUser = ?config(rabbitmq_default_user, Config), RabbitmqDefaultPass = ?config(rabbitmq_default_pass, Config), RabbitmqConf = string:join([ + "log.console.level = debug", "default_user = " ++ RabbitmqDefaultUser, "default_pass = " ++ RabbitmqDefaultPass, "cluster_formation.peer_discovery_backend = aws", @@ -141,6 +142,7 @@ register_autoscaled_task(Config) -> RabbitmqDefaultUser = ?config(rabbitmq_default_user, Config), RabbitmqDefaultPass = ?config(rabbitmq_default_pass, Config), RabbitmqConf = string:join([ + "log.console.level = debug", "default_user = " ++ RabbitmqDefaultUser, "default_pass = " ++ RabbitmqDefaultPass, "cluster_formation.peer_discovery_backend = aws", @@ -155,16 +157,22 @@ task_json(Config, RabbitmqConf) -> RabbitmqImage = ?config(rabbitmq_image, Config), RabbitmqErlangCookie = ?config(rabbitmq_erlang_cookie, Config), ServiceName = ?config(ecs_service_name, Config), + ClusterName = ?config(ecs_cluster_name, Config), {ok, Binary} = file:read_file(filename:join(DataDir, "task_definition.json")), TaskDef = rabbit_json:decode(Binary), [RabbitContainerDef, SidecarContainerDef] = maps:get(<<"containerDefinitions">>, TaskDef), + LogConfiguration = maps:get(<<"logConfiguration">>, RabbitContainerDef), + Options = maps:get(<<"options">>, LogConfiguration), + Options1 = Options#{<<"awslogs-stream-prefix">> := list_to_binary(ClusterName)}, + LogConfiguration1 = LogConfiguration#{<<"options">> := Options1}, RabbitContainerDef1 = RabbitContainerDef#{ <<"image">> := list_to_binary(RabbitmqImage), <<"environment">> := [#{<<"name">> => <<"RABBITMQ_ERLANG_COOKIE">>, - <<"value">> => list_to_binary(RabbitmqErlangCookie)}] + <<"value">> => list_to_binary(RabbitmqErlangCookie)}], + <<"logConfiguration">> := LogConfiguration1 }, SidecarContainerDef1 = SidecarContainerDef#{<<"environment">> := [#{<<"name">> => <<"DATA">>, diff --git a/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE_data/task_definition.json b/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE_data/task_definition.json index 354f6a1e2b91..5a2bcfcb8c0e 100644 --- a/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE_data/task_definition.json +++ b/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE_data/task_definition.json @@ -19,6 +19,15 @@ "protocol": "tcp" } ], + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-create-group": "true", + "awslogs-group": "awslogs-ecs-peer-discovery-aws", + "awslogs-region": "eu-west-1", + "awslogs-stream-prefix": "PLACEHOLDER" + } + }, "essential": true, "environment": [ { diff --git a/deps/rabbitmq_peer_discovery_aws/test/unit_SUITE.erl b/deps/rabbitmq_peer_discovery_aws/test/unit_SUITE.erl index 52d31090d4a9..85ba2e4bffbc 100644 --- a/deps/rabbitmq_peer_discovery_aws/test/unit_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_aws/test/unit_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_SUITE). @@ -28,8 +28,7 @@ groups() -> {lock, [], [ lock_single_node, lock_multiple_nodes, - lock_local_node_not_discovered, - lock_list_nodes_fails + lock_local_node_not_discovered ]} ]. 
@@ -48,14 +47,23 @@ maybe_add_tag_filters(_Config) -> ?assertEqual(Expectation, Result). get_hostname_name_from_reservation_set(_Config) -> - { + ok = eunit:test({ foreach, fun on_start/0, fun on_finish/1, [{"from private DNS", fun() -> - Expectation = ["ip-10-0-16-31.eu-west-1.compute.internal", - "ip-10-0-16-29.eu-west-1.compute.internal"], + Expectation = ["ip-10-0-16-29.eu-west-1.compute.internal", + "ip-10-0-16-31.eu-west-1.compute.internal"], + ?assertEqual(Expectation, + rabbit_peer_discovery_aws:get_hostname_name_from_reservation_set( + reservation_set(), [])) + end}, + {"from arbitrary path", + fun() -> + os:putenv("AWS_HOSTNAME_PATH", "networkInterfaceSet,1,association,publicDnsName"), + Expectation = ["ec2-203-0-113-11.eu-west-1.compute.amazonaws.com", + "ec2-203-0-113-21.eu-west-1.compute.amazonaws.com"], ?assertEqual(Expectation, rabbit_peer_discovery_aws:get_hostname_name_from_reservation_set( reservation_set(), [])) @@ -63,12 +71,12 @@ get_hostname_name_from_reservation_set(_Config) -> {"from private IP", fun() -> os:putenv("AWS_USE_PRIVATE_IP", "true"), - Expectation = ["10.0.16.31", "10.0.16.29"], + Expectation = ["10.0.16.29", "10.0.16.31"], ?assertEqual(Expectation, rabbit_peer_discovery_aws:get_hostname_name_from_reservation_set( reservation_set(), [])) end}] - }. + }). registration_support(_Config) -> ?assertEqual(false, rabbit_peer_discovery_aws:supports_registration()). @@ -76,34 +84,28 @@ registration_support(_Config) -> lock_single_node(_Config) -> LocalNode = node(), Nodes = [LocalNode], - meck:expect(rabbit_peer_discovery_aws, list_nodes, 0, {ok, {Nodes, disc}}), - {ok, {LockId, Nodes}} = rabbit_peer_discovery_aws:lock(LocalNode), + {ok, {LockId, Nodes}} = rabbit_peer_discovery_aws:lock([LocalNode]), ?assertEqual(ok, rabbit_peer_discovery_aws:unlock({LockId, Nodes})). lock_multiple_nodes(_Config) -> application:set_env(rabbit, cluster_formation, [{internal_lock_retries, 2}]), LocalNode = node(), - OtherNode = other@host, - Nodes = [OtherNode, LocalNode], - meck:expect(rabbit_peer_discovery_aws, list_nodes, 0, {ok, {Nodes, disc}}), - - {ok, {{LockResourceId, OtherNode}, Nodes}} = rabbit_peer_discovery_aws:lock(OtherNode), - ?assertEqual({error, "Acquiring lock taking too long, bailing out after 2 retries"}, - rabbit_peer_discovery_aws:lock(LocalNode)), - ?assertEqual(ok, rabbitmq_peer_discovery_aws:unlock({{LockResourceId, OtherNode}, Nodes})), - - ?assertEqual({ok, {{LockResourceId, LocalNode}, Nodes}}, rabbit_peer_discovery_aws:lock(LocalNode)), - ?assertEqual(ok, rabbitmq_peer_discovery_aws:unlock({{LockResourceId, LocalNode}, Nodes})). + OtherNodeA = a@host, + OtherNodeB = b@host, + + meck:expect(rabbit_nodes, lock_id, 1, {rabbit_nodes:cookie_hash(), OtherNodeA}), + {ok, {{LockResourceId, OtherNodeA}, [LocalNode, OtherNodeA]}} = rabbit_peer_discovery_aws:lock([LocalNode, OtherNodeA]), + meck:expect(rabbit_nodes, lock_id, 1, {rabbit_nodes:cookie_hash(), OtherNodeB}), + ?assertEqual({error, "Acquiring lock taking too long, bailing out after 2 retries"}, rabbit_peer_discovery_aws:lock([LocalNode, OtherNodeB])), + ?assertEqual(ok, rabbit_peer_discovery_aws:unlock({{LockResourceId, OtherNodeA}, [LocalNode, OtherNodeA]})), + ?assertEqual({ok, {{LockResourceId, OtherNodeB}, [LocalNode, OtherNodeB]}}, rabbit_peer_discovery_aws:lock([LocalNode, OtherNodeB])), + ?assertEqual(ok, rabbit_peer_discovery_aws:unlock({{LockResourceId, OtherNodeB}, [LocalNode, OtherNodeB]})), + meck:unload(rabbit_nodes). 
lock_local_node_not_discovered(_Config) -> - meck:expect(rabbit_peer_discovery_aws, list_nodes, 0, {ok, {[n1@host, n2@host], disc}} ), - Expectation = {error, "Local node me@host is not part of discovered nodes [n1@host,n2@host]"}, - ?assertEqual(Expectation, rabbit_peer_discovery_aws:lock(me@host)). - -lock_list_nodes_fails(_Config) -> - meck:expect(rabbit_peer_discovery_aws, list_nodes, 0, {error, "failed for some reason"}), - ?assertEqual({error, "failed for some reason"}, rabbit_peer_discovery_aws:lock(me@host)). + Expectation = {error, "Local node " ++ atom_to_list(node()) ++ " is not part of discovered nodes [me@host]"}, + ?assertEqual(Expectation, rabbit_peer_discovery_aws:lock([me@host])). %%% %%% Implementation @@ -117,6 +119,7 @@ on_finish(_Config) -> reset() -> application:unset_env(rabbit, cluster_formation), + os:unsetenv("AWS_HOSTNAME_PATH"), os:unsetenv("AWS_USE_PRIVATE_IP"). reservation_set() -> @@ -136,6 +139,19 @@ reservation_set() -> {"launchTime","2017-04-07T12:05:10"}, {"subnetId","subnet-61ff660"}, {"vpcId","vpc-4fe1562b"}, + {"networkInterfaceSet", [ + {"item", + [{"association", + [{"publicIp","203.0.113.11"}, + {"publicDnsName", + "ec2-203-0-113-11.eu-west-1.compute.amazonaws.com"}, + {"ipOwnerId","amazon"}]}]}, + {"item", + [{"association", + [{"publicIp","203.0.113.12"}, + {"publicDnsName", + "ec2-203-0-113-12.eu-west-1.compute.amazonaws.com"}, + {"ipOwnerId","amazon"}]}]}]}, {"privateIpAddress","10.0.16.29"}]}]}]}, {"item", [{"reservationId","r-006cfdbf8d04c5f01"}, {"ownerId","248536293561"}, @@ -153,4 +169,17 @@ reservation_set() -> {"launchTime","2017-04-07T12:05:10"}, {"subnetId","subnet-61ff660"}, {"vpcId","vpc-4fe1562b"}, + {"networkInterfaceSet", [ + {"item", + [{"association", + [{"publicIp","203.0.113.21"}, + {"publicDnsName", + "ec2-203-0-113-21.eu-west-1.compute.amazonaws.com"}, + {"ipOwnerId","amazon"}]}]}, + {"item", + [{"association", + [{"publicIp","203.0.113.22"}, + {"publicDnsName", + "ec2-203-0-113-22.eu-west-1.compute.amazonaws.com"}, + {"ipOwnerId","amazon"}]}]}]}, {"privateIpAddress","10.0.16.31"}]}]}]}]. diff --git a/deps/rabbitmq_peer_discovery_common/.gitignore b/deps/rabbitmq_peer_discovery_common/.gitignore index 11537ad822e6..581bcd1d7477 100644 --- a/deps/rabbitmq_peer_discovery_common/.gitignore +++ b/deps/rabbitmq_peer_discovery_common/.gitignore @@ -1,25 +1 @@ -*~ -.sw? -.*.sw? -*.beam -*.coverdata -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/git-revisions.txt -/logs/ -/plugins/ -/plugins.lock -/rebar.config -/rebar.lock -/sbin/ -/sbin.lock /test/config_schema_SUITE_data/schema/ -/test/ct.cover.spec -/xrefr - -/rabbitmq_peer_discovery_common.d diff --git a/deps/rabbitmq_peer_discovery_common/README.md b/deps/rabbitmq_peer_discovery_common/README.md index 74958a27e87d..fe3450043517 100644 --- a/deps/rabbitmq_peer_discovery_common/README.md +++ b/deps/rabbitmq_peer_discovery_common/README.md @@ -24,4 +24,4 @@ See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview]( ## Copyright -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
diff --git a/deps/rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl b/deps/rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl index 67bbf51268ba..5512c936f371 100644 --- a/deps/rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl +++ b/deps/rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -include_lib("rabbit_common/include/logging.hrl"). @@ -16,7 +16,14 @@ % by `httpc` -define(DEFAULT_HTTP_TIMEOUT, 2250). --type peer_discovery_config_value() :: atom() | integer() | string() | list() | map() | any() | undefined. +-type peer_discovery_config_value() :: port() + | atom() + | integer() + | string() + | proplists:proplist() + | map() + | list() + | undefined. -record(peer_discovery_config_entry_meta, {env_variable :: string(), diff --git a/deps/rabbitmq_peer_discovery_common/priv/schema/rabbitmq_peer_discovery_common.schema b/deps/rabbitmq_peer_discovery_common/priv/schema/rabbitmq_peer_discovery_common.schema index 59936e02b0c4..05425ca6b96f 100644 --- a/deps/rabbitmq_peer_discovery_common/priv/schema/rabbitmq_peer_discovery_common.schema +++ b/deps/rabbitmq_peer_discovery_common/priv/schema/rabbitmq_peer_discovery_common.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% interval diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_cleanup.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_cleanup.erl index fcfdf7385d14..8ca2301fe8c7 100644 --- a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_cleanup.erl +++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_cleanup.erl @@ -4,7 +4,7 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbit_peer_discovery_cleanup). @@ -262,11 +262,11 @@ maybe_cleanup(State, UnreachableNodes) -> %% @doc Iterate over the list of partitioned nodes, either logging the %% node that would be removed or actually removing it. %% @spec maybe_remove_nodes(PartitionedNodes :: [node()], -%% WarnOnly :: true | false) -> ok +%% WarnOnly :: boolean()) -> ok %% @end %%-------------------------------------------------------------------- -spec maybe_remove_nodes(PartitionedNodes :: [node()], - WarnOnly :: true | false) -> ok. + WarnOnly :: boolean()) -> ok. 
maybe_remove_nodes([], _) -> ok; maybe_remove_nodes([Node | Nodes], true) -> ?LOG_WARNING( diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_app.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_app.erl index 8b5735823098..3f3086c6e1a3 100644 --- a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_app.erl +++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_peer_discovery_common_app). diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_sup.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_sup.erl index 3a3cf8c92c2c..6b9fa9211204 100644 --- a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_sup.erl +++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_peer_discovery_common_sup). diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_config.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_config.erl index 07210257e01b..cfd3896e5cbf 100644 --- a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_config.erl +++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_config.erl @@ -4,7 +4,7 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbit_peer_discovery_config). @@ -119,15 +119,15 @@ get_integer_from_env_variable_or_map(Map, OSKey, AppKey, Default) -> %% @end %%-------------------------------------------------------------------- -spec normalize(Type :: atom(), - Value :: atom() | boolean() | integer() | string() | list()) -> - atom() | integer() | string(). + Value :: term()) -> + peer_discovery_config_value(). 
%% TODO: switch these to use delegate to rabbit_data_coercion:* normalize(Type, Value) when Type =:= port -> rabbit_peer_discovery_util:parse_port(Value); normalize(Type, Value) when Type =:= atom -> rabbit_peer_discovery_util:as_atom(Value); normalize(Type, Value) when Type =:= list -> - rabbit_data_coercion:to_list(Value); + rabbit_peer_discovery_util:as_list(Value); normalize(Type, Value) when Type =:= integer -> rabbit_peer_discovery_util:as_integer(Value); normalize(Type, Value) when Type =:= string -> diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_httpc.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_httpc.erl index a80e075dc587..10d6af951ce4 100644 --- a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_httpc.erl +++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_httpc.erl @@ -4,7 +4,7 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbit_peer_discovery_httpc). diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_util.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_util.erl index 827fe160c865..4521a93c2d25 100644 --- a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_util.erl +++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_util.erl @@ -4,7 +4,7 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbit_peer_discovery_util). @@ -246,7 +246,7 @@ node_name_parse(Value) -> %% result of the IPv4 check is processed. %% @end %%-------------------------------------------------------------------- --spec node_name_parse(IsIPv4 :: true | false, Value :: string()) +-spec node_name_parse(IsIPv4 :: boolean(), Value :: string()) -> string(). node_name_parse(true, Value) -> Value; node_name_parse(false, Value) -> @@ -410,14 +410,18 @@ as_list([]) -> []; as_list(Value) when is_atom(Value) ; is_integer(Value) ; is_binary(Value) -> [Value]; as_list(Value) when is_list(Value) -> + Parse = fun(T) -> + S = string:strip(T), + case string:to_float(S) of + {Float, []} -> Float; + _ -> case string:to_integer(S) of + {Integer, []} -> Integer; + _ -> S + end + end + end, case io_lib:printable_list(Value) or io_lib:printable_unicode_list(Value) of - true -> [case string:to_float(S) of - {Float, []} -> Float; - _ -> case string:to_integer(S) of - {Integer, []} -> Integer; - _ -> string:strip(S) - end - end || S <- string:tokens(Value, ",")]; + true -> [Parse(T) || T <- string:tokens(Value, ",")]; false -> Value end; as_list(Value) -> diff --git a/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE.erl index 797d6df475dd..05d3745605c9 100644 --- a/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE_data/rabbitmq_peer_discovery_common.snippets b/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE_data/rabbitmq_peer_discovery_common.snippets index 9d285b62ebe8..77d1ea18fced 100644 --- a/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE_data/rabbitmq_peer_discovery_common.snippets +++ b/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE_data/rabbitmq_peer_discovery_common.snippets @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% [ diff --git a/deps/rabbitmq_peer_discovery_consul/.gitignore b/deps/rabbitmq_peer_discovery_consul/.gitignore index 5f4c75965872..581bcd1d7477 100644 --- a/deps/rabbitmq_peer_discovery_consul/.gitignore +++ b/deps/rabbitmq_peer_discovery_consul/.gitignore @@ -1,25 +1 @@ -*~ -.sw? -.*.sw? -*.beam -*.coverdata -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/git-revisions.txt -/logs/ -/plugins/ -/plugins.lock -/rebar.config -/rebar.lock -/sbin/ -/sbin.lock /test/config_schema_SUITE_data/schema/ -/test/ct.cover.spec -/xrefr - -/rabbitmq_peer_discovery_consul.d diff --git a/deps/rabbitmq_peer_discovery_consul/BUILD.bazel b/deps/rabbitmq_peer_discovery_consul/BUILD.bazel index 5a047f7d67a6..11e70ad3e34f 100644 --- a/deps/rabbitmq_peer_discovery_consul/BUILD.bazel +++ b/deps/rabbitmq_peer_discovery_consul/BUILD.bazel @@ -79,6 +79,11 @@ rabbitmq_integration_suite( name = "config_schema_SUITE", ) +rabbitmq_integration_suite( + name = "system_SUITE", + size = "large", +) + rabbitmq_suite( name = "rabbitmq_peer_discovery_consul_SUITE", size = "medium", diff --git a/deps/rabbitmq_peer_discovery_consul/README.md b/deps/rabbitmq_peer_discovery_consul/README.md index 448e15c00732..134d7dc8da20 100644 --- a/deps/rabbitmq_peer_discovery_consul/README.md +++ b/deps/rabbitmq_peer_discovery_consul/README.md @@ -53,4 +53,4 @@ See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview]( ## Copyright -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
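With the changes above, normalize(list, Value) in rabbit_peer_discovery_config now delegates to rabbit_peer_discovery_util:as_list/1, which tokenizes a printable comma-separated string and coerces each token to a float, an integer, or a stripped string; because each token is now stripped before the numeric parse, a padded token such as " 1.5" comes back as a float instead of falling through as a string. A minimal shell sketch, values purely illustrative:

    1> rabbit_peer_discovery_util:as_list("5672, 1.5, foo").
    [5672,1.5,"foo"]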
diff --git a/deps/rabbitmq_peer_discovery_consul/app.bzl b/deps/rabbitmq_peer_discovery_consul/app.bzl index 7a7de92884a0..44ae06ccf848 100644 --- a/deps/rabbitmq_peer_discovery_consul/app.bzl +++ b/deps/rabbitmq_peer_discovery_consul/app.bzl @@ -99,6 +99,14 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbitmq_peer_discovery_consul", erlc_opts = "//:test_erlc_opts", ) + erlang_bytecode( + name = "system_SUITE_beam_files", + testonly = True, + srcs = ["test/system_SUITE.erl"], + outs = ["test/system_SUITE.beam"], + app_name = "rabbitmq_peer_discovery_consul", + erlc_opts = "//:test_erlc_opts", + ) erlang_bytecode( name = "rabbitmq_peer_discovery_consul_SUITE_beam_files", testonly = True, diff --git a/deps/rabbitmq_peer_discovery_consul/include/rabbit_peer_discovery_consul.hrl b/deps/rabbitmq_peer_discovery_consul/include/rabbit_peer_discovery_consul.hrl index 1c2497496855..9a351bb7c0f7 100644 --- a/deps/rabbitmq_peer_discovery_consul/include/rabbit_peer_discovery_consul.hrl +++ b/deps/rabbitmq_peer_discovery_consul/include/rabbit_peer_discovery_consul.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(BACKEND_CONFIG_KEY, peer_discovery_consul). @@ -69,6 +69,11 @@ env_variable = "CONSUL_SVC_ADDR_NODENAME", default_value = false }, + consul_svc_id => #peer_discovery_config_entry_meta{ + type = string, + env_variable = "CONSUL_SVC_ID", + default_value = "undefined" + }, consul_svc_port => #peer_discovery_config_entry_meta{ type = integer, env_variable = "CONSUL_SVC_PORT", diff --git a/deps/rabbitmq_peer_discovery_consul/priv/schema/rabbitmq_peer_discovery_consul.schema b/deps/rabbitmq_peer_discovery_consul/priv/schema/rabbitmq_peer_discovery_consul.schema index 17742c9a0965..4e5188bb06a4 100644 --- a/deps/rabbitmq_peer_discovery_consul/priv/schema/rabbitmq_peer_discovery_consul.schema +++ b/deps/rabbitmq_peer_discovery_consul/priv/schema/rabbitmq_peer_discovery_consul.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% host @@ -140,7 +140,7 @@ fun(Conf) -> end}. -%% use (Erlang) node name when compuing service address? +%% use (Erlang) node name when computing service address? {mapping, "cluster_formation.consul.svc_addr_use_nodename", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_addr_nodename", [ {datatype, {enum, [true, false]}} @@ -155,6 +155,21 @@ fun(Conf) -> end}. +%% service ID + +{mapping, "cluster_formation.consul.svc_id", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_id", [ + {datatype, string} +]}. + +{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc_id", +fun(Conf) -> + case cuttlefish:conf_get("cluster_formation.consul.svc_id", Conf, undefined) of + undefined -> cuttlefish:unset(); + Value -> Value + end +end}. 
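The new consul_svc_id entry (environment variable CONSUL_SVC_ID, default "undefined") lets a node pin its Consul service ID explicitly; when it is left at the default, service_id/0 (further down in this diff) keeps deriving the ID from the service name and address as before, and the new system suite uses the key to give every test node a distinct ID. A hypothetical classic-config snippet, with an invented node name:

    %% advanced.config (values are illustrative)
    [{rabbit,
      [{cluster_formation,
        [{peer_discovery_backend, rabbit_peer_discovery_consul},
         {peer_discovery_consul,
          [{consul_svc_id, "rabbit@node-1"}]}]}]}].

The rabbitmq.conf equivalent, per the mapping above, is cluster_formation.consul.svc_id = rabbit@node-1.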
+ + %% (optionally) append a suffix to node names retrieved from Consul {mapping, "cluster_formation.consul.domain_suffix", "rabbit.cluster_formation.peer_discovery_consul.consul_domain", [ diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbit_peer_discovery_consul.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbit_peer_discovery_consul.erl index c83a7a94c4c3..d64af2fc935e 100644 --- a/deps/rabbitmq_peer_discovery_consul/src/rabbit_peer_discovery_consul.erl +++ b/deps/rabbitmq_peer_discovery_consul/src/rabbit_peer_discovery_consul.erl @@ -4,14 +4,13 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbit_peer_discovery_consul). -behaviour(rabbit_peer_discovery_backend). -include_lib("kernel/include/logger.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl"). -include("rabbit_peer_discovery_consul.hrl"). @@ -34,6 +33,9 @@ -define(CONSUL_CHECK_NOTES, "RabbitMQ Consul-based peer discovery plugin TTL check"). +-define(META_KEY_CLUSTER_NAME, <<"cluster">>). +-define(META_KEY_ERLANG_NODENAME, <<"erlang-node-name">>). + %% %% API %% @@ -65,21 +67,31 @@ list_nodes() -> {ok, {[], disc}} end, Fun2 = fun(Proplist) -> - M = maps:from_list(Proplist), - Path = rabbit_peer_discovery_httpc:build_path([v1, health, service, get_config_key(consul_svc, M)]), - HttpOpts = http_options(M), - case rabbit_peer_discovery_httpc:get(get_config_key(consul_scheme, M), - get_config_key(consul_host, M), - get_integer_config_key(consul_port, M), - Path, - list_nodes_query_args(), - maybe_add_acl([]), - HttpOpts) of - {ok, Nodes} -> - IncludeWithWarnings = get_config_key(consul_include_nodes_with_warnings, M), - Result = extract_nodes( - filter_nodes(Nodes, IncludeWithWarnings)), - {ok, {Result, disc}}; + case internal_lock() of + {ok, Priv} -> + try + M = maps:from_list(Proplist), + Path = rabbit_peer_discovery_httpc:build_path([v1, health, service, service_name()]), + HttpOpts = http_options(M), + case rabbit_peer_discovery_httpc:get(get_config_key(consul_scheme, M), + get_config_key(consul_host, M), + get_integer_config_key(consul_port, M), + Path, + list_nodes_query_args(), + maybe_add_acl([]), + HttpOpts) of + {ok, Nodes} -> + IncludeWithWarnings = get_config_key(consul_include_nodes_with_warnings, M), + Result = extract_node( + sort_nodes( + filter_nodes(Nodes, IncludeWithWarnings))), + {ok, {Result, disc}}; + {error, _} = Error -> + Error + end + after + internal_unlock(Priv) + end; {error, _} = Error -> Error end @@ -160,13 +172,26 @@ post_registration() -> send_health_check_pass(), ok. --spec lock(Node :: atom()) -> {ok, Data :: term()} | {error, Reason :: string()}. +-spec lock(Nodes :: [node()]) -> + not_supported. + +lock(_Nodes) -> + not_supported. + +-spec unlock(Data :: term()) -> ok. + +unlock(_Data) -> + ok. + +-spec internal_lock() -> + {ok, Data :: term()} | {error, Reason :: string()}. 
-lock(Node) -> +internal_lock() -> M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY), ?LOG_DEBUG( "Effective Consul peer discovery configuration: ~tp", [M], #{domain => ?RMQLOG_DOMAIN_PEER_DIS}), + Node = node(), case create_session(Node, get_config_key(consul_svc_ttl, M)) of {ok, SessionId} -> TRef = start_session_ttl_updater(SessionId), @@ -174,13 +199,13 @@ lock(Node) -> EndTime = Now + get_config_key(lock_wait_time, M), lock(TRef, SessionId, Now, EndTime); {error, Reason} -> - {error, lists:flatten(io_lib:format("Error while creating a session, reason: ~ts", + {error, lists:flatten(io_lib:format("Error while creating a session, reason: ~0p", [Reason]))} end. --spec unlock({SessionId :: string(), TRef :: timer:tref()}) -> ok. +-spec internal_unlock({SessionId :: string(), TRef :: timer:tref()}) -> ok. -unlock({SessionId, TRef}) -> +internal_unlock({SessionId, TRef}) -> _ = timer:cancel(TRef), ?LOG_DEBUG( "Stopped session renewal", @@ -251,24 +276,41 @@ filter_nodes(Nodes, Warn) -> false -> Nodes end. --spec extract_nodes(ConsulResult :: [#{binary() => term()}]) -> list(). -extract_nodes(Data) -> extract_nodes(Data, []). - --spec extract_nodes(ConsulResult :: [#{binary() => term()}], Nodes :: list()) - -> list(). -extract_nodes([], Nodes) -> Nodes; -extract_nodes([H | T], Nodes) -> - Service = maps:get(<<"Service">>, H), - Value = maps:get(<<"Address">>, Service), - NodeName = case ?UTIL_MODULE:as_string(Value) of - "" -> - NodeData = maps:get(<<"Node">>, H), - Node = maps:get(<<"Node">>, NodeData), - maybe_add_domain(?UTIL_MODULE:node_name(Node)); - Address -> - ?UTIL_MODULE:node_name(Address) +-spec sort_nodes(ConsulResult :: [#{binary() => term()}]) -> [#{binary() => term()}]. +sort_nodes(Nodes) -> + lists:sort( + fun(NodeA, NodeB) -> + IndexA = maps:get( + <<"CreateIndex">>, + maps:get(<<"Service">>, NodeA, #{}), undefined), + IndexB = maps:get( + <<"CreateIndex">>, + maps:get(<<"Service">>, NodeB, #{}), undefined), + %% `undefined' is always greater than an integer, so we are fine here. + IndexA =< IndexB + end, Nodes). + +-spec extract_node(ConsulResult :: [#{binary() => term()}]) -> list(). +extract_node([]) -> + []; +extract_node([H | _]) -> + Service = maps:get(<<"Service">>, H), + Meta = maps:get(<<"Meta">>, Service, #{}), + NodeName = case Meta of + #{?META_KEY_ERLANG_NODENAME := Node} -> + binary_to_atom(Node); + _ -> + Value = maps:get(<<"Address">>, Service), + case ?UTIL_MODULE:as_string(Value) of + "" -> + NodeData = maps:get(<<"Node">>, H), + Node = maps:get(<<"Node">>, NodeData), + maybe_add_domain(?UTIL_MODULE:node_name(Node)); + Address -> + ?UTIL_MODULE:node_name(Address) + end end, - extract_nodes(T, lists:merge(Nodes, [NodeName])). + NodeName. -spec maybe_add_acl(QArgs :: list()) -> list(). maybe_add_acl(List) -> @@ -333,8 +375,7 @@ registration_body_add_id() -> -spec registration_body_add_name(Payload :: list()) -> list(). registration_body_add_name(Payload) -> - M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY), - Name = rabbit_data_coercion:to_atom(get_config_key(consul_svc, M)), + Name = rabbit_data_coercion:to_atom(service_name()), lists:append(Payload, [{'Name', Name}]). -spec registration_body_maybe_add_address(Payload :: list()) @@ -416,24 +457,19 @@ registration_body_maybe_add_tag(Payload, Cluster, Tags) -> -spec registration_body_maybe_add_meta(Payload :: list()) -> list(). 
registration_body_maybe_add_meta(Payload) -> - M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY), - ClusterName = get_config_key(cluster_name, M), - Meta = ?UTIL_MODULE:as_list(get_config_key(consul_svc_meta, M)), - registration_body_maybe_add_meta(Payload, ClusterName, Meta). - --spec registration_body_maybe_add_meta(Payload :: list(), - ClusterName :: string(), - Meta :: list()) -> list(). -registration_body_maybe_add_meta(Payload, "default", []) -> - Payload; -registration_body_maybe_add_meta(Payload, "default", Meta) -> - lists:append(Payload, [{<<"meta">>, Meta}]); -registration_body_maybe_add_meta(Payload, _ClusterName, []) -> - Payload; -registration_body_maybe_add_meta(Payload, ClusterName, Meta) -> - Merged = maps:to_list(maps:merge(#{<<"cluster">> => rabbit_data_coercion:to_binary(ClusterName)}, maps:from_list(Meta))), - lists:append(Payload, [{<<"meta">>, Merged}]). - + M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY), + Meta0 = ?UTIL_MODULE:as_list(get_config_key(consul_svc_meta, M)), + Meta1 = maps:from_list(Meta0), + Meta2 = Meta1#{?META_KEY_ERLANG_NODENAME => atom_to_binary(node())}, + Meta3 = case get_config_key(cluster_name, M) of + "default" -> + Meta2; + ClusterName -> + ClusterName1 = rabbit_data_coercion:to_binary(ClusterName), + Meta2#{?META_KEY_CLUSTER_NAME => ClusterName1} + end, + Merged = maps:to_list(Meta3), + lists:append(Payload, [{'Meta', Merged}]). -spec validate_addr_parameters(false | true, false | true) -> false | true. validate_addr_parameters(false, true) -> @@ -482,14 +518,24 @@ service_address(_, false, NIC, _) -> -spec service_id() -> string(). service_id() -> M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY), - service_id(get_config_key(consul_svc, M), - service_address()). + case get_config_key(consul_svc_id, M) of + "undefined" -> + service_id(get_config_key(consul_svc, M), + service_address()); + ID -> + ID + end. -spec service_id(Name :: string(), Address :: string()) -> string(). service_id(Service, "undefined") -> Service; service_id(Service, Address) -> string:join([Service, Address], ":"). +-spec service_name() -> string(). +service_name() -> + M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY), + get_config_key(consul_svc, M). + -spec service_ttl(TTL :: integer()) -> string(). service_ttl(Value) -> rabbit_peer_discovery_util:as_string(Value) ++ "s". @@ -605,7 +651,7 @@ wait_for_list_nodes(N) -> %% Create a session to be acquired for a common key %% @end %%-------------------------------------------------------------------- --spec create_session(atom(), pos_integer()) -> {ok, string()} | {error, Reason::string()}. +-spec create_session(atom(), pos_integer()) -> {ok, string()} | {error, Reason::any()}. create_session(Name, TTL) -> case consul_session_create([], maybe_add_acl([]), [{'Name', Name}, @@ -690,7 +736,7 @@ start_session_ttl_updater(SessionId) -> %% Tries to acquire lock. If the lock is held by someone else, waits until it %% is released, or too much time has passed %% @end --spec lock(timer:tref(), string(), pos_integer(), pos_integer()) -> {ok, string()} | {error, string()}. +-spec lock(timer:tref(), string(), pos_integer(), pos_integer()) -> {ok, {SessionId :: string(), TRef :: timer:tref()}} | {error, string()}. 
lock(TRef, _, Now, EndTime) when EndTime < Now -> _ = timer:cancel(TRef), {error, "Acquiring lock taking too long, bailing out"}; diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul.erl index 169716b94378..ab889b8edb44 100644 --- a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul.erl +++ b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_peer_discovery_consul). @@ -42,7 +42,7 @@ unregister() -> post_registration() -> ?DELEGATE:post_registration(). --spec lock(Node :: atom()) -> {ok, Data :: term()} | {error, Reason :: string()}. +-spec lock(Nodes :: [node()]) -> not_supported. lock(Node) -> ?DELEGATE:lock(Node). diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_app.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_app.erl index 65ba8ac57c56..d37b88576a34 100644 --- a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_app.erl +++ b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_peer_discovery_consul_app). diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_health_check_helper.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_health_check_helper.erl index 6257c9472485..289757842484 100644 --- a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_health_check_helper.erl +++ b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_health_check_helper.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This gen_server starts a periodic timer on behalf of diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_sup.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_sup.erl index 9074ba4f56f4..3e0a4dcc0bfe 100644 --- a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_sup.erl +++ b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_peer_discovery_consul_sup). diff --git a/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE.erl index 2bc2c5173b16..c441f401dc6a 100644 --- a/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE_data/rabbitmq_peer_discovery_consul.snippets b/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE_data/rabbitmq_peer_discovery_consul.snippets index de857934566b..d31c208a6d50 100644 --- a/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE_data/rabbitmq_peer_discovery_consul.snippets +++ b/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE_data/rabbitmq_peer_discovery_consul.snippets @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% [ {consul_discovery_mechanism_as_module, diff --git a/deps/rabbitmq_peer_discovery_consul/test/rabbitmq_peer_discovery_consul_SUITE.erl b/deps/rabbitmq_peer_discovery_consul/test/rabbitmq_peer_discovery_consul_SUITE.erl index bfbad5801185..a0734b265875 100644 --- a/deps/rabbitmq_peer_discovery_consul/test/rabbitmq_peer_discovery_consul_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_consul/test/rabbitmq_peer_discovery_consul_SUITE.erl @@ -4,13 +4,12 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbitmq_peer_discovery_consul_SUITE). -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -155,7 +154,9 @@ registration_body_simple_case(_Config) -> {'Check', [{'Notes', ?CONSUL_CHECK_NOTES}, {'TTL', '30s'}, - {'Status', 'passing'}]}], + {'Status', 'passing'}]}, + {'Meta', + [{<<"erlang-node-name">>, atom_to_binary(node())}]}], ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()). registration_body_svc_addr_set_via_env_var(_Config) -> @@ -167,7 +168,9 @@ registration_body_svc_addr_set_via_env_var(_Config) -> {'Check', [{'Notes', ?CONSUL_CHECK_NOTES}, {'TTL', '30s'}, - {'Status', 'passing'}]}], + {'Status', 'passing'}]}, + {'Meta', + [{<<"erlang-node-name">>, atom_to_binary(node())}]}], ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()). 
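%% Note on rabbit_peer_discovery_consul:sort_nodes/1 above: Erlang's term order
%% places numbers before atoms, so a service whose "CreateIndex" is missing
%% (mapped to `undefined') always sorts after services that have one, and
%% extract_node/1 therefore picks the oldest registered service. Illustrative
%% shell check:
%%
%%   1> lists:sort([undefined, 42, 7]).
%%   [7,42,undefined]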
registration_body_svc_ttl_set_via_env_var(_Config) -> @@ -178,7 +181,9 @@ registration_body_svc_ttl_set_via_env_var(_Config) -> {'Check', [{'Notes', ?CONSUL_CHECK_NOTES}, {'TTL', '257s'}, - {'Status', 'passing'}]}], + {'Status', 'passing'}]}, + {'Meta', + [{<<"erlang-node-name">>, atom_to_binary(node())}]}], ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()). registration_body_svc_tags_set_via_env_var(_Config) -> @@ -190,7 +195,9 @@ registration_body_svc_tags_set_via_env_var(_Config) -> [{'Notes', ?CONSUL_CHECK_NOTES}, {'TTL', '30s'}, {'Status', 'passing'}]}, - {'Tags',['urlprefix-:5672 proto=tcp',mq,'mq server']}], + {'Tags',['urlprefix-:5672 proto=tcp',mq,'mq server']}, + {'Meta', + [{<<"erlang-node-name">>, atom_to_binary(node())}]}], ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()). registration_body_deregister_after_set_via_env_var(_Config) -> @@ -202,7 +209,9 @@ registration_body_deregister_after_set_via_env_var(_Config) -> [{'Notes', ?CONSUL_CHECK_NOTES}, {'TTL','30s'}, {'Status', 'passing'}, - {'DeregisterCriticalServiceAfter','520s'}]}], + {'DeregisterCriticalServiceAfter','520s'}]}, + {'Meta', + [{<<"erlang-node-name">>, atom_to_binary(node())}]}], ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()). registration_body_ttl_and_deregister_after_both_unset_via_env_var(_Config) -> @@ -210,7 +219,9 @@ registration_body_ttl_and_deregister_after_both_unset_via_env_var(_Config) -> os:putenv("CONSUL_SVC_TTL", ""), Expectation = [{'ID', 'rabbitmq'}, {'Name', rabbitmq}, - {'Port', 5672}], + {'Port', 5672}, + {'Meta', + [{<<"erlang-node-name">>, atom_to_binary(node())}]}], ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()). %% "deregister after" won't be enabled if TTL isn't set @@ -219,7 +230,9 @@ registration_body_ttl_unset_and_deregister_after_set_via_env_var(_Config) -> os:putenv("CONSUL_SVC_TTL", ""), Expectation = [{'ID', 'rabbitmq'}, {'Name', rabbitmq}, - {'Port', 5672}], + {'Port', 5672}, + {'Meta', + [{<<"erlang-node-name">>, atom_to_binary(node())}]}], ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()). 
service_id_all_defaults_test(_Config) -> @@ -331,13 +344,22 @@ list_nodes_return_value_basic_test(_Config) -> {consul_port, 8500} ]} ]), + meck:expect(rabbit_peer_discovery_httpc, put, + fun + (_, _, _, "v1/session/create", _, _, _, _) -> + Body = "{\"ID\":\"some-session-id\"}", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)); + (_, _, _, "v1/kv/rabbitmq/default/startup_lock", _, _, _, _) -> + Body = "true", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) + end), meck:expect(rabbit_peer_discovery_httpc, get, fun(_, _, _, _, _, _, _) -> Body = "[{\"Node\": {\"Node\": \"rabbit2.internal.domain\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1.internal.domain\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]", rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) end), meck:expect(rabbit_nodes, name_type, fun() -> shortnames end), - ?assertEqual({ok, {['rabbit@rabbit1', 'rabbit@rabbit2'], disc}}, + ?assertEqual({ok, {'rabbit@rabbit2', disc}}, rabbit_peer_discovery_consul:list_nodes()), ?assert(meck:validate(rabbit_peer_discovery_httpc)). 
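%% Note: the list_nodes tests in this suite now also stub
%% rabbit_peer_discovery_httpc:put/8, because list_nodes/0 first takes the
%% internal startup lock (a Consul session created via "v1/session/create"
%% plus a KV acquire on "v1/kv/rabbitmq/default/startup_lock", see
%% internal_lock/0 above) before issuing the health-service GET; they also
%% expect a single node rather than a list, since extract_node/1 now returns
%% only the oldest registered member.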
@@ -350,13 +372,22 @@ list_nodes_return_value_basic_long_node_name_test(_Config) -> {consul_port, 8500} ]} ]), + meck:expect(rabbit_peer_discovery_httpc, put, + fun + (_, _, _, "v1/session/create", _, _, _, _) -> + Body = "{\"ID\":\"some-session-id\"}", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)); + (_, _, _, "v1/kv/rabbitmq/default/startup_lock", _, _, _, _) -> + Body = "true", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) + end), meck:expect(rabbit_peer_discovery_httpc, get, fun(_, _, _, _, _, _, _) -> Body = "[{\"Node\": {\"Node\": \"rabbit2\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit2\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]", rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) end), meck:expect(rabbit_nodes, name_type, fun() -> longnames end), - ?assertEqual({ok, {['rabbit@rabbit1.node.consul', 'rabbit@rabbit2.node.consul'], disc}}, + ?assertEqual({ok, {'rabbit@rabbit2.node.consul', disc}}, rabbit_peer_discovery_consul:list_nodes()), ?assert(meck:validate(rabbit_peer_discovery_httpc)). 
@@ -370,6 +401,15 @@ list_nodes_return_value_long_node_name_and_custom_domain_test(_Config) -> {consul_domain, "internal"} ]} ]), + meck:expect(rabbit_peer_discovery_httpc, put, + fun + (_, _, _, "v1/session/create", _, _, _, _) -> + Body = "{\"ID\":\"some-session-id\"}", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)); + (_, _, _, "v1/kv/rabbitmq/default/startup_lock", _, _, _, _) -> + Body = "true", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) + end), meck:expect(rabbit_peer_discovery_httpc, get, fun(_, _, _, _, _, _, _) -> Body = "[{\"Node\": {\"Node\": \"rabbit2\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit2\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]", @@ -378,7 +418,7 @@ list_nodes_return_value_long_node_name_and_custom_domain_test(_Config) -> meck:expect(rabbit_nodes, name_type, fun() -> longnames end), - ?assertEqual({ok, {['rabbit@rabbit1.node.internal', 'rabbit@rabbit2.node.internal'], disc}}, + ?assertEqual({ok, {'rabbit@rabbit2.node.internal', disc}}, rabbit_peer_discovery_consul:list_nodes()), ?assert(meck:validate(rabbit_peer_discovery_httpc)). 
@@ -391,12 +431,21 @@ list_nodes_return_value_srv_address_test(_Config) -> {consul_port, 8500} ]} ]), + meck:expect(rabbit_peer_discovery_httpc, put, + fun + (_, _, _, "v1/session/create", _, _, _, _) -> + Body = "{\"ID\":\"some-session-id\"}", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)); + (_, _, _, "v1/kv/rabbitmq/default/startup_lock", _, _, _, _) -> + Body = "true", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) + end), meck:expect(rabbit_peer_discovery_httpc, get, fun(_, _, _, _, _, _, _) -> Body = "[{\"Node\": {\"Node\": \"rabbit2.internal.domain\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq:172.172.16.4.50\", \"Output\": \"\"}, {\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"172.16.4.51\", \"Port\": 5672, \"ID\": \"rabbitmq:172.16.4.51\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1.internal.domain\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"172.172.16.51\", \"Port\": 5672, \"ID\": \"rabbitmq:172.172.16.51\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]", rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) end), - ?assertEqual({ok, {['rabbit@172.16.4.51', 'rabbit@172.172.16.51'], disc}}, + ?assertEqual({ok, {'rabbit@172.16.4.51', disc}}, rabbit_peer_discovery_consul:list_nodes()), ?assert(meck:validate(rabbit_peer_discovery_httpc)). @@ -409,6 +458,15 @@ list_nodes_return_value_nodes_in_warning_state_included_test(_Config) -> {consul_port, 8500} ]} ]), + meck:expect(rabbit_peer_discovery_httpc, put, + fun + (_, _, _, "v1/session/create", _, _, _, _) -> + Body = "{\"ID\":\"some-session-id\"}", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)); + (_, _, _, "v1/kv/rabbitmq/default/startup_lock", _, _, _, _) -> + Body = "true", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) + end), meck:expect(rabbit_peer_discovery_httpc, get, fun(_, _, _, _, [], _, _) -> rabbit_json:try_decode(list_of_nodes_with_warnings()); @@ -416,7 +474,7 @@ list_nodes_return_value_nodes_in_warning_state_included_test(_Config) -> rabbit_json:try_decode(list_of_nodes_without_warnings()) end), os:putenv("CONSUL_INCLUDE_NODES_WITH_WARNINGS", "true"), - ?assertEqual({ok, {['rabbit@172.16.4.51'], disc}}, + ?assertEqual({ok, {'rabbit@172.16.4.51', disc}}, rabbit_peer_discovery_consul:list_nodes()), ?assert(meck:validate(rabbit_peer_discovery_httpc)). 
@@ -429,6 +487,15 @@ list_nodes_return_value_nodes_in_warning_state_filtered_out_test(_Config) -> {consul_port, 8500} ]} ]), + meck:expect(rabbit_peer_discovery_httpc, put, + fun + (_, _, _, "v1/session/create", _, _, _, _) -> + Body = "{\"ID\":\"some-session-id\"}", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)); + (_, _, _, "v1/kv/rabbitmq/default/startup_lock", _, _, _, _) -> + Body = "true", + rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) + end), meck:expect(rabbit_peer_discovery_httpc, get, fun(_, _, _, _, [], _, _) -> rabbit_json:try_decode(list_of_nodes_with_warnings()); @@ -436,7 +503,7 @@ list_nodes_return_value_nodes_in_warning_state_filtered_out_test(_Config) -> rabbit_json:try_decode(list_of_nodes_without_warnings()) end), os:putenv("CONSUL_INCLUDE_NODES_WITH_WARNINGS", "false"), - ?assertEqual({ok, {['rabbit@172.16.4.51', 'rabbit@172.172.16.51'], disc}}, + ?assertEqual({ok, {'rabbit@172.16.4.51', disc}}, rabbit_peer_discovery_consul:list_nodes()), ?assert(meck:validate(rabbit_peer_discovery_httpc)). @@ -450,7 +517,8 @@ registration_with_all_default_values_test(_Config) -> ?assertEqual("v1/agent/service/register", Path), ?assertEqual([], Args), ?assertEqual([], Headers), - Expect = <<"{\"ID\":\"rabbitmq\",\"Name\":\"rabbitmq\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>, + Node = atom_to_binary(node()), + Expect = <<"{\"ID\":\"rabbitmq\",\"Name\":\"rabbitmq\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Meta\":{\"erlang-node-name\":\"", Node/binary, "\"}}">>, ?assertEqual(Expect, Body), {ok, []} end), @@ -467,7 +535,8 @@ registration_with_cluster_name_test(_Config) -> ?assertEqual("v1/agent/service/register", Path), ?assertEqual([], Args), ?assertEqual([], Headers), - Expect = <<"{\"ID\":\"rabbitmq\",\"Name\":\"rabbitmq\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Tags\":[\"test-rabbit\"]}">>, + Node = atom_to_binary(node()), + Expect = <<"{\"ID\":\"rabbitmq\",\"Name\":\"rabbitmq\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Tags\":[\"test-rabbit\"],\"Meta\":{\"cluster\":\"test-rabbit\",\"erlang-node-name\":\"", Node/binary, "\"}}">>, ?assertEqual(Expect, Body), {ok, []} end), @@ -484,7 +553,8 @@ registration_without_acl_token_test(_Config) -> ?assertEqual("v1/agent/service/register", Path), ?assertEqual([], Args), ?assertEqual([], Headers), - Expect = <<"{\"ID\":\"rabbit:10.0.0.1\",\"Name\":\"rabbit\",\"Address\":\"10.0.0.1\",\"Port\":5671,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>, + Node = atom_to_binary(node()), + Expect = <<"{\"ID\":\"rabbit:10.0.0.1\",\"Name\":\"rabbit\",\"Address\":\"10.0.0.1\",\"Port\":5671,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Meta\":{\"erlang-node-name\":\"", Node/binary, "\"}}">>, ?assertEqual(Expect, Body), {ok, []} end), @@ -506,7 +576,8 @@ registration_with_acl_token_test(_Config) -> ?assertEqual("v1/agent/service/register", Path), ?assertEqual([], Args), ?assertEqual([], Headers), - Expect = 
<<"{\"ID\":\"rabbit:10.0.0.1\",\"Name\":\"rabbit\",\"Address\":\"10.0.0.1\",\"Port\":5671,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>, + Node = atom_to_binary(node()), + Expect = <<"{\"ID\":\"rabbit:10.0.0.1\",\"Name\":\"rabbit\",\"Address\":\"10.0.0.1\",\"Port\":5671,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Meta\":{\"erlang-node-name\":\"", Node/binary, "\"}}">>, ?assertEqual(Expect, Body), {ok, []} end), @@ -531,7 +602,8 @@ registration_with_auto_addr_test(_Config) -> ?assertEqual("v1/agent/service/register", Path), ?assertEqual([], Args), ?assertEqual([{"X-Consul-Token", "token-value"}], Headers), - Expect = <<"{\"ID\":\"rabbitmq:bob\",\"Name\":\"rabbitmq\",\"Address\":\"bob\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>, + Node = atom_to_binary(node()), + Expect = <<"{\"ID\":\"rabbitmq:bob\",\"Name\":\"rabbitmq\",\"Address\":\"bob\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Meta\":{\"erlang-node-name\":\"", Node/binary, "\"}}">>, ?assertEqual(Expect, Body), {ok, []} end), @@ -555,7 +627,8 @@ registration_with_auto_addr_from_nodename_test(_Config) -> ?assertEqual("v1/agent/service/register", Path), ?assertEqual([], Args), ?assertEqual([{"X-Consul-Token", "token-value"}], Headers), - Expect = <<"{\"ID\":\"rabbitmq:bob.consul.node\",\"Name\":\"rabbitmq\",\"Address\":\"bob.consul.node\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>, + Node = atom_to_binary(node()), + Expect = <<"{\"ID\":\"rabbitmq:bob.consul.node\",\"Name\":\"rabbitmq\",\"Address\":\"bob.consul.node\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Meta\":{\"erlang-node-name\":\"", Node/binary, "\"}}">>, ?assertEqual(Expect, Body), {ok, []} end), @@ -583,7 +656,8 @@ registration_with_auto_addr_nic_test(_Config) -> ?assertEqual("v1/agent/service/register", Path), ?assertEqual([], Args), ?assertEqual([{"X-Consul-Token", "token-value"}], Headers), - Expect = <<"{\"ID\":\"rabbitmq:172.16.4.50\",\"Name\":\"rabbitmq\",\"Address\":\"172.16.4.50\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>, + Node = atom_to_binary(node()), + Expect = <<"{\"ID\":\"rabbitmq:172.16.4.50\",\"Name\":\"rabbitmq\",\"Address\":\"172.16.4.50\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Meta\":{\"erlang-node-name\":\"", Node/binary, "\"}}">>, ?assertEqual(Expect, Body), {ok, []} end), @@ -611,7 +685,8 @@ registration_with_auto_addr_nic_issue_12_test(_Config) -> ?assertEqual("v1/agent/service/register", Path), ?assertEqual([], Args), ?assertEqual([{"X-Consul-Token", "token-value"}], Headers), - Expect = <<"{\"ID\":\"rabbitmq:172.16.4.50\",\"Name\":\"rabbitmq\",\"Address\":\"172.16.4.50\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>, + Node = atom_to_binary(node()), + Expect = 
<<"{\"ID\":\"rabbitmq:172.16.4.50\",\"Name\":\"rabbitmq\",\"Address\":\"172.16.4.50\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Meta\":{\"erlang-node-name\":\"", Node/binary, "\"}}">>, ?assertEqual(Expect, Body), {ok, []} end), diff --git a/deps/rabbitmq_peer_discovery_consul/test/system_SUITE.erl b/deps/rabbitmq_peer_discovery_consul/test/system_SUITE.erl new file mode 100644 index 000000000000..99080862509c --- /dev/null +++ b/deps/rabbitmq_peer_discovery_consul/test/system_SUITE.erl @@ -0,0 +1,317 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. All rights reserved. +%% + +-module(system_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-export([all/0, + groups/0, + init_per_suite/1, + end_per_suite/1, + init_per_group/2, + end_per_group/2, + init_per_testcase/2, + end_per_testcase/2, + + start_one_member_at_a_time/1, + start_members_concurrently/1]). + +-define(CONSUL_GIT_REPO, "https://github.com/hashicorp/consul.git"). +-define(CONSUL_GIT_REF, "v1.18.1"). + +all() -> + [ + {group, clustering} + ]. + +groups() -> + [ + {clustering, [], [start_one_member_at_a_time, + start_members_concurrently]} + ]. + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps( + Config, + [fun clone_consul/1, + fun compile_consul/1, + fun config_consul/1, + fun start_consul/1]). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config, [fun stop_consul/1]). + +init_per_group(clustering, Config) -> + rabbit_ct_helpers:set_config( + Config, + [{rmq_nodes_count, 3}, + {rmq_nodes_clustered, false}]); +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, Config) -> + Config. + +init_per_testcase(Testcase, Config) + when Testcase =:= start_one_member_at_a_time orelse + Testcase =:= start_members_concurrently -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + Config1 = rabbit_ct_helpers:set_config( + Config, + [{rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, + TestNumber * ClusterSize}} + ]), + Config2 = rabbit_ct_helpers:merge_app_env( + Config1, {rabbit, [{log, [{file, [{level, debug}]}]}]}), + Config3 = rabbit_ct_helpers:run_steps( + Config2, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + case Config3 of + _ when is_list(Config3) -> + try + _ = rabbit_ct_broker_helpers:rpc_all( + Config3, rabbit_peer_discovery_backend, api_version, []), + Config3 + catch + error:{exception, undef, + [{rabbit_peer_discovery_backend, api_version, _, _} + | _]} -> + Config4 = rabbit_ct_helpers:run_steps( + Config3, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config4, Testcase), + {skip, + "Some nodes use the old discover->register order; " + "the testcase would likely fail"} + end; + {skip, _} -> + Config3 + end; +init_per_testcase(_Testcase, Config) -> + Config. 
+ +end_per_testcase(Testcase, Config) + when Testcase =:= start_one_member_at_a_time orelse + Testcase =:= start_members_concurrently -> + Config1 = rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase); +end_per_testcase(_Testcase, Config) -> + Config. + +clone_consul(Config) -> + DataDir = ?config(data_dir, Config), + ConsulSrcdir = filename:join(DataDir, "consul"), + Cmd = case filelib:is_dir(ConsulSrcdir) of + true -> + ct:pal( + "Checking out Consul Git reference, ref = ~s", + [?CONSUL_GIT_REF]), + ["git", "-C", ConsulSrcdir, + "checkout", ?CONSUL_GIT_REF]; + false -> + ct:pal( + "Cloning Consul Git repository, ref = ~s", + [?CONSUL_GIT_REF]), + ["git", "clone", + "--branch", ?CONSUL_GIT_REF, + ?CONSUL_GIT_REPO, ConsulSrcdir] + end, + case rabbit_ct_helpers:exec(Cmd) of + {ok, _} -> + rabbit_ct_helpers:set_config( + Config, {consul_srcdir, ConsulSrcdir}); + {error, _} -> + {skip, "Failed to clone Consul"} + end. + +compile_consul(Config) -> + ConsulSrcdir = ?config(consul_srcdir, Config), + ct:pal("Compiling Consul in ~ts", [ConsulSrcdir]), + Cmd = ["go", "install"], + GOPATH = filename:join(ConsulSrcdir, "go"), + GOFLAGS = "-modcacherw", + Options = [{cd, ConsulSrcdir}, + {env, [{"BINDIR", false}, + {"GOPATH", GOPATH}, + {"GOFLAGS", GOFLAGS}]}], + case rabbit_ct_helpers:exec(Cmd, Options) of + {ok, _} -> + ConsulExe = case os:type() of + {win32, _} -> "consul.exe"; + _ -> "consul" + end, + ConsulBin = filename:join([GOPATH, "bin", ConsulExe]), + ?assert(filelib:is_regular(ConsulBin)), + rabbit_ct_helpers:set_config(Config, {consul_bin, ConsulBin}); + {error, _} -> + {skip, "Failed to compile Consul"} + end. + +config_consul(Config) -> + DataDir = ?config(data_dir, Config), + PrivDir = ?config(priv_dir, Config), + ConsulConfDir = filename:join(PrivDir, "conf.consul"), + ConsulDataDir = filename:join(PrivDir, "data.consul"), + ConsulHost = "localhost", + ConsulTcpPort = 8500, + + ConsulConfTpl = filename:join(DataDir, "consul.hcl"), + {ok, ConsulConf0} = file:read_file(ConsulConfTpl), + ConsulConf1 = io_lib:format( + "~ts~n" + "node_name = \"~ts\"~n" + "domain = \"~ts\"~n" + "data_dir = \"~ts\"~n" + "ports {~n" + " http = ~b~n" + " grpc = -1~n" + "}~n", + [ConsulConf0, ConsulHost, ConsulHost, ConsulDataDir, + ConsulTcpPort]), + ConsulConfFile = filename:join(ConsulConfDir, "consul.hcl"), + ok = file:make_dir(ConsulConfDir), + ok = file:write_file(ConsulConfFile, ConsulConf1), + rabbit_ct_helpers:set_config( + Config, + [{consul_conf_dir, ConsulConfDir}, + {consul_host, ConsulHost}, + {consul_tcp_port, ConsulTcpPort}]). + +start_consul(Config) -> + ct:pal("Starting Consul daemon"), + ConsulBin = ?config(consul_bin, Config), + ConsulConfDir = ?config(consul_conf_dir, Config), + Cmd = [ConsulBin, "agent", "-config-dir", ConsulConfDir], + ConsulPid = spawn(fun() -> rabbit_ct_helpers:exec(Cmd) end), + rabbit_ct_helpers:set_config(Config, {consul_pid, ConsulPid}). + +stop_consul(Config) -> + case rabbit_ct_helpers:get_config(Config, consul_pid) of + ConsulPid when is_pid(ConsulPid) -> + ct:pal( + "Stopping Consul daemon by killing control process ~p", + [ConsulPid]), + erlang:exit(ConsulPid, kill), + _ = case os:type() of + {win32, _} -> ok; + _ -> rabbit_ct_helpers:exec(["pkill", "consul"]) + end; + undefined -> + ok + end, + Config. 
+ +%% +%% Test cases +%% + +start_one_member_at_a_time(Config) -> + Config1 = configure_peer_discovery(Config), + + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config1, nodename), + lists:foreach( + fun(Node) -> + ?assertEqual( + ok, + rabbit_ct_broker_helpers:start_node(Config1, Node)) + end, Nodes), + + assert_full_cluster(Config1). + +start_members_concurrently(Config) -> + Config1 = configure_peer_discovery(Config), + + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config1, nodename), + Parent = self(), + Pids = lists:map( + fun(Node) -> + spawn_link( + fun() -> + receive + go -> + ?assertEqual( + ok, + rabbit_ct_broker_helpers:start_node( + Config1, Node)), + Parent ! started + end + end) + end, Nodes), + + lists:foreach(fun(Pid) -> Pid ! go end, Pids), + lists:foreach(fun(_Pid) -> receive started -> ok end end, Pids), + + assert_full_cluster(Config1). + +configure_peer_discovery(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + lists:foreach( + fun(Node) -> + Members = lists:sort( + rabbit_ct_broker_helpers:cluster_members_online( + Config, Node)), + ?assertEqual([Node], Members) + end, Nodes), + + lists:foreach( + fun(Node) -> + ?assertEqual( + ok, + rabbit_ct_broker_helpers:stop_broker(Config, Node)), + ?assertEqual( + ok, + rabbit_ct_broker_helpers:reset_node(Config, Node)), + ?assertEqual( + ok, + rabbit_ct_broker_helpers:stop_node(Config, Node)) + end, Nodes), + + ConsulHost = ?config(consul_host, Config), + ConsulTcpPort = ?config(consul_tcp_port, Config), + lists:foreach( + fun(Node) -> + Config1 = rabbit_ct_helpers:merge_app_env( + Config, + {rabbit, + [{cluster_formation, + [{peer_discovery_backend, + rabbit_peer_discovery_consul}, + {peer_discovery_consul, + [{consul_svc_id, atom_to_list(Node)}, + {consul_host, ConsulHost}, + {consul_port, ConsulTcpPort}, + {consul_scheme, "http"}]}]}]}), + ?assertEqual( + ok, + rabbit_ct_broker_helpers:rewrite_node_config_file( + Config1, Node)) + end, Nodes), + + Config. + +assert_full_cluster(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ExpectedMembers = lists:sort(Nodes), + lists:foreach( + fun(Node) -> + Members = lists:sort( + rabbit_ct_broker_helpers:cluster_members_online( + Config, Node)), + ?assertEqual(ExpectedMembers, Members) + end, Nodes). 
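The new Consul system suite clones and compiles Consul from source, so a local run needs git and a Go toolchain on the PATH in addition to the usual broker test setup. With the BUILD.bazel target added earlier in this diff, it can be invoked roughly as follows (target label assumed from the package layout):

    bazel test //deps/rabbitmq_peer_discovery_consul:system_SUITE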
diff --git a/deps/rabbitmq_peer_discovery_consul/test/system_SUITE_data/.gitignore b/deps/rabbitmq_peer_discovery_consul/test/system_SUITE_data/.gitignore new file mode 100644 index 000000000000..75ab1a997deb --- /dev/null +++ b/deps/rabbitmq_peer_discovery_consul/test/system_SUITE_data/.gitignore @@ -0,0 +1 @@ +/consul/ diff --git a/deps/rabbitmq_peer_discovery_consul/test/system_SUITE_data/consul.hcl b/deps/rabbitmq_peer_discovery_consul/test/system_SUITE_data/consul.hcl new file mode 100644 index 000000000000..4a850633c427 --- /dev/null +++ b/deps/rabbitmq_peer_discovery_consul/test/system_SUITE_data/consul.hcl @@ -0,0 +1,31 @@ +log_level = "DEBUG" +enable_syslog = false +enable_script_checks = false +enable_local_script_checks = true + +datacenter = "dc1" +server = true +bootstrap_expect = 1 + +## ACL configuration +acl = { + enabled = true + default_policy = "allow" + enable_token_persistence = true + enable_token_replication = true + down_policy = "extend-cache" +} + +# Enable service mesh +connect { + enabled = true +} + +# Addresses and ports +client_addr = "0.0.0.0" +bind_addr = "{{ GetInterfaceIP \"eth0\" }}" + +addresses { + grpc = "0.0.0.0" + http = "0.0.0.0" +} diff --git a/deps/rabbitmq_peer_discovery_etcd/.gitignore b/deps/rabbitmq_peer_discovery_etcd/.gitignore index 018293a553b3..8221ed949476 100644 --- a/deps/rabbitmq_peer_discovery_etcd/.gitignore +++ b/deps/rabbitmq_peer_discovery_etcd/.gitignore @@ -1,26 +1,2 @@ -*~ -.sw? -.*.sw? -*.beam -*.coverdata tags -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/git-revisions.txt -/logs/ -/plugins/ -/plugins.lock -/rebar.config -/rebar.lock -/sbin/ -/sbin.lock /test/config_schema_SUITE_data/schema/ -/test/ct.cover.spec -/xrefr - -/rabbitmq_peer_discovery_etcd.d diff --git a/deps/rabbitmq_peer_discovery_etcd/BUILD.bazel b/deps/rabbitmq_peer_discovery_etcd/BUILD.bazel index d36795154044..eea80562a689 100644 --- a/deps/rabbitmq_peer_discovery_etcd/BUILD.bazel +++ b/deps/rabbitmq_peer_discovery_etcd/BUILD.bazel @@ -99,7 +99,7 @@ rabbitmq_integration_suite( rabbitmq_integration_suite( name = "system_SUITE", - size = "medium", + size = "large", ) rabbitmq_suite( diff --git a/deps/rabbitmq_peer_discovery_etcd/README.md b/deps/rabbitmq_peer_discovery_etcd/README.md index 4183557bddb8..6b743db724b9 100644 --- a/deps/rabbitmq_peer_discovery_etcd/README.md +++ b/deps/rabbitmq_peer_discovery_etcd/README.md @@ -54,4 +54,4 @@ See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview]( ## Copyright -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. diff --git a/deps/rabbitmq_peer_discovery_etcd/include/rabbit_peer_discovery_etcd.hrl b/deps/rabbitmq_peer_discovery_etcd/include/rabbit_peer_discovery_etcd.hrl index 20455ef5ab0e..2f4d48901d58 100644 --- a/deps/rabbitmq_peer_discovery_etcd/include/rabbit_peer_discovery_etcd.hrl +++ b/deps/rabbitmq_peer_discovery_etcd/include/rabbit_peer_discovery_etcd.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(BACKEND_CONFIG_KEY, peer_discovery_etcd). 
diff --git a/deps/rabbitmq_peer_discovery_etcd/priv/schema/rabbitmq_peer_discovery_etcd.schema b/deps/rabbitmq_peer_discovery_etcd/priv/schema/rabbitmq_peer_discovery_etcd.schema index 3a2c3f93992f..20bfc5fd7a12 100644 --- a/deps/rabbitmq_peer_discovery_etcd/priv/schema/rabbitmq_peer_discovery_etcd.schema +++ b/deps/rabbitmq_peer_discovery_etcd/priv/schema/rabbitmq_peer_discovery_etcd.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Endpoints diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbit_peer_discovery_etcd.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbit_peer_discovery_etcd.erl index 3d5fbf80bfbf..6ce7fdbf5e02 100644 --- a/deps/rabbitmq_peer_discovery_etcd/src/rabbit_peer_discovery_etcd.erl +++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbit_peer_discovery_etcd.erl @@ -4,13 +4,12 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbit_peer_discovery_etcd). -behaviour(rabbit_peer_discovery_backend). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl"). -include("rabbit_peer_discovery_etcd.hrl"). @@ -60,9 +59,13 @@ list_nodes() -> {ok, {[], disc}} end, Fun2 = fun(_Proplist) -> - %% error logging will be done by the client - Nodes = rabbitmq_peer_discovery_etcd_v3_client:list_nodes(), - {ok, {Nodes, disc}} + %% nodes are returned sorted with the create_revision as + %% the first element in the tuple. + %% The node with the lowest create_revision is thus selected + %% based on the assumption that the create_revision remains + %% consistent throughout the lifetime of the etcd key. + [{_, Node} | _] = rabbitmq_peer_discovery_etcd_v3_client:list_nodes(), + {ok, {Node, disc}} end, rabbit_peer_discovery_util:maybe_backend_configured(?BACKEND_CONFIG_KEY, Fun0, Fun1, Fun2). @@ -93,9 +96,11 @@ unregister() -> post_registration() -> ok. --spec lock(Node :: atom()) -> {ok, Data :: term()} | {error, Reason :: string()}. +-spec lock(Nodes :: [node()]) -> + {ok, Data :: term()} | {error, Reason :: string()}. -lock(Node) when is_atom(Node) -> +lock(Nodes) when is_list(Nodes) -> + Node = node(), case rabbitmq_peer_discovery_etcd_v3_client:lock(Node) of {ok, GeneratedKey} -> {ok, GeneratedKey}; {error, _} = Error -> Error diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd.erl index 603213cf57f3..ae788a6f4a31 100644 --- a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd.erl +++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd.erl @@ -4,7 +4,7 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbitmq_peer_discovery_etcd). @@ -44,7 +44,7 @@ unregister() -> post_registration() -> ?DELEGATE:post_registration(). --spec lock(Node :: atom()) -> {'ok', term()} | {'error', string()}. +-spec lock(Nodes :: [node()]) -> {'ok', term()} | {'error', string()}. lock(Node) -> ?DELEGATE:lock(Node). diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_app.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_app.erl index dbc48cf129d0..9267c6f0d942 100644 --- a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_app.erl +++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_peer_discovery_etcd_app). diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_sup.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_sup.erl index 190d75ed29fe..d80f8db6c102 100644 --- a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_sup.erl +++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_peer_discovery_etcd_sup). diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl index b18d556425e8..9a0fc9da426f 100644 --- a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl +++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_peer_discovery_etcd_v3_client). 
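The new list_nodes/0 comment above relies on Erlang's default term ordering: the etcd client returns {CreateRevision, Node} pairs, and sorting them puts the lowest create_revision — the longest-registered key — first, so the head of the sorted list is the seed node. A self-contained sketch of that selection rule, with illustrative module and function names that are not part of the plugin:

%% Given {CreateRevision, Node} pairs as returned by the etcd v3 client,
%% pick the node whose registration key was created first. Tuples sort by
%% their first element, so the head of the sorted list is the oldest key.
-module(etcd_seed_selection_sketch).
-export([seed_node/1]).

-spec seed_node([{non_neg_integer(), node()}]) -> node().
seed_node(RevisionedNodes) when RevisionedNodes =/= [] ->
    [{_OldestRevision, Node} | _] = lists:sort(RevisionedNodes),
    Node.

%% Example: seed_node([{42, rabbit@host2}, {17, rabbit@host1}]) returns rabbit@host1.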
@@ -230,16 +230,13 @@ connected({call, From}, list_keys, Data = #statem_data{connection_name = Conn}) rabbit_log:debug("etcd peer discovery: will use prefix ~ts to query for node keys", [Prefix]), {ok, #{kvs := Result}} = eetcd_kv:get(C2), rabbit_log:debug("etcd peer discovery returned keys: ~tp", [Result]), - Values = [maps:get(value, M) || M <- Result], - rabbit_log:debug("etcd peer discovery: listing node keys returned ~b results", [length(Values)]), - ParsedNodes = lists:map(fun extract_node/1, Values), - {Successes, Failures} = lists:partition(fun filter_node/1, ParsedNodes), - JoinedString = lists:join(",", [rabbit_data_coercion:to_list(Node) || Node <- lists:usort(Successes)]), - rabbit_log:error("etcd peer discovery: successfully extracted nodes: ~ts", [JoinedString]), - lists:foreach(fun(Val) -> - rabbit_log:error("etcd peer discovery: failed to extract node name from etcd value ~tp", [Val]) - end, Failures), - gen_statem:reply(From, lists:usort(Successes)), + Values = [{maps:get(create_revision, M), maps:get(value, M)} || M <- Result], + rabbit_log:debug("etcd peer discovery: listing node keys returned ~b results", + [length(Values)]), + ParsedNodes = lists:filtermap(fun extract_node/1, Values), + rabbit_log:info("etcd peer discovery: successfully extracted nodes: ~0tp", + [ParsedNodes]), + gen_statem:reply(From, lists:usort(ParsedNodes)), keep_state_and_data. @@ -298,15 +295,18 @@ registration_value(#statem_data{node_key_lease_id = LeaseID, node_key_ttl_in_sec <<"ttl">> => TTL })). --spec extract_node(binary()) -> atom() | {error, any()}. - -extract_node(Payload) -> +extract_node({CreatedRev, Payload}) -> case rabbit_json:try_decode(Payload) of - {error, Error} -> {error, Error}; + {error, _Error} -> + rabbit_log:error("etcd peer discovery: failed to extract node name from etcd value ~tp", + [Payload]), + false; {ok, Map} -> case maps:get(<<"node">>, Map, undefined) of - undefined -> undefined; - Node -> rabbit_data_coercion:to_atom(Node) + undefined -> + false; + Node -> + {true, {CreatedRev, rabbit_data_coercion:to_atom(Node)}} end end. diff --git a/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE.erl index 53fb7ef8e4af..577e5acbbcbb 100644 --- a/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE_data/rabbitmq_peer_discovery_etcd.snippets b/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE_data/rabbitmq_peer_discovery_etcd.snippets index f6e035fa79cf..e6af60ff14e0 100644 --- a/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE_data/rabbitmq_peer_discovery_etcd.snippets +++ b/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE_data/rabbitmq_peer_discovery_etcd.snippets @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% [ diff --git a/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE.erl b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE.erl index b8184e66f7dc..5224ff4ebd96 100644 --- a/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE.erl @@ -4,24 +4,38 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(system_SUITE). --compile(export_all). - -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include("rabbit_peer_discovery_etcd.hrl"). --import(rabbit_data_coercion, [to_binary/1, to_integer/1]). +-define(ETCD_GIT_REPO, "https://github.com/etcd-io/etcd.git"). +-define(ETCD_GIT_REF, "v3.5.13"). + +-export([all/0, + groups/0, + init_per_suite/1, + end_per_suite/1, + init_per_group/2, + end_per_group/2, + init_per_testcase/2, + end_per_testcase/2, + etcd_connection_sanity_check_test/1, + init_opens_a_connection_test/1, + registration_with_locking_test/1, + start_one_member_at_a_time/1, + start_members_concurrently/1]). all() -> [ - {group, v3_client} + {group, v3_client}, + {group, clustering} ]. groups() -> @@ -30,47 +44,184 @@ groups() -> etcd_connection_sanity_check_test, init_opens_a_connection_test, registration_with_locking_test - ]} + ]}, + {clustering, [], [start_one_member_at_a_time, + start_members_concurrently]} ]. init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config, [fun init_etcd/1]). + rabbit_ct_helpers:run_setup_steps( + Config, + [fun clone_etcd/1, + fun compile_etcd/1, + fun start_etcd/1]). end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config, [fun stop_etcd/1]). -init_etcd(Config) -> +init_per_group(clustering, Config) -> + rabbit_ct_helpers:set_config( + Config, + [{rmq_nodes_count, 3}, + {rmq_nodes_clustered, false}]); +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, Config) -> + Config. 
+ +init_per_testcase(Testcase, Config) + when Testcase =:= start_one_member_at_a_time orelse + Testcase =:= start_members_concurrently -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + ClusterSize = ?config(rmq_nodes_count, Config), + Config1 = rabbit_ct_helpers:set_config( + Config, + [{rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, + TestNumber * ClusterSize}} + ]), + Config2 = rabbit_ct_helpers:merge_app_env( + Config1, {rabbit, [{log, [{file, [{level, debug}]}]}]}), + Config3 = rabbit_ct_helpers:run_steps( + Config2, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + case Config3 of + _ when is_list(Config3) -> + try + _ = rabbit_ct_broker_helpers:rpc_all( + Config3, rabbit_peer_discovery_backend, api_version, []), + Config3 + catch + error:{exception, undef, + [{rabbit_peer_discovery_backend, api_version, _, _} + | _]} -> + Config4 = rabbit_ct_helpers:run_steps( + Config3, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config4, Testcase), + {skip, + "Some nodes use the old discover->register order; " + "the testcase would likely fail"} + end; + {skip, _} -> + Config3 + end; +init_per_testcase(_Testcase, Config) -> + Config. + +end_per_testcase(Testcase, Config) + when Testcase =:= start_one_member_at_a_time orelse + Testcase =:= start_members_concurrently -> + Config1 = rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase); +end_per_testcase(_Testcase, Config) -> + Config. + +clone_etcd(Config) -> DataDir = ?config(data_dir, Config), - PrivDir = ?config(priv_dir, Config), - TcpPort = 25389, - EtcdDir = filename:join([PrivDir, "etcd"]), - InitEtcd = filename:join([DataDir, "init-etcd.sh"]), - Cmd = [InitEtcd, EtcdDir, {"~b", [TcpPort]}], + EtcdSrcdir = filename:join(DataDir, "etcd"), + Cmd = case filelib:is_dir(EtcdSrcdir) of + true -> + ct:pal( + "Checking out etcd Git reference, ref = ~s", + [?ETCD_GIT_REF]), + ["git", "-C", EtcdSrcdir, + "checkout", ?ETCD_GIT_REF]; + false -> + ct:pal( + "Cloning etcd Git repository, ref = ~s", + [?ETCD_GIT_REF]), + ["git", "clone", + "--branch", ?ETCD_GIT_REF, + ?ETCD_GIT_REPO, EtcdSrcdir] + end, case rabbit_ct_helpers:exec(Cmd) of - {ok, Stdout} -> - case re:run(Stdout, "^ETCD_PID=([0-9]+)$", [{capture, all_but_first, list}, multiline]) of - {match, [EtcdPid]} -> - ct:pal(?LOW_IMPORTANCE, "etcd PID: ~ts~netcd is listening on: ~b", [EtcdPid, TcpPort]), - rabbit_ct_helpers:set_config(Config, [{etcd_pid, EtcdPid}, - {etcd_endpoints, [rabbit_misc:format("localhost:~tp", [TcpPort])]}, - {etcd_port, TcpPort}]); - nomatch -> - ct:pal(?HI_IMPORTANCE, "init-etcd.sh output did not match what's expected: ~tp", [Stdout]) - end; - {error, Code, Reason} -> - ct:pal(?HI_IMPORTANCE, "init-etcd.sh exited with code ~tp: ~tp", [Code, Reason]), - _ = rabbit_ct_helpers:exec(["pkill", "-INT", "etcd"]), - {skip, "Failed to initialize etcd"} + {ok, _} -> + rabbit_ct_helpers:set_config(Config, {etcd_srcdir, EtcdSrcdir}); + {error, _} -> + {skip, "Failed to clone etcd"} end. 
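clone_etcd/1 above reuses an existing checkout when the suite's data directory already contains one, and only clones the pinned ref otherwise. A stripped-down sketch of the same clone-or-reuse logic; os:cmd/1 stands in for rabbit_ct_helpers:exec/1 here, the module and function names are hypothetical, and shell argument quoting is ignored for brevity:

%% Hypothetical helper mirroring clone_etcd/1: reuse an existing checkout
%% when the directory is already there, otherwise clone the pinned ref.
-module(git_checkout_sketch).
-export([clone_or_checkout/3]).

clone_or_checkout(RepoUrl, Ref, Dir) ->
    Cmd = case filelib:is_dir(Dir) of
              true ->
                  %% existing clone: just move it to the wanted ref
                  ["git", "-C", Dir, "checkout", Ref];
              false ->
                  %% fresh clone of only the wanted branch/tag
                  ["git", "clone", "--branch", Ref, RepoUrl, Dir]
          end,
    %% join the argument list into a shell command; a real CT suite would
    %% go through rabbit_ct_helpers:exec/1 instead of os:cmd/1
    os:cmd(lists:flatten(lists:join(" ", Cmd))).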
+compile_etcd(Config) -> + EtcdSrcdir = ?config(etcd_srcdir, Config), + ct:pal("Compiling etcd in ~ts", [EtcdSrcdir]), + Script0 = case os:type() of + {win32, _} -> "build.bat"; + _ -> "build.sh" + end, + Script1 = filename:join(EtcdSrcdir, Script0), + Cmd = [Script1], + GOPATH = filename:join(EtcdSrcdir, "go"), + GOFLAGS = "-modcacherw", + Options = [{cd, EtcdSrcdir}, + {env, [{"BINDIR", false}, + {"GOPATH", GOPATH}, + {"GOFLAGS", GOFLAGS}]}], + case rabbit_ct_helpers:exec(Cmd, Options) of + {ok, _} -> + EtcdExe = case os:type() of + {win32, _} -> "etcd.exe"; + _ -> "etcd" + end, + EtcdBin = filename:join([EtcdSrcdir, "bin", EtcdExe]), + ?assert(filelib:is_regular(EtcdBin)), + rabbit_ct_helpers:set_config(Config, {etcd_bin, EtcdBin}); + {error, _} -> + {skip, "Failed to compile etcd"} + end. + +start_etcd(Config) -> + ct:pal("Starting etcd daemon"), + EtcdBin = ?config(etcd_bin, Config), + PrivDir = ?config(priv_dir, Config), + EtcdDataDir = filename:join(PrivDir, "data.etcd"), + EtcdName = ?MODULE_STRING, + EtcdHost = "localhost", + EtcdClientPort = 2379, + EtcdClientUrl = rabbit_misc:format( + "http://~s:~b", [EtcdHost, EtcdClientPort]), + EtcdAdvPort = 2380, + EtcdAdvUrl = rabbit_misc:format( + "http://~s:~b", [EtcdHost, EtcdAdvPort]), + Cmd = [EtcdBin, + "--data-dir", EtcdDataDir, + "--name", EtcdName, + "--initial-advertise-peer-urls", EtcdAdvUrl, + "--listen-peer-urls", EtcdAdvUrl, + "--advertise-client-urls", EtcdClientUrl, + "--listen-client-urls", EtcdClientUrl, + "--initial-cluster", EtcdName ++ "=" ++ EtcdAdvUrl, + "--initial-cluster-state", "new", + "--initial-cluster-token", "test-token", + "--log-level", "debug", "--log-outputs", "stdout"], + EtcdPid = spawn(fun() -> rabbit_ct_helpers:exec(Cmd) end), + + EtcdEndpoint = rabbit_misc:format("~s:~b", [EtcdHost, EtcdClientPort]), + rabbit_ct_helpers:set_config( + Config, + [{etcd_pid, EtcdPid}, + {etcd_endpoints, [EtcdEndpoint]}]). + stop_etcd(Config) -> - EtcdPid = ?config(etcd_pid, Config), - Cmd = ["kill", "-INT", EtcdPid], - _ = rabbit_ct_helpers:exec(Cmd), + case rabbit_ct_helpers:get_config(Config, etcd_pid) of + EtcdPid when is_pid(EtcdPid) -> + ct:pal( + "Stopping etcd daemon by killing control process ~p", + [EtcdPid]), + erlang:exit(EtcdPid, kill); + undefined -> + ok + end, Config. - %% %% Test cases %% @@ -120,7 +271,12 @@ registration_with_locking_test(Config) -> ?assertEqual(ok, rabbitmq_peer_discovery_etcd_v3_client:unlock(Pid, LockOwnerKey)), Condition2 = fun() -> - [node()] =:= rabbitmq_peer_discovery_etcd_v3_client:list_nodes(Pid) + case rabbitmq_peer_discovery_etcd_v3_client:list_nodes(Pid) of + [{_, N}] when N =:= node() -> + true; + _ -> + false + end end, try rabbit_ct_helpers:await_condition(Condition2, 45000) @@ -128,6 +284,98 @@ registration_with_locking_test(Config) -> gen_statem:stop(Pid) end. +start_one_member_at_a_time(Config) -> + Config1 = configure_peer_discovery(Config), + + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config1, nodename), + lists:foreach( + fun(Node) -> + ?assertEqual( + ok, + rabbit_ct_broker_helpers:start_node(Config1, Node)) + end, Nodes), + + assert_full_cluster(Config1). + +start_members_concurrently(Config) -> + Config1 = configure_peer_discovery(Config), + + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config1, nodename), + Parent = self(), + Pids = lists:map( + fun(Node) -> + spawn_link( + fun() -> + receive + go -> + ?assertEqual( + ok, + rabbit_ct_broker_helpers:start_node( + Config1, Node)), + Parent ! 
started + end + end) + end, Nodes), + + lists:foreach(fun(Pid) -> Pid ! go end, Pids), + lists:foreach(fun(_Pid) -> receive started -> ok end end, Pids), + + assert_full_cluster(Config1). + +configure_peer_discovery(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + lists:foreach( + fun(Node) -> + Members = lists:sort( + rabbit_ct_broker_helpers:cluster_members_online( + Config, Node)), + ?assertEqual([Node], Members) + end, Nodes), + + lists:foreach( + fun(Node) -> + ?assertEqual( + ok, + rabbit_ct_broker_helpers:stop_broker(Config, Node)), + ?assertEqual( + ok, + rabbit_ct_broker_helpers:reset_node(Config, Node)), + ?assertEqual( + ok, + rabbit_ct_broker_helpers:stop_node(Config, Node)) + end, Nodes), + + Endpoints = ?config(etcd_endpoints, Config), + Config1 = rabbit_ct_helpers:merge_app_env( + Config, + {rabbit, + [{cluster_formation, + [{peer_discovery_backend, rabbit_peer_discovery_etcd}, + {peer_discovery_etcd, + [{endpoints, Endpoints}, + {etcd_prefix, "rabbitmq"}, + {cluster_name, atom_to_list(?FUNCTION_NAME)}]}]}]}), + lists:foreach( + fun(Node) -> + ?assertEqual( + ok, + rabbit_ct_broker_helpers:rewrite_node_config_file( + Config1, Node)) + end, Nodes), + + Config1. + +assert_full_cluster(Config) -> + Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ExpectedMembers = lists:sort(Nodes), + lists:foreach( + fun(Node) -> + Members = lists:sort( + rabbit_ct_broker_helpers:cluster_members_online( + Config, Node)), + ?assertEqual(ExpectedMembers, Members) + end, Nodes). + %% %% Helpers %% diff --git a/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/.gitignore b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/.gitignore new file mode 100644 index 000000000000..e22d17a8cc03 --- /dev/null +++ b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/.gitignore @@ -0,0 +1 @@ +/etcd/ diff --git a/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/init-etcd.sh b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/init-etcd.sh index aca709260693..d9aacf83f3c2 100755 --- a/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/init-etcd.sh +++ b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/init-etcd.sh @@ -5,7 +5,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. # -# Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +# Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. # set -ex diff --git a/deps/rabbitmq_peer_discovery_etcd/test/unit_SUITE.erl b/deps/rabbitmq_peer_discovery_etcd/test/unit_SUITE.erl index 9f7ae7d56bcc..704991fd68d5 100644 --- a/deps/rabbitmq_peer_discovery_etcd/test/unit_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_etcd/test/unit_SUITE.erl @@ -4,7 +4,7 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(unit_SUITE). 
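start_etcd/1 and stop_etcd/1 earlier in this suite run the etcd daemon under a dedicated Erlang control process and stop it by killing that process. A hedged sketch of that lifecycle using open_port/2 directly; the module and function names are hypothetical, and it assumes the external program exits once its stdio pipes are closed, which is not guaranteed for every daemon:

%% Hypothetical sketch of the daemon lifecycle used by start_etcd/1 and
%% stop_etcd/1: the external program runs under a control process, and
%% stopping it is done by killing that process (which closes its port).
-module(daemon_lifecycle_sketch).
-export([start_daemon/2, stop_daemon/1]).

start_daemon(Exe, Args) ->
    spawn(fun() ->
                  Port = erlang:open_port({spawn_executable, Exe},
                                          [{args, Args}, exit_status]),
                  %% block until the external program exits on its own
                  receive {Port, {exit_status, _Status}} -> ok end
          end).

stop_daemon(ControlPid) when is_pid(ControlPid) ->
    erlang:exit(ControlPid, kill),
    ok.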
@@ -55,10 +55,14 @@ registration_value_test(_Config) -> extract_nodes_case1_test(_Config) -> Input = registration_value_of(8488283859587364900, 61), Expected = node(), - - ?assertEqual(Expected, rabbitmq_peer_discovery_etcd_v3_client:extract_node(Input)), - - ?assertEqual(undefined, rabbitmq_peer_discovery_etcd_v3_client:extract_node(<<"{}">>)). + CreatedRev = ?LINE, + ?assertEqual({true, {CreatedRev, Expected}}, + rabbitmq_peer_discovery_etcd_v3_client:extract_node( + {CreatedRev, Input})), + + ?assertEqual(false, + rabbitmq_peer_discovery_etcd_v3_client:extract_node( + {CreatedRev, <<"{}">>})). filter_nodes_test(_Config) -> Input = [node(), undefined, undefined, {error, reason1}, {error, {another, reason}}], diff --git a/deps/rabbitmq_peer_discovery_k8s/.gitignore b/deps/rabbitmq_peer_discovery_k8s/.gitignore index 08362ef45fd8..581bcd1d7477 100644 --- a/deps/rabbitmq_peer_discovery_k8s/.gitignore +++ b/deps/rabbitmq_peer_discovery_k8s/.gitignore @@ -1,25 +1 @@ -*~ -.sw? -.*.sw? -*.beam -*.coverdata -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/git-revisions.txt -/logs/ -/plugins/ -/plugins.lock -/rebar.config -/rebar.lock -/sbin/ -/sbin.lock /test/config_schema_SUITE_data/schema/ -/test/ct.cover.spec -/xrefr - -/rabbitmq_peer_discovery_k8s.d diff --git a/deps/rabbitmq_peer_discovery_k8s/README.md b/deps/rabbitmq_peer_discovery_k8s/README.md index 2a03b3a2ac58..96217cbee488 100644 --- a/deps/rabbitmq_peer_discovery_k8s/README.md +++ b/deps/rabbitmq_peer_discovery_k8s/README.md @@ -58,4 +58,4 @@ See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview]( ## Copyright -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. diff --git a/deps/rabbitmq_peer_discovery_k8s/include/rabbit_peer_discovery_k8s.hrl b/deps/rabbitmq_peer_discovery_k8s/include/rabbit_peer_discovery_k8s.hrl index 725a79cc1053..45f0a0a9d421 100644 --- a/deps/rabbitmq_peer_discovery_k8s/include/rabbit_peer_discovery_k8s.hrl +++ b/deps/rabbitmq_peer_discovery_k8s/include/rabbit_peer_discovery_k8s.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(CONFIG_MODULE, rabbit_peer_discovery_config). diff --git a/deps/rabbitmq_peer_discovery_k8s/priv/schema/rabbitmq_peer_discovery_k8s.schema b/deps/rabbitmq_peer_discovery_k8s/priv/schema/rabbitmq_peer_discovery_k8s.schema index 6c21b6cceaec..2b45c8ab0763 100644 --- a/deps/rabbitmq_peer_discovery_k8s/priv/schema/rabbitmq_peer_discovery_k8s.schema +++ b/deps/rabbitmq_peer_discovery_k8s/priv/schema/rabbitmq_peer_discovery_k8s.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% Kubernetes host @@ -51,7 +51,7 @@ end}. 
%% (ACL) Token path {mapping, "cluster_formation.k8s.token_path", "rabbit.cluster_formation.peer_discovery_k8s.k8s_token_path", [ - {datatype, string} + {datatype, string}, {validators, ["file_accessible"]} ]}. {translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_token_path", @@ -62,10 +62,14 @@ fun(Conf) -> end end}. -%% Certificate path +%% +%% TLS +%% + +%% deprecated {mapping, "cluster_formation.k8s.cert_path", "rabbit.cluster_formation.peer_discovery_k8s.k8s_cert_path", [ - {datatype, string} + {datatype, string}, {validators, ["file_accessible"]} ]}. {translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_cert_path", @@ -76,10 +80,73 @@ fun(Conf) -> end end}. +%% modern keys + +{mapping, "cluster_formation.k8s.tls.cacertfile", "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.cacertfile", + [{datatype, string}, {validators, ["file_accessible"]} +]}. + +{translation, "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.cacertfile", +fun(Conf) -> + case cuttlefish:conf_get("cluster_formation.k8s.tls.cacertfile", Conf, undefined) of + undefined -> cuttlefish:unset(); + Value -> Value + end +end}. + +{mapping, "cluster_formation.k8s.tls.certfile", "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.certfile", + [{datatype, string}, {validators, ["file_accessible"]} +]}. + +{translation, "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.certfile", +fun(Conf) -> + case cuttlefish:conf_get("cluster_formation.k8s.tls.certfile", Conf, undefined) of + undefined -> cuttlefish:unset(); + Value -> Value + end +end}. + +{mapping, "cluster_formation.k8s.tls.keyfile", "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.keyfile", + [{datatype, string}, {validators, ["file_accessible"]} +]}. + +{translation, "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.keyfile", +fun(Conf) -> + case cuttlefish:conf_get("cluster_formation.k8s.tls.keyfile", Conf, undefined) of + undefined -> cuttlefish:unset(); + Value -> Value + end +end}. + +{mapping, "cluster_formation.k8s.tls.verify", "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.verify", [ + {datatype, {enum, [verify_peer, verify_none]}} +]}. + +{translation, "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.verify", +fun(Conf) -> + case cuttlefish:conf_get("cluster_formation.k8s.tls.verify", Conf, undefined) of + undefined -> cuttlefish:unset(); + Value -> Value + end +end}. + +{mapping, "cluster_formation.k8s.tls.fail_if_no_peer_cert", "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.fail_if_no_peer_cert", [ + {datatype, {enum, [true, false]}} +]}. + +{translation, "rabbit.cluster_formation.peer_discovery_k8s.ssl_options.fail_if_no_peer_cert", +fun(Conf) -> + case cuttlefish:conf_get("cluster_formation.k8s.tls.fail_if_no_peer_cert", Conf, undefined) of + undefined -> cuttlefish:unset(); + Value -> Value + end +end}. + + %% Namespace path {mapping, "cluster_formation.k8s.namespace_path", "rabbit.cluster_formation.peer_discovery_k8s.k8s_namespace_path", [ - {datatype, string} + {datatype, string}, {validators, ["file_accessible"]} ]}. 
{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_namespace_path", diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbit_peer_discovery_k8s.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbit_peer_discovery_k8s.erl index e42d86329d27..c05962edbeba 100644 --- a/deps/rabbitmq_peer_discovery_k8s/src/rabbit_peer_discovery_k8s.erl +++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbit_peer_discovery_k8s.erl @@ -4,13 +4,12 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbit_peer_discovery_k8s). -behaviour(rabbit_peer_discovery_backend). --include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl"). -include("rabbit_peer_discovery_k8s.hrl"). @@ -68,33 +67,29 @@ register() -> unregister() -> ok. --spec lock(Node :: node()) -> {ok, {{ResourceId :: string(), LockRequesterId :: node()}, Nodes :: [node()]}} | - {error, Reason :: string()}. +-spec lock(Nodes :: [node()]) -> + {ok, {{ResourceId :: string(), LockRequesterId :: node()}, Nodes :: [node()]}} | + {error, Reason :: string()}. -lock(Node) -> - %% call list_nodes/0 externally such that meck can mock the function - case ?MODULE:list_nodes() of - {ok, {Nodes, disc}} -> - case lists:member(Node, Nodes) of +lock(Nodes) -> + Node = node(), + case lists:member(Node, Nodes) of true -> - rabbit_log:info("Will try to lock connecting to nodes ~tp", [Nodes]), - LockId = rabbit_nodes:lock_id(Node), - Retries = rabbit_nodes:lock_retries(), - case global:set_lock(LockId, Nodes, Retries) of - true -> - {ok, {LockId, Nodes}}; - false -> - {error, io_lib:format("Acquiring lock taking too long, bailing out after ~b retries", [Retries])} - end; + rabbit_log:info("Will try to lock connecting to nodes ~tp", [Nodes]), + LockId = rabbit_nodes:lock_id(Node), + Retries = rabbit_nodes:lock_retries(), + case global:set_lock(LockId, Nodes, Retries) of + true -> + {ok, {LockId, Nodes}}; + false -> + {error, io_lib:format("Acquiring lock taking too long, bailing out after ~b retries", [Retries])} + end; false -> - %% Don't try to acquire the global lock when local node is not discoverable by peers. - %% This branch is just an additional safety check. We should never run into this branch - %% because the local Pod is in state 'Running' and we listed both ready and not-ready addresses. - {error, lists:flatten(io_lib:format("Local node ~ts is not part of discovered nodes ~tp", [Node, Nodes]))} - end; - {error, _} = Error -> - Error - end. + %% Don't try to acquire the global lock when local node is not discoverable by peers. + %% This branch is just an additional safety check. We should never run into this branch + %% because the local Pod is in state 'Running' and we listed both ready and not-ready addresses. + {error, lists:flatten(io_lib:format("Local node ~ts is not part of discovered nodes ~tp", [Node, Nodes]))} + end. -spec unlock({{ResourceId :: string(), LockRequestedId :: atom()}, Nodes :: [atom()]}) -> 'ok'. 
unlock({LockId, Nodes}) -> @@ -120,14 +115,29 @@ make_request() -> M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY), {ok, Token} = rabbit_misc:raw_read_file(get_config_key(k8s_token_path, M)), Token1 = binary:replace(Token, <<"\n">>, <<>>), + + rabbit_log:debug("Will issue a Kubernetes API request client with the following settings: ~tp", [M]), + + TLSClientOpts0 = maps:get(ssl_options, M, []), + LegacyCACertfilePath = get_config_key(k8s_cert_path, M), + %% merge legacy CA certificate file argument if TLSClientOpts does not have its modern counterpart set + TLSClientOpts = case proplists:get_value(cacertfile, TLSClientOpts0, undefined) of + undefined -> + [{cacertfile, LegacyCACertfilePath} | TLSClientOpts0]; + _Other -> + TLSClientOpts0 + end, + + rabbit_log:debug("Will issue a Kubernetes API request client with the following TLS options: ~tp", [TLSClientOpts]), + ?HTTPC_MODULE:get( get_config_key(k8s_scheme, M), get_config_key(k8s_host, M), get_config_key(k8s_port, M), - base_path(endpoints,get_config_key(k8s_service_name, M)), + base_path(endpoints, get_config_key(k8s_service_name, M)), [], [{"Authorization", "Bearer " ++ binary_to_list(Token1)}], - [{ssl, [{cacertfile, get_config_key(k8s_cert_path, M)}]}]). + [{ssl, TLSClientOpts}]). %% @spec node_name(k8s_endpoint) -> list() %% @doc Return a full rabbit node name, appending hostname suffix diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s.erl index 809cc1d52eb1..091e6103b315 100644 --- a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s.erl +++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This module exists as an alias for rabbit_peer_discovery_k8s. @@ -44,7 +44,7 @@ unregister() -> post_registration() -> ?DELEGATE:post_registration(). --spec lock(Node :: node()) -> {ok, {ResourceId :: string(), LockRequesterId :: node()}} | {error, Reason :: string()}. +-spec lock(Nodes :: [node()]) -> {ok, {ResourceId :: string(), LockRequesterId :: node()}} | {error, Reason :: string()}. lock(Node) -> ?DELEGATE:lock(Node). diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_app.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_app.erl index 413e73224a9d..2dfdee8f11c5 100644 --- a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_app.erl +++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_peer_discovery_k8s_app). 
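For reference, the new cluster_formation.k8s.tls.* keys introduced in the schema above end up as an ssl_options proplist under peer_discovery_k8s, which make_request/0 then passes to the HTTP client (falling back to the legacy k8s_cert_path only when cacertfile is absent). A sketch of the equivalent advanced.config fragment; the file paths are placeholders:

%% Illustrative advanced.config fragment corresponding to the new
%% cluster_formation.k8s.tls.* keys mapped by the schema above.
[
 {rabbit,
  [{cluster_formation,
    [{peer_discovery_backend, rabbit_peer_discovery_k8s},
     {peer_discovery_k8s,
      [{ssl_options,
        [{cacertfile, "/path/to/ca_certificate.pem"},
         {certfile,   "/path/to/client_certificate.pem"},
         {keyfile,    "/path/to/client_key.pem"},
         {verify,     verify_peer}]}]}]}]}
].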
diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_node_monitor.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_node_monitor.erl index cf4eeaf158f3..1b568ac01a25 100644 --- a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_node_monitor.erl +++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_node_monitor.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This gen_server receives node monitoring events from net_kernel diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_sup.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_sup.erl index b08897d0ee81..0f0d992aef6f 100644 --- a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_sup.erl +++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbitmq_peer_discovery_k8s_sup). diff --git a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE.erl index 06baabcad548..0e468d6de660 100644 --- a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). 
diff --git a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/certs/cacert.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/certs/cacert.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/certs/cert.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/certs/cert.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/certs/key.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/certs/key.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/namespace.txt b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/namespace.txt new file mode 100644 index 000000000000..e24d8b738556 --- /dev/null +++ b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/namespace.txt @@ -0,0 +1 @@ +example-namespace \ No newline at end of file diff --git a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/rabbitmq_peer_discovery_k8s.snippets b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/rabbitmq_peer_discovery_k8s.snippets index 958a8a645dfb..6dbb7ee2916d 100644 --- a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/rabbitmq_peer_discovery_k8s.snippets +++ b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/rabbitmq_peer_discovery_k8s.snippets @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% [ @@ -101,33 +101,72 @@ ], [rabbitmq_peer_discovery_k8s] } -, {k8s_token_path, "cluster_formation.k8s.token_path = /a/b/c", [ +, {k8s_token_path, "cluster_formation.k8s.token_path = test/config_schema_SUITE_data/token.txt", [ {rabbit, [ {cluster_formation, [ {peer_discovery_k8s, [ - {k8s_token_path, "/a/b/c"} + {k8s_token_path, "test/config_schema_SUITE_data/token.txt"} ]} ]} ]} ], [rabbitmq_peer_discovery_k8s] } -, {k8s_token_path, "cluster_formation.k8s.cert_path = /a/b/c", [ +, {k8s_ca_certificate_legacy_cert_path, "cluster_formation.k8s.cert_path = test/config_schema_SUITE_data/certs/cacert.pem", [ {rabbit, [ {cluster_formation, [ {peer_discovery_k8s, [ - {k8s_cert_path, "/a/b/c"} + {k8s_cert_path, "test/config_schema_SUITE_data/certs/cacert.pem"} ]} ]} ]} ], [rabbitmq_peer_discovery_k8s] } -, {k8s_token_path, "cluster_formation.k8s.namespace_path = /a/b/c", [ +, {k8s_ca_certificate_modern_path, "cluster_formation.k8s.tls.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem", [ {rabbit, [ {cluster_formation, [ {peer_discovery_k8s, [ - {k8s_namespace_path, "/a/b/c"} + {ssl_options, [ + {cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"} + ]} + ]} + ]} + ]} + ], [rabbitmq_peer_discovery_k8s] + } + +, {k8s_client_certificate_modern_path, "cluster_formation.k8s.tls.certfile = test/config_schema_SUITE_data/certs/cert.pem", [ + {rabbit, [ + {cluster_formation, [ + {peer_discovery_k8s, [ + {ssl_options, [ + {certfile, "test/config_schema_SUITE_data/certs/cert.pem"} + ]} + ]} + ]} + ]} + ], [rabbitmq_peer_discovery_k8s] + } + +, {k8s_client_key_modern_path, "cluster_formation.k8s.tls.keyfile = test/config_schema_SUITE_data/certs/key.pem", [ + {rabbit, [ + {cluster_formation, [ + {peer_discovery_k8s, [ + {ssl_options, [ + {keyfile, "test/config_schema_SUITE_data/certs/key.pem"} + ]} + ]} + ]} + ]} + ], [rabbitmq_peer_discovery_k8s] + } + +, {k8s_namespace_path, "cluster_formation.k8s.namespace_path = test/config_schema_SUITE_data/namespace.txt", [ + {rabbit, [ + {cluster_formation, [ + {peer_discovery_k8s, [ + {k8s_namespace_path, "test/config_schema_SUITE_data/namespace.txt"} ]} ]} ]} diff --git a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/token.txt b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/token.txt new file mode 100644 index 000000000000..bc5f685299c8 --- /dev/null +++ b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/token.txt @@ -0,0 +1 @@ +example-token \ No newline at end of file diff --git a/deps/rabbitmq_peer_discovery_k8s/test/rabbitmq_peer_discovery_k8s_SUITE.erl b/deps/rabbitmq_peer_discovery_k8s/test/rabbitmq_peer_discovery_k8s_SUITE.erl index da6c0b45e8ca..376f3f4e8b9f 100644 --- a/deps/rabbitmq_peer_discovery_k8s/test/rabbitmq_peer_discovery_k8s_SUITE.erl +++ b/deps/rabbitmq_peer_discovery_k8s/test/rabbitmq_peer_discovery_k8s_SUITE.erl @@ -4,13 +4,12 @@ %% %% The Initial Developer of the Original Code is AWeber Communications. %% Copyright (c) 2015-2016 AWeber Communications -%% Copyright (c) 2016-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. All rights reserved. %% -module(rabbitmq_peer_discovery_k8s_SUITE). -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). 
@@ -40,8 +39,7 @@ groups() -> {lock, [], [ lock_single_node, lock_multiple_nodes, - lock_local_node_not_discovered, - lock_list_nodes_fails + lock_local_node_not_discovered ]} ]. @@ -146,29 +144,25 @@ event_v1_test(_Config) -> lock_single_node(_Config) -> LocalNode = node(), Nodes = [LocalNode], - meck:expect(rabbit_peer_discovery_k8s, list_nodes, 0, {ok, {[LocalNode], disc}}), - {ok, {LockId, Nodes}} = rabbit_peer_discovery_k8s:lock(LocalNode), + {ok, {LockId, Nodes}} = rabbit_peer_discovery_k8s:lock([LocalNode]), ?assertEqual(ok, rabbit_peer_discovery_k8s:unlock({LockId, Nodes})). lock_multiple_nodes(_Config) -> application:set_env(rabbit, cluster_formation, [{internal_lock_retries, 2}]), LocalNode = node(), - OtherNode = other@host, - Nodes = [OtherNode, LocalNode], - meck:expect(rabbit_peer_discovery_k8s, list_nodes, 0, {ok, {Nodes, disc}}), - - {ok, {{LockResourceId, OtherNode}, Nodes}} = rabbit_peer_discovery_k8s:lock(OtherNode), - ?assertEqual({error, "Acquiring lock taking too long, bailing out after 2 retries"}, rabbit_peer_discovery_k8s:lock(LocalNode)), - ?assertEqual(ok, rabbitmq_peer_discovery_k8s:unlock({{LockResourceId, OtherNode}, Nodes})), - ?assertEqual({ok, {{LockResourceId, LocalNode}, Nodes}}, rabbit_peer_discovery_k8s:lock(LocalNode)), - ?assertEqual(ok, rabbitmq_peer_discovery_k8s:unlock({{LockResourceId, LocalNode}, Nodes})). + OtherNodeA = a@host, + OtherNodeB = b@host, + + meck:expect(rabbit_nodes, lock_id, 1, {rabbit_nodes:cookie_hash(), OtherNodeA}), + {ok, {{LockResourceId, OtherNodeA}, [LocalNode, OtherNodeA]}} = rabbit_peer_discovery_k8s:lock([LocalNode, OtherNodeA]), + meck:expect(rabbit_nodes, lock_id, 1, {rabbit_nodes:cookie_hash(), OtherNodeB}), + ?assertEqual({error, "Acquiring lock taking too long, bailing out after 2 retries"}, rabbit_peer_discovery_k8s:lock([LocalNode, OtherNodeB])), + ?assertEqual(ok, rabbit_peer_discovery_k8s:unlock({{LockResourceId, OtherNodeA}, [LocalNode, OtherNodeA]})), + ?assertEqual({ok, {{LockResourceId, OtherNodeB}, [LocalNode, OtherNodeB]}}, rabbit_peer_discovery_k8s:lock([LocalNode, OtherNodeB])), + ?assertEqual(ok, rabbit_peer_discovery_k8s:unlock({{LockResourceId, OtherNodeB}, [LocalNode, OtherNodeB]})), + meck:unload(rabbit_nodes). lock_local_node_not_discovered(_Config) -> - meck:expect(rabbit_peer_discovery_k8s, list_nodes, 0, {ok, {[n1@host, n2@host], disc}} ), - Expectation = {error, "Local node me@host is not part of discovered nodes [n1@host,n2@host]"}, - ?assertEqual(Expectation, rabbit_peer_discovery_k8s:lock(me@host)). - -lock_list_nodes_fails(_Config) -> - meck:expect(rabbit_peer_discovery_k8s, list_nodes, 0, {error, "K8s API unavailable"}), - ?assertEqual({error, "K8s API unavailable"}, rabbit_peer_discovery_k8s:lock(me@host)). + Expectation = {error, "Local node " ++ atom_to_list(node()) ++ " is not part of discovered nodes [me@host]"}, + ?assertEqual(Expectation, rabbit_peer_discovery_k8s:lock([me@host])). 
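The reworked lock tests above exercise the same primitive the backend now uses directly: a {ResourceId, Requester} lock id acquired across the discovered nodes with global:set_lock/3. A minimal sketch of acquiring and releasing such a lock; the resource id and retry count shown are arbitrary examples rather than the values RabbitMQ derives via rabbit_nodes:lock_id/1 and rabbit_nodes:lock_retries/0:

%% Minimal sketch of the locking primitive behind rabbit_peer_discovery_k8s:lock/1:
%% a {ResourceId, Requester} pair locked across a node list via the global module.
-module(cluster_lock_sketch).
-export([with_cluster_lock/2]).

with_cluster_lock(Nodes, Fun) ->
    LockId = {my_resource, node()},   %% RabbitMQ uses rabbit_nodes:lock_id(node())
    Retries = 10,                     %% RabbitMQ uses rabbit_nodes:lock_retries()
    case global:set_lock(LockId, Nodes, Retries) of
        true ->
            try Fun() after global:del_lock(LockId, Nodes) end;
        false ->
            {error, could_not_acquire_lock}
    end.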
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/BUILD.bazel b/deps/rabbitmq_prelaunch/BUILD.bazel similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/BUILD.bazel rename to deps/rabbitmq_prelaunch/BUILD.bazel diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/Makefile b/deps/rabbitmq_prelaunch/Makefile similarity index 60% rename from deps/rabbit/apps/rabbitmq_prelaunch/Makefile rename to deps/rabbitmq_prelaunch/Makefile index f4ea02e72fad..38c4b940ab3e 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/Makefile +++ b/deps/rabbitmq_prelaunch/Makefile @@ -1,12 +1,13 @@ PROJECT = rabbitmq_prelaunch PROJECT_DESCRIPTION = RabbitMQ prelaunch setup -PROJECT_VERSION = 1.0.0 +PROJECT_VERSION = 4.0.0 PROJECT_MOD = rabbit_prelaunch_app DEPS = rabbit_common cuttlefish thoas -dep_cuttlefish = hex 3.1.0 + +PLT_APPS += runtime_tools eunit osiris systemd DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk -include ../../../../rabbitmq-components.mk -include ../../../../erlang.mk +include ../../rabbitmq-components.mk +include ../../erlang.mk diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/app.bzl b/deps/rabbitmq_prelaunch/app.bzl similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/app.bzl rename to deps/rabbitmq_prelaunch/app.bzl diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl b/deps/rabbitmq_prelaunch/src/rabbit_boot_state.erl similarity index 94% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl rename to deps/rabbitmq_prelaunch/src/rabbit_boot_state.erl index c9387f758624..6ab18a37b675 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_boot_state.erl @@ -3,7 +3,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_boot_state). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl b/deps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl similarity index 90% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl rename to deps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl index 03a3924355e4..d770f58184f6 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl @@ -3,7 +3,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_boot_state_sup). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl b/deps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl similarity index 92% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl rename to deps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl index c68c16b47445..dadf7cd852f1 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2015-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_boot_state_systemd). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_xterm_titlebar.erl b/deps/rabbitmq_prelaunch/src/rabbit_boot_state_xterm_titlebar.erl similarity index 95% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_xterm_titlebar.erl rename to deps/rabbitmq_prelaunch/src/rabbit_boot_state_xterm_titlebar.erl index 8e4ea716bcc2..5c542294e7d6 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_xterm_titlebar.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_boot_state_xterm_titlebar.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc @@ -14,8 +14,6 @@ -behaviour(gen_server). --include_lib("kernel/include/logger.hrl"). - -include_lib("rabbit_common/include/logging.hrl"). -export([start_link/0]). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_fmt_helpers.erl b/deps/rabbitmq_prelaunch/src/rabbit_logger_fmt_helpers.erl similarity index 98% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_fmt_helpers.erl rename to deps/rabbitmq_prelaunch/src/rabbit_logger_fmt_helpers.erl index 70ef4f6b80ea..aab14c139ad1 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_fmt_helpers.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_logger_fmt_helpers.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_logger_fmt_helpers). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_json_fmt.erl b/deps/rabbitmq_prelaunch/src/rabbit_logger_json_fmt.erl similarity index 96% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_json_fmt.erl rename to deps/rabbitmq_prelaunch/src/rabbit_logger_json_fmt.erl index 16134ed51605..7b9a292853e8 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_json_fmt.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_logger_json_fmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_logger_json_fmt). 
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_std_h.erl b/deps/rabbitmq_prelaunch/src/rabbit_logger_std_h.erl similarity index 96% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_std_h.erl rename to deps/rabbitmq_prelaunch/src/rabbit_logger_std_h.erl index 9cd9b13ad44d..43b68b153968 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_std_h.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_logger_std_h.erl @@ -20,19 +20,9 @@ -module(rabbit_logger_std_h). -ifdef(TEST). --define(io_put_chars(DEVICE, DATA), begin - %% We log to Common Test log as well. - %% This is the file we use to check - %% the message made it to - %% stdout/stderr. - ct:log("~ts", [DATA]), - io:put_chars(DEVICE, DATA) - end). - -export([parse_date_spec/1, parse_day_of_week/2, parse_day_of_month/2, parse_hour/2, parse_minute/2]). --else. --define(io_put_chars(DEVICE, DATA), io:put_chars(DEVICE, DATA)). -endif. + -define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)). -define(file_datasync(DEVICE), file:datasync(DEVICE)). @@ -50,6 +40,9 @@ -export([log/2, adding_handler/1, removing_handler/1, changing_config/3, filter_config/1]). +%% Internal export to allow the use of meck. +-export([io_put_chars/2]). + -define(DEFAULT_CALL_TIMEOUT, 5000). %%%=================================================================== @@ -524,22 +517,40 @@ ensure_file(#{inode:=INode0,file_name:=FileName,modes:=Modes}=State) -> State#{last_check=>timestamp()}; _ -> close_log_file(State), - case file:open(FileName,Modes) of - {ok,Fd} -> - {ok,#file_info{inode=INode}} = - file:read_file_info(FileName,[raw]), - State#{fd=>Fd,inode=>INode, - last_check=>timestamp(), - synced=>true,sync_res=>ok}; - Error -> - exit({could_not_reopen_file,Error}) - end + {ok, Fd} = ensure_open(FileName, Modes), + {ok,#file_info{inode=INode}} = + file:read_file_info(FileName,[raw]), + State#{fd=>Fd,inode=>INode, + last_check=>timestamp(), + synced=>true,sync_res=>ok} end; ensure_file(State) -> State. +ensure_open(Filename, Modes) -> + case filelib:ensure_dir(Filename) of + ok -> + case file:open(Filename, Modes) of + {ok, Fd} -> + {ok, Fd}; + Error -> + exit({could_not_reopen_file,Error}) + end; + Error -> + exit({could_not_create_dir_for_file,Error}) + end. + +write_to_dev(Bin,#{dev:=standard_io}=State) -> + try + ?MODULE:io_put_chars(user, Bin) + catch _E:_R -> + ?MODULE:io_put_chars( + standard_error, "Failed to write log message to stdout, trying stderr\n"), + ?MODULE:io_put_chars(standard_error, Bin) + end, + State; write_to_dev(Bin,#{dev:=DevName}=State) -> - ?io_put_chars(DevName, Bin), + ?MODULE:io_put_chars(DevName, Bin), State; write_to_dev(Bin, State) -> State1 = #{fd:=Fd} = maybe_ensure_file(State), @@ -548,6 +559,9 @@ write_to_dev(Bin, State) -> maybe_notify_error(write,Result,State2), State2#{synced=>false,write_res=>Result}. +io_put_chars(DevName, Bin) -> + io:put_chars(DevName, Bin). + sync_dev(#{synced:=false}=State) -> State1 = #{fd:=Fd} = maybe_ensure_file(State), Result = ?file_datasync(Fd), diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_text_fmt.erl b/deps/rabbitmq_prelaunch/src/rabbit_logger_text_fmt.erl similarity index 96% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_text_fmt.erl rename to deps/rabbitmq_prelaunch/src/rabbit_logger_text_fmt.erl index 330349c4c727..c3c3f6d630da 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_logger_text_fmt.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_logger_text_fmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_logger_text_fmt). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl similarity index 99% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl index 6265665eea86..c2f27226a1c5 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl @@ -1,6 +1,5 @@ -module(rabbit_prelaunch_conf). --include_lib("kernel/include/file.hrl"). -include_lib("kernel/include/logger.hrl"). -include_lib("stdlib/include/zip.hrl"). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl similarity index 99% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl index d78d347e2b52..feacbe667f82 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl @@ -12,7 +12,7 @@ setup(#{nodename := Node, nodename_type := NameType} = Context) -> "~n== Erlang distribution ==", [], #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), ?LOG_DEBUG( - "Rqeuested node name: ~ts (type: ~ts)", + "Requested node name: ~ts (type: ~ts)", [Node, NameType], #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}), case node() of @@ -149,5 +149,3 @@ set_credentials_obfuscation_secret() -> credentials_obfuscation_fallback_secret, <<"nocookie">>), ok = credentials_obfuscation:set_fallback_secret(Fallback). - - \ No newline at end of file diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl similarity index 99% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl index d79fc9e7b893..acdea3c89c49 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2019-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_prelaunch_early_logging). 
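The rabbit_logger_std_h changes above replace the TEST-only ?io_put_chars macro with an exported io_put_chars/2 that is always called as ?MODULE:io_put_chars/2, precisely so tests can intercept it with meck. A hypothetical test helper showing that interception; the helper and module names and the one-second timeout are assumptions, not part of the suite:

%% Hypothetical test helper: because console writes go through the
%% exported, fully qualified ?MODULE:io_put_chars/2, meck can capture
%% what the handler would have printed to stdout.
-module(logger_capture_sketch).
-export([capture_console_output/1]).

capture_console_output(Fun) ->
    ok = meck:new(rabbit_logger_std_h, [passthrough]),
    Self = self(),
    ok = meck:expect(rabbit_logger_std_h, io_put_chars,
                     fun(_Device, Data) -> Self ! {console, Data}, ok end),
    try
        Fun(),
        receive {console, Output} -> {ok, Output}
        after 1000 -> {error, nothing_logged}
        end
    after
        meck:unload(rabbit_logger_std_h)
    end.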
@@ -25,10 +25,9 @@ -export([filter_log_event/2]). -export([filter_discarded_message/2]). --ifdef(TEST). +%% For internal testing purpose only. -export([levels/0, determine_prefix/1]). --endif. -define(CONFIGURED_KEY, {?MODULE, configured}). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl similarity index 91% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl index 98217aa7af05..ebf7126b6edd 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl @@ -6,8 +6,10 @@ -export([check/1]). --define(OTP_MINIMUM, "25.0"). --define(ERTS_MINIMUM, "13.0"). +%% minimum Erlang/OTP version supported +-define(OTP_MINIMUM, "26.0"). +%% the ERTS version provided by the minimum Erlang/OTP version supported +-define(ERTS_MINIMUM, "14.0"). check(_Context) -> ?LOG_DEBUG( diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_file.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_file.erl similarity index 95% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_file.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch_file.erl index 347e3057759f..1e33162c6472 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_file.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_file.erl @@ -12,7 +12,7 @@ %% limitations under the License. %% %% Copyright Ericsson AB 2011-2023. All Rights Reserved. -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% This code originated here and has been modified to suit RabbitMQ: %% https://github.com/erlang/otp/blob/2d43af53899d35423f1c83887026089c91bce010/lib/ssl/src/ssl_dist_sup.erl diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl rename to deps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_logger_std_h_SUITE.erl b/deps/rabbitmq_prelaunch/test/rabbit_logger_std_h_SUITE.erl similarity index 99% rename from deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_logger_std_h_SUITE.erl rename to deps/rabbitmq_prelaunch/test/rabbit_logger_std_h_SUITE.erl index 13d60f259435..98e40cb893a3 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_logger_std_h_SUITE.erl +++ b/deps/rabbitmq_prelaunch/test/rabbit_logger_std_h_SUITE.erl @@ -1,6 +1,5 @@ -module(rabbit_logger_std_h_SUITE). 
--include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -compile(export_all). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE.erl b/deps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE.erl similarity index 94% rename from deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE.erl rename to deps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE.erl index a92fef6b6a60..42bb60e3ec1b 100644 --- a/deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE.erl +++ b/deps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_prelaunch_file_SUITE). diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/advanced.config b/deps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/advanced.config similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/advanced.config rename to deps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/advanced.config diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/bad-advanced.config b/deps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/bad-advanced.config similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/bad-advanced.config rename to deps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/bad-advanced.config diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/rabbitmq.config b/deps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/rabbitmq.config similarity index 100% rename from deps/rabbit/apps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/rabbitmq.config rename to deps/rabbitmq_prelaunch/test/rabbit_prelaunch_file_SUITE_data/rabbitmq.config diff --git a/deps/rabbitmq_prometheus/.gitignore b/deps/rabbitmq_prometheus/.gitignore index c00cbc643a2f..a32818d446ff 100644 --- a/deps/rabbitmq_prometheus/.gitignore +++ b/deps/rabbitmq_prometheus/.gitignore @@ -1,24 +1,3 @@ -.sw? -.*.sw? -*.beam -*~ -\#* -.#* -*.d -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -erl_crash.dump prometheus/data test/config_schema_SUITE_data/schema/ diff --git a/deps/rabbitmq_prometheus/BUILD.bazel b/deps/rabbitmq_prometheus/BUILD.bazel index d92bc2070e20..b0d71c0cda52 100644 --- a/deps/rabbitmq_prometheus/BUILD.bazel +++ b/deps/rabbitmq_prometheus/BUILD.bazel @@ -81,6 +81,7 @@ dialyze( eunit( name = "eunit", + compiled_suites = [":rabbitmq_prometheus_collector_test_proxy_beam_files"], #keep target = ":test_erlang_app", ) diff --git a/deps/rabbitmq_prometheus/README.md b/deps/rabbitmq_prometheus/README.md index 61a5a2b903f4..c82d35c75a16 100644 --- a/deps/rabbitmq_prometheus/README.md +++ b/deps/rabbitmq_prometheus/README.md @@ -89,7 +89,7 @@ rabbitmqctl eval 'application:set_env(rabbitmq_prometheus, return_per_object_met As mentioned in the previous section, returning a lot of per-object metrics is quite computationally expensive process. 
One of the reasons is that `/metrics/per-object` returns every possible metric for every possible object - even if having them makes no sense in the day-to-day monitoring activity. -That's why there is an additional endpoint that always return per-object metrics and allows one to explicitly query only the things that are relevant - `/metrics/detailed`. By default it doesn't return anything at all, but it's possible to specify required metric groups and virtual host filters in the GET-parameters. Scraping `/metrics/detailed?vhost=vhost-1&vhost=vhost-2&family=queue_coarse_metrics&family=queue_consumer_count`. will only return requested metrics (and not, for example, channel metrics that include erlang PID in labels). +That's why there is an additional endpoint that always return per-object metrics and allows one to explicitly query only the things that are relevant - `/metrics/detailed`. By default it doesn't return anything at all, but it's possible to specify required metric groups and virtual host filters in the GET-parameters. Scraping `/metrics/detailed?vhost=vhost-1&vhost=vhost-2&family=queue_coarse_metrics&family=queue_consumer_count`. will only return requested metrics (and not, for example, channel metrics that include erlang PID in labels). This endpoint supports the following parameters: @@ -122,4 +122,4 @@ all app app-c_src apps-ct asciidoc ## Copyright -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. diff --git a/deps/rabbitmq_prometheus/app.bzl b/deps/rabbitmq_prometheus/app.bzl index 2370b1afa9db..a77dcbb9bb09 100644 --- a/deps/rabbitmq_prometheus/app.bzl +++ b/deps/rabbitmq_prometheus/app.bzl @@ -12,6 +12,7 @@ def all_beam_files(name = "all_beam_files"): "src/collectors/prometheus_process_collector.erl", "src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl", "src/collectors/prometheus_rabbitmq_core_metrics_collector.erl", + "src/collectors/prometheus_rabbitmq_dynamic_collector.erl", "src/collectors/prometheus_rabbitmq_global_metrics_collector.erl", "src/rabbit_prometheus_app.erl", "src/rabbit_prometheus_dispatcher.erl", @@ -22,7 +23,6 @@ def all_beam_files(name = "all_beam_files"): dest = "ebin", erlc_opts = "//:erlc_opts", deps = [ - "//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_web_dispatch:erlang_app", "@prometheus//:erlang_app", @@ -42,6 +42,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/collectors/prometheus_process_collector.erl", "src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl", "src/collectors/prometheus_rabbitmq_core_metrics_collector.erl", + "src/collectors/prometheus_rabbitmq_dynamic_collector.erl", "src/collectors/prometheus_rabbitmq_global_metrics_collector.erl", "src/rabbit_prometheus_app.erl", "src/rabbit_prometheus_dispatcher.erl", @@ -52,7 +53,6 @@ def all_test_beam_files(name = "all_test_beam_files"): dest = "test", erlc_opts = "//:test_erlc_opts", deps = [ - "//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_web_dispatch:erlang_app", "@prometheus//:erlang_app", @@ -83,6 +83,7 @@ def all_srcs(name = "all_srcs"): "src/collectors/prometheus_process_collector.erl", "src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl", "src/collectors/prometheus_rabbitmq_core_metrics_collector.erl", + "src/collectors/prometheus_rabbitmq_dynamic_collector.erl", "src/collectors/prometheus_rabbitmq_global_metrics_collector.erl", 
"src/rabbit_prometheus_app.erl", "src/rabbit_prometheus_dispatcher.erl", @@ -121,3 +122,12 @@ def test_suite_beam_files(name = "test_suite_beam_files"): "//deps/rabbitmq_ct_helpers:erlang_app", ], ) + + erlang_bytecode( + name = "rabbitmq_prometheus_collector_test_proxy_beam_files", + testonly = True, + srcs = ["test/rabbitmq_prometheus_collector_test_proxy.erl"], + outs = ["test/rabbitmq_prometheus_collector_test_proxy.beam"], + app_name = "rabbitmq_prometheus", + erlc_opts = "//:test_erlc_opts", + ) diff --git a/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh b/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh index 08cd3bcf12e5..b5994f87a73a 100755 --- a/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh +++ b/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh @@ -395,7 +395,7 @@ if [ "$haveSslConfig" ] && [ -f "$combinedSsl" ]; then # More ENV vars for make clustering happiness # we don't handle clustering in this script, but these args should ensure # clustered SSL-enabled members will talk nicely - export ERL_SSL_PATH="$(erl -eval 'io:format("~p", [code:lib_dir(ssl, ebin)]),halt().' -noshell)" + export ERL_SSL_PATH="$(erl -eval 'io:format("~p", [filename:join(code:lib_dir(ssl), "ebin")]),halt().' -noshell)" sslErlArgs="-pa $ERL_SSL_PATH -proto_dist inet_tls -ssl_dist_opt server_certfile $combinedSsl -ssl_dist_opt server_secure_renegotiate true client_secure_renegotiate true" export RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS:-} $sslErlArgs" export RABBITMQ_CTL_ERL_ARGS="${RABBITMQ_CTL_ERL_ARGS:-} $sslErlArgs" diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distribution.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distribution.json index cefedbcaaf51..c4ab9f2e92a1 100644 --- a/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distribution.json +++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distribution.json @@ -134,7 +134,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "count(erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) OR vector(0)", + "expr": "count(erlang_vm_dist_node_state * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) OR vector(0)", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -214,7 +214,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "count(erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} == 3) OR vector(0)", + "expr": "count(erlang_vm_dist_node_state * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} == 3) OR vector(0)", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -294,7 +294,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "count(erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} == 1) OR vector(0)", + "expr": "count(erlang_vm_dist_node_state * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} == 1) OR vector(0)", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -374,7 +374,7 @@ "pluginVersion": 
"8.3.4", "targets": [ { - "expr": "count(erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} == 2) OR vector(0)", + "expr": "count(erlang_vm_dist_node_state * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} == 2) OR vector(0)", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -466,7 +466,7 @@ "displayAliasType": "Warning / Critical", "displayType": "Regular", "displayValueWithAlias": "Never", - "expr": "erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "erlang_vm_dist_node_state * on(instance, job) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": " {{rabbitmq_node}} -> {{peer}}", @@ -738,7 +738,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "erlang_vm_dist_node_queue_size_bytes * on(instance) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "erlang_vm_dist_node_queue_size_bytes * on(instance, job) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -1002,7 +1002,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rate(erlang_vm_dist_send_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rate(erlang_vm_dist_send_bytes[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} -> {{peer}}", @@ -1248,7 +1248,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rate(erlang_vm_dist_recv_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rate(erlang_vm_dist_recv_bytes[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} <- {{peer}}", @@ -1494,7 +1494,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rate(erlang_vm_dist_send_cnt[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rate(erlang_vm_dist_send_cnt[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} -> {{peer}}", @@ -1740,7 +1740,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rate(erlang_vm_dist_recv_cnt[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": 
"rate(erlang_vm_dist_recv_cnt[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} <- {{peer}}", @@ -1986,7 +1986,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "(rate(erlang_vm_dist_send_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) / \n(rate(erlang_vm_dist_send_cnt[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "(rate(erlang_vm_dist_send_bytes[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) / \n(rate(erlang_vm_dist_send_cnt[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} -> {{peer}}", @@ -2232,7 +2232,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "(rate(erlang_vm_dist_recv_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) / \n(rate(erlang_vm_dist_recv_cnt[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "(rate(erlang_vm_dist_recv_bytes[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) / \n(rate(erlang_vm_dist_recv_cnt[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} <- {{peer}}", @@ -2495,7 +2495,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "erlang_vm_dist_port_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "erlang_vm_dist_port_memory_bytes * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} -> {{peer}}", @@ -2606,7 +2606,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "erlang_vm_dist_port_queue_size_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "erlang_vm_dist_port_queue_size_bytes * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} -> {{peer}}", @@ -2711,7 +2711,7 @@ "displayAliasType": "Warning / Critical", "displayType": "Regular", "displayValueWithAlias": "Never", - "expr": "erlang_vm_dist_proc_status{type=\"$erlang_vm_dist_proc_type\"} * on(instance) group_left(rabbitmq_node, rabbitmq_cluster) 
rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} ", + "expr": "erlang_vm_dist_proc_status{type=\"$erlang_vm_dist_proc_type\"} * on(instance, job) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} ", "format": "time_series", "intervalFactor": 1, "legendFormat": " {{rabbitmq_node}} -> {{peer}}", @@ -2848,7 +2848,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "erlang_vm_dist_proc_message_queue_len{type=\"$erlang_vm_dist_proc_type\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "erlang_vm_dist_proc_message_queue_len{type=\"$erlang_vm_dist_proc_type\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} -> {{peer}}", @@ -3094,7 +3094,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "erlang_vm_dist_proc_memory_bytes{type=\"$erlang_vm_dist_proc_type\"} * on(instance) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "erlang_vm_dist_proc_memory_bytes{type=\"$erlang_vm_dist_proc_type\"} * on(instance, job) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} -> {{peer}}", @@ -3340,7 +3340,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rate(erlang_vm_dist_proc_reductions{type=\"$erlang_vm_dist_proc_type\"}[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rate(erlang_vm_dist_proc_reductions{type=\"$erlang_vm_dist_proc_type\"}[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} -> {{peer}}", @@ -3487,4 +3487,4 @@ "uid": "d-SFCCmZz", "version": 20220805, "weekStart": "" -} \ No newline at end of file +} diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distributions-Compare.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distributions-Compare.json index 72829c8a6ce8..ab7d548d0e06 100644 --- a/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distributions-Compare.json +++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distributions-Compare.json @@ -160,7 +160,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rate(erlang_vm_dist_send_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=~\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rate(erlang_vm_dist_send_bytes[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=~\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "legendFormat": "{{rabbitmq_node}} -> {{peer}}", "refId": "A" } @@ -415,7 +415,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rate(erlang_vm_dist_send_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) 
rabbitmq_identity_info{rabbitmq_cluster=~\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rate(erlang_vm_dist_send_bytes[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=~\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{rabbitmq_node}} -> {{peer}}", @@ -1955,4 +1955,4 @@ "uid": "C0jeDstZk", "version": 20220805, "weekStart": "" -} \ No newline at end of file +} diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Memory-Allocators.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Memory-Allocators.json index 8b434c5c6772..0e7e06218d65 100644 --- a/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Memory-Allocators.json +++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Memory-Allocators.json @@ -140,7 +140,7 @@ "repeatDirection": "v", "targets": [ { - "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", + "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", "format": "time_series", "intervalFactor": 1, "refId": "A" @@ -219,7 +219,7 @@ "repeatDirection": "v", "targets": [ { - "expr": "(\n sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n -\n sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n) / sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", + "expr": "(\n sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n -\n sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n) / sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", 
"format": "time_series", "intervalFactor": 1, "refId": "A" @@ -296,7 +296,7 @@ "repeatDirection": "v", "targets": [ { - "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "intervalFactor": 1, "refId": "A" @@ -373,7 +373,7 @@ "repeatDirection": "v", "targets": [ { - "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "intervalFactor": 1, "refId": "A" @@ -450,7 +450,7 @@ "repeatDirection": "v", "targets": [ { - "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "intervalFactor": 1, "refId": "A" @@ -527,7 +527,7 @@ "repeatDirection": "v", "targets": [ { - "expr": "sum (rabbitmq_process_resident_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (rabbitmq_process_resident_memory_bytes * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "intervalFactor": 1, "refId": "A" @@ -606,17 +606,17 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rabbitmq_process_resident_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"}", + "expr": "rabbitmq_process_resident_memory_bytes * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"}", "legendFormat": "Resident Set 
Size", "refId": "A" }, { - "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Allocated Used", "refId": "B" }, { - "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Allocated Unused", "refId": "C" } @@ -771,7 +771,7 @@ "repeatDirection": "v", "targets": [ { - "expr": "rabbitmq_process_resident_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"}", + "expr": "rabbitmq_process_resident_memory_bytes * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"}", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -779,12 +779,12 @@ "refId": "A" }, { - "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Allocated Used", "refId": "B" }, { - "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum 
(erlang_vm_allocators{usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Allocated Unused", "refId": "C" } @@ -883,7 +883,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum by(alloc) (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum by(alloc) (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "{{alloc}}", "refId": "A" } @@ -985,7 +985,7 @@ "repeatDirection": "v", "targets": [ { - "expr": "sum by(alloc) (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum by(alloc) (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1082,7 +1082,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1158,7 +1158,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", 
alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1234,7 +1234,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1313,7 +1313,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs_pool\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1389,7 +1389,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum 
(erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1465,7 +1465,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1544,7 +1544,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"sbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1620,7 +1620,7 @@ "pluginVersion": 
"8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1696,7 +1696,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1777,32 +1777,32 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock - Used", "refId": "A" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", 
usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock - Unused", "refId": "B" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock Pool - Used", "refId": "C" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs_pool\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock Pool - Unused", "refId": "D" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Singleblock - Used", "refId": "E" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, 
rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"sbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Singleblock - Unused", "refId": "F" } @@ -1995,34 +1995,34 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Multiblock - Used", "refId": "A" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock - Unused", "refId": "B" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) 
rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock Pool - Used", "refId": "C" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs_pool\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock Pool - Unused", "refId": "D" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Singleblock - Used", "refId": "E" }, { - "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"sbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Singleblock - Unused", "refId": "F" } @@ -2101,32 +2101,32 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) 
rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock - Carrier", "refId": "A" }, { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock - Block", "refId": "B" }, { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock Pool - Carrier", "refId": "C" }, { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", 
usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock Pool - Block", "refId": "D" }, { - "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Singleblock - Carrier", "refId": "E" }, { - "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Singleblock - Block", "refId": "F" } @@ -2319,34 +2319,34 @@ 
"pluginVersion": "8.3.4", "targets": [ { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "format": "time_series", "intervalFactor": 1, "legendFormat": "Multiblock - Block", "refId": "A" }, { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock - Carrier", "refId": "B" }, { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) 
rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock Pool - Block", "refId": "C" }, { - "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Multiblock Pool - Carrier", "refId": "D" }, { - "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Singleblock - Block", "refId": "E" }, { - "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", + "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", 
usage=\"carriers\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\", rabbitmq_node=\"$rabbitmq_node\"})", "legendFormat": "Singleblock - Carrier", "refId": "F" } @@ -2517,4 +2517,4 @@ "uid": "o_rtdpWik", "version": 20220805, "weekStart": "" -} \ No newline at end of file +} diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Overview.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Overview.json index 01a505f42154..f0d50bc079cc 100644 --- a/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Overview.json +++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Overview.json @@ -143,7 +143,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_queue_messages_ready * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum(rabbitmq_queue_messages_ready * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "hide": false, "instant": false, @@ -225,7 +225,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_received_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum(rate(rabbitmq_global_messages_received_total[60s]) * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "interval": "", @@ -306,7 +306,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_channels * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) - sum(rabbitmq_channel_consumers * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum(rabbitmq_global_publishers * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -386,7 +386,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_connections * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum(rabbitmq_connections * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "interval": "", @@ -467,7 +467,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_queues * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum(rabbitmq_queues * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -547,7 +547,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_queue_messages_unacked * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", 
namespace=\"$namespace\"})", + "expr": "sum(rabbitmq_queue_messages_unacked * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "hide": false, "instant": false, @@ -628,7 +628,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_redelivered_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\nsum(rate(rabbitmq_global_messages_delivered_consume_auto_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\nsum(rate(rabbitmq_global_messages_delivered_consume_manual_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\nsum(rate(rabbitmq_global_messages_delivered_get_auto_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\nsum(rate(rabbitmq_global_messages_delivered_get_manual_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum(rate(rabbitmq_global_messages_redelivered_total[60s]) * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\nsum(rate(rabbitmq_global_messages_delivered_consume_auto_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\nsum(rate(rabbitmq_global_messages_delivered_consume_manual_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\nsum(rate(rabbitmq_global_messages_delivered_get_auto_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\nsum(rate(rabbitmq_global_messages_delivered_get_manual_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "hide": false, "instant": false, @@ -709,7 +709,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_consumers * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum(rabbitmq_consumers * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -789,7 +789,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_channels * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum(rabbitmq_channels * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -869,7 +869,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_build_info * on(instance) 
group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum(rabbitmq_build_info * on(instance, job) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1210,7 +1210,7 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": false, - "expr": "rabbitmq_build_info * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rabbitmq_build_info * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "table", "instant": true, "interval": "", @@ -1468,7 +1468,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "(rabbitmq_resident_memory_limit_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) -\n(rabbitmq_process_resident_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "(rabbitmq_resident_memory_limit_bytes * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) -\n(rabbitmq_process_resident_memory_bytes * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1718,7 +1718,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rabbitmq_disk_space_available_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rabbitmq_disk_space_available_bytes * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1967,7 +1967,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "(rabbitmq_process_max_fds * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) -\n(rabbitmq_process_open_fds * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "(rabbitmq_process_max_fds * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) -\n(rabbitmq_process_open_fds * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -2216,7 +2216,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "(rabbitmq_process_max_tcp_sockets * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) -\n(rabbitmq_process_open_tcp_sockets * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) 
rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "(rabbitmq_process_max_tcp_sockets * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) -\n(rabbitmq_process_open_tcp_sockets * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -2480,7 +2480,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_queue_messages_ready * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rabbitmq_queue_messages_ready * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -2727,7 +2727,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rabbitmq_queue_messages_unacked * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rabbitmq_queue_messages_unacked * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -2991,7 +2991,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_received_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_received_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -3238,7 +3238,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_confirmed_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_confirmed_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -3485,7 +3485,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_routed_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_routed_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -3732,7 +3732,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": 
"sum(rate(rabbitmq_global_messages_received_confirm_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} - \nrate(rabbitmq_global_messages_confirmed_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}\n) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_received_confirm_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"} - \nrate(rabbitmq_global_messages_confirmed_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}\n) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -3843,7 +3843,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_unroutable_dropped_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_unroutable_dropped_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -3954,7 +3954,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_unroutable_returned_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_unroutable_returned_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -4218,7 +4218,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(\n (rate(rabbitmq_global_messages_delivered_consume_auto_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n (rate(rabbitmq_global_messages_delivered_consume_manual_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})\n) by(rabbitmq_node)", + "expr": "sum(\n (rate(rabbitmq_global_messages_delivered_consume_auto_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n (rate(rabbitmq_global_messages_delivered_consume_manual_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})\n) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -4468,7 +4468,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_redelivered_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) 
rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_redelivered_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -4715,7 +4715,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_delivered_consume_manual_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_delivered_consume_manual_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -4962,7 +4962,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_delivered_consume_auto_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_delivered_consume_auto_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -5209,7 +5209,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_acknowledged_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_acknowledged_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -5320,7 +5320,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_delivered_get_auto_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_delivered_get_auto_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -5431,7 +5431,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_get_empty_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_get_empty_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -5542,7 
+5542,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_global_messages_delivered_get_manual_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_global_messages_delivered_get_manual_ack_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -5805,7 +5805,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rabbitmq_queues * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rabbitmq_queues * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "instant": false, "interval": "", @@ -6056,7 +6056,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_queues_declared_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_queues_declared_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -6306,7 +6306,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_queues_created_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_queues_created_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -6556,7 +6556,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_queues_deleted_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_queues_deleted_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -6819,7 +6819,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rabbitmq_channels * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rabbitmq_channels * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -7069,7 +7069,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_channels_opened_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) 
rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_channels_opened_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -7319,7 +7319,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_channels_closed_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_channels_closed_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -7582,7 +7582,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "rabbitmq_connections * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rabbitmq_connections * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -7832,7 +7832,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_connections_opened_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_connections_opened_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "interval": "", @@ -8083,7 +8083,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_connections_closed_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_connections_closed_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -8205,4 +8205,4 @@ "uid": "Kn5xm-gZk", "version": 20220805, "weekStart": "" -} \ No newline at end of file +} diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Quorum-Queues-Raft.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Quorum-Queues-Raft.json index 6a9fdd88d58c..b184d213dad7 100644 --- a/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Quorum-Queues-Raft.json +++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Quorum-Queues-Raft.json @@ -309,7 +309,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_raft_log_commit_index[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_raft_log_commit_index[60s]) * on(instance, job) 
group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -352,7 +352,7 @@ "reverseYBuckets": false, "targets": [ { - "expr": "rabbitmq_raft_entry_commit_latency_seconds * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", + "expr": "rabbitmq_raft_entry_commit_latency_seconds * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -611,7 +611,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(\n (rabbitmq_raft_log_last_written_index * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) -\n (rabbitmq_raft_log_commit_index * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})\n) by(rabbitmq_node)", + "expr": "sum(\n (rabbitmq_raft_log_last_written_index * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) -\n (rabbitmq_raft_log_commit_index * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})\n) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -857,7 +857,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(rate(rabbitmq_raft_term_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", + "expr": "sum(rate(rabbitmq_raft_term_total[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) by(rabbitmq_node)", "format": "time_series", "instant": false, "intervalFactor": 1, @@ -1106,7 +1106,7 @@ "pluginVersion": "8.3.4", "targets": [ { - "expr": "sum(\n (rabbitmq_raft_log_last_written_index * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) - \n (rabbitmq_raft_log_snapshot_index * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})\n) by(queue, rabbitmq_node)", + "expr": "sum(\n (rabbitmq_raft_log_last_written_index * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) - \n (rabbitmq_raft_log_snapshot_index * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})\n) by(queue, rabbitmq_node)", "hide": false, "legendFormat": "{{rabbitmq_node}} {{queue}}", "refId": "A" @@ -1226,4 +1226,4 @@ "uid": "f1Mee9nZz", "version": 20220805, "weekStart": "" -} \ No newline at end of file +} diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Stream.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Stream.json index
2b62b65da591..3788af4b8c3a 100644 --- a/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Stream.json +++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Stream.json @@ -128,7 +128,7 @@ "targets": [ { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_publishers{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_publishers{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "interval": "", "legendFormat": "", "refId": "A" @@ -188,7 +188,7 @@ "targets": [ { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (irate(rabbitmq_global_messages_received_total{protocol=\"stream\"}[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (irate(rabbitmq_global_messages_received_total{protocol=\"stream\"}[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "instant": false, "interval": "", "legendFormat": "", @@ -250,7 +250,7 @@ "targets": [ { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (irate(rabbitmq_global_messages_confirmed_total{protocol=\"stream\"}[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (irate(rabbitmq_global_messages_confirmed_total{protocol=\"stream\"}[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "instant": false, "interval": "", "legendFormat": "", @@ -311,7 +311,7 @@ "targets": [ { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_consumers{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_consumers{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "interval": "", "legendFormat": "", "refId": "A" @@ -371,7 +371,7 @@ "targets": [ { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (irate(rabbitmq_global_messages_delivered_total{protocol=\"stream\"}[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (irate(rabbitmq_global_messages_delivered_total{protocol=\"stream\"}[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "instant": false, "interval": "", "legendFormat": "", @@ -433,7 +433,7 @@ "targets": [ { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_access_refused_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", 
namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_authentication_failure_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_frame_too_large_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_internal_error_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_precondition_failed_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_publisher_does_not_exist_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_authentication_failure_loopback_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_challenge_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_error_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_mechanism_not_supported_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_already_exists_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_does_not_exist_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_not_available_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_subscription_id_already_exists_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_subscription_id_does_not_exist_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, 
rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_unknown_frame_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_vhost_access_failure_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})\n", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_access_refused_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_authentication_failure_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_frame_too_large_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_internal_error_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_precondition_failed_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_publisher_does_not_exist_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_authentication_failure_loopback_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_challenge_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_error_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_mechanism_not_supported_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_already_exists_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) 
(rabbitmq_global_stream_error_stream_does_not_exist_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_not_available_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_subscription_id_already_exists_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_subscription_id_does_not_exist_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_unknown_frame_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}) +\n\nsum by(rabbitmq_cluster) (rabbitmq_global_stream_error_vhost_access_failure_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})\n", "instant": false, "interval": "", "legendFormat": "", @@ -652,7 +652,7 @@ "targets": [ { "exemplar": true, - "expr": "sort_desc(sum by(rabbitmq_node) (rabbitmq_global_publishers{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", + "expr": "sort_desc(sum by(rabbitmq_node) (rabbitmq_global_publishers{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", "format": "time_series", "instant": false, "interval": "", @@ -866,7 +866,7 @@ "targets": [ { "exemplar": true, - "expr": "sort_desc(sum by(rabbitmq_node) (irate(rabbitmq_global_messages_received_total{protocol=\"stream\"}[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", + "expr": "sort_desc(sum by(rabbitmq_node) (irate(rabbitmq_global_messages_received_total{protocol=\"stream\"}[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", "format": "time_series", "instant": false, "interval": "", @@ -1081,7 +1081,7 @@ "targets": [ { "exemplar": true, - "expr": "sort_desc(sum by(rabbitmq_node) (irate(rabbitmq_global_messages_confirmed_total{protocol=\"stream\"}[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", + "expr": "sort_desc(sum by(rabbitmq_node) (irate(rabbitmq_global_messages_confirmed_total{protocol=\"stream\"}[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", "format": "time_series", "instant": false, "interval": "", @@ -1295,7 +1295,7 @@ 
"targets": [ { "exemplar": true, - "expr": "sort_desc(sum by(rabbitmq_node) (rabbitmq_global_consumers{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", + "expr": "sort_desc(sum by(rabbitmq_node) (rabbitmq_global_consumers{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", "format": "time_series", "instant": false, "interval": "", @@ -1509,7 +1509,7 @@ "targets": [ { "exemplar": true, - "expr": "sort_desc(sum by(rabbitmq_node) (irate(rabbitmq_global_messages_delivered_total{protocol=\"stream\"}[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", + "expr": "sort_desc(sum by(rabbitmq_node) (irate(rabbitmq_global_messages_delivered_total{protocol=\"stream\"}[60s]) * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"}))", "format": "time_series", "instant": false, "interval": "", @@ -2811,14 +2811,14 @@ "targets": [ { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_access_refused_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_access_refused_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "interval": "", "legendFormat": "access_refused", "refId": "A" }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_authentication_failure_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_authentication_failure_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "error_authentication_failure", @@ -2826,7 +2826,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_frame_too_large_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_frame_too_large_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "frame_too_large", @@ -2834,7 +2834,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_internal_error_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) 
(rabbitmq_global_stream_error_internal_error_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "internal_error", @@ -2842,7 +2842,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_precondition_failed_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_precondition_failed_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "precondition_failed", @@ -2850,7 +2850,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_publisher_does_not_exist_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_publisher_does_not_exist_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "publisher_does_not_exist", @@ -2858,7 +2858,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_authentication_failure_loopback_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_authentication_failure_loopback_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "sasl_authentication_failure_loopback", @@ -2866,7 +2866,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_challenge_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_challenge_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "sasl_challenge", @@ -2874,7 +2874,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_error_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_error_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": 
"sasl_error", @@ -2882,7 +2882,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_mechanism_not_supported_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_sasl_mechanism_not_supported_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "sasl_mechanism_not_supported", @@ -2890,7 +2890,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_already_exists_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_already_exists_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "stream_already_exists", @@ -2898,7 +2898,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_does_not_exist_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_does_not_exist_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "stream_does_not_exist", @@ -2906,7 +2906,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_not_available_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_stream_not_available_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "stream_not_available", @@ -2914,7 +2914,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_subscription_id_already_exists_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_subscription_id_already_exists_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "subscription_id_already_exists", @@ -2922,7 +2922,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_subscription_id_does_not_exist_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, 
rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_subscription_id_does_not_exist_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "subscription_id_does_not_exist", @@ -2930,7 +2930,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_unknown_frame_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_unknown_frame_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "error_unknown_frame", @@ -2938,7 +2938,7 @@ }, { "exemplar": true, - "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_vhost_access_failure_total{protocol=\"stream\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", + "expr": "sum by(rabbitmq_cluster) (rabbitmq_global_stream_error_vhost_access_failure_total{protocol=\"stream\"} * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", namespace=\"$namespace\"})", "hide": false, "interval": "", "legendFormat": "vhost_access_failure", @@ -3049,4 +3049,4 @@ "uid": "j7MCpqZ7k", "version": 20220805, "weekStart": "" -} \ No newline at end of file +} diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/inet_tcp_metrics.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/inet_tcp_metrics.json index b1ac0a95b56f..c0ad0fa5a935 100644 --- a/deps/rabbitmq_prometheus/docker/grafana/dashboards/inet_tcp_metrics.json +++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/inet_tcp_metrics.json @@ -238,7 +238,7 @@ ], "targets": [ { - "expr": "max(rabbitmq_build_info * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_node=~\"$src_node|$dst_node\"}) by(erlang_version, rabbitmq_version, product_version, prometheus_client_version, rabbitmq_node, rabbitmq_cluster)", + "expr": "max(rabbitmq_build_info * on(instance, job) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_node=~\"$src_node|$dst_node\"}) by(erlang_version, rabbitmq_version, product_version, prometheus_client_version, rabbitmq_node, rabbitmq_cluster)", "format": "table", "instant": true, "interval": "", diff --git a/deps/rabbitmq_prometheus/docker/prometheus.yml b/deps/rabbitmq_prometheus/docker/prometheus.yml index fb91751c0542..735062d7853a 100644 --- a/deps/rabbitmq_prometheus/docker/prometheus.yml +++ b/deps/rabbitmq_prometheus/docker/prometheus.yml @@ -49,6 +49,24 @@ scrape_configs: - 'rmq0-dist-metrics:15692' - 'rmq1-dist-metrics:15692' - 'rmq2-dist-metrics:15692' + - job_name: 'rabbitmq-server-detailed' + metrics_path: "/metrics/detailed" + params: + family: ["queue_coarse_metrics"] + static_configs: + - targets: + - 'rmq0:15692' + - 'rmq1:15692' + - 'rmq2:15692' + - 'rmq0-dist-tls:15692' + - 'rmq1-dist-tls:15692' + - 'rmq2-dist-tls:15692' + - 'rmq0-qq:15692' + - 
'rmq1-qq:15692' + - 'rmq2-qq:15692' + - 'rmq0-dist-metrics:15692' + - 'rmq1-dist-metrics:15692' + - 'rmq2-dist-metrics:15692' - job_name: 'rabbitmq-perf-test' static_configs: - targets: diff --git a/deps/rabbitmq_prometheus/metrics.md b/deps/rabbitmq_prometheus/metrics.md index b67012b2ae18..5b173ac52191 100644 --- a/deps/rabbitmq_prometheus/metrics.md +++ b/deps/rabbitmq_prometheus/metrics.md @@ -132,9 +132,7 @@ These metrics are specific to the stream protocol. | rabbitmq_disk_space_available_limit_bytes | Free disk space low watermark in bytes | | rabbitmq_identity_info | RabbitMQ node & cluster identity info | | rabbitmq_process_max_fds | Open file descriptors limit | -| rabbitmq_process_max_tcp_sockets | Open TCP sockets limit | | rabbitmq_process_open_fds | Open file descriptors | -| rabbitmq_process_open_tcp_sockets | Open TCP sockets | | rabbitmq_process_resident_memory_bytes | Memory used in bytes | | rabbitmq_resident_memory_limit_bytes | Memory high watermark in bytes | @@ -258,6 +256,12 @@ These metrics are specific to the stream protocol. | rabbitmq_raft_log_snapshot_index | Raft log snapshot index | | rabbitmq_raft_term_total | Current Raft term number | +### Federation + +| Metric | Description | +| --- | --- | +| rabbitmq_federation_links | Federations Links count grouped by Link status | + ## Telemetry | Metric | Description | diff --git a/deps/rabbitmq_prometheus/priv/schema/rabbitmq_prometheus.schema b/deps/rabbitmq_prometheus/priv/schema/rabbitmq_prometheus.schema index a92084f5f6e4..1b3fbd4b240c 100644 --- a/deps/rabbitmq_prometheus/priv/schema/rabbitmq_prometheus.schema +++ b/deps/rabbitmq_prometheus/priv/schema/rabbitmq_prometheus.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% ---------------------------------------------------------------------------- @@ -141,7 +141,9 @@ end}. {mapping, "prometheus.ssl.max_keepalive", "rabbitmq_prometheus.ssl_config.cowboy_opts.max_keepalive", [{datatype, integer}, {validators, ["non_negative_integer"]}]}. -{mapping, "prometheus.filter_aggregated_queue_metrics_pattern", "rabbitmq_prometheus.filter_aggregated_queue_metrics_pattern", [{datatype, string}]}. +%% Kept for compatibility reasons. This is a no-op. +{mapping, "prometheus.filter_aggregated_queue_metrics_pattern", "rabbitmq_prometheus.filter_aggregated_queue_metrics_pattern", + [{datatype, string}]}. %% Authentication options ======================================================== {mapping, "prometheus.authentication.enabled", "rabbitmq_prometheus.authentication.enabled", diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl index f8c11e6216ac..3123c7716a7b 100644 --- a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl +++ b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. 
+%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(prometheus_rabbitmq_alarm_metrics_collector). diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl index 5e49c181dda9..848e6c764fde 100644 --- a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl +++ b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(prometheus_rabbitmq_core_metrics_collector). -export([register/0, @@ -16,7 +16,8 @@ counter_metric/2, untyped_metric/2]). --include_lib("prometheus/include/prometheus.hrl"). +-import(prometheus_text_format, [escape_label_value/1]). + -include_lib("rabbit_common/include/rabbit.hrl"). -behaviour(prometheus_collector). @@ -56,7 +57,7 @@ -define(METRICS_RAW, [ -%%% Those are global, i.e. they contain no reference to queue/vhost/channel + %% Global metrics, as in, they contain no references to queues, virtual hosts or channel {connection_churn_metrics, [ {2, undefined, connections_opened_total, counter, "Total number of connections opened"}, {3, undefined, connections_closed_total, counter, "Total number of connections closed or terminated"}, @@ -68,7 +69,6 @@ ]}, {node_coarse_metrics, [ {2, undefined, process_open_fds, gauge, "Open file descriptors", fd_used}, - {2, undefined, process_open_tcp_sockets, gauge, "Open TCP sockets", sockets_used}, {2, undefined, process_resident_memory_bytes, gauge, "Memory used in bytes", mem_used}, {2, undefined, disk_space_available_bytes, gauge, "Disk space available in bytes", disk_free}, {2, undefined, erlang_processes_used, gauge, "Erlang processes used", proc_used}, @@ -78,7 +78,6 @@ ]}, {node_metrics, [ {2, undefined, process_max_fds, gauge, "Open file descriptors limit", fd_total}, - {2, undefined, process_max_tcp_sockets, gauge, "Open TCP sockets limit", sockets_total}, {2, undefined, resident_memory_limit_bytes, gauge, "Memory high watermark in bytes", mem_limit}, {2, undefined, disk_space_available_limit_bytes, gauge, "Free disk space low watermark in bytes", disk_free_limit}, {2, undefined, erlang_processes_limit, gauge, "Erlang processes limit", proc_total}, @@ -128,7 +127,7 @@ {4, undefined, auth_attempts_detailed_failed_total, counter, "Total number of failed authentication attempts with source info"} ]}, -%%% Those metrics have reference only to a queue name. This is the only group where filtering (e.g. by vhost) makes sense. + %%% These metrics only reference a queue name. This is the only group where filtering (e.g. by vhost) makes sense. 
{queue_coarse_metrics, [ {2, undefined, queue_messages_ready, gauge, "Messages ready to be delivered to consumers"}, {3, undefined, queue_messages_unacked, gauge, "Messages delivered to consumers but not yet acknowledged"}, @@ -158,9 +157,9 @@ {2, undefined, queue_messages_paged_out_bytes, gauge, "Size in bytes of messages paged out to disk", message_bytes_paged_out}, {2, undefined, queue_head_message_timestamp, gauge, "Timestamp of the first message in the queue, if any", head_message_timestamp}, {2, undefined, queue_disk_reads_total, counter, "Total number of times queue read messages from disk", disk_reads}, - {2, undefined, queue_disk_writes_total, counter, "Total number of times queue wrote messages to disk", disk_writes} + {2, undefined, queue_disk_writes_total, counter, "Total number of times queue wrote messages to disk", disk_writes}, + {2, undefined, stream_segments, counter, "Total number of stream segment files", segments} ]}, - %%% Metrics that contain reference to a channel. Some of them also have %%% a queue name, but in this case filtering on it doesn't make any %%% sense, as the queue is not an object of interest here. @@ -209,9 +208,32 @@ ]}, {channel_queue_exchange_metrics, [ - {2, undefined, queue_messages_published_total, counter, "Total number of messages published to queues"} - ]} -]). + {2, undefined, queue_messages_published_total, counter, "Total number of messages published into a queue through an exchange on a channel"} + ]}, + +%%% Metrics in the following 3 groups reference a queue and/or exchange. +%%% They each have a corresponding group in the above per-channel +%%% section but here the channel is not an object of interest. + {exchange_metrics, [ + {2, undefined, exchange_messages_published_total, counter, "Total number of messages published into an exchange"}, + {3, undefined, exchange_messages_confirmed_total, counter, "Total number of messages published into an exchange and confirmed"}, + {4, undefined, exchange_messages_unroutable_returned_total, counter, "Total number of messages published as mandatory into an exchange and returned to the publisher as unroutable"}, + {5, undefined, exchange_messages_unroutable_dropped_total, counter, "Total number of messages published as non-mandatory into an exchange and dropped as unroutable"} + ]}, + + {queue_delivery_metrics, [ + {2, undefined, queue_get_ack_total, counter, "Total number of messages fetched from a queue with basic.get in manual acknowledgement mode"}, + {3, undefined, queue_get_total, counter, "Total number of messages fetched from a queue with basic.get in automatic acknowledgement mode"}, + {4, undefined, queue_messages_delivered_ack_total, counter, "Total number of messages delivered from a queue to consumers in manual acknowledgement mode"}, + {5, undefined, queue_messages_delivered_total, counter, "Total number of messages delivered from a queue to consumers in automatic acknowledgement mode"}, + {6, undefined, queue_messages_redelivered_total, counter, "Total number of messages redelivered from a queue to consumers"}, + {7, undefined, queue_messages_acked_total, counter, "Total number of messages acknowledged by consumers on a queue"}, + {8, undefined, queue_get_empty_total, counter, "Total number of times basic.get operations fetched no message on a queue"} + ]}, + + {queue_exchange_metrics, [ + {2, undefined, queue_exchange_messages_published_total, counter, "Total number of messages published into a queue through an exchange"} + ]}]). 
%% Metrics that can be only requested through `/metrics/detailed` -define(METRICS_CLUSTER,[ @@ -226,6 +248,36 @@ ]} ]). +-define(METRICS_MEMORY_BREAKDOWN, [ + {node_memory, [ + {2, undefined, memory_code_module_bytes, gauge, "Code module memory footprint", code}, + {2, undefined, memory_client_connection_reader_bytes, gauge, "Client connection reader processes footprint in bytes", connection_readers}, + {2, undefined, memory_client_connection_writer_bytes, gauge, "Client connection writer processes footprint in bytes", connection_writers}, + {2, undefined, memory_client_connection_channel_bytes, gauge, "Client connection channel processes footprint in bytes", connection_channels}, + {2, undefined, memory_client_connection_other_bytes, gauge, "Client connection other processes footprint in bytes", connection_other}, + {2, undefined, memory_classic_queue_erlang_process_bytes, gauge, "Classic queue processes footprint in bytes", queue_procs}, + {2, undefined, memory_quorum_queue_erlang_process_bytes, gauge, "Quorum queue processes footprint in bytes", quorum_queue_procs}, + {2, undefined, memory_quorum_queue_dlx_erlang_process_bytes, gauge, "Quorum queue DLX worker processes footprint in bytes", quorum_queue_dlx_procs}, + {2, undefined, memory_stream_erlang_process_bytes, gauge, "Stream processes footprint in bytes", stream_queue_procs}, + {2, undefined, memory_stream_replica_reader_erlang_process_bytes, gauge, "Stream replica reader processes footprint in bytes", stream_queue_replica_reader_procs}, + {2, undefined, memory_stream_coordinator_erlang_process_bytes, gauge, "Stream coordinator processes footprint in bytes", stream_queue_coordinator_procs}, + {2, undefined, memory_plugin_bytes, gauge, "Total plugin footprint in bytes", plugins}, + {2, undefined, memory_modern_metadata_store_bytes, gauge, "Modern metadata store footprint in bytes", metadata_store}, + {2, undefined, memory_other_erlang_process_bytes, gauge, "Other processes footprint in bytes", other_proc}, + {2, undefined, memory_metrics_bytes, gauge, "Metric table footprint in bytes", metrics}, + {2, undefined, memory_management_stats_db_bytes, gauge, "Management stats database footprint in bytes", mgmt_db}, + {2, undefined, memory_classic_metadata_store_bytes, gauge, "Classic metadata store footprint in bytes", mnesia}, + {2, undefined, memory_quorum_queue_ets_table_bytes, gauge, "Quorum queue ETS tables footprint in bytes", quorum_ets}, + {2, undefined, memory_modern_metadata_store_ets_table_bytes, gauge, "Modern metadata store ETS tables footprint in bytes", metadata_store_ets}, + {2, undefined, memory_other_ets_table_bytes, gauge, "Other ETS tables footprint in bytes", other_ets}, + {2, undefined, memory_binary_heap_bytes, gauge, "Binary heap size in bytes", binary}, + {2, undefined, memory_message_index_bytes, gauge, "Message index footprint in bytes", msg_index}, + {2, undefined, memory_atom_table_bytes, gauge, "Atom table size in bytes", atom}, + {2, undefined, memory_other_system_bytes, gauge, "Other runtime footprint in bytes", other_system}, + {2, undefined, memory_runtime_allocated_unused_bytes, gauge, "Runtime allocated but unused blocks size in bytes", allocated_unused}, + {2, undefined, memory_runtime_reserved_unallocated_bytes, gauge, "Runtime reserved but unallocated blocks size in bytes", reserved_unallocated} + ]}]). 
+ -define(TOTALS, [ %% ordering differs from metrics above, refer to list comprehension {connection_created, connections, gauge, "Connections currently open"}, @@ -244,26 +296,30 @@ register() -> deregister_cleanup(_) -> ok. collect_mf('detailed', Callback) -> - collect(true, ?DETAILED_METRIC_NAME_PREFIX, vhosts_filter_from_pdict(), queues_filter_from_pdict(), enabled_mfs_from_pdict(?METRICS_RAW), Callback), - collect(true, ?CLUSTER_METRIC_NAME_PREFIX, vhosts_filter_from_pdict(), queues_filter_from_pdict(), enabled_mfs_from_pdict(?METRICS_CLUSTER), Callback), + collect(true, ?DETAILED_METRIC_NAME_PREFIX, vhosts_filter_from_pdict(), enabled_mfs_from_pdict(?METRICS_RAW), Callback), + collect(true, ?CLUSTER_METRIC_NAME_PREFIX, vhosts_filter_from_pdict(), enabled_mfs_from_pdict(?METRICS_CLUSTER), Callback), %% identity is here to enable filtering on a cluster name (as already happens in existing dashboards) emit_identity_info(Callback), ok; collect_mf('per-object', Callback) -> - collect(true, ?METRIC_NAME_PREFIX, false, queues_filter_from_pdict(), ?METRICS_RAW, Callback), + collect(true, ?METRIC_NAME_PREFIX, false, ?METRICS_RAW, Callback), totals(Callback), emit_identity_info(Callback), ok; +collect_mf('memory-breakdown', Callback) -> + collect(false, ?METRIC_NAME_PREFIX, false, ?METRICS_MEMORY_BREAKDOWN, Callback), + emit_identity_info(Callback), + ok; collect_mf(_Registry, Callback) -> PerObjectMetrics = application:get_env(rabbitmq_prometheus, return_per_object_metrics, false), - collect(PerObjectMetrics, ?METRIC_NAME_PREFIX, false, queues_filter_from_pdict(), ?METRICS_RAW, Callback), + collect(PerObjectMetrics, ?METRIC_NAME_PREFIX, false, ?METRICS_RAW, Callback), totals(Callback), emit_identity_info(Callback), ok. -collect(PerObjectMetrics, Prefix, VHostsFilter, QueuesFilter, IncludedMFs, Callback) -> +collect(PerObjectMetrics, Prefix, VHostsFilter, IncludedMFs, Callback) -> _ = [begin - Data = get_data(Table, PerObjectMetrics, VHostsFilter, QueuesFilter), + Data = get_data(Table, PerObjectMetrics, VHostsFilter), mf(Callback, Prefix, Contents, Data) end || {Table, Contents} <- IncludedMFs, not mutually_exclusive_mf(PerObjectMetrics, Table, IncludedMFs)], ok. 
@@ -408,22 +464,24 @@ label(L) when is_binary(L) -> L; label(M) when is_map(M) -> maps:fold(fun (K, V, Acc = <<>>) -> - <<Acc/binary, K/binary, "=\"", V/binary, "\"">>; + <<Acc/binary, K/binary, "=\"", (escape_label_value(V))/binary, "\"">>; (K, V, Acc) -> - <<Acc/binary, ",", K/binary, "=\"", V/binary, "\"">> + <<Acc/binary, ",", K/binary, "=\"", (escape_label_value(V))/binary, "\"">> end, <<>>, M); label(#resource{virtual_host = VHost, kind = exchange, name = Name}) -> - <<"vhost=\"", VHost/binary, "\",exchange=\"", Name/binary, "\"">>; + <<"vhost=\"", (escape_label_value(VHost))/binary, "\",", + "exchange=\"", (escape_label_value(Name))/binary, "\"">>; label(#resource{virtual_host = VHost, kind = queue, name = Name}) -> - <<"vhost=\"", VHost/binary, "\",queue=\"", Name/binary, "\"">>; + <<"vhost=\"", (escape_label_value(VHost))/binary, "\",", + "queue=\"", (escape_label_value(Name))/binary, "\"">>; label({P, {#resource{virtual_host = QVHost, kind = queue, name = QName}, #resource{virtual_host = EVHost, kind = exchange, name = EName}}}) when is_pid(P) -> %% channel_queue_exchange_metrics {channel_id, {queue_id, exchange_id}} <<"channel=\"", (iolist_to_binary(pid_to_list(P)))/binary, "\",", - "queue_vhost=\"", QVHost/binary, "\",", - "queue=\"", QName/binary, "\",", - "exchange_vhost=\"", EVHost/binary, "\",", - "exchange=\"", EName/binary, "\"" + "queue_vhost=\"", (escape_label_value(QVHost))/binary, "\",", + "queue=\"", (escape_label_value(QName))/binary, "\",", + "exchange_vhost=\"", (escape_label_value(EVHost))/binary, "\",", + "exchange=\"", (escape_label_value(EName))/binary, "\"" >>; label({RemoteAddress, Username, Protocol}) when is_binary(RemoteAddress), is_binary(Username), is_atom(Protocol) -> @@ -488,7 +546,7 @@ emit_gauge_metric_if_defined(Labels, Value) -> gauge_metric(Labels, Value) end. -get_data(connection_metrics = Table, false, _, _) -> +get_data(connection_metrics = Table, false, _) -> {Table, A1, A2, A3, A4} = ets:foldl(fun({_, Props}, {T, A1, A2, A3, A4}) -> {T, sum(proplists:get_value(recv_cnt, Props), A1), @@ -497,7 +555,7 @@ get_data(connection_metrics = Table, false, _, _) -> sum(proplists:get_value(channels, Props), A4)} end, empty(Table), Table), [{Table, [{recv_cnt, A1}, {send_cnt, A2}, {send_pend, A3}, {channels, A4}]}]; -get_data(channel_metrics = Table, false, _, _) -> +get_data(channel_metrics = Table, false, _) -> {Table, A1, A2, A3, A4, A5, A6, A7} = ets:foldl(fun({_, Props}, {T, A1, A2, A3, A4, A5, A6, A7}) -> {T, @@ -512,40 +570,22 @@ get_data(channel_metrics = Table, false, _, _) -> [{Table, [{consumer_count, A1}, {messages_unacknowledged, A2}, {messages_unconfirmed, A3}, {messages_uncommitted, A4}, {acks_uncommitted, A5}, {prefetch_count, A6}, {global_prefetch_count, A7}]}]; -get_data(queue_consumer_count = MF, false, VHostsFilter, QueuesFilter) -> +get_data(queue_consumer_count = MF, false, VHostsFilter) -> Table = queue_metrics, %% Real table name {_, A1} = ets:foldl(fun - ({#resource{kind = queue, virtual_host = VHost}, _, _}, Acc) when is_map(VHostsFilter), map_get(VHost, VHostsFilter) == false -> + ({#resource{kind = queue, virtual_host = VHost}, _, _}, Acc) when is_map(VHostsFilter), map_get(VHost, VHostsFilter) == false -> Acc; - ({#resource{kind = queue, name = Name}, Props, _}, {T, A1} = Acc) - when is_list(QueuesFilter) -> - case re:run(Name, QueuesFilter, [{capture, none}]) of - match -> - Acc; - nomatch -> - {T, - sum(proplists:get_value(consumers, Props), A1) - } - end; ({_, Props, _}, {T, A1}) -> {T, sum(proplists:get_value(consumers, Props), A1) } end, empty(MF), Table), [{Table, [{consumers, A1}]}]; -get_data(queue_metrics = Table, false, VHostsFilter, QueuesFilter) -> - {Table, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16} =
+get_data(queue_metrics = Table, false, VHostsFilter) -> + {Table, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17} = ets:foldl(fun ({#resource{kind = queue, virtual_host = VHost}, _, _}, Acc) when is_map(VHostsFilter), map_get(VHost, VHostsFilter) == false -> Acc; - ({#resource{kind = queue, name = Name}, Props, _}, Acc) - when is_list(QueuesFilter) -> - case re:run(Name, QueuesFilter, [{capture, none}]) of - match -> - Acc; - nomatch -> - sum_queue_metrics(Props, Acc) - end; ({_, Props, _}, Acc) -> sum_queue_metrics(Props, Acc) end, empty(Table), Table), @@ -555,11 +595,14 @@ get_data(queue_metrics = Table, false, VHostsFilter, QueuesFilter) -> {messages_bytes_persistent, A9}, {message_bytes, A10}, {message_bytes_ready, A11}, {message_bytes_unacknowledged, A12}, {messages_paged_out, A13}, {message_bytes_paged_out, A14}, - {disk_reads, A15}, {disk_writes, A16}]}]; -get_data(Table, false, VHostsFilter, QueuesFilter) when Table == channel_exchange_metrics; + {disk_reads, A15}, {disk_writes, A16}, {segments, A17}]}]; +get_data(Table, false, VHostsFilter) when Table == channel_exchange_metrics; Table == queue_coarse_metrics; + Table == queue_delivery_metrics; Table == channel_queue_metrics; Table == connection_coarse_metrics; + Table == exchange_metrics; + Table == queue_exchange_metrics; Table == channel_queue_exchange_metrics; Table == ra_metrics; Table == channel_process_metrics -> @@ -567,14 +610,10 @@ get_data(Table, false, VHostsFilter, QueuesFilter) when Table == channel_exchang %% For queue_coarse_metrics ({#resource{kind = queue, virtual_host = VHost}, _, _, _, _}, Acc) when is_map(VHostsFilter), map_get(VHost, VHostsFilter) == false -> Acc; - ({#resource{kind = queue, name = Name}, V1, V2, V3, V4}, {T, A1, A2, A3, A4} = Acc) - when is_list(QueuesFilter) -> - case re:run(Name, QueuesFilter, [{capture, none}]) of - match -> - Acc; - nomatch -> - {T, V1 + A1, V2 + A2, V3 + A3, V4 + A4} - end; + ({#resource{kind = queue, virtual_host = VHost}, _, _, _, _, _, _, _, _}, Acc) when is_map(VHostsFilter), map_get(VHost, VHostsFilter) == false -> + Acc; + ({{#resource{kind = queue, virtual_host = VHost}, #resource{kind = exchange}}, _, _}, Acc) when is_map(VHostsFilter), map_get(VHost, VHostsFilter) == false -> + Acc; ({_, V1}, {T, A1}) -> {T, V1 + A1}; ({_, V1, _}, {T, A1}) -> @@ -601,14 +640,44 @@ get_data(Table, false, VHostsFilter, QueuesFilter) when Table == channel_exchang _ -> [Result] end; -get_data(queue_coarse_metrics = Table, true, VHostsFilter, _) when is_map(VHostsFilter) -> +get_data(exchange_metrics = Table, true, VHostsFilter) when is_map(VHostsFilter)-> + ets:foldl(fun + ({#resource{kind = exchange, virtual_host = VHost}, _, _, _, _, _} = Row, Acc) when + map_get(VHost, VHostsFilter) + -> + [Row | Acc]; + (_Row, Acc) -> + Acc + end, [], Table); +get_data(queue_delivery_metrics = Table, true, VHostsFilter) when is_map(VHostsFilter) -> + ets:foldl(fun + ({#resource{kind = queue, virtual_host = VHost}, _, _, _, _, _, _, _, _} = Row, Acc) when + map_get(VHost, VHostsFilter) + -> + [Row | Acc]; + (_Row, Acc) -> + Acc + end, [], Table); +get_data(queue_exchange_metrics = Table, true, VHostsFilter) when is_map(VHostsFilter) -> + ets:foldl(fun + ({{ + #resource{kind = queue, virtual_host = VHost}, + #resource{kind = exchange, virtual_host = VHost} + }, _, _} = Row, Acc) when + map_get(VHost, VHostsFilter) + -> + [Row | Acc]; + (_Row, Acc) -> + Acc + end, [], Table); +get_data(queue_coarse_metrics = Table, true, VHostsFilter) when is_map(VHostsFilter) -> 
ets:foldl(fun ({#resource{kind = queue, virtual_host = VHost}, _, _, _, _} = Row, Acc) when map_get(VHost, VHostsFilter) -> [Row|Acc]; (_, Acc) -> Acc end, [], Table); -get_data(MF, true, VHostsFilter, _) when is_map(VHostsFilter), MF == queue_metrics orelse MF == queue_consumer_count -> +get_data(MF, true, VHostsFilter) when is_map(VHostsFilter), MF == queue_metrics orelse MF == queue_consumer_count -> Table = queue_metrics, ets:foldl(fun ({#resource{kind = queue, virtual_host = VHost}, _, _} = Row, Acc) when map_get(VHost, VHostsFilter) -> @@ -616,16 +685,48 @@ get_data(MF, true, VHostsFilter, _) when is_map(VHostsFilter), MF == queue_metri (_, Acc) -> Acc end, [], Table); -get_data(queue_consumer_count, true, _, _) -> +get_data(queue_consumer_count, true, _) -> ets:tab2list(queue_metrics); -get_data(vhost_status, _, _, _) -> +get_data(vhost_status, _, _) -> [ { #{<<"vhost">> => VHost}, case rabbit_vhost_sup_sup:is_vhost_alive(VHost) of true -> 1; false -> 0 end} || VHost <- rabbit_vhost:list() ]; -get_data(exchange_bindings, _, _, _) -> +get_data(node_memory, _, _) -> + BreakdownPL = rabbit_vm:memory(), + KeysOfInterest = [ + code, + connection_readers, + connection_writers, + connection_channels, + connection_other, + queue_procs, + quorum_queue_procs, + quorum_queue_dlx_procs, + stream_queue_procs, + stream_queue_replica_reader_procs, + stream_queue_coordinator_procs, + plugins, + metadata_store, + other_proc, + metrics, + mgmt_db, + mnesia, + quorum_ets, + metadata_store_ets, + other_ets, + binary, + msg_index, + atom, + other_system, + allocated_unused, + reserved_unallocated + ], + Data = maps:to_list(maps:with(KeysOfInterest, maps:from_list(BreakdownPL))), + [{node_memory, Data}]; +get_data(exchange_bindings, _, _) -> Exchanges = lists:foldl(fun (#exchange{internal = true}, Acc) -> Acc; @@ -634,21 +735,22 @@ get_data(exchange_bindings, _, _, _) -> (#exchange{name = EName, type = EType}, Acc) -> maps:put(EName, #{type => atom_to_binary(EType), binding_count => 0}, Acc) end, #{}, rabbit_exchange:list()), - WithCount = ets:foldl( - fun (#route{binding = #binding{source = EName}}, Acc) -> + WithCount = rabbit_db_binding:fold( + fun (#binding{source = EName}, Acc) -> case maps:is_key(EName, Acc) of false -> Acc; true -> - maps:update_with(EName, fun (R = #{binding_count := Cnt}) -> - R#{binding_count => Cnt + 1} - end, Acc) + maps:update_with(EName, + fun (R = #{binding_count := Cnt}) -> + R#{binding_count => Cnt + 1} + end, Acc) end - end, Exchanges, rabbit_route), + end, Exchanges), maps:fold(fun(#resource{virtual_host = VHost, name = Name}, #{type := Type, binding_count := Bindings}, Acc) -> [{<<"vhost=\"", VHost/binary, "\",exchange=\"", Name/binary, "\",type=\"", Type/binary, "\"">>, Bindings}|Acc] end, [], WithCount); -get_data(exchange_names, _, _, _) -> +get_data(exchange_names, _, _) -> lists:foldl(fun (#exchange{internal = true}, Acc) -> Acc; @@ -658,12 +760,12 @@ get_data(exchange_names, _, _, _) -> Label = <<"vhost=\"", VHost/binary, "\",exchange=\"", Name/binary, "\",type=\"", (atom_to_binary(EType))/binary, "\"">>, [{Label, 1}|Acc] end, [], rabbit_exchange:list()); -get_data(Table, _, _, _) -> +get_data(Table, _, _) -> ets:tab2list(Table). 
sum_queue_metrics(Props, {T, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, - A12, A13, A14, A15, A16}) -> + A12, A13, A14, A15, A16, A17}) -> {T, sum(proplists:get_value(consumers, Props), A1), sum(proplists:get_value(consumer_utilisation, Props), A2), @@ -680,7 +782,8 @@ sum_queue_metrics(Props, {T, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, sum(proplists:get_value(messages_paged_out, Props), A13), sum(proplists:get_value(message_bytes_paged_out, Props), A14), sum(proplists:get_value(disk_reads, Props), A15), - sum(proplists:get_value(disk_writes, Props), A16) + sum(proplists:get_value(disk_writes, Props), A16), + sum(proplists:get_value(segments, Props), A17) }. division(0, 0) -> @@ -691,18 +794,18 @@ division(A, B) -> accumulate_count_and_sum(Value, {Count, Sum}) -> {Count + 1, Sum + Value}. -empty(T) when T == channel_queue_exchange_metrics; T == channel_process_metrics; T == queue_consumer_count -> +empty(T) when T == channel_queue_exchange_metrics; T == queue_exchange_metrics; T == channel_process_metrics; T == queue_consumer_count -> {T, 0}; empty(T) when T == connection_coarse_metrics; T == auth_attempt_metrics; T == auth_attempt_detailed_metrics -> {T, 0, 0, 0}; -empty(T) when T == channel_exchange_metrics; T == queue_coarse_metrics; T == connection_metrics -> +empty(T) when T == channel_exchange_metrics; T == exchange_metrics; T == queue_coarse_metrics; T == connection_metrics -> {T, 0, 0, 0, 0}; empty(T) when T == ra_metrics -> {T, 0, 0, 0, 0, 0, {0, 0}}; -empty(T) when T == channel_queue_metrics; T == channel_metrics -> +empty(T) when T == channel_queue_metrics; T == queue_delivery_metrics; T == channel_metrics -> {T, 0, 0, 0, 0, 0, 0, 0}; empty(queue_metrics = T) -> - {T, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}. + {T, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}. sum(undefined, B) -> B; @@ -731,10 +834,3 @@ vhosts_filter_from_pdict() -> maps:merge(All, Enabled) end. -queues_filter_from_pdict() -> - case get(prometheus_queue_filter) of - undefined -> - false; - Pattern -> - Pattern - end. diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_dynamic_collector.erl b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_dynamic_collector.erl new file mode 100644 index 000000000000..32f60937f965 --- /dev/null +++ b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_dynamic_collector.erl @@ -0,0 +1,42 @@ +%%% Collector for dynamic metrics that are calculated at collection time +-module(prometheus_rabbitmq_dynamic_collector). + +-behaviour(prometheus_collector). +-include_lib("prometheus/include/prometheus.hrl"). + +-export([deregister_cleanup/1, + collect_mf/2]). + +-define(METRIC_NAME_PREFIX, "rabbitmq_"). + +-define(METRICS, [{unreachable_cluster_peers_count, gauge, + "Number of peers in the cluster the current node cannot reach."} + ]). + +%%==================================================================== +%% Collector API +%%==================================================================== + +deregister_cleanup(_) -> ok. + +collect_mf(_Registry, Callback) -> + _ = lists:foreach( + fun({Name, Type, Help}) -> + Callback( + prometheus_model_helpers:create_mf( + ?METRIC_NAME(Name), + Help, + Type, + values(Name)) + ) + end, + ?METRICS + ), + ok. + +%%==================================================================== +%% Private Parts +%%==================================================================== + +values(unreachable_cluster_peers_count) -> + [{[], length(rabbit_nodes:list_unreachable())}]. 
diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_global_metrics_collector.erl b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_global_metrics_collector.erl index f1341116a41e..af2073737724 100644 --- a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_global_metrics_collector.erl +++ b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_global_metrics_collector.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(prometheus_rabbitmq_global_metrics_collector). diff --git a/deps/rabbitmq_prometheus/src/rabbit_prometheus_app.erl b/deps/rabbitmq_prometheus/src/rabbit_prometheus_app.erl index ee5ae37cd955..3a7debe05399 100644 --- a/deps/rabbitmq_prometheus/src/rabbit_prometheus_app.erl +++ b/deps/rabbitmq_prometheus/src/rabbit_prometheus_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_prometheus_app). diff --git a/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl b/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl index 8125ddcc4b2e..850494e00666 100644 --- a/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl +++ b/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_prometheus_dispatcher). @@ -17,6 +17,7 @@ build_dispatcher() -> prometheus_rabbitmq_core_metrics_collector, prometheus_rabbitmq_global_metrics_collector, prometheus_rabbitmq_alarm_metrics_collector, + prometheus_rabbitmq_dynamic_collector, prometheus_process_collector]), prometheus_registry:register_collectors('per-object', [ prometheus_vm_system_info_collector, @@ -31,6 +32,9 @@ build_dispatcher() -> prometheus_registry:register_collectors('detailed', [ prometheus_rabbitmq_core_metrics_collector ]), + prometheus_registry:register_collectors('memory-breakdown', [ + prometheus_rabbitmq_core_metrics_collector + ]), rabbit_prometheus_handler:setup(), cowboy_router:compile([{'_', dispatcher()}]). diff --git a/deps/rabbitmq_prometheus/src/rabbit_prometheus_handler.erl b/deps/rabbitmq_prometheus/src/rabbit_prometheus_handler.erl index b0df4b5f24ae..ff780d273042 100644 --- a/deps/rabbitmq_prometheus/src/rabbit_prometheus_handler.erl +++ b/deps/rabbitmq_prometheus/src/rabbit_prometheus_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. 
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_prometheus_handler). @@ -10,7 +10,6 @@ -export([generate_response/2, content_types_provided/2, is_authorized/2]). -export([setup/0]). --include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("rabbitmq_web_dispatch/include/rabbitmq_web_dispatch_records.hrl"). -define(SCRAPE_DURATION, telemetry_scrape_duration_seconds). @@ -47,6 +46,7 @@ is_authorized(ReqData, Context) -> setup() -> setup_metrics(telemetry_registry()), setup_metrics('per-object'), + setup_metrics('memory-breakdown'), setup_metrics('detailed'). setup_metrics(Registry) -> @@ -173,12 +173,6 @@ put_filtering_options_into_process_dictionary(Request) -> put(prometheus_mf_filter, Fs); _ -> ok end, - case application:get_env(rabbitmq_prometheus, filter_aggregated_queue_metrics_pattern, undefined) of - undefined -> ok; - Pattern -> - {ok, CompiledPattern} = re:compile(Pattern), - put(prometheus_queue_filter, CompiledPattern) - end, ok. parse_vhosts(N) when is_binary(N) -> diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE.erl b/deps/rabbitmq_prometheus/test/config_schema_SUITE.erl index a66fdecca8a4..eafe59e5244d 100644 --- a/deps/rabbitmq_prometheus/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/rabbitmq_prometheus.snippets b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/rabbitmq_prometheus.snippets index b29b52b99998..c31033e277ba 100644 --- a/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/rabbitmq_prometheus.snippets +++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/rabbitmq_prometheus.snippets @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% [ diff --git a/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl b/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl index 86e83e35ac19..1a9c514391be 100644 --- a/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl +++ b/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_prometheus_http_SUITE). @@ -24,7 +24,9 @@ all() -> {group, per_object_endpoint_metrics}, {group, commercial}, {group, detailed_metrics}, - {group, authentication} + {group, special_chars}, + {group, authentication}, + {group, memory_breakdown_endpoint_metrics} ]. 
groups() -> @@ -33,7 +35,7 @@ groups() -> {config_path, [], generic_tests()}, {global_labels, [], generic_tests()}, {aggregated_metrics, [], [ - aggregated_metrics_test, + aggregated_metrics_test, specific_erlang_metrics_present_test, global_metrics_present_test, global_metrics_single_metric_family_test @@ -48,6 +50,9 @@ groups() -> endpoint_per_object_metrics, specific_erlang_metrics_present_test ]}, + {memory_breakdown_endpoint_metrics, [], [ + memory_breakdown_metrics_test + ]}, {commercial, [], [ build_info_product_test ]}, @@ -56,12 +61,16 @@ groups() -> queue_consumer_count_single_vhost_per_object_test, queue_consumer_count_all_vhosts_per_object_test, queue_coarse_metrics_per_object_test, + queue_delivery_metrics_per_object_test, + exchange_metrics_per_object_test, + queue_exchange_metrics_per_object_test, queue_metrics_per_object_test, queue_consumer_count_and_queue_metrics_mutually_exclusive_test, vhost_status_metric, exchange_bindings_metric, exchange_names_metric ]}, + {special_chars, [], [core_metrics_special_chars]}, {authentication, [], [basic_auth]} ]. @@ -206,10 +215,45 @@ init_per_group(commercial, Config0) -> Config1 = rabbit_ct_helpers:merge_app_env(Config0, ProductConfig), init_per_group(commercial, Config1, []); +init_per_group(special_chars, Config0) -> + StatsEnv = {rabbit, [{collect_statistics, fine}, {collect_statistics_interval, 100}]}, + Config1 = init_per_group(special_chars, rabbit_ct_helpers:merge_app_env(Config0, StatsEnv), []), + + VHost = <<"vhost\"\n\\">>, + rabbit_ct_broker_helpers:add_vhost(Config1, 0, VHost, <<"guest">>), + rabbit_ct_broker_helpers:set_full_permissions(Config1, VHost), + VHostConn = rabbit_ct_client_helpers:open_unmanaged_connection(Config1, 0, VHost), + {ok, VHostCh} = amqp_connection:open_channel(VHostConn), + + %% new line characters (\r and \n) are removed from queue and + %% exchange names during creation (unlike for vhosts) + QName = <<"queue\"\\">>, + #'queue.declare_ok'{} = amqp_channel:call(VHostCh, + #'queue.declare'{queue = QName, + durable = true + }), + Exchange = <<"exchange\"\\">>, + #'exchange.declare_ok'{} = amqp_channel:call(VHostCh, #'exchange.declare'{exchange = Exchange}), + #'queue.bind_ok'{} = amqp_channel:call(VHostCh, #'queue.bind'{queue = QName, exchange = Exchange, routing_key = QName}), + + amqp_channel:call(VHostCh, + #'basic.publish'{exchange = Exchange, routing_key = QName}, + #amqp_msg{payload = <<"msg">>}), + + Config2 = [{vhost_name, VHost}, + {queue_name, QName}, + {exchange_name, Exchange}, + {connection, VHostConn}, + {channel, VHostCh} + |Config1], + init_per_group(special_chars, Config2, []); + init_per_group(authentication, Config) -> Config1 = rabbit_ct_helpers:merge_app_env( Config, {rabbitmq_prometheus, [{authentication, [{enabled, true}]}]}), - init_per_group(authentication, Config1, []). + init_per_group(authentication, Config1, []); +init_per_group(memory_breakdown_endpoint_metrics, Config) -> + init_per_group(memory_breakdown_endpoint_metrics, Config, []). @@ -249,6 +293,11 @@ end_per_group(detailed_metrics, Config) -> amqp_channel:close(VHost2Ch), amqp_connection:close(?config(vhost2_conn, Config)), + %% Delete queues? + end_per_group_(Config); +end_per_group(special_chars, Config) -> + amqp_channel:close(?config(channel, Config)), + amqp_connection:close(?config(connection, Config)), %% Delete queues? 
end_per_group_(Config); end_per_group(authentication, Config) -> @@ -327,12 +376,15 @@ aggregated_metrics_test(Config) -> %% Check the first metric value from each ETS table owned by rabbitmq_metrics ?assertEqual(match, re:run(Body, "^rabbitmq_channel_consumers ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_channel_messages_published_total ", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_exchange_messages_published_total ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_channel_process_reductions_total ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_channel_get_ack_total ", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_queue_get_ack_total ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_connections_opened_total ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_connection_incoming_bytes_total ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_connection_incoming_packets_total ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_queue_messages_published_total ", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_queue_exchange_messages_published_total ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_process_open_fds ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_process_max_fds ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_io_read_ops_total ", [{capture, none}, multiline])), @@ -363,12 +415,15 @@ per_object_metrics_test(Config, Path) -> %% Check the first metric value from each ETS table owned by rabbitmq_metrics ?assertEqual(match, re:run(Body, "^rabbitmq_channel_consumers{", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_channel_messages_published_total{", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_exchange_messages_published_total{", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_channel_process_reductions_total{", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_channel_get_ack_total{", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_queue_get_ack_total{", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_connections_opened_total ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_connection_incoming_bytes_total{", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_connection_incoming_packets_total{", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_queue_messages_published_total{", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_queue_exchange_messages_published_total{", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_process_open_fds ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_process_max_fds ", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_io_read_ops_total ", [{capture, none}, multiline])), @@ -384,6 +439,12 @@ per_object_metrics_test(Config, Path) -> %% Check the first TOTALS metric value ?assertEqual(match, re:run(Body, "^rabbitmq_connections ", [{capture, none}, multiline])). 
+memory_breakdown_metrics_test(Config) -> + {_Headers, Body} = http_get_with_pal(Config, "/metrics/memory-breakdown", [], 200), + ?assertEqual(match, re:run(Body, "^rabbitmq_memory_quorum_queue_erlang_process_bytes ", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_memory_classic_queue_erlang_process_bytes ", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_memory_binary_heap_bytes ", [{capture, none}, multiline])). + build_info_test(Config) -> {_Headers, Body} = http_get_with_pal(Config, [], 200), ?assertEqual(match, re:run(Body, "^rabbitmq_build_info{", [{capture, none}, multiline])), @@ -483,6 +544,93 @@ queue_coarse_metrics_per_object_test(Config) -> map_get(rabbitmq_detailed_queue_messages, parse_response(Body3))), ok. +queue_delivery_metrics_per_object_test(Config) -> + Expected1 = #{#{queue => "vhost-1-queue-with-consumer", vhost => "vhost-1"} => [7]}, + + {_, Body1} = http_get_with_pal(Config, + "/metrics/detailed?vhost=vhost-1&family=queue_delivery_metrics", + [], 200), + ?assertEqual( + Expected1, + map_get( + rabbitmq_detailed_queue_messages_delivered_ack_total, + parse_response(Body1))), + + {_, Body2} = http_get_with_pal(Config, + "/metrics/detailed?vhost=vhost-2&family=queue_delivery_metrics", + [], 200), + Expected2 = #{#{queue => "vhost-2-queue-with-consumer", vhost => "vhost-2"} => [11]}, + + ?assertEqual( + Expected2, + map_get( + rabbitmq_detailed_queue_messages_delivered_ack_total, + parse_response(Body2))), + ok. + +exchange_metrics_per_object_test(Config) -> + Expected1 = #{#{exchange => "", vhost => "vhost-1"} => [14]}, + + {_, Body} = http_get_with_pal(Config, + "/metrics/detailed?vhost=vhost-1&family=exchange_metrics", + [], 200), + ?assertEqual( + Expected1, + map_get( + rabbitmq_detailed_exchange_messages_published_total, + parse_response(Body))), + ok. + +queue_exchange_metrics_per_object_test(Config) -> + Expected1 = #{ + #{ + queue => "vhost-1-queue-with-messages", + vhost => "vhost-1", + exchange => "" + } => [7], + #{ + exchange => "", + queue => "vhost-1-queue-with-consumer", + vhost => "vhost-1" + } => [7] + }, + + {_, Body1} = http_get_with_pal(Config, + "/metrics/detailed?vhost=vhost-1&family=queue_exchange_metrics", + [], 200), + ?assertEqual( + Expected1, + map_get( + rabbitmq_detailed_queue_exchange_messages_published_total, + parse_response(Body1))), + + + {_, Body2} = http_get_with_pal(Config, + "/metrics/detailed?vhost=vhost-2&family=queue_exchange_metrics", + [], 200), + + + Expected2 = #{ + #{ + queue => "vhost-2-queue-with-messages", + vhost => "vhost-2", + exchange => "" + } => [11], + #{ + exchange => "", + queue => "vhost-2-queue-with-consumer", + vhost => "vhost-2" + } => [11] + }, + + ?assertEqual( + Expected2, + map_get( + rabbitmq_detailed_queue_exchange_messages_published_total, + parse_response(Body2))), + + ok. + queue_metrics_per_object_test(Config) -> Expected1 = #{#{queue => "vhost-1-queue-with-consumer", vhost => "vhost-1"} => [7], #{queue => "vhost-1-queue-with-messages", vhost => "vhost-1"} => [1]}, @@ -560,6 +708,32 @@ exchange_names_metric(Config) -> }, Names), ok. 
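%% The /metrics/detailed endpoint exercised by the tests above is a plain HTTP
%% GET that takes `vhost` and `family` query parameters and prefixes every
%% returned metric with rabbitmq_detailed_. A minimal sketch of querying it
%% directly with httpc, assuming a local node with rabbitmq_prometheus
%% listening on its default port 15692 and no authentication enabled; the
%% function below is illustrative and not part of this suite:
fetch_detailed(VHost, Family) ->
    _ = application:ensure_all_started(inets),
    URL = "http://localhost:15692/metrics/detailed?vhost=" ++ VHost ++
          "&family=" ++ Family,
    {ok, {{_, 200, _}, _Headers, Body}} =
        httpc:request(get, {URL, []}, [], []),
    %% e.g. fetch_detailed("vhost-1", "queue_exchange_metrics") returns lines
    %% such as:
    %%   rabbitmq_detailed_queue_exchange_messages_published_total{...} 7
    Body.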
+core_metrics_special_chars(Config) -> + {_, Body1} = http_get_with_pal(Config, "/metrics/detailed?family=queue_coarse_metrics", [], 200), + ?assertMatch(#{rabbitmq_detailed_queue_messages := + #{#{vhost => "vhost\\\"\\n\\\\", + queue => "queue\\\"\\\\"} := [I]}} + when I == 0; I == 1, + parse_response(Body1)), + + {_, Body2} = http_get_with_pal(Config, "/metrics/detailed?family=channel_exchange_metrics", [], 200), + #{rabbitmq_detailed_channel_messages_published_total := LabelValue2} = parse_response(Body2), + ?assertMatch([{#{channel := _, + vhost := "vhost\\\"\\n\\\\", + exchange := "exchange\\\"\\\\"}, [I]}] + when I == 0; I == 1, + maps:to_list(LabelValue2)), + + {_, Body3} = http_get_with_pal(Config, "/metrics/detailed?family=channel_queue_exchange_metrics", [], 200), + #{rabbitmq_detailed_queue_messages_published_total := LabelValue3} = parse_response(Body3), + ?assertMatch([{#{channel := _, + queue_vhost := "vhost\\\"\\n\\\\", + queue := "queue\\\"\\\\", + exchange_vhost := "vhost\\\"\\n\\\\", + exchange := "exchange\\\"\\\\"}, [I]}] + when I == 0; I == 1, + maps:to_list(LabelValue3)), + ok. basic_auth(Config) -> http_get(Config, [{"accept-encoding", "deflate"}], 401), diff --git a/deps/rabbitmq_prometheus/test/rabbitmq_prometheus_collector_test_proxy.erl b/deps/rabbitmq_prometheus/test/rabbitmq_prometheus_collector_test_proxy.erl new file mode 100644 index 000000000000..1e6b983cbefd --- /dev/null +++ b/deps/rabbitmq_prometheus/test/rabbitmq_prometheus_collector_test_proxy.erl @@ -0,0 +1,12 @@ +-module(rabbitmq_prometheus_collector_test_proxy). + +-export([collect_mf/2]). + +-define(PD_KEY, metric_families). + +collect_mf(Registry, Collector) -> + put(?PD_KEY, []), + Collector:collect_mf(Registry, fun(MF) -> put(?PD_KEY, [MF | get(?PD_KEY)]) end), + MFs = lists:reverse(get(?PD_KEY)), + erase(?PD_KEY), + MFs. diff --git a/deps/rabbitmq_random_exchange/.gitignore b/deps/rabbitmq_random_exchange/.gitignore deleted file mode 100644 index 1dde0fdd27af..000000000000 --- a/deps/rabbitmq_random_exchange/.gitignore +++ /dev/null @@ -1,19 +0,0 @@ -.DS_Store -erl_crash.dump -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -/rabbitmq_random_exchange.d diff --git a/deps/rabbitmq_random_exchange/LICENSE-APACHE2 b/deps/rabbitmq_random_exchange/LICENSE-APACHE2 index 5e695243944b..bac71976fa6c 100644 --- a/deps/rabbitmq_random_exchange/LICENSE-APACHE2 +++ b/deps/rabbitmq_random_exchange/LICENSE-APACHE2 @@ -187,9 +187,8 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2007-2013 VMware, Inc. Copyright 2011 Jon Brisbin. - Copyright 2013-2020 VMware, Inc. or its affiliates. + Copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/deps/rabbitmq_random_exchange/src/rabbit_exchange_type_random.erl b/deps/rabbitmq_random_exchange/src/rabbit_exchange_type_random.erl index 7b86152d2775..1fae847eb209 100644 --- a/deps/rabbitmq_random_exchange/src/rabbit_exchange_type_random.erl +++ b/deps/rabbitmq_random_exchange/src/rabbit_exchange_type_random.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_exchange_type_random). @@ -19,8 +19,8 @@ -export([ add_binding/3, assert_args_equivalence/2, - create/2, - delete/2, + create/2, + delete/2, policy_changed/2, description/0, recover/2, diff --git a/deps/rabbitmq_recent_history_exchange/.gitignore b/deps/rabbitmq_recent_history_exchange/.gitignore deleted file mode 100644 index ce64405dd20e..000000000000 --- a/deps/rabbitmq_recent_history_exchange/.gitignore +++ /dev/null @@ -1,19 +0,0 @@ -.DS_Store -erl_crash.dump -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -rabbitmq_recent_history_exchange.d diff --git a/deps/rabbitmq_recent_history_exchange/BUILD.bazel b/deps/rabbitmq_recent_history_exchange/BUILD.bazel index 47a8e881c4f3..73121ad44906 100644 --- a/deps/rabbitmq_recent_history_exchange/BUILD.bazel +++ b/deps/rabbitmq_recent_history_exchange/BUILD.bazel @@ -24,8 +24,6 @@ APP_DESCRIPTION = "RabbitMQ Recent History Exchange" all_beam_files(name = "all_beam_files") -all_test_beam_files(name = "all_test_beam_files") - all_srcs(name = "all_srcs") test_suite_beam_files(name = "test_suite_beam_files") @@ -43,6 +41,8 @@ rabbitmq_app( deps = [ "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", ], ) @@ -86,3 +86,5 @@ alias( actual = ":erlang_app", visibility = ["//visibility:public"], ) + +all_test_beam_files(name = "all_test_beam_files") diff --git a/deps/rabbitmq_recent_history_exchange/Makefile b/deps/rabbitmq_recent_history_exchange/Makefile index d4b71fdc4f00..045382e11bff 100644 --- a/deps/rabbitmq_recent_history_exchange/Makefile +++ b/deps/rabbitmq_recent_history_exchange/Makefile @@ -5,9 +5,11 @@ define PROJECT_APP_EXTRA_KEYS {broker_version_requirements, []} endef -DEPS = rabbit_common rabbit +DEPS = rabbit_common rabbit khepri khepri_mnesia_migration TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client +PLT_APPS += mnesia + DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_recent_history_exchange/app.bzl b/deps/rabbitmq_recent_history_exchange/app.bzl index db38feebd080..3bd05fe8ae54 100644 --- a/deps/rabbitmq_recent_history_exchange/app.bzl +++ b/deps/rabbitmq_recent_history_exchange/app.bzl @@ -10,6 +10,7 @@ def all_beam_files(name = "all_beam_files"): name = "other_beam", srcs = [ "src/rabbit_db_rh_exchange.erl", + "src/rabbit_db_rh_exchange_m2k_converter.erl", "src/rabbit_exchange_type_recent_history.erl", ], hdrs = [":public_and_private_hdrs"], @@ -19,6 +20,8 @@ def all_beam_files(name = "all_beam_files"): deps = [ "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", ], ) @@ -33,6 +36,7 @@ def all_test_beam_files(name = "all_test_beam_files"): testonly = True, srcs = [ "src/rabbit_db_rh_exchange.erl", + "src/rabbit_db_rh_exchange_m2k_converter.erl", "src/rabbit_exchange_type_recent_history.erl", ], hdrs = [":public_and_private_hdrs"], @@ -42,6 +46,8 @@ def all_test_beam_files(name = "all_test_beam_files"): deps = [ "//deps/rabbit:erlang_app", "//deps/rabbit_common:erlang_app", + "@khepri//:erlang_app", + "@khepri_mnesia_migration//:erlang_app", ], ) @@ -63,6 +69,7 
@@ def all_srcs(name = "all_srcs"): name = "srcs", srcs = [ "src/rabbit_db_rh_exchange.erl", + "src/rabbit_db_rh_exchange_m2k_converter.erl", "src/rabbit_exchange_type_recent_history.erl", ], ) diff --git a/deps/rabbitmq_recent_history_exchange/include/rabbit_recent_history.hrl b/deps/rabbitmq_recent_history_exchange/include/rabbit_recent_history.hrl index d127d0853c48..fe70feb45110 100644 --- a/deps/rabbitmq_recent_history_exchange/include/rabbit_recent_history.hrl +++ b/deps/rabbitmq_recent_history_exchange/include/rabbit_recent_history.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(KEEP_NB, 20). diff --git a/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange.erl b/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange.erl index 915b27c885e3..c50fc93a189f 100644 --- a/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange.erl +++ b/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange.erl @@ -2,12 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_db_rh_exchange). -include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include("rabbit_recent_history.hrl"). -export([ @@ -18,12 +19,21 @@ delete/1 ]). +-export([khepri_recent_history_path/1, + khepri_recent_history_path/0]). + +-rabbit_mnesia_tables_to_khepri_db( + [{?RH_TABLE, rabbit_db_rh_exchange_m2k_converter}]). + %% ------------------------------------------------------------------- %% setup_schema(). %% ------------------------------------------------------------------- setup_schema() -> - setup_schema_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> setup_schema_in_mnesia() end, + khepri => fun() -> ok end + }). setup_schema_in_mnesia() -> _ = mnesia:create_table(?RH_TABLE, @@ -39,12 +49,24 @@ setup_schema_in_mnesia() -> %% ------------------------------------------------------------------- get(XName) -> - get_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> get_in_mnesia(XName) end, + khepri => fun() -> get_in_khepri(XName) end + }). get_in_mnesia(XName) -> rabbit_mnesia:execute_mnesia_transaction( fun() -> get_in_mnesia_tx(XName) end). +get_in_khepri(XName) -> + Path = khepri_recent_history_path(XName), + case rabbit_khepri:get(Path) of + {ok, Cached} -> + Cached; + _ -> + [] + end. + get_in_mnesia_tx(XName) -> case mnesia:read(?RH_TABLE, XName) of [] -> @@ -58,7 +80,10 @@ get_in_mnesia_tx(XName) -> %% ------------------------------------------------------------------- insert(XName, Message, Length) -> - insert_in_mnesia(XName, Message, Length). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> insert_in_mnesia(XName, Message, Length) end, + khepri => fun() -> insert_in_khepri(XName, Message, Length) end + }). 
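%% The pattern used above for setup_schema/0, get/1 and insert/3:
%% rabbit_khepri:handle_fallback/1 takes one fun per metadata store and runs
%% the one matching the store the node currently uses, covering the window in
%% which a migration from Mnesia to Khepri is in flight. A generic,
%% illustrative sketch of wrapping a table read the same way; the function
%% name, table and Khepri path below are hypothetical, not part of this module:
with_metadata_store(Table, Key) ->
    rabbit_khepri:handle_fallback(
      #{mnesia => fun() ->
                          rabbit_mnesia:execute_mnesia_transaction(
                            fun() -> mnesia:read(Table, Key) end)
                  end,
        khepri => fun() ->
                          rabbit_khepri:get([?MODULE, Table, Key])
                  end}).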
insert_in_mnesia(XName, Message, Length) -> rabbit_mnesia:execute_mnesia_transaction( @@ -78,21 +103,76 @@ insert0_in_mnesia(Key, Cached, Message, Length) -> content = [Message|lists:sublist(Cached, Length-1)]}, write). +insert_in_khepri(XName, Message, Length) -> + Path = khepri_recent_history_path(XName), + case rabbit_khepri:adv_get(Path) of + {ok, #{data := Cached0, payload_version := DVersion}} -> + Cached = add_to_cache(Cached0, Message, Length), + Path1 = khepri_path:combine_with_conditions( + Path, [#if_payload_version{version = DVersion}]), + Ret = rabbit_khepri:put(Path1, Cached), + case Ret of + ok -> + ok; + {error, {khepri, mismatching_node, _}} -> + insert_in_khepri(XName, Message, Length); + {error, _} = Error -> + Error + end; + _ -> + Cached = add_to_cache([], Message, Length), + rabbit_khepri:put(Path, Cached) + end. + +add_to_cache(Cached, Message, undefined) -> + add_to_cache(Cached, Message, ?KEEP_NB); +add_to_cache(Cached, Message, {_Type, Length}) -> + add_to_cache(Cached, Message, Length); +add_to_cache(Cached, Message, Length) -> + [Message|lists:sublist(Cached, Length-1)]. + %% ------------------------------------------------------------------- %% delete(). %% ------------------------------------------------------------------- delete() -> - delete_in_mnesia(). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia() end, + khepri => fun() -> delete_in_khepri() end + }). delete_in_mnesia() -> - _ = mnesia:delete_table(?RH_TABLE). + case mnesia:delete_table(?RH_TABLE) of + {atomic, ok} -> + ok; + {aborted, Reason} -> + {error, Reason} + end. + +delete_in_khepri() -> + rabbit_khepri:delete(khepri_recent_history_path()). delete(XName) -> - delete_in_mnesia(XName). + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_in_mnesia(XName) end, + khepri => fun() -> delete_in_khepri(XName) end + }). delete_in_mnesia(XName) -> rabbit_mnesia:execute_mnesia_transaction( fun() -> mnesia:delete(?RH_TABLE, XName, write) end). + +delete_in_khepri(XName) -> + rabbit_khepri:delete(khepri_recent_history_path(XName)). + +%% ------------------------------------------------------------------- +%% paths +%% ------------------------------------------------------------------- + +khepri_recent_history_path() -> + [?MODULE, recent_history_exchange]. + +khepri_recent_history_path(#resource{virtual_host = VHost, name = Name}) -> + [?MODULE, recent_history_exchange, VHost, Name]. diff --git a/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange_m2k_converter.erl b/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange_m2k_converter.erl new file mode 100644 index 000000000000..c3e17dd525d8 --- /dev/null +++ b/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange_m2k_converter.erl @@ -0,0 +1,99 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_db_rh_exchange_m2k_converter). + +-behaviour(mnesia_to_khepri_converter). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-include("rabbit_recent_history.hrl"). 
+ +-export([init_copy_to_khepri/3, + copy_to_khepri/3, + delete_from_khepri/3, + clear_data_in_khepri/1]). + +-record(?MODULE, {}). + +-spec init_copy_to_khepri(StoreId, MigrationId, Tables) -> Ret when + StoreId :: khepri:store_id(), + MigrationId :: mnesia_to_khepri:migration_id(), + Tables :: [mnesia_to_khepri:mnesia_table()], + Ret :: {ok, Priv}, + Priv :: #?MODULE{}. +%% @private + +init_copy_to_khepri(_StoreId, _MigrationId, _Tables) -> + State = #?MODULE{}, + {ok, State}. + +-spec copy_to_khepri(Table, Record, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Record :: tuple(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +copy_to_khepri(?RH_TABLE = Table, #cached{key = Key, content = Content}, + State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_rh_exchange:khepri_recent_history_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:put(Path, Content, Extra) + end, State); +copy_to_khepri(Table, Record, State) -> + ?LOG_DEBUG("Mnesia->Khepri unexpected record table ~0p record ~0p state ~0p", + [Table, Record, State]), + {error, unexpected_record}. + +-spec delete_from_khepri(Table, Key, State) -> Ret when + Table :: mnesia_to_khepri:mnesia_table(), + Key :: any(), + State :: rabbit_db_m2k_converter:state(), + Ret :: {ok, NewState} | {error, Reason}, + NewState :: rabbit_db_m2k_converter:state(), + Reason :: any(). +%% @private + +delete_from_khepri(?RH_TABLE = Table, Key, State) -> + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] key: ~0p", + [Table, Key], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + Path = rabbit_db_rh_exchange:khepri_recent_history_path(Key), + rabbit_db_m2k_converter:with_correlation_id( + fun(CorrId) -> + Extra = #{async => CorrId}, + ?LOG_DEBUG( + "Mnesia->Khepri data delete: [~0p] path: ~0p corr: ~0p", + [Table, Path, CorrId], + #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), + rabbit_khepri:delete(Path, Extra) + end, State). + +clear_data_in_khepri(?RH_TABLE) -> + Path = rabbit_db_rh_exchange:khepri_recent_history_path(), + case rabbit_khepri:delete(Path) of + ok -> + ok; + Error -> + throw(Error) + end. diff --git a/deps/rabbitmq_recent_history_exchange/src/rabbit_exchange_type_recent_history.erl b/deps/rabbitmq_recent_history_exchange/src/rabbit_exchange_type_recent_history.erl index 525d30f76d9f..24bb8bccff47 100644 --- a/deps/rabbitmq_recent_history_exchange/src/rabbit_exchange_type_recent_history.erl +++ b/deps/rabbitmq_recent_history_exchange/src/rabbit_exchange_type_recent_history.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(rabbit_exchange_type_recent_history). -include_lib("rabbit_common/include/rabbit.hrl"). --include("rabbit_recent_history.hrl"). -behaviour(rabbit_exchange_type). @@ -68,13 +67,13 @@ validate(#exchange{arguments = Args}) -> end. 
validate_binding(_X, _B) -> ok. -create(_Tx, _X) -> ok. +create(_Serial, _X) -> ok. policy_changed(_X1, _X2) -> ok. -delete(none, #exchange{ name = XName }) -> +delete(_Tx, #exchange{ name = XName }) -> rabbit_db_rh_exchange:delete(XName). -add_binding(none, #exchange{ name = XName }, +add_binding(_Tx, #exchange{ name = XName }, #binding{ destination = #resource{kind = queue} = QName }) -> _ = case rabbit_amqqueue:lookup(QName) of {error, not_found} -> @@ -84,7 +83,7 @@ add_binding(none, #exchange{ name = XName }, deliver_messages([Q], Msgs) end, ok; -add_binding(none, #exchange{ name = XName }, +add_binding(_Tx, #exchange{ name = XName }, #binding{ destination = #resource{kind = exchange} = DestName }) -> _ = case rabbit_exchange:lookup(DestName) of {error, not_found} -> @@ -101,11 +100,9 @@ add_binding(none, #exchange{ name = XName }, end end || Msg <- Msgs] end, - ok; -add_binding(none, _Exchange, _Binding) -> ok. -remove_bindings(_Tx, _X, _Bs) -> ok. +remove_bindings(_Serial, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). @@ -117,8 +114,7 @@ setup_schema() -> disable_plugin() -> rabbit_registry:unregister(exchange, <<"x-recent-history">>), - _ = rabbit_db_rh_exchange:delete(), - ok. + rabbit_db_rh_exchange:delete(). %%---------------------------------------------------------------------------- %%private diff --git a/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl b/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl index 61862ee288a0..2e05ddb30eba 100644 --- a/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl +++ b/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module(system_SUITE). @@ -16,19 +16,33 @@ all() -> [ - {group, non_parallel_tests} + {group, mnesia_store}, + {group, khepri_store}, + {group, khepri_migration} ]. groups() -> [ - {non_parallel_tests, [], [ - default_length_test, - length_argument_test, - wrong_argument_type_test, - no_store_test, - e2e_test, - multinode_test - ]} + {mnesia_store, [], [ + {non_parallel_tests, [], all_tests()} + ]}, + {khepri_store, [], [ + {non_parallel_tests, [], all_tests()} + ]}, + {khepri_migration, [], [ + from_mnesia_to_khepri + ]} + ]. + +all_tests() -> + [ + default_length_test, + length_argument_test, + wrong_argument_type_test, + no_store_test, + e2e_test, + multinode_test, + lifecycle_test ]. %% ------------------------------------------------------------------- @@ -38,24 +52,34 @@ groups() -> init_per_suite(Config) -> inets:start(), rabbit_ct_helpers:log_environment(), - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, ?MODULE}, - {rmq_nodes_count, 2} - ]), - rabbit_ct_helpers:run_setup_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). + rabbit_ct_helpers:run_setup_steps(Config). end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()). - + rabbit_ct_helpers:run_teardown_steps(Config). 
+ +init_per_group(mnesia_store, Config) -> + rabbit_ct_helpers:set_config(Config, [{metadata_store, mnesia}]); +init_per_group(khepri_store, Config) -> + rabbit_ct_helpers:set_config( + Config, + [{metadata_store, {khepri, [khepri_db]}}]); init_per_group(_, Config) -> - Config. + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, ?MODULE}, + {rmq_nodes_count, 2} + ]), + rabbit_ct_helpers:run_setup_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). +end_per_group(mnesia_store, Config) -> + Config; +end_per_group(khepri_store, Config) -> + Config; end_per_group(_, Config) -> - Config. + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). init_per_testcase(Testcase, Config) -> TestCaseName = rabbit_ct_helpers:config_to_testcase_name(Config, Testcase), @@ -212,6 +236,16 @@ multinode_test(Config) -> rabbit_ct_client_helpers:close_connection_and_channel(Conn2, Chan2), ok. +lifecycle_test(Config) -> + %% Ensure that the boot and cleanup steps run as expected and return 'ok'. + ok = rabbit_ct_broker_helpers:rpc( + Config, + rabbit, stop_apps, [[rabbitmq_recent_history_exchange]]), + ok = rabbit_ct_broker_helpers:rpc( + Config, + rabbit, start_apps, [[rabbitmq_recent_history_exchange]]), + ok. + test0(Config, MakeMethod, MakeMsg, DeclareArgs, Queues, MsgCount, ExpectedCount) -> Chan = rabbit_ct_client_helpers:open_channel(Config), #'exchange.declare_ok'{} = @@ -279,3 +313,74 @@ qs() -> make_exchange_name(Config, Suffix) -> B = rabbit_ct_helpers:get_config(Config, test_resource_name), erlang:list_to_binary("x-" ++ B ++ "-" ++ Suffix). + +from_mnesia_to_khepri(Config) -> + MsgCount = 10, + + {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + + #'exchange.declare_ok'{} = + amqp_channel:call(Chan, + #'exchange.declare' { + exchange = make_exchange_name(Config, "1"), + type = <<"x-recent-history">>, + auto_delete = true + }), + + #'exchange.declare_ok'{} = + amqp_channel:call(Chan, + #'exchange.declare' { + exchange = make_exchange_name(Config, "2"), + type = <<"direct">>, + auto_delete = true + }), + + #'queue.declare_ok'{queue = Q} = + amqp_channel:call(Chan, #'queue.declare' { + queue = <<"q">> + }), + + #'queue.bind_ok'{} = + amqp_channel:call(Chan, #'queue.bind' { + queue = Q, + exchange = make_exchange_name(Config, "2"), + routing_key = <<"">> + }), + + #'tx.select_ok'{} = amqp_channel:call(Chan, #'tx.select'{}), + [amqp_channel:call(Chan, + #'basic.publish'{exchange = make_exchange_name(Config, "1")}, + #amqp_msg{props = #'P_basic'{}, payload = <<>>}) || + _ <- lists:duplicate(MsgCount, const)], + amqp_channel:call(Chan, #'tx.commit'{}), + + amqp_channel:call(Chan, + #'exchange.bind' { + source = make_exchange_name(Config, "1"), + destination = make_exchange_name(Config, "2"), + routing_key = <<"">> + }), + + case rabbit_ct_broker_helpers:enable_feature_flag(Config, khepri_db) of + ok -> + case rabbit_ct_broker_helpers:enable_feature_flag(Config, rabbit_recent_history_exchange_raft_based_metadata_store) of + ok -> + #'queue.declare_ok'{message_count = Count, queue = Q} = + amqp_channel:call(Chan, #'queue.declare' { + passive = true, + queue = Q + }), + ?assertEqual(MsgCount, Count), + + amqp_channel:call(Chan, #'exchange.delete' { exchange = make_exchange_name(Config, "1") }), + amqp_channel:call(Chan, #'exchange.delete' { exchange = make_exchange_name(Config, "2") }), + amqp_channel:call(Chan, #'queue.delete' 
{ queue = Q }), + + rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan), + ok; + Skip -> + Skip + end; + Skip -> + Skip + end. diff --git a/deps/rabbitmq_sharding/.gitignore b/deps/rabbitmq_sharding/.gitignore deleted file mode 100644 index 855de11b429d..000000000000 --- a/deps/rabbitmq_sharding/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr - -/rabbitmq_sharding.d diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_decorator.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_decorator.erl index 2dc2986fe7ed..d6a7c71e9752 100644 --- a/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_decorator.erl +++ b/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_decorator.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_sharding_exchange_decorator). --include_lib("rabbit_common/include/rabbit.hrl"). - -rabbit_boot_step({?MODULE, [{description, "sharding exchange decorator"}, {mfa, {rabbit_registry, register, @@ -32,7 +30,7 @@ description() -> serialise_events(_X) -> false. -create(none, X) -> +create(_Tx, X) -> _ = maybe_start_sharding(X), ok. @@ -48,7 +46,7 @@ active_for(X) -> end. %% we have to remove the policy from ?SHARDING_TABLE -delete(none, X) -> +delete(_Tx, X) -> _ = maybe_stop_sharding(X), ok. diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_type_modulus_hash.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_type_modulus_hash.erl index 98c78ac18977..cb834b039dfe 100644 --- a/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_type_modulus_hash.erl +++ b/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_type_modulus_hash.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_sharding_exchange_type_modulus_hash). @@ -34,7 +34,7 @@ description() -> serialise_events() -> false. route(#exchange{name = Name}, Msg, _Options) -> - Routes = mc:get_annotation(routing_keys, Msg), + Routes = mc:routing_keys(Msg), Qs = rabbit_router:match_routing_key(Name, ['_']), case length(Qs) of 0 -> []; diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_interceptor.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_interceptor.erl index 3be038d6f74c..04287df61f14 100644 --- a/deps/rabbitmq_sharding/src/rabbit_sharding_interceptor.erl +++ b/deps/rabbitmq_sharding/src/rabbit_sharding_interceptor.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -module(rabbit_sharding_interceptor). diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_policy_validator.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_policy_validator.erl index bfd243a3c055..152538455b84 100644 --- a/deps/rabbitmq_sharding/src/rabbit_sharding_policy_validator.erl +++ b/deps/rabbitmq_sharding/src/rabbit_sharding_policy_validator.erl @@ -2,15 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_sharding_policy_validator). -behaviour(rabbit_policy_validator). --include_lib("rabbit_common/include/rabbit.hrl"). - -export([register/0, validate_policy/1]). -rabbit_boot_step({?MODULE, diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_shard.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_shard.erl index 81b127190dc2..7e61a3d66ae6 100644 --- a/deps/rabbitmq_sharding/src/rabbit_sharding_shard.erl +++ b/deps/rabbitmq_sharding/src/rabbit_sharding_shard.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_sharding_shard). diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_util.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_util.erl index 3e99ffb1033a..110e8f2b8d67 100644 --- a/deps/rabbitmq_sharding/src/rabbit_sharding_util.erl +++ b/deps/rabbitmq_sharding/src/rabbit_sharding_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_sharding_util). diff --git a/deps/rabbitmq_sharding/test/rabbit_hash_exchange_SUITE.erl b/deps/rabbitmq_sharding/test/rabbit_hash_exchange_SUITE.erl index 899405bb9e5e..84461bf54ef9 100644 --- a/deps/rabbitmq_sharding/test/rabbit_hash_exchange_SUITE.erl +++ b/deps/rabbitmq_sharding/test/rabbit_hash_exchange_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_hash_exchange_SUITE). -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("eunit/include/eunit.hrl"). diff --git a/deps/rabbitmq_sharding/test/rabbit_sharding_SUITE.erl b/deps/rabbitmq_sharding/test/rabbit_sharding_SUITE.erl index aaf96636de21..a4196e79d5ef 100644 --- a/deps/rabbitmq_sharding/test/rabbit_sharding_SUITE.erl +++ b/deps/rabbitmq_sharding/test/rabbit_sharding_SUITE.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_sharding_SUITE). -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit/include/amqqueue.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). diff --git a/deps/rabbitmq_shovel/.gitignore b/deps/rabbitmq_shovel/.gitignore index 899c1dcefdce..ea5368cf9be6 100644 --- a/deps/rabbitmq_shovel/.gitignore +++ b/deps/rabbitmq_shovel/.gitignore @@ -1,22 +1 @@ -.sw? -.*.sw? -*.beam -*.plt -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr - -elvis elvis.config - -/rabbitmq_shovel.d diff --git a/deps/rabbitmq_shovel/BUILD.bazel b/deps/rabbitmq_shovel/BUILD.bazel index f156c434686f..0f40edd821a3 100644 --- a/deps/rabbitmq_shovel/BUILD.bazel +++ b/deps/rabbitmq_shovel/BUILD.bazel @@ -79,7 +79,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) @@ -119,6 +119,13 @@ rabbitmq_integration_suite( flaky = True, ) +rabbitmq_integration_suite( + name = "amqp10_inter_cluster_SUITE", + additional_beam = [ + "test/shovel_test_utils.beam", + ], +) + rabbitmq_suite( name = "amqp10_shovel_SUITE", size = "small", @@ -164,6 +171,19 @@ rabbitmq_suite( ], ) +rabbitmq_integration_suite( + name = "rolling_upgrade_SUITE", + additional_beam = [ + "test/shovel_test_utils.beam", + ], + # FIXME: As of this writing, there is a bug in Khepri that makes this + # testsuite unstable. + flaky = True, + deps = [ + "@khepri//:erlang_app", + ], +) + rabbitmq_integration_suite( name = "shovel_status_command_SUITE", additional_beam = [ diff --git a/deps/rabbitmq_shovel/Makefile b/deps/rabbitmq_shovel/Makefile index df31371fb528..759423cc3f56 100644 --- a/deps/rabbitmq_shovel/Makefile +++ b/deps/rabbitmq_shovel/Makefile @@ -25,6 +25,8 @@ LOCAL_DEPS = crypto TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_amqp1_0 meck +PLT_APPS += rabbitmqctl + DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk elvis_mk dep_elvis_mk = git https://github.com/inaka/elvis.mk.git master diff --git a/deps/rabbitmq_shovel/README.md b/deps/rabbitmq_shovel/README.md index 08b0f6650010..476a18e6d20f 100644 --- a/deps/rabbitmq_shovel/README.md +++ b/deps/rabbitmq_shovel/README.md @@ -11,7 +11,7 @@ This plugin ships with RabbitMQ, there is no need to install it separately. -## Documentation +## Documentation See [RabbitMQ shovel plugin](https://www.rabbitmq.com/shovel.html) on rabbitmq.com. @@ -20,4 +20,4 @@ See [RabbitMQ shovel plugin](https://www.rabbitmq.com/shovel.html) on rabbitmq.c Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html). -2007-2020 (c) 2007-2020 VMware, Inc. or its affiliates. +2007-2020 (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
diff --git a/deps/rabbitmq_shovel/app.bzl b/deps/rabbitmq_shovel/app.bzl index 7ce47fc8dfe9..509242770a22 100644 --- a/deps/rabbitmq_shovel/app.bzl +++ b/deps/rabbitmq_shovel/app.bzl @@ -110,6 +110,7 @@ def all_srcs(name = "all_srcs"): filegroup( name = "priv", + srcs = ["priv/schema/rabbitmq_shovel.schema"], ) filegroup( @@ -170,7 +171,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/amqp10_dynamic_SUITE.beam"], app_name = "rabbitmq_shovel", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], ) erlang_bytecode( name = "amqp10_shovel_SUITE_beam_files", @@ -225,6 +225,15 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/rabbit_common:erlang_app"], ) + erlang_bytecode( + name = "rolling_upgrade_SUITE_beam_files", + testonly = True, + srcs = ["test/rolling_upgrade_SUITE.erl"], + outs = ["test/rolling_upgrade_SUITE.beam"], + app_name = "rabbitmq_shovel", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", "@khepri//:erlang_app"], + ) erlang_bytecode( name = "shovel_status_command_SUITE_beam_files", testonly = True, @@ -242,3 +251,11 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbitmq_shovel", erlc_opts = "//:test_erlc_opts", ) + erlang_bytecode( + name = "amqp10_inter_cluster_SUITE_beam_files", + testonly = True, + srcs = ["test/amqp10_inter_cluster_SUITE.erl"], + outs = ["test/amqp10_inter_cluster_SUITE.beam"], + app_name = "rabbitmq_shovel", + erlc_opts = "//:test_erlc_opts", + ) diff --git a/deps/rabbitmq_shovel/include/rabbit_shovel.hrl b/deps/rabbitmq_shovel/include/rabbit_shovel.hrl index c82e26ae2993..b75d82aad5c7 100644 --- a/deps/rabbitmq_shovel/include/rabbit_shovel.hrl +++ b/deps/rabbitmq_shovel/include/rabbit_shovel.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -record(endpoint, @@ -28,4 +28,4 @@ -define(DEFAULT_ACK_MODE, on_confirm). -define(DEFAULT_RECONNECT_DELAY, 5). --define(SHOVEL_GUIDE_URL, <<"https://rabbitmq.com/shovel.html">>). +-define(SHOVEL_GUIDE_URL, <<"https://rabbitmq.com/docs/shovel">>). diff --git a/deps/rabbitmq_shovel/priv/schema/rabbitmq_shovel.schema b/deps/rabbitmq_shovel/priv/schema/rabbitmq_shovel.schema new file mode 100644 index 000000000000..15e80be698de --- /dev/null +++ b/deps/rabbitmq_shovel/priv/schema/rabbitmq_shovel.schema @@ -0,0 +1,11 @@ +%% ---------------------------------------------------------------------------- +%% RabbitMQ Shovel plugin +%% +%% See https://github.com/rabbitmq/rabbitmq-shovel/blob/stable/README.md +%% for details +%% ---------------------------------------------------------------------------- + + +{mapping, "shovel.topology.predeclared", "rabbitmq_shovel.topology.predeclared", [ + [{datatype, {enum, [true, false]}}] +]}. 
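%% The new schema entry above maps a rabbitmq.conf line such as
%%
%%   shovel.topology.predeclared = true
%%
%% onto the application environment {rabbitmq_shovel, [{topology, [{predeclared, true}]}]}.
%% A minimal sketch of reading it back on the plugin side, mirroring the lookup
%% used by the source declaration code later in this diff; the module below is
%% illustrative only:
-module(shovel_predeclared_check).
-export([predeclared/0]).

predeclared() ->
    Topology = application:get_env(rabbitmq_shovel, topology, []),
    proplists:get_value(predeclared, Topology, false).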
diff --git a/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand.erl b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand.erl index d96a150cde79..752de195bded 100644 --- a/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand.erl +++ b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module('Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand'). @@ -69,10 +69,12 @@ run([Name], #{node := Node, vhost := VHost}) -> Error; Xs when is_list(Xs) -> ErrMsg = rabbit_misc:format("Shovel with the given name was not found " - "on the target node '~ts' and / or virtual host '~ts'", + "on the target node '~ts' and/or virtual host '~ts'. " + "It may be failing to connect and report its state, will delete its runtime parameter...", [Node, VHost]), case rabbit_shovel_status:find_matching_shovel(VHost, Name, Xs) of undefined -> + try_force_removing(Node, VHost, Name, ActingUser), {error, rabbit_data_coercion:to_binary(ErrMsg)}; Match -> {{_Name, _VHost}, _Type, {_State, Opts}, _Timestamp} = Match, @@ -83,10 +85,14 @@ run([Name], #{node := Node, vhost := VHost}) -> Error; {error, not_found} -> ErrMsg = rabbit_misc:format("Shovel with the given name was not found " - "on the target node '~ts' and / or virtual host '~ts'", + "on the target node '~ts' and/or virtual host '~ts'. " + "It may be failing to connect and report its state, will delete its runtime parameter...", [Node, VHost]), + try_force_removing(HostingNode, VHost, Name, ActingUser), {error, rabbit_data_coercion:to_binary(ErrMsg)}; - ok -> ok + ok -> + _ = try_clearing_runtime_parameter(Node, VHost, Name, ActingUser), + ok end end end. @@ -99,3 +105,16 @@ aliases() -> output(E, _Opts) -> 'Elixir.RabbitMQ.CLI.DefaultOutput':output(E). + +try_force_removing(Node, VHost, ShovelName, ActingUser) -> + %% Deleting the runtime parameter will cause the dynamic Shovel's child tree to be stopped eventually + %% regardless of the node it is hosted on. MK. + _ = try_clearing_runtime_parameter(Node, VHost, ShovelName, ActingUser), + %% These are best effort attempts to delete the Shovel. Clearing the parameter does all the heavy lifting. MK. + _ = try_stopping_child_process(Node, VHost, ShovelName). + +try_clearing_runtime_parameter(Node, VHost, ShovelName, ActingUser) -> + _ = rabbit_misc:rpc_call(Node, rabbit_runtime_parameters, clear, [VHost, <<"shovel">>, ShovelName, ActingUser]). + +try_stopping_child_process(Node, VHost, ShovelName) -> + _ = rabbit_misc:rpc_call(Node, rabbit_shovel_dyn_worker_sup_sup, stop_and_delete_child, [{VHost, ShovelName}]). diff --git a/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand.erl b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand.erl index 411ccdbf7d58..5a994b0a4ef6 100644 --- a/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand.erl +++ b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module('Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand'). diff --git a/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand.erl b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand.erl index 4c6775ed8217..9644fd2bdd18 100644 --- a/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand.erl +++ b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module('Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand'). @@ -61,7 +61,7 @@ banner(_, #{node := Node}) -> atom_to_binary(Node, utf8)]). run(_Args, #{node := Node}) -> - case rabbit_misc:rpc_call(Node, rabbit_shovel_status, status, []) of + case rabbit_misc:rpc_call(Node, rabbit_shovel_status, cluster_status, []) of {badrpc, _} = Error -> Error; Status -> @@ -112,7 +112,9 @@ fmt_status({'running', Proplist}, Map) -> fmt_status('starting' = St, Map) -> Map#{state => St, source => <<>>, + source_protocol => <<>>, destination => <<>>, + destination_protocol => <<>>, termination_reason => <<>>}; fmt_status({'terminated' = St, Reason}, Map) -> Map#{state => St, diff --git a/deps/rabbitmq_shovel/src/rabbit_amqp091_shovel.erl b/deps/rabbitmq_shovel/src/rabbit_amqp091_shovel.erl index e85f245f5880..e3c173d20601 100644 --- a/deps/rabbitmq_shovel/src/rabbit_amqp091_shovel.erl +++ b/deps/rabbitmq_shovel/src/rabbit_amqp091_shovel.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_amqp091_shovel). +-define(APP, rabbitmq_shovel). + -behaviour(rabbit_shovel_behaviour). -include_lib("amqp_client/include/amqp_client.hrl"). @@ -34,6 +36,14 @@ forward/4 ]). +%% Function references should not be stored in the metadata store. +%% They are only valid for the version of the module they were created +%% from and can break with the next upgrade. A function reference should +%% not be used by a module other than the one that created it, nor should +%% it survive a node restart. Thus, function references have been replaced +%% by the following MFAs. +-export([decl_fun/3, check_fun/3, publish_fun/4, props_fun_timestamp_header/4, + props_fun_forward_header/5]). + -define(MAX_CONNECTION_CLOSE_TIMEOUT, 10000).
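%% To make the comment above concrete: a fun value is tied to the exact
%% version of the module that created it, so a closure kept in worker state or
%% in the metadata store stops being callable after that module is upgraded or
%% the node restarts. Storing {Module, Function, PartialArgs} and applying it
%% later sidesteps that. An illustrative sketch of the convention used by the
%% new *_fun/N exports; the module and function names below are examples, not
%% part of the plugin:
-module(mfa_callback_example).
-export([make/1, call/3, add_header/3]).

%% Instead of returning fun(SrcUri, DestUri) -> ... end, return an MFA tuple.
make(Header) ->
    {?MODULE, add_header, [Header]}.

%% The stored partial arguments are prepended to the call-time ones.
call({M, F, Args}, SrcUri, DestUri) ->
    apply(M, F, Args ++ [SrcUri, DestUri]).

add_header(Header, _SrcUri, _DestUri) ->
    [Header].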
parse(_Name, {source, Source}) -> @@ -46,7 +56,7 @@ parse(_Name, {source, Source}) -> CArgs = proplists:get_value(consumer_args, Source, []), #{module => ?MODULE, uris => proplists:get_value(uris, Source), - resource_decl => decl_fun(Source), + resource_decl => decl_fun({source, Source}), queue => Queue, delete_after => proplists:get_value(delete_after, Source, never), prefetch_count => Prefetch, @@ -62,7 +72,7 @@ parse(Name, {destination, Dest}) -> PropsFun2 = add_timestamp_header_fun(ATH, PropsFun1), #{module => ?MODULE, uris => proplists:get_value(uris, Dest), - resource_decl => decl_fun(Dest), + resource_decl => decl_fun({destination, Dest}), props_fun => PropsFun2, fields_fun => PubFieldsFun, add_forward_headers => AFH, @@ -77,9 +87,9 @@ init_source(Conf = #{ack_mode := AckMode, source := #{queue := Queue, current := {Conn, Chan, _}, prefetch_count := Prefetch, - resource_decl := Decl, + resource_decl := {M, F, MFArgs}, consumer_args := Args} = Src}) -> - Decl(Conn, Chan), + apply(M, F, MFArgs ++ [Conn, Chan]), NoAck = AckMode =:= no_ack, case NoAck of @@ -108,9 +118,9 @@ connect_dest(Conf = #{name := Name, dest := #{uris := Uris} = Dst}) -> init_dest(Conf = #{ack_mode := AckMode, dest := #{current := {Conn, Chan, _}, - resource_decl := Decl} = Dst}) -> + resource_decl := {M, F, MFArgs}} = Dst}) -> - Decl(Conn, Chan), + apply(M, F, MFArgs ++ [Conn, Chan]), case AckMode of on_confirm -> @@ -187,16 +197,16 @@ forward(IncomingTag, Props, Payload, State) -> end. do_forward(IncomingTag, Props, Payload, - State0 = #{dest := #{props_fun := PropsFun, + State0 = #{dest := #{props_fun := {M, F, Args}, current := {_, _, DstUri}, - fields_fun := FieldsFun}}) -> + fields_fun := {Mf, Ff, Argsf}}}) -> SrcUri = rabbit_shovel_behaviour:source_uri(State0), % do publish Exchange = maps:get(exchange, Props, undefined), RoutingKey = maps:get(routing_key, Props, undefined), Method = #'basic.publish'{exchange = Exchange, routing_key = RoutingKey}, - Method1 = FieldsFun(SrcUri, DstUri, Method), - Msg1 = #amqp_msg{props = PropsFun(SrcUri, DstUri, props_from_map(Props)), + Method1 = apply(Mf, Ff, Argsf ++ [SrcUri, DstUri, Method]), + Msg1 = #amqp_msg{props = apply(M, F, Args ++ [SrcUri, DstUri, props_from_map(Props)]), payload = Payload}, publish(IncomingTag, Method1, Msg1, State0). @@ -519,11 +529,7 @@ make_publish_fun(Fields, ValidFields) when is_list(Fields) -> case SuppliedFields -- ValidFields of [] -> FieldIndices = make_field_indices(ValidFields, Fields), - fun (_SrcUri, _DestUri, Publish) -> - lists:foldl(fun ({Pos1, Value}, Pub) -> - setelement(Pos1, Pub, Value) - end, Publish, FieldIndices) - end; + {?MODULE, publish_fun, [FieldIndices]}; Unexpected -> fail({invalid_parameter_value, publish_properties, {unexpected_fields, Unexpected, ValidFields}}) @@ -532,6 +538,11 @@ make_publish_fun(Fields, _) -> fail({invalid_parameter_value, publish_properties, {require_list, Fields}}). +publish_fun(FieldIndices, _SrcUri, _DestUri, Publish) -> + lists:foldl(fun ({Pos1, Value}, Pub) -> + setelement(Pos1, Pub, Value) + end, Publish, FieldIndices). + make_field_indices(Valid, Fields) -> make_field_indices(Fields, field_map(Valid, 2), []). @@ -551,23 +562,25 @@ field_map(Fields, Idx0) -> fail(Reason) -> throw({error, Reason}). 
add_forward_headers_fun(Name, true, PubProps) -> - fun(SrcUri, DestUri, Props) -> - rabbit_shovel_util:update_headers( - [{<<"shovelled-by">>, rabbit_nodes:cluster_name()}, - {<<"shovel-type">>, <<"static">>}, - {<<"shovel-name">>, list_to_binary(atom_to_list(Name))}], - [], SrcUri, DestUri, PubProps(SrcUri, DestUri, Props)) - end; + {?MODULE, props_fun_forward_header, [Name, PubProps]}; add_forward_headers_fun(_Name, false, PubProps) -> PubProps. +props_fun_forward_header(Name, {M, F, Args}, SrcUri, DestUri, Props) -> + rabbit_shovel_util:update_headers( + [{<<"shovelled-by">>, rabbit_nodes:cluster_name()}, + {<<"shovel-type">>, <<"static">>}, + {<<"shovel-name">>, list_to_binary(atom_to_list(Name))}], + [], SrcUri, DestUri, apply(M, F, Args ++ [SrcUri, DestUri, Props])). + add_timestamp_header_fun(true, PubProps) -> - fun(SrcUri, DestUri, Props) -> - rabbit_shovel_util:add_timestamp_header( - PubProps(SrcUri, DestUri, Props)) - end; + {?MODULE, props_fun_timestamp_header, [PubProps]}; add_timestamp_header_fun(false, PubProps) -> PubProps. +props_fun_timestamp_header({M, F, Args}, SrcUri, DestUri, Props) -> + rabbit_shovel_util:add_timestamp_header( + apply(M, F, Args ++ [SrcUri, DestUri, Props])). + parse_declaration({[], Acc}) -> Acc; parse_declaration({[{Method, Props} | Rest], Acc}) when is_list(Props) -> @@ -593,14 +606,30 @@ parse_declaration({[{Method, Props} | _Rest], _Acc}) -> parse_declaration({[Method | Rest], Acc}) -> parse_declaration({[{Method, []} | Rest], Acc}). -decl_fun(Endpoint) -> - Decl = parse_declaration({proplists:get_value(declarations, Endpoint, []), - []}), - fun (_Conn, Ch) -> - [begin - amqp_channel:call(Ch, M) - end || M <- lists:reverse(Decl)] - end. +decl_fun({source, Endpoint}) -> + case parse_declaration({proplists:get_value(declarations, Endpoint, []), []}) of + [] -> + case proplists:get_value(predeclared, application:get_env(?APP, topology, []), false) of + true -> case proplists:get_value(queue, Endpoint) of + <<>> -> fail({invalid_parameter_value, declarations, {require_non_empty}}); + Queue -> {?MODULE, check_fun, [Queue]} + end; + false -> {?MODULE, decl_fun, []} + end; + Decl -> {?MODULE, decl_fun, [Decl]} + end; +decl_fun({destination, Endpoint}) -> + Decl = parse_declaration({proplists:get_value(declarations, Endpoint, []), []}), + {?MODULE, decl_fun, [Decl]}. + +decl_fun(Decl, _Conn, Ch) -> + [begin + amqp_channel:call(Ch, M) + end || M <- lists:reverse(Decl)]. + +check_fun(Queue, _Conn, Ch) -> + amqp_channel:call(Ch, #'queue.declare'{queue = Queue, + passive = true}). parse_parameter(Param, Fun, Value) -> try diff --git a/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl b/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl index de76cf95136d..eafe5e15a1ff 100644 --- a/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl +++ b/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_amqp10_shovel). -behaviour(rabbit_shovel_behaviour). --include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_shovel.hrl"). -export([ @@ -38,7 +37,7 @@ -import(rabbit_data_coercion, [to_binary/1]). -define(INFO(Text, Args), rabbit_log_shovel:info(Text, Args)). 
--define(LINK_CREDIT_TIMEOUT, 5000). +-define(LINK_CREDIT_TIMEOUT, 20_000). -type state() :: rabbit_shovel_behaviour:state(). -type uri() :: rabbit_shovel_behaviour:uri(). @@ -101,7 +100,12 @@ connect_dest(State = #{name := Name, uri => Uri}}}. connect(Name, AckMode, Uri, Postfix, Addr, Map, AttachFun) -> - {ok, Config} = amqp10_client:parse_uri(Uri), + {ok, Config0} = amqp10_client:parse_uri(Uri), + %% As done for AMQP 0.9.1, exclude AMQP 1.0 shovel connections from maintenance mode + %% to prevent crashes and errors being logged by the shovel plugin when a node gets drained. + %% A better solution would be that the shovel plugin subscribes to event + %% maintenance_connections_closed to gracefully transfer shovels over to other live nodes. + Config = Config0#{properties => #{<<"ignore-maintenance">> => {boolean, true}}}, {ok, Conn} = amqp10_client:open_connection(Config), {ok, Sess} = amqp10_client:begin_session(Conn), link(Conn), @@ -125,13 +129,13 @@ connect(Name, AckMode, Uri, Postfix, Addr, Map, AttachFun) -> -spec init_source(state()) -> state(). init_source(State = #{source := #{current := #{link := Link}, prefetch_count := Prefetch} = Src}) -> - {Credit, RenewAfter} = case Src of - #{delete_after := R} when is_integer(R) -> - {R, never}; - #{prefetch_count := Pre} -> - {Pre, round(Prefetch/10)} - end, - ok = amqp10_client:flow_link_credit(Link, Credit, RenewAfter), + {Credit, RenewWhenBelow} = case Src of + #{delete_after := R} when is_integer(R) -> + {R, never}; + #{prefetch_count := Pre} -> + {Pre, max(1, round(Prefetch/10))} + end, + ok = amqp10_client:flow_link_credit(Link, Credit, RenewWhenBelow), Remaining = case Src of #{delete_after := never} -> unlimited; #{delete_after := Rem} -> Rem; @@ -173,7 +177,8 @@ dest_endpoint(#{shovel_type := dynamic, dest := #{target_address := Addr}}) -> [{dest_address, Addr}]. --spec handle_source(Msg :: any(), state()) -> not_handled | state(). +-spec handle_source(Msg :: any(), state()) -> + not_handled | state() | {stop, any()}. handle_source({amqp10_msg, _LinkRef, Msg}, State) -> Tag = amqp10_msg:delivery_id(Msg), Payload = amqp10_msg:body_bin(Msg), @@ -277,30 +282,24 @@ close_dest(#{dest := #{current := #{conn := Conn}}}) -> close_dest(_Config) -> ok. -spec ack(Tag :: tag(), Multi :: boolean(), state()) -> state(). -ack(Tag, true, State = #{source := #{current := #{session := Session}, +ack(Tag, true, State = #{source := #{current := #{link := LinkRef}, last_acked_tag := LastTag} = Src}) -> First = LastTag + 1, - ok = amqp10_client_session:disposition(Session, receiver, First, - Tag, true, accepted), + ok = amqp10_client_session:disposition(LinkRef, First, Tag, true, accepted), State#{source => Src#{last_acked_tag => Tag}}; -ack(Tag, false, State = #{source := #{current := - #{session := Session}} = Src}) -> - ok = amqp10_client_session:disposition(Session, receiver, Tag, - Tag, true, accepted), +ack(Tag, false, State = #{source := #{current := #{link := LinkRef}} = Src}) -> + ok = amqp10_client_session:disposition(LinkRef, Tag, Tag, true, accepted), State#{source => Src#{last_acked_tag => Tag}}. -spec nack(Tag :: tag(), Multi :: boolean(), state()) -> state(). 
-nack(Tag, false, State = #{source := - #{current := #{session := Session}} = Src}) -> +nack(Tag, false, State = #{source := #{current := #{link := LinkRef}} = Src}) -> % the tag is the same as the deliveryid - ok = amqp10_client_session:disposition(Session, receiver, Tag, - Tag, false, rejected), + ok = amqp10_client_session:disposition(LinkRef, Tag, Tag, true, rejected), State#{source => Src#{last_nacked_tag => Tag}}; -nack(Tag, true, State = #{source := #{current := #{session := Session}, - last_nacked_tag := LastTag} = Src}) -> +nack(Tag, true, State = #{source := #{current := #{link := LinkRef}, + last_nacked_tag := LastTag} = Src}) -> First = LastTag + 1, - ok = amqp10_client_session:disposition(Session, receiver, First, - Tag, true, accepted), + ok = amqp10_client_session:disposition(LinkRef, First, Tag, true, rejected), State#{source => Src#{last_nacked_tag => Tag}}. status(#{dest := #{current := #{link_state := attached}}}) -> @@ -312,7 +311,8 @@ status(_) -> ignore. -spec forward(Tag :: tag(), Props :: #{atom() => any()}, - Payload :: binary(), state()) -> state(). + Payload :: binary(), state()) -> + state() | {stop, any()}. forward(_Tag, _Props, _Payload, #{source := #{remaining_unacked := 0}} = State) -> State; @@ -331,17 +331,33 @@ forward(Tag, Props, Payload, Msg = add_timestamp_header( State, set_message_properties( Props, add_forward_headers(State, Msg0))), - ok = amqp10_client:send_msg(Link, Msg), - rabbit_shovel_behaviour:decr_remaining_unacked( - case AckMode of - no_ack -> - rabbit_shovel_behaviour:decr_remaining(1, State); - on_confirm -> - State#{dest => Dst#{unacked => Unacked#{OutTag => Tag}}}; - on_publish -> - State1 = rabbit_shovel_behaviour:ack(Tag, false, State), - rabbit_shovel_behaviour:decr_remaining(1, State1) - end). + case send_msg(Link, Msg) of + ok -> + rabbit_shovel_behaviour:decr_remaining_unacked( + case AckMode of + no_ack -> + rabbit_shovel_behaviour:decr_remaining(1, State); + on_confirm -> + State#{dest => Dst#{unacked => Unacked#{OutTag => Tag}}}; + on_publish -> + State1 = rabbit_shovel_behaviour:ack(Tag, false, State), + rabbit_shovel_behaviour:decr_remaining(1, State1) + end); + Stop -> + Stop + end. + +send_msg(Link, Msg) -> + case amqp10_client:send_msg(Link, Msg) of + ok -> + ok; + {error, insufficient_credit} -> + receive {amqp10_event, {link, Link, credited}} -> + ok = amqp10_client:send_msg(Link, Msg) + after ?LINK_CREDIT_TIMEOUT -> + {stop, credited_timeout} + end + end. new_message(Tag, Payload, #{ack_mode := AckMode, dest := #{properties := Props, @@ -372,6 +388,11 @@ set_message_properties(Props, Msg) -> #{content_encoding => to_binary(Ct)}, M); (delivery_mode, 2, M) -> amqp10_msg:set_headers(#{durable => true}, M); + (delivery_mode, 1, M) -> + % by default the durable flag is false + M; + (priority, P, M) when is_integer(P) -> + amqp10_msg:set_headers(#{priority => P}, M); (correlation_id, Ct, M) -> amqp10_msg:set_properties(#{correlation_id => to_binary(Ct)}, M); (reply_to, Ct, M) -> diff --git a/deps/rabbitmq_shovel/src/rabbit_log_shovel.erl b/deps/rabbitmq_shovel/src/rabbit_log_shovel.erl index 1e2b326c4a7a..b982bbadc17a 100644 --- a/deps/rabbitmq_shovel/src/rabbit_log_shovel.erl +++ b/deps/rabbitmq_shovel/src/rabbit_log_shovel.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
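The new `send_msg/2` helper above turns an `{error, insufficient_credit}` result into a bounded wait for the `credited` link event instead of failing a `ok = ...` match. A condensed sketch of that retry pattern for any `amqp10_client` sender link (the timeout value here is illustrative):

```erlang
%% Sketch: send on an amqp10_client sender link, waiting once for more
%% link credit if the peer has not granted any yet.
send_with_credit_wait(Link, Msg, TimeoutMs) ->
    case amqp10_client:send_msg(Link, Msg) of
        ok ->
            ok;
        {error, insufficient_credit} ->
            %% amqp10_client notifies the owning process when credit arrives.
            receive
                {amqp10_event, {link, Link, credited}} ->
                    amqp10_client:send_msg(Link, Msg)
            after TimeoutMs ->
                {error, credited_timeout}
            end
    end.
```

In the shovel itself the timeout maps to `{stop, credited_timeout}`, so a destination that never grants credit now terminates the worker cleanly rather than crashing it with a badmatch.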
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc Compatibility module for the old Lager-based logging API. diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel.erl b/deps/rabbitmq_shovel/src/rabbit_shovel.erl index 02a4882833b4..daebbd0ecbb7 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel). diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_behaviour.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_behaviour.erl index af4f84f78f30..bf58326b7d6f 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_behaviour.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_behaviour.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_behaviour). @@ -80,7 +80,8 @@ -callback ack(Tag :: tag(), Multi :: boolean(), state()) -> state(). -callback nack(Tag :: tag(), Multi :: boolean(), state()) -> state(). -callback forward(Tag :: tag(), Props :: #{atom() => any()}, - Payload :: binary(), state()) -> state(). + Payload :: binary(), state()) -> + state() | {stop, any()}. -callback status(state()) -> rabbit_shovel_status:blocked_status() | ignore. -spec parse(atom(), binary(), {source | destination, proplists:proplist()}) -> @@ -140,7 +141,8 @@ source_endpoint(#{source := #{module := Mod}} = State) -> dest_endpoint(#{dest := #{module := Mod}} = State) -> Mod:dest_endpoint(State). --spec forward(tag(), #{atom() => any()}, binary(), state()) -> state(). +-spec forward(tag(), #{atom() => any()}, binary(), state()) -> + state() | {stop, any()}. forward(Tag, Props, Payload, #{dest := #{module := Mod}} = State) -> Mod:forward(Tag, Props, Payload, State). diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_config.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_config.erl index 2003897b164d..16b61071108f 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_config.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_config.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_config). @@ -10,7 +10,6 @@ -export([parse/2, ensure_defaults/2]). --include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_shovel.hrl"). 
resolve_module(amqp091) -> rabbit_amqp091_shovel; diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup.erl index 8710557e5bdc..ad1496ae9fdd 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_dyn_worker_sup). diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl index c9b82335f05d..55bf3e8c0cce 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl @@ -2,18 +2,18 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_dyn_worker_sup_sup). -behaviour(mirrored_supervisor). -export([start_link/0, init/1, adjust/2, stop_child/1, cleanup_specs/0]). +-export([id_to_khepri_path/1]). -import(rabbit_misc, [pget/2]). -import(rabbit_data_coercion, [to_map/1, to_list/1]). --include("rabbit_shovel.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -define(SUPERVISOR, ?MODULE). @@ -41,16 +41,18 @@ start_child({VHost, ShovelName} = Name, Def) -> LockId = rabbit_shovel_locks:lock(Name), cleanup_specs(), rabbit_log_shovel:debug("Starting a mirrored supervisor named '~ts' in virtual host '~ts'", [ShovelName, VHost]), - Result = case mirrored_supervisor:start_child( + case child_exists(Name) + orelse mirrored_supervisor:start_child( ?SUPERVISOR, - {Name, {rabbit_shovel_dyn_worker_sup, start_link, [Name, obfuscated_uris_parameters(Def)]}, + {id(Name), {rabbit_shovel_dyn_worker_sup, start_link, [Name, obfuscated_uris_parameters(Def)]}, transient, ?WORKER_WAIT, worker, [rabbit_shovel_dyn_worker_sup]}) of + true -> ok; {ok, _Pid} -> ok; {error, {already_started, _Pid}} -> ok end, %% release the lock if we managed to acquire one rabbit_shovel_locks:unlock(LockId), - Result. + ok. obfuscated_uris_parameters(Def) when is_map(Def) -> to_map(rabbit_shovel_parameters:obfuscate_uris_in_definition(to_list(Def))); @@ -58,7 +60,11 @@ obfuscated_uris_parameters(Def) when is_list(Def) -> rabbit_shovel_parameters:obfuscate_uris_in_definition(Def). child_exists(Name) -> - lists:any(fun ({N, _, _, _}) -> N =:= Name end, + Id = id(Name), + TmpExpId = temp_experimental_id(Name), + lists:any(fun ({ChildId, _, _, _}) -> + ChildId =:= Id orelse ChildId =:= TmpExpId + end, mirrored_supervisor:which_children(?SUPERVISOR)). 
stop_child({VHost, ShovelName} = Name) -> @@ -67,13 +73,27 @@ stop_child({VHost, ShovelName} = Name) -> case get({shovel_worker_autodelete, Name}) of true -> ok; %% [1] _ -> - ok = mirrored_supervisor:terminate_child(?SUPERVISOR, Name), - ok = mirrored_supervisor:delete_child(?SUPERVISOR, Name), - rabbit_shovel_status:remove(Name) + Id = id(Name), + case stop_and_delete_child(Id) of + ok -> + ok; + {error, not_found} -> + TmpExpId = temp_experimental_id(Name), + _ = stop_and_delete_child(TmpExpId), + ok + end end, rabbit_shovel_locks:unlock(LockId), ok. +stop_and_delete_child(Id) -> + case mirrored_supervisor:terminate_child(?SUPERVISOR, Id) of + ok -> + ok = mirrored_supervisor:delete_child(?SUPERVISOR, Id); + {error, not_found} = Error -> + Error + end. + %% [1] An autodeleting worker removes its own parameter, and thus ends %% up here via the parameter callback. It is a transient worker that %% is just about to terminate normally - so we don't need to tell the @@ -83,15 +103,55 @@ stop_child({VHost, ShovelName} = Name) -> %% See rabbit_shovel_worker:terminate/2 cleanup_specs() -> - SpecsSet = sets:from_list([element(1, S) || S <- mirrored_supervisor:which_children(?SUPERVISOR)]), - ParamsSet = sets:from_list(rabbit_runtime_parameters:list_component(<<"shovel">>)), - F = fun(Spec, ok) -> - _ = mirrored_supervisor:delete_child(?SUPERVISOR, Spec), - ok - end, - ok = sets:fold(F, ok, sets:subtract(SpecsSet, ParamsSet)). + Children = mirrored_supervisor:which_children(?SUPERVISOR), + ParamsSet = sets:from_list( + [id({proplists:get_value(vhost, S), + proplists:get_value(name, S)}) + || S <- rabbit_runtime_parameters:list_component( + <<"shovel">>)]), + %% Delete any supervisor children that do not have their respective runtime parameters in the database. + lists:foreach( + fun + ({{VHost, ShovelName} = ChildId, _, _, _}) + when is_binary(VHost) andalso is_binary(ShovelName) -> + case sets:is_element(ChildId, ParamsSet) of + false -> + _ = mirrored_supervisor:delete_child( + ?SUPERVISOR, ChildId); + true -> + ok + end; + ({{List, {VHost, ShovelName} = Id} = ChildId, _, _, _}) + when is_list(List) andalso + is_binary(VHost) andalso is_binary(ShovelName) -> + case sets:is_element(Id, ParamsSet) of + false -> + _ = mirrored_supervisor:delete_child( + ?SUPERVISOR, ChildId); + true -> + ok + end + end, Children). %%---------------------------------------------------------------------------- init([]) -> {ok, {{one_for_one, 3, 10}, []}}. + +id({VHost, ShovelName} = Name) + when is_binary(VHost) andalso is_binary(ShovelName) -> + Name. + +id_to_khepri_path({VHost, ShovelName}) + when is_binary(VHost) andalso is_binary(ShovelName) -> + [VHost, ShovelName]; +id_to_khepri_path({List, {VHost, ShovelName}}) + when is_list(List) andalso is_binary(VHost) andalso is_binary(ShovelName) -> + [VHost, ShovelName]. + +%% Temporary experimental format, erroneously backported to some 3.11.x and +%% 3.12.x releases in rabbitmq/rabbitmq-server#9796. +%% +%% See rabbitmq/rabbitmq-server#10306. +temp_experimental_id({V, S} = Name) -> + {[V, S], Name}. diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_locks.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_locks.erl index 760d250ed178..c3eb7a19767b 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_locks.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_locks.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. 
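The supervisor code above now has to tolerate two child ID shapes: the canonical `{VHost, ShovelName}` tuple and the temporary experimental `{[VHost, ShovelName], {VHost, ShovelName}}` form left behind by some 3.11.x/3.12.x releases. A hypothetical helper (not part of the plugin) that normalises either shape back to the canonical ID:

```erlang
%% Sketch: map either mirrored_supervisor child ID shape to the plain
%% {VHost, ShovelName} tuple used as the canonical ID above.
normalise_child_id({VHost, Shovel} = Id)
  when is_binary(VHost), is_binary(Shovel) ->
    Id;
normalise_child_id({[VHost, Shovel], {VHost, Shovel} = Id})
  when is_binary(VHost), is_binary(Shovel) ->
    Id.

%% Both of these return {<<"/">>, <<"my-shovel">>}:
%%   normalise_child_id({<<"/">>, <<"my-shovel">>}).
%%   normalise_child_id({[<<"/">>, <<"my-shovel">>],
%%                       {<<"/">>, <<"my-shovel">>}}).
```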
All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_locks). diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_parameters.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_parameters.erl index b6682c25b8ed..b7d193b03a8b 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_parameters.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_parameters.erl @@ -2,12 +2,14 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_parameters). -behaviour(rabbit_runtime_parameter). +-define(APP, rabbitmq_shovel). + -include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_shovel.hrl"). @@ -15,6 +17,15 @@ -export([register/0, unregister/0, parse/3]). -export([obfuscate_uris_in_definition/1]). +%% Function references should not be stored in the metadata store. +%% They are only valid for the version of the module they were created +%% from and can break with the next upgrade. They should not be used by +%% any module other than the one that created them, nor survive a node restart. +%% Thus, these function references have been replaced by the following MFAs. +-export([dest_decl/4, dest_check/4, + src_decl_exchange/4, src_decl_queue/4, src_check_queue/4, + fields_fun/5, props_fun/9]). + -import(rabbit_misc, [pget/2, pget/3, pset/3]). -rabbit_boot_step({?MODULE, @@ -138,7 +149,8 @@ amqp091_src_validation(_Def, User) -> %% a deprecated pre-3.7 setting {<<"delete-after">>, fun validate_delete_after/2, optional}, %% currently used multi-protocol friend name, introduced in 3.7 - {<<"src-delete-after">>, fun validate_delete_after/2, optional} + {<<"src-delete-after">>, fun validate_delete_after/2, optional}, + {<<"src-predeclared">>, fun rabbit_parameter_validation:boolean/2, optional} ]. dest_validation(Def0, User) -> @@ -170,7 +182,8 @@ amqp091_dest_validation(_Def, User) -> {<<"dest-add-forward-headers">>, fun rabbit_parameter_validation:boolean/2,optional}, {<<"dest-add-timestamp-header">>, fun rabbit_parameter_validation:boolean/2,optional}, {<<"publish-properties">>, fun validate_properties/2, optional}, - {<<"dest-publish-properties">>, fun validate_properties/2, optional} + {<<"dest-publish-properties">>, fun validate_properties/2, optional}, + {<<"dest-predeclared">>, fun rabbit_parameter_validation:boolean/2, optional} ].
validate_uri_fun(User) -> @@ -321,12 +334,13 @@ parse_amqp091_dest({VHost, Name}, ClusterName, Def, SourceHeaders) -> DestXKey = pget(<<"dest-exchange-key">>, Def, none), DestQ = pget(<<"dest-queue">>, Def, none), DestQArgs = pget(<<"dest-queue-args">>, Def, #{}), - DestDeclFun = fun (Conn, _Ch) -> - case DestQ of - none -> ok; - _ -> ensure_queue(Conn, DestQ, rabbit_misc:to_amqp_table(DestQArgs)) - end - end, + GlobalPredeclared = proplists:get_value(predeclared, application:get_env(?APP, topology, []), false), + Predeclared = pget(<<"dest-predeclared">>, Def, GlobalPredeclared), + DestDeclFun = case Predeclared of + true -> {?MODULE, dest_check, [DestQ, DestQArgs]}; + false -> {?MODULE, dest_decl, [DestQ, DestQArgs]} + end, + {X, Key} = case DestQ of none -> {DestX, DestXKey}; _ -> {<<>>, DestQ} @@ -335,16 +349,6 @@ parse_amqp091_dest({VHost, Name}, ClusterName, Def, SourceHeaders) -> {<<"dest-exchange-key">>, DestXKey}, {<<"dest-queue">>, DestQ}], V =/= none], - PubFun = fun (_SrcURI, _DestURI, P0) -> - P1 = case X of - none -> P0; - _ -> P0#'basic.publish'{exchange = X} - end, - case Key of - none -> P1; - _ -> P1#'basic.publish'{routing_key = Key} - end - end, AddHeadersLegacy = pget(<<"add-forward-headers">>, Def, false), AddHeaders = pget(<<"dest-add-forward-headers">>, Def, AddHeadersLegacy), Table0 = [{<<"shovelled-by">>, ClusterName}, @@ -357,19 +361,6 @@ parse_amqp091_dest({VHost, Name}, ClusterName, Def, SourceHeaders) -> AddTimestampHeaderLegacy = pget(<<"add-timestamp-header">>, Def, false), AddTimestampHeader = pget(<<"dest-add-timestamp-header">>, Def, AddTimestampHeaderLegacy), - PubPropsFun = fun (SrcURI, DestURI, P0) -> - P = set_properties(P0, SetProps), - P1 = case AddHeaders of - true -> rabbit_shovel_util:update_headers( - Table0, SourceHeaders ++ Table2, - SrcURI, DestURI, P); - false -> P - end, - case AddTimestampHeader of - true -> rabbit_shovel_util:add_timestamp_header(P1); - false -> P1 - end - end, %% Details are only used for status report in rabbitmqctl, as vhost is not %% available to query the runtime parameters. Details = maps:from_list([{K, V} || {K, V} <- [{dest_exchange, DestX}, @@ -379,10 +370,47 @@ parse_amqp091_dest({VHost, Name}, ClusterName, Def, SourceHeaders) -> maps:merge(#{module => rabbit_amqp091_shovel, uris => DestURIs, resource_decl => DestDeclFun, - fields_fun => PubFun, - props_fun => PubPropsFun + fields_fun => {?MODULE, fields_fun, [X, Key]}, + props_fun => {?MODULE, props_fun, [Table0, Table2, SetProps, + AddHeaders, SourceHeaders, + AddTimestampHeader]} }, Details). +fields_fun(X, Key, _SrcURI, _DestURI, P0) -> + P1 = case X of + none -> P0; + _ -> P0#'basic.publish'{exchange = X} + end, + case Key of + none -> P1; + _ -> P1#'basic.publish'{routing_key = Key} + end. + +props_fun(Table0, Table2, SetProps, AddHeaders, SourceHeaders, AddTimestampHeader, + SrcURI, DestURI, P0) -> + P = set_properties(P0, SetProps), + P1 = case AddHeaders of + true -> rabbit_shovel_util:update_headers( + Table0, SourceHeaders ++ Table2, + SrcURI, DestURI, P); + false -> P + end, + case AddTimestampHeader of + true -> rabbit_shovel_util:add_timestamp_header(P1); + false -> P1 + end. + +dest_decl(DestQ, DestQArgs, Conn, _Ch) -> + case DestQ of + none -> ok; + _ -> ensure_queue(Conn, DestQ, rabbit_misc:to_amqp_table(DestQArgs)) + end. +dest_check(DestQ, DestQArgs, Conn, _Ch) -> + case DestQ of + none -> ok; + _ -> check_queue(Conn, DestQ, rabbit_misc:to_amqp_table(DestQArgs)) + end. 
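Both parsers above read a plugin-wide default from the `topology` application environment and let individual shovels override it through `src-predeclared` / `dest-predeclared`. A hedged sketch of what enabling the global default might look like in `advanced.config` (the operator-facing key names should be confirmed against the released documentation; the test suites below set the same value via `application:set_env/3`):

```erlang
%% advanced.config sketch: make shovels assume their queues already
%% exist and only verify them with a passive queue.declare.
[
 {rabbitmq_shovel, [
     {topology, [{predeclared, true}]}
 ]}
].
```

A single dynamic shovel can still opt back into declaring its own resources by setting `src-predeclared` or `dest-predeclared` to `false` in its definition.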
+ parse_amqp10_source(Def) -> Uris = deobfuscated_uris(<<"src-uri">>, Def), Address = pget(<<"src-address">>, Def), @@ -403,18 +431,21 @@ parse_amqp091_source(Def) -> SrcQ = pget(<<"src-queue">>, Def, none), SrcQArgs = pget(<<"src-queue-args">>, Def, #{}), SrcCArgs = rabbit_misc:to_amqp_table(pget(<<"src-consumer-args">>, Def, [])), + GlobalPredeclared = proplists:get_value(predeclared, application:get_env(?APP, topology, []), false), + Predeclared = pget(<<"src-predeclared">>, Def, GlobalPredeclared), {SrcDeclFun, Queue, DestHeaders} = case SrcQ of - none -> {fun (_Conn, Ch) -> - Ms = [#'queue.declare'{exclusive = true}, - #'queue.bind'{routing_key = SrcXKey, - exchange = SrcX}], - [amqp_channel:call(Ch, M) || M <- Ms] - end, <<>>, [{<<"src-exchange">>, SrcX}, - {<<"src-exchange-key">>, SrcXKey}]}; - _ -> {fun (Conn, _Ch) -> - ensure_queue(Conn, SrcQ, rabbit_misc:to_amqp_table(SrcQArgs)) - end, SrcQ, [{<<"src-queue">>, SrcQ}]} + none -> {{?MODULE, src_decl_exchange, [SrcX, SrcXKey]}, <<>>, + [{<<"src-exchange">>, SrcX}, + {<<"src-exchange-key">>, SrcXKey}]}; + _ -> case Predeclared of + false -> + {{?MODULE, src_decl_queue, [SrcQ, SrcQArgs]}, + SrcQ, [{<<"src-queue">>, SrcQ}]}; + true -> + {{?MODULE, src_check_queue, [SrcQ, SrcQArgs]}, + SrcQ, [{<<"src-queue">>, SrcQ}]} + end end, DeleteAfter = pget(<<"src-delete-after">>, Def, pget(<<"delete-after">>, Def, <<"never">>)), @@ -434,6 +465,18 @@ parse_amqp091_source(Def) -> consumer_args => SrcCArgs }, Details), DestHeaders}. +src_decl_exchange(SrcX, SrcXKey, _Conn, Ch) -> + Ms = [#'queue.declare'{exclusive = true}, + #'queue.bind'{routing_key = SrcXKey, + exchange = SrcX}], + [amqp_channel:call(Ch, M) || M <- Ms]. + +src_decl_queue(SrcQ, SrcQArgs, Conn, _Ch) -> + ensure_queue(Conn, SrcQ, rabbit_misc:to_amqp_table(SrcQArgs)). + +src_check_queue(SrcQ, SrcQArgs, Conn, _Ch) -> + check_queue(Conn, SrcQ, rabbit_misc:to_amqp_table(SrcQArgs)). + get_uris(Key, Def) -> URIs = case pget(Key, Def) of B when is_binary(B) -> [B]; @@ -465,7 +508,14 @@ ensure_queue(Conn, Queue, XArgs) -> after catch amqp_channel:close(Ch) end. - +check_queue(Conn, Queue, _XArgs) -> + {ok, Ch} = amqp_connection:open_channel(Conn), + try + amqp_channel:call(Ch, #'queue.declare'{queue = Queue, + passive = true}) + after + catch amqp_channel:close(Ch) + end. opt_b2a(B) when is_binary(B) -> list_to_atom(binary_to_list(B)); opt_b2a(N) -> N. diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl index c5b8ed72ac75..5fca473c6671 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_status). @@ -14,9 +14,12 @@ report_blocked_status/2, remove/1, status/0, + status/1, lookup/1, cluster_status/0, - cluster_status_with_nodes/0]). + cluster_status_with_nodes/0, + get_status_table/0 +]). -export([inject_node_info/2, find_matching_shovel/3]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, @@ -68,7 +71,9 @@ remove(Name) -> %% format without a feature flag. -spec status() -> [status_tuple()]. status() -> - gen_server:call(?SERVER, status, infinity). 
+ status(infinity). +status(Timeout) -> + gen_server:call(?SERVER, status, Timeout). -spec cluster_status() -> [status_tuple()]. cluster_status() -> @@ -93,6 +98,10 @@ cluster_status_with_nodes() -> lookup(Name) -> gen_server:call(?SERVER, {lookup, Name}, infinity). +-spec get_status_table() -> ok. +get_status_table() -> + gen_server:call(?SERVER, get_status_table). + init([]) -> ?ETS_NAME = ets:new(?ETS_NAME, [named_table, {keypos, #entry.name}, private]), @@ -114,11 +123,20 @@ handle_call({lookup, Name}, _From, State) -> {timestamp, Entry#entry.timestamp}]; [] -> not_found end, - {reply, Link, State}. + {reply, Link, State}; + +handle_call(get_status_table, _From, State) -> + Entries = ets:tab2list(?ETS_NAME), + {reply, Entries, State}. handle_cast({report, Name, Type, Info, Timestamp}, State) -> - true = ets:insert(?ETS_NAME, #entry{name = Name, type = Type, info = Info, - timestamp = Timestamp}), + Entry = #entry{ + name = Name, + type = Type, + info = Info, + timestamp = Timestamp + }, + true = ets:insert(?ETS_NAME, Entry), rabbit_event:notify(shovel_worker_status, split_name(Name) ++ split_status(Info)), {noreply, State}; @@ -159,9 +177,17 @@ code_change(_OldVsn, State, _Extra) -> -spec inject_node_info(node(), [status_tuple()]) -> [status_tuple()]. inject_node_info(Node, Shovels) -> lists:map( - fun({Name, Type, {State, Opts}, Timestamp}) -> - Opts1 = Opts ++ [{node, Node}], - {Name, Type, {State, Opts1}, Timestamp} + %% starting + fun({Name, Type, State, Timestamp}) when is_atom(State) -> + Opts = [{node, Node}], + {Name, Type, {State, Opts}, Timestamp}; + %% terminated + ({Name, Type, {terminated, Reason}, Timestamp}) -> + {Name, Type, {terminated, Reason}, Timestamp}; + %% running + ({Name, Type, {State, Opts}, Timestamp}) -> + Opts1 = Opts ++ [{node, Node}], + {Name, Type, {State, Opts1}, Timestamp} end, Shovels). -spec find_matching_shovel(rabbit_types:vhost(), binary(), [status_tuple()]) -> status_tuple() | undefined. @@ -206,4 +232,3 @@ blocked_status_to_info(#entry{info = {running, Info}, {running, Info ++ [{blocked_status, BlockedStatus}]}; blocked_status_to_info(#entry{info = Info}) -> Info. - diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_sup.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_sup.erl index 48a801b554d9..71b004a806f2 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_sup.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_sup). @@ -12,8 +12,6 @@ -import(rabbit_shovel_config, []). --include("rabbit_shovel.hrl"). - start_link() -> case parse_configuration(application:get_env(shovels)) of {ok, Configurations} -> diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_util.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_util.erl index 83f0eaea5280..c70d0c0e6f3d 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_util.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
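With `status/1` and `get_status_table/0` added above, callers can bound how long a status query may block and then decorate the result for display. A small usage sketch (the 5-second timeout is illustrative):

```erlang
%% Sketch: query shovel status with a finite timeout instead of
%% infinity, then attach the local node to each entry.
local_status_with_node() ->
    Statuses = rabbit_shovel_status:status(5000),
    rabbit_shovel_status:inject_node_info(node(), Statuses).
```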
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_util). @@ -39,8 +39,13 @@ add_timestamp_header(Props = #'P_basic'{headers = Headers}) -> delete_shovel(VHost, Name, ActingUser) -> case rabbit_shovel_status:lookup({VHost, Name}) of not_found -> + %% Follow the user's obvious intent and delete the runtime parameter just in case the Shovel is in + %% a starting-failing-restarting loop. MK. + rabbit_log:info("Will delete runtime parameters of shovel '~ts' in virtual host '~ts'", [Name, VHost]), + ok = rabbit_runtime_parameters:clear(VHost, <<"shovel">>, Name, ActingUser), {error, not_found}; _Obj -> + rabbit_log:info("Will delete runtime parameters of shovel '~ts' in virtual host '~ts'", [Name, VHost]), ok = rabbit_runtime_parameters:clear(VHost, <<"shovel">>, Name, ActingUser) end. diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_worker.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_worker.erl index 62019e82fe74..3e5d5c5ec4cb 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_worker.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_worker.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_worker). @@ -62,6 +62,9 @@ handle_call(_Msg, _From, State) -> {noreply, State}. handle_cast(init, State = #state{config = Config0}) -> + rabbit_log_shovel:debug("Shovel ~ts is reporting its status", [human_readable_name(State#state.name)]), + rabbit_shovel_status:report(State#state.name, State#state.type, starting), + rabbit_log_shovel:info("Shovel ~ts will now try to connect...", [human_readable_name(State#state.name)]), try rabbit_shovel_behaviour:connect_source(Config0) of Config -> rabbit_log_shovel:debug("Shovel ~ts connected to source", [human_readable_name(maps:get(name, Config))]), @@ -150,10 +153,14 @@ terminate({shutdown, autodelete}, State = #state{name = Name, _ = rabbit_runtime_parameters:clear(VHost, <<"shovel">>, ShovelName, ?SHOVEL_USER), rabbit_shovel_status:remove(Name), ok; -terminate(shutdown, State) -> +terminate(shutdown, State = #state{name = Name}) -> close_connections(State), + rabbit_shovel_status:remove(Name), ok; -terminate(socket_closed_unexpectedly, State) -> +terminate(socket_closed_unexpectedly, State = #state{name = Name}) -> + rabbit_log_shovel:error("Shovel ~ts is stopping because of the socket closed unexpectedly", [human_readable_name(Name)]), + rabbit_shovel_status:report(State#state.name, State#state.type, + {terminated, "socket closed"}), close_connections(State), ok; terminate({'EXIT', heartbeat_timeout}, State = #state{name = Name}) -> diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_worker_sup.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_worker_sup.erl index 35764c628da0..7e2eba66f118 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_worker_sup.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_worker_sup.erl @@ -2,15 +2,15 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_worker_sup). -behaviour(mirrored_supervisor). -export([start_link/2, init/1]). +-export([id_to_khepri_path/1]). --include("rabbit_shovel.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). start_link(ShovelName, ShovelConfig) -> @@ -18,7 +18,7 @@ start_link(ShovelName, ShovelConfig) -> ?MODULE, [ShovelName, ShovelConfig]). init([Name, Config]) -> - ChildSpecs = [{Name, + ChildSpecs = [{id(Name), {rabbit_shovel_worker, start_link, [static, Name, Config]}, case Config of #{reconnect_delay := N} @@ -29,3 +29,9 @@ init([Name, Config]) -> worker, [rabbit_shovel_worker]}], {ok, {{one_for_one, 1, ?MAX_WAIT}, ChildSpecs}}. + +id(Name) when is_atom(Name) -> + Name. + +id_to_khepri_path(Name) when is_atom(Name) -> + [Name]. diff --git a/deps/rabbitmq_shovel/test/amqp10_SUITE.erl b/deps/rabbitmq_shovel/test/amqp10_SUITE.erl index 51964f8a35f2..3b865d6d3d2d 100644 --- a/deps/rabbitmq_shovel/test/amqp10_SUITE.erl +++ b/deps/rabbitmq_shovel/test/amqp10_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(amqp10_SUITE). diff --git a/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl b/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl index 543ec451932c..18b5ef3595e6 100644 --- a/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl +++ b/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl @@ -2,15 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(amqp10_dynamic_SUITE). -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - -compile(export_all). all() -> @@ -131,12 +129,15 @@ test_amqp10_destination(Config, Src, Dest, Sess, Protocol, ProtocolSrc) -> <<"message-ann-value">>}] end}]), Msg = publish_expect(Sess, Src, Dest, <<"tag1">>, <<"hello">>), + AppProps = amqp10_msg:application_properties(Msg), + ?assertMatch((#{user_id := <<"guest">>, creation_time := _}), (amqp10_msg:properties(Msg))), ?assertMatch((#{<<"shovel-name">> := <<"test">>, <<"shovel-type">> := <<"dynamic">>, <<"shovelled-by">> := _, <<"app-prop-key">> := <<"app-prop-value">>}), - (amqp10_msg:application_properties(Msg))), + (AppProps)), + ?assertEqual(undefined, maps:get(<<"delivery_mode">>, AppProps, undefined)), ?assertMatch((#{<<"message-ann-key">> := <<"message-ann-value">>}), (amqp10_msg:message_annotations(Msg))). diff --git a/deps/rabbitmq_shovel/test/amqp10_inter_cluster_SUITE.erl b/deps/rabbitmq_shovel/test/amqp10_inter_cluster_SUITE.erl new file mode 100644 index 000000000000..f7c25a8af8f1 --- /dev/null +++ b/deps/rabbitmq_shovel/test/amqp10_inter_cluster_SUITE.erl @@ -0,0 +1,189 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(amqp10_inter_cluster_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-compile([export_all, nowarn_export_all]). + +-import(rabbit_ct_broker_helpers, [rpc/5]). + +all() -> + [ + {group, tests} + ]. + +groups() -> + [ + {tests, [shuffle], + [ + old_to_new_on_old, + old_to_new_on_new, + new_to_old_on_old, + new_to_old_on_new + ]} + ]. + +%% In mixed version tests: +%% * node 0 is the new version single node cluster +%% * node 1 is the old version single node cluster +-define(NEW, 0). +-define(OLD, 1). + +init_per_suite(Config0) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + Config1 = rabbit_ct_helpers:set_config( + Config0, + [{rmq_nodename_suffix, ?MODULE}, + {rmq_nodes_count, 2}, + {rmq_nodes_clustered, false}]), + Config = rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + %% If node 1 runs 4.x, this is the new no-op plugin. + %% If node 1 runs 3.x, this is the old real plugin. + ok = rabbit_ct_broker_helpers:enable_plugin(Config, ?OLD, rabbitmq_amqp1_0), + Config. + +end_per_suite(Config) -> + application:stop(amqp10_client), + rabbit_ct_helpers:run_teardown_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +old_to_new_on_old(Config) -> + case rabbit_ct_helpers:is_mixed_versions() of + true -> + {skip, "TODO: Unskip when lower version is >= 3.13.7 " + "because AMQP 1.0 client must use SASL when connecting to 4.0"}; + false -> + ok = shovel(?OLD, ?NEW, ?OLD, Config) + end. + +old_to_new_on_new(Config) -> + ok = shovel(?OLD, ?NEW, ?NEW, Config). + +new_to_old_on_old(Config) -> + case rabbit_ct_helpers:is_mixed_versions() of + true -> + {skip, "TODO: Unskip when lower version is >= 3.13.7 " + "because AMQP 1.0 client must use SASL when connecting to 4.0"}; + false -> + ok = shovel(?NEW, ?OLD, ?OLD, Config) + end. + +new_to_old_on_new(Config) -> + ok = shovel(?NEW, ?OLD, ?NEW, Config). 
+ +shovel(SrcNode, DestNode, ShovelNode, Config) -> + SrcUri = shovel_test_utils:make_uri(Config, SrcNode), + DestUri = shovel_test_utils:make_uri(Config, DestNode), + SrcQ = <<"my source queue">>, + DestQ = <<"my destination queue">>, + Definition = [ + {<<"src-uri">>, SrcUri}, + {<<"src-protocol">>, <<"amqp10">>}, + {<<"src-address">>, SrcQ}, + {<<"dest-uri">>, [DestUri]}, + {<<"dest-protocol">>, <<"amqp10">>}, + {<<"dest-address">>, DestQ} + ], + ShovelName = <<"my shovel">>, + ok = rpc(Config, ShovelNode, rabbit_runtime_parameters, set, + [<<"/">>, <<"shovel">>, ShovelName, Definition, none]), + ok = shovel_test_utils:await_shovel(Config, ShovelNode, ShovelName), + + Hostname = ?config(rmq_hostname, Config), + SrcPort = rabbit_ct_broker_helpers:get_node_config(Config, SrcNode, tcp_port_amqp), + DestPort = rabbit_ct_broker_helpers:get_node_config(Config, DestNode, tcp_port_amqp), + {ok, SrcConn} = amqp10_client:open_connection(Hostname, SrcPort), + {ok, DestConn} = amqp10_client:open_connection(Hostname, DestPort), + {ok, SrcSess} = amqp10_client:begin_session_sync(SrcConn), + {ok, DestSess} = amqp10_client:begin_session_sync(DestConn), + {ok, Sender} = amqp10_client:attach_sender_link( + SrcSess, <<"my sender">>, <<"/amq/queue/", SrcQ/binary>>, settled), + {ok, Receiver} = amqp10_client:attach_receiver_link( + DestSess, <<"my receiver">>, <<"/amq/queue/", DestQ/binary>>, settled), + + ok = wait_for_credit(Sender), + NumMsgs = 20, + lists:map( + fun(N) -> + Bin = integer_to_binary(N), + Msg = amqp10_msg:new(Bin, Bin, true), + ok = amqp10_client:send_msg(Sender, Msg) + end, lists:seq(1, NumMsgs)), + ok = amqp10_client:close_connection(SrcConn), + + ok = amqp10_client:flow_link_credit(Receiver, NumMsgs, never), + Msgs = receive_messages(Receiver, NumMsgs), + lists:map( + fun(N) -> + Msg = lists:nth(N, Msgs), + ?assertEqual(integer_to_binary(N), + amqp10_msg:body_bin(Msg)) + end, lists:seq(1, NumMsgs)), + ok = amqp10_client:close_connection(DestConn), + + ok = rpc(Config, ShovelNode, rabbit_runtime_parameters, clear, + [<<"/">>, <<"shovel">>, ShovelName, none]), + ExpectedQueueLen = 0, + ?assertEqual([ExpectedQueueLen], rpc(Config, ?OLD, ?MODULE, delete_queues, [])), + ?assertEqual([ExpectedQueueLen], rpc(Config, ?NEW, ?MODULE, delete_queues, [])). + +wait_for_credit(Sender) -> + receive + {amqp10_event, {link, Sender, credited}} -> + ok + after 5000 -> + flush(?FUNCTION_NAME), + ct:fail(credited_timeout) + end. + +receive_messages(Receiver, N) -> + receive_messages0(Receiver, N, []). + +receive_messages0(_Receiver, 0, Acc) -> + lists:reverse(Acc); +receive_messages0(Receiver, N, Acc) -> + receive + {amqp10_msg, Receiver, Msg} -> + receive_messages0(Receiver, N - 1, [Msg | Acc]) + after 5000 -> + ct:fail({timeout, {num_received, length(Acc)}, {num_missing, N}}) + end. + +flush(Prefix) -> + receive + Msg -> + ct:pal("~p flushed: ~p~n", [Prefix, Msg]), + flush(Prefix) + after 1 -> + ok + end. + +delete_queues() -> + [begin + {ok, N} = rabbit_amqqueue:delete(Q, false, false, <<"tests">>), + N + end || Q <- rabbit_amqqueue:list()]. diff --git a/deps/rabbitmq_shovel/test/amqp10_shovel_SUITE.erl b/deps/rabbitmq_shovel/test/amqp10_shovel_SUITE.erl index eff2cede72fd..60cd0d89c785 100644 --- a/deps/rabbitmq_shovel/test/amqp10_shovel_SUITE.erl +++ b/deps/rabbitmq_shovel/test/amqp10_shovel_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(amqp10_shovel_SUITE). @@ -12,7 +12,6 @@ -export([ ]). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp10_common/include/amqp10_framing.hrl"). diff --git a/deps/rabbitmq_shovel/test/config_SUITE.erl b/deps/rabbitmq_shovel/test/config_SUITE.erl index 61fccfa7792a..0294a74daeec 100644 --- a/deps/rabbitmq_shovel/test/config_SUITE.erl +++ b/deps/rabbitmq_shovel/test/config_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_SUITE). diff --git a/deps/rabbitmq_shovel/test/configuration_SUITE.erl b/deps/rabbitmq_shovel/test/configuration_SUITE.erl index aba1e6bc38c4..41c9bda7d223 100644 --- a/deps/rabbitmq_shovel/test/configuration_SUITE.erl +++ b/deps/rabbitmq_shovel/test/configuration_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(configuration_SUITE). @@ -12,6 +12,7 @@ -compile(export_all). +-define(QUEUE, <<"test_queue">>). -define(EXCHANGE, <<"test_exchange">>). -define(TO_SHOVEL, <<"to_the_shovel">>). -define(FROM_SHOVEL, <<"from_the_shovel">>). @@ -21,7 +22,8 @@ all() -> [ - {group, non_parallel_tests} + {group, non_parallel_tests}, + {group, with_predefined_topology} ]. groups() -> @@ -31,7 +33,10 @@ groups() -> invalid_legacy_configuration, valid_legacy_configuration, valid_configuration - ]} + ]}, + {with_predefined_topology, [], [ + valid_configuration_with_predefined_resources + ]} ]. %% ------------------------------------------------------------------- @@ -41,7 +46,9 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, ?MODULE} + {rmq_nodename_suffix, ?MODULE}, + {ignored_crashes, + ["server_initiated_close,404"]} ]), rabbit_ct_helpers:run_setup_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ @@ -53,9 +60,19 @@ end_per_suite(Config) -> rabbit_ct_client_helpers:teardown_steps() ++ rabbit_ct_broker_helpers:teardown_steps()). +init_per_group(with_predefined_topology, Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_shovel, topology, [{predeclared, true}]]), + Config; + init_per_group(_, Config) -> Config. +end_per_group(with_predefined_topology, Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, + [rabbitmq_shovel, topology]), + Config; + end_per_group(_, Config) -> Config. @@ -209,6 +226,12 @@ valid_configuration(Config) -> ok = setup_shovels(Config), run_valid_test(Config). 
+valid_configuration_with_predefined_resources(Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, setup_shovels2, [Config]), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, await_terminated_shovel, [test_shovel]), + declare_queue(Config), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, await_running_shovel, [test_shovel]). + run_valid_test(Config) -> Chan = rabbit_ct_client_helpers:open_channel(Config, 0), @@ -271,6 +294,12 @@ run_valid_test(Config) -> rabbit_ct_client_helpers:close_channel(Chan). +declare_queue(Config) -> + Chan = rabbit_ct_client_helpers:open_channel(Config, 0), + amqp_channel:call(Chan, #'queue.declare'{queue = ?QUEUE, + durable = true}), + rabbit_ct_client_helpers:close_channel(Chan). + setup_legacy_shovels(Config) -> ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, setup_legacy_shovels1, [Config]). @@ -349,6 +378,34 @@ setup_shovels1(Config) -> ok = application:start(rabbitmq_shovel), await_running_shovel(test_shovel). +setup_shovels2(Config) -> + _ = application:stop(rabbitmq_shovel), + Hostname = ?config(rmq_hostname, Config), + TcpPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, + tcp_port_amqp), + %% a working config + application:set_env( + rabbitmq_shovel, + shovels, + [{test_shovel, + [{source, + [{uris, [rabbit_misc:format("amqp://~ts:~b/%2f?heartbeat=5", + [Hostname, TcpPort])]}, + {queue, ?QUEUE}]}, + {destination, + [{uris, [rabbit_misc:format("amqp://~ts:~b/%2f", + [Hostname, TcpPort])]}, + {publish_fields, [{exchange, ?EXCHANGE}, {routing_key, ?FROM_SHOVEL}]}, + {publish_properties, [{delivery_mode, 2}, + {cluster_id, <<"my-cluster">>}, + {content_type, ?SHOVELLED}]}, + {add_forward_headers, true}, + {add_timestamp_header, true}]}, + {ack_mode, on_confirm}]}], + infinity), + + ok = application:start(rabbitmq_shovel). + await_running_shovel(Name) -> case [N || {N, _, {running, _}, _} <- rabbit_shovel_status:status(), @@ -357,3 +414,11 @@ await_running_shovel(Name) -> _ -> timer:sleep(100), await_running_shovel(Name) end. +await_terminated_shovel(Name) -> + case [N || {N, _, {terminated, _}, _} + <- rabbit_shovel_status:status(), + N =:= Name] of + [_] -> ok; + _ -> timer:sleep(100), + await_terminated_shovel(Name) + end. diff --git a/deps/rabbitmq_shovel/test/delete_shovel_command_SUITE.erl b/deps/rabbitmq_shovel/test/delete_shovel_command_SUITE.erl index cec919fe88f5..1ec09df3bc57 100644 --- a/deps/rabbitmq_shovel/test/delete_shovel_command_SUITE.erl +++ b/deps/rabbitmq_shovel/test/delete_shovel_command_SUITE.erl @@ -2,13 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(delete_shovel_command_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("stdlib/include/assert.hrl"). -compile(export_all). @@ -16,7 +16,8 @@ all() -> [ - {group, non_parallel_tests} + {group, non_parallel_tests}, + {group, cluster_size_2} ]. groups() -> @@ -24,6 +25,9 @@ groups() -> {non_parallel_tests, [], [ delete_not_found, delete + ]}, + {cluster_size_2, [], [ + clear_param_on_different_node ]} ]. @@ -33,25 +37,31 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). 
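The two await helpers above poll `rabbit_shovel_status:status/0` until the shovel reports `running` or `terminated`, looping forever if it never does. A bounded variant of the same polling pattern (a sketch, not part of the suite) would fail the test after a fixed number of attempts:

```erlang
%% Sketch: poll for a shovel status with an upper bound on retries.
await_shovel_state(Name, ExpectedState, Retries) when Retries > 0 ->
    Matching = [N || {N, _, {State, _}, _} <- rabbit_shovel_status:status(),
                     N =:= Name, State =:= ExpectedState],
    case Matching of
        [_] ->
            ok;
        _ when Retries =:= 1 ->
            ct:fail({shovel_never_reached_state, Name, ExpectedState});
        _ ->
            timer:sleep(100),
            await_shovel_state(Name, ExpectedState, Retries - 1)
    end.
```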
+ +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(cluster_size_2, Config) -> + init_per_multinode_group(cluster_size_2, Config, 2); +init_per_group(Group, Config) -> + init_per_multinode_group(Group, Config, 1). + +init_per_multinode_group(_Group, Config, NodeCount) -> + Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, ?MODULE} + {rmq_nodes_count, NodeCount}, + {rmq_nodename_suffix, Suffix} ]), - Config2 = rabbit_ct_helpers:run_setup_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), - Config2. + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config, +end_per_group(_Group, Config) -> + rabbit_ct_helpers:run_steps(Config, rabbit_ct_client_helpers:teardown_steps() ++ rabbit_ct_broker_helpers:teardown_steps()). -init_per_group(_, Config) -> - Config. - -end_per_group(_, Config) -> - Config. - init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). @@ -76,3 +86,18 @@ delete(Config) -> ok = ?CMD:run([<<"myshovel">>], Opts), [] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_shovel_status, status, []). +clear_param_on_different_node(Config) -> + shovel_test_utils:set_param( + Config, + <<"myshovel">>, [{<<"src-queue">>, <<"src">>}, + {<<"dest-queue">>, <<"dest">>}]), + [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + [_] = rabbit_ct_broker_helpers:rpc(Config, A, rabbit_shovel_status, + status, []), + [] = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_shovel_status, + status, []), + shovel_test_utils:clear_param(Config, B, <<"myshovel">>), + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, A, rabbit_shovel_status, + status, []), "Deleted shovel still reported on node A"), + ?assertEqual([], rabbit_ct_broker_helpers:rpc(Config, B, rabbit_shovel_status, + status, []), "Deleted shovel still reported on node B"). diff --git a/deps/rabbitmq_shovel/test/dynamic_SUITE.erl b/deps/rabbitmq_shovel/test/dynamic_SUITE.erl index 94aed94f0a18..c526ceb2ce31 100644 --- a/deps/rabbitmq_shovel/test/dynamic_SUITE.erl +++ b/deps/rabbitmq_shovel/test/dynamic_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(dynamic_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). @@ -18,6 +17,7 @@ all() -> [ {group, core_tests}, + {group, core_tests_with_preclared_topology}, {group, quorum_queue_tests}, {group, stream_queue_tests} ]. 
@@ -42,16 +42,22 @@ groups() -> credit_flow, dest_resource_alarm_on_confirm, dest_resource_alarm_on_publish, - dest_resource_alarm_no_ack + dest_resource_alarm_no_ack, + missing_src_queue_with_src_predeclared, + missing_dest_queue_with_dest_predeclared ]}, - - {quorum_queue_tests, [], [ + {core_tests_with_preclared_topology, [], [ + missing_src_queue_without_src_predeclared, + missing_dest_queue_without_dest_predeclared, + missing_src_and_dest_queue_with_false_src_and_dest_predeclared + ]}, + {quorum_queue_tests, [], [ quorum_queues - ]}, + ]}, - {stream_queue_tests, [], [ + {stream_queue_tests, [], [ stream_queues - ]} + ]} ]. %% ------------------------------------------------------------------- @@ -61,7 +67,11 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, ?MODULE} + {rmq_nodename_suffix, ?MODULE}, + {ignored_crashes, [ + "server_initiated_close,404", + "writer,send_failed,closed" + ]} ]), rabbit_ct_helpers:run_setup_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ @@ -82,9 +92,18 @@ init_per_group(stream_queue_tests, Config) -> false -> Config; _ -> {skip, "stream queue tests are skipped in mixed mode"} end; +init_per_group(core_tests_with_preclared_topology, Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_shovel, topology, [{predeclared, true}]]), + Config; + init_per_group(_, Config) -> Config. +end_per_group(core_tests_with_preclared_topology, Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, + [rabbitmq_shovel, topology]), + Config; end_per_group(_, Config) -> Config. @@ -265,6 +284,155 @@ exchange(Config) -> <<"queue">>, <<"hello">>) end). +missing_src_queue_with_src_predeclared(Config) -> + with_ch(Config, + fun (Ch) -> + amqp_channel:call( + Ch, #'queue.declare'{queue = <<"dest">>, + durable = true}), + amqp_channel:call( + Ch, #'exchange.declare'{exchange = <<"dest-ex">>}), + amqp_channel:call( + Ch, #'queue.bind'{queue = <<"dest">>, + exchange = <<"dest-ex">>, + routing_key = <<"dest-key">>}), + + shovel_test_utils:set_param_nowait(Config, + <<"test">>, [{<<"src-queue">>, <<"src">>}, + {<<"src-predeclared">>, true}, + {<<"dest-exchange">>, <<"dest-ex">>}, + {<<"dest-exchange-key">>, <<"dest-key">>}, + {<<"src-prefetch-count">>, 1}]), + shovel_test_utils:await_shovel(Config, 0, <<"test">>, terminated), + expect_missing_queue(Ch, <<"src">>), + + with_newch(Config, + fun(Ch2) -> + amqp_channel:call( + Ch2, #'queue.declare'{queue = <<"src">>, + durable = true}), + amqp_channel:call( + Ch2, #'queue.bind'{queue = <<"src">>, + exchange = <<"amq.direct">>, + routing_key = <<"src-key">>}), + shovel_test_utils:await_shovel(Config, 0, <<"test">>, running), + + publish_expect(Ch2, <<"amq.direct">>, <<"src-key">>, <<"dest">>, <<"hello!">>) + end) + end). + + +missing_src_and_dest_queue_with_false_src_and_dest_predeclared(Config) -> + with_ch(Config, + fun (Ch) -> + + shovel_test_utils:set_param( + Config, + <<"test">>, [{<<"src-queue">>, <<"src">>}, + {<<"src-predeclared">>, false}, + {<<"dest-predeclared">>, false}, + {<<"dest-queue">>, <<"dest">>}]), + publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>) + + end). 
+ +missing_dest_queue_with_dest_predeclared(Config) -> + with_ch(Config, + fun (Ch) -> + amqp_channel:call( + Ch, #'queue.declare'{queue = <<"src">>, + durable = true}), + amqp_channel:call( + Ch, #'queue.bind'{queue = <<"src">>, + exchange = <<"amq.direct">>, + routing_key = <<"src-key">>}), + + shovel_test_utils:set_param_nowait(Config, + <<"test">>, [{<<"src-queue">>, <<"src">>}, + {<<"dest-predeclared">>, true}, + {<<"dest-queue">>, <<"dest">>}, + {<<"src-prefetch-count">>, 1}]), + shovel_test_utils:await_shovel(Config, 0, <<"test">>, terminated), + expect_missing_queue(Ch, <<"dest">>), + + with_newch(Config, + fun(Ch2) -> + amqp_channel:call( + Ch2, #'queue.declare'{queue = <<"dest">>, + durable = true}), + + shovel_test_utils:await_shovel(Config, 0, <<"test">>, running), + + publish_expect(Ch2, <<"amq.direct">>, <<"src-key">>, <<"dest">>, <<"hello!">>) + end) + end). + +missing_src_queue_without_src_predeclared(Config) -> + with_ch(Config, + fun (Ch) -> + amqp_channel:call( + Ch, #'queue.declare'{queue = <<"dest">>, + durable = true}), + amqp_channel:call( + Ch, #'exchange.declare'{exchange = <<"dest-ex">>}), + amqp_channel:call( + Ch, #'queue.bind'{queue = <<"dest">>, + exchange = <<"dest-ex">>, + routing_key = <<"dest-key">>}), + + shovel_test_utils:set_param_nowait(Config, + <<"test">>, [{<<"src-queue">>, <<"src">>}, + {<<"dest-exchange">>, <<"dest-ex">>}, + {<<"dest-exchange-key">>, <<"dest-key">>}, + {<<"src-prefetch-count">>, 1}]), + shovel_test_utils:await_shovel(Config, 0, <<"test">>, terminated), + expect_missing_queue(Ch, <<"src">>), + + with_newch(Config, + fun(Ch2) -> + amqp_channel:call( + Ch2, #'queue.declare'{queue = <<"src">>, + durable = true}), + amqp_channel:call( + Ch2, #'queue.bind'{queue = <<"src">>, + exchange = <<"amq.direct">>, + routing_key = <<"src-key">>}), + shovel_test_utils:await_shovel(Config, 0, <<"test">>, running), + + publish_expect(Ch2, <<"amq.direct">>, <<"src-key">>, <<"dest">>, <<"hello!">>) + end) + end). + + +missing_dest_queue_without_dest_predeclared(Config) -> + with_ch(Config, + fun (Ch) -> + amqp_channel:call( + Ch, #'queue.declare'{queue = <<"src">>, + durable = true}), + amqp_channel:call( + Ch, #'queue.bind'{queue = <<"src">>, + exchange = <<"amq.direct">>, + routing_key = <<"src-key">>}), + + shovel_test_utils:set_param_nowait(Config, + <<"test">>, [{<<"src-queue">>, <<"src">>}, + {<<"dest-queue">>, <<"dest">>}, + {<<"src-prefetch-count">>, 1}]), + shovel_test_utils:await_shovel(Config, 0, <<"test">>, terminated), + expect_missing_queue(Ch, <<"dest">>), + + with_newch(Config, + fun(Ch2) -> + amqp_channel:call( + Ch2, #'queue.declare'{queue = <<"dest">>, + durable = true}), + + shovel_test_utils:await_shovel(Config, 0, <<"test">>, running), + + publish_expect(Ch2, <<"amq.direct">>, <<"src-key">>, <<"dest">>, <<"hello!">>) + end) + end). missing_dest_exchange(Config) -> with_ch(Config, @@ -697,6 +865,12 @@ with_ch(Config, Fun) -> cleanup(Config), ok. +with_newch(Config, Fun) -> + Ch = rabbit_ct_client_helpers:open_channel(Config, 0), + Fun(Ch), + rabbit_ct_client_helpers:close_channel(Ch), + ok. + publish(Ch, X, Key, Payload) when is_binary(Payload) -> publish(Ch, X, Key, #amqp_msg{payload = Payload}); @@ -726,6 +900,23 @@ expect(Ch, Q, Payload) -> expect_empty(Ch, Q) -> #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{ queue = Q }). 
+expect_missing_queue(Ch, Q) -> + try + amqp_channel:call(Ch, #'queue.declare'{queue = Q, + passive = true}), + ct:fail(queue_still_exists) + catch exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _Text}}, _} -> + ok + end. +expect_missing_exchange(Ch, X) -> + try + amqp_channel:call(Ch, #'exchange.declare'{exchange = X, + passive = true}), + ct:fail(exchange_still_exists) + catch exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _Text}}, _} -> + ok + end. + publish_count(Ch, X, Key, M, Count) -> [begin @@ -791,12 +982,9 @@ shovels_from_parameters() -> [rabbit_misc:pget(name, Shovel) || Shovel <- L]. set_default_credit(Config, Value) -> - {ok, OrigValue} = - rabbit_ct_broker_helpers:rpc( - Config, 0, application, get_env, [rabbit, credit_flow_default_credit]), - ok = - rabbit_ct_broker_helpers:rpc( - Config, 0, application, set_env, [rabbit, credit_flow_default_credit, Value]), + Key = credit_flow_default_credit, + OrigValue = rabbit_ct_broker_helpers:rpc(Config, persistent_term, get, [Key]), + ok = rabbit_ct_broker_helpers:rpc(Config, persistent_term, put, [Key, Value]), OrigValue. set_vm_memory_high_watermark(Config, Limit) -> diff --git a/deps/rabbitmq_shovel/test/parameters_SUITE.erl b/deps/rabbitmq_shovel/test/parameters_SUITE.erl index 837faea7bc3f..522acc20b6b0 100644 --- a/deps/rabbitmq_shovel/test/parameters_SUITE.erl +++ b/deps/rabbitmq_shovel/test/parameters_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(parameters_SUITE). @@ -159,7 +159,7 @@ test_parse_amqp091(Params) -> reconnect_delay := 1001, dest := #{module := rabbit_amqp091_shovel, uris := ["amqp://remotehost:5672"], - props_fun := PropsFun + props_fun := {M, F, Args} }, source := #{module := rabbit_amqp091_shovel, uris := ["amqp://localhost:5672"], @@ -170,9 +170,9 @@ test_parse_amqp091(Params) -> #'P_basic'{headers = ActualHeaders, delivery_mode = 2, - cluster_id = <<"x">>} = PropsFun("amqp://localhost:5672", - "amqp://remotehost:5672", - #'P_basic'{headers = undefined}), + cluster_id = <<"x">>} = apply(M, F, Args ++ ["amqp://localhost:5672", + "amqp://remotehost:5672", + #'P_basic'{headers = undefined}]), assert_amqp901_headers(ActualHeaders), ok. @@ -185,7 +185,7 @@ test_parse_amqp091_with_blank_proprties(Params) -> reconnect_delay := 1001, dest := #{module := rabbit_amqp091_shovel, uris := ["amqp://remotehost:5672"], - props_fun := PropsFun + props_fun := {M, F, Args} }, source := #{module := rabbit_amqp091_shovel, uris := ["amqp://localhost:5672"], @@ -194,9 +194,9 @@ test_parse_amqp091_with_blank_proprties(Params) -> delete_after := 'queue-length'} } = Result, - #'P_basic'{headers = ActualHeaders} = PropsFun("amqp://localhost:5672", - "amqp://remotehost:5672", - #'P_basic'{headers = undefined}), + #'P_basic'{headers = ActualHeaders} = apply(M, F, Args ++ ["amqp://localhost:5672", + "amqp://remotehost:5672", + #'P_basic'{headers = undefined}]), assert_amqp901_headers(ActualHeaders), ok. 
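`set_default_credit/2` above now flips the broker's `credit_flow_default_credit` setting through `persistent_term` rather than the application environment. A sketch of how a test could wrap that change and restore the original value afterwards (it assumes, as the helper does, that the broker reads the value from `persistent_term`):

```erlang
%% Sketch: temporarily override the credit-flow default on node 0 and
%% restore it once Fun/0 has run.
with_default_credit(Config, TempValue, Fun) ->
    Key = credit_flow_default_credit,
    Orig = rabbit_ct_broker_helpers:rpc(Config, persistent_term, get, [Key]),
    ok = rabbit_ct_broker_helpers:rpc(Config, persistent_term, put,
                                      [Key, TempValue]),
    try
        Fun()
    after
        ok = rabbit_ct_broker_helpers:rpc(Config, persistent_term, put,
                                          [Key, Orig])
    end.
```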
diff --git a/deps/rabbitmq_shovel/test/rolling_upgrade_SUITE.erl b/deps/rabbitmq_shovel/test/rolling_upgrade_SUITE.erl new file mode 100644 index 000000000000..c4051ae3bba6 --- /dev/null +++ b/deps/rabbitmq_shovel/test/rolling_upgrade_SUITE.erl @@ -0,0 +1,268 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rolling_upgrade_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("khepri/include/khepri.hrl"). + +-export([suite/0, + all/0, + groups/0, + init_per_suite/1, + end_per_suite/1, + init_per_group/2, + end_per_group/2, + init_per_testcase/2, + end_per_testcase/2, + + child_id_format/1]). + +suite() -> + [{timetrap, {minutes, 5}}]. + +all() -> + [ + {group, mnesia_store}, + {group, khepri_store} + ]. + +groups() -> + [{mnesia_store, [], [child_id_format]}, + {khepri_store, [], [child_id_format]}]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(mnesia_store, Config) -> + rabbit_ct_helpers:set_config(Config, [{metadata_store__manual, mnesia}]); +init_per_group(khepri_store, Config) -> + rabbit_ct_helpers:set_config(Config, [{metadata_store__manual, khepri}]). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase), + ClusterSize = 4, + TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase), + Config1 = rabbit_ct_helpers:set_config( + Config, + [{rmq_nodes_count, ClusterSize}, + {rmq_nodes_clustered, false}, + {rmq_nodename_suffix, Testcase}, + {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}, + {ignored_crashes, + ["process is stopped by supervisor", + "broker forced connection closure with reason 'shutdown'"]} + ]), + rabbit_ct_helpers:run_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +child_id_format(Config) -> + [NewRefNode, + OldNode, + NewNode, + NodeWithQueues] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + + %% We build this test on the assumption that `rabbit_ct_broker_helpers' + %% starts nodes this way: + %% Node 1: the primary copy of RabbitMQ the test is started from + %% Node 2: the secondary umbrella (if any) + %% Node 3: the primary copy + %% Node 4: the secondary umbrella + %% ... 
+    %%
+    %% Therefore, `NewRefNode' will use the primary copy, `OldNode' the secondary
+    %% umbrella, `NewNode' the primary copy, and `NodeWithQueues' the
+    %% secondary umbrella.
+
+    %% Declare source and target queues on a node that won't run the shovel.
+    ct:pal("Declaring queues on node ~s", [NodeWithQueues]),
+    SourceQName = <<"source-queue">>,
+    TargetQName = <<"target-queue">>,
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(
+                   Config, NodeWithQueues),
+    lists:foreach(
+      fun(QName) ->
+              ?assertEqual(
+                 {'queue.declare_ok', QName, 0, 0},
+                 amqp_channel:call(
+                   Ch, #'queue.declare'{queue = QName, durable = true}))
+      end, [SourceQName, TargetQName]),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+
+    %% Declare a dynamic shovel on the old node.
+    ct:pal("Declaring a dynamic shovel on node ~s", [OldNode]),
+    VHost = <<"/">>,
+    ShovelName = <<"test-shovel">>,
+    shovel_test_utils:set_param(
+      Config,
+      OldNode,
+      NodeWithQueues,
+      ShovelName,
+      [{<<"src-queue">>, SourceQName},
+       {<<"dest-queue">>, TargetQName}]),
+
+    %% We declare the same shovel on a new node that won't be clustered with
+    %% the rest. It is only used as a reference node to determine which ID
+    %% format the new version is using.
+    ct:pal("Declaring the same dynamic shovel on node ~s (as a reference)", [NewRefNode]),
+    shovel_test_utils:set_param(
+      Config,
+      NewRefNode,
+      NodeWithQueues,
+      ShovelName,
+      [{<<"src-queue">>, SourceQName},
+       {<<"dest-queue">>, TargetQName}]),
+
+    %% Verify the format of the child ID. Some versions of RabbitMQ 3.11.x and
+    %% 3.12.x use a temporary experimental format that was erroneously
+    %% backported from a work-in-progress happening in the main branch.
+    ct:pal("Checking mirrored_supervisor child ID formats"),
+    [{Id0, _, _, _}] = rabbit_ct_broker_helpers:rpc(
+                         Config, NewRefNode,
+                         mirrored_supervisor, which_children,
+                         [rabbit_shovel_dyn_worker_sup_sup]),
+    PrimaryIdType = case Id0 of
+                        {VHost, ShovelName} ->
+                            ct:pal(
+                              "The nodes from the primary umbrella are using "
+                              "the NORMAL mirrored_supervisor child ID format "
+                              "natively"),
+                            normal;
+                        {[VHost, ShovelName], {VHost, ShovelName}} ->
+                            ct:pal(
+                              "The nodes from the primary umbrella are using "
+                              "the TEMPORARY EXPERIMENTAL mirrored_supervisor "
+                              "child ID format natively"),
+                            temp_exp
+                    end,
+
+    [{Id1, _, _, _}] = rabbit_ct_broker_helpers:rpc(
+                         Config, OldNode,
+                         mirrored_supervisor, which_children,
+                         [rabbit_shovel_dyn_worker_sup_sup]),
+    SecondaryIdType = case Id1 of
+                          {VHost, ShovelName} ->
+                              ct:pal(
+                                "The nodes from the secondary umbrella are "
+                                "using the NORMAL mirrored_supervisor child "
+                                "ID format natively"),
+                              normal;
+                          {[VHost, ShovelName], {VHost, ShovelName}} ->
+                              ct:pal(
+                                "The nodes from the secondary umbrella are "
+                                "using the TEMPORARY EXPERIMENTAL "
+                                "mirrored_supervisor child ID format "
+                                "natively"),
+                              temp_exp
+                      end,
+    if
+        PrimaryIdType =/= SecondaryIdType ->
+            ct:pal(
+              "The mirrored_supervisor child ID format is changing between "
+              "the primary and the secondary umbrellas!");
+        true ->
+            ok
+    end,
+
+    %% Verify that the supervisors exist on all nodes.
+ ct:pal( + "Checking running mirrored_supervisor children on old node ~s", + [OldNode]), + lists:foreach( + fun(Node) -> + ?assertMatch( + [{Id, _, _, _}] + when (SecondaryIdType =:= normal andalso + Id =:= {VHost, ShovelName}) orelse + (SecondaryIdType =:= temp_exp andalso + Id =:= {[VHost, ShovelName], {VHost, ShovelName}}), + rabbit_ct_broker_helpers:rpc( + Config, Node, + mirrored_supervisor, which_children, + [rabbit_shovel_dyn_worker_sup_sup])) + end, [OldNode]), + + %% Simulate a rolling upgrade by: + %% 1. adding new nodes to the old cluster + %% 2. stopping the old nodes + %% + %% After that, the supervisors run on the new code. + ct:pal("Clustering nodes ~s and ~s", [OldNode, NewNode]), + Config1 = rabbit_ct_broker_helpers:cluster_nodes( + Config, [OldNode, NewNode]), + ok = rabbit_ct_broker_helpers:stop_broker(Config1, OldNode), + ok = rabbit_ct_broker_helpers:reset_node(Config1, OldNode), + + shovel_test_utils:await_shovel(Config, NewNode, ShovelName), + + case ?config(metadata_store__manual, Config) of + mnesia -> + ok; + khepri -> + ok = rabbit_ct_broker_helpers:enable_feature_flag( + Config, [NewNode], khepri_db) + end, + + %% Verify that the supervisors still use the same IDs. + ct:pal( + "Checking running mirrored_supervisor children on new node ~s", + [NewNode]), + lists:foreach( + fun(Node) -> + ?assertMatch( + [{Id, _, _, _}] + when (SecondaryIdType =:= normal andalso + Id =:= {VHost, ShovelName}) orelse + (SecondaryIdType =:= temp_exp andalso + Id =:= {[VHost, ShovelName], {VHost, ShovelName}}), + rabbit_ct_broker_helpers:rpc( + Config1, Node, + mirrored_supervisor, which_children, + [rabbit_shovel_dyn_worker_sup_sup])) + end, [NewNode]), + + case ?config(metadata_store__manual, Config) of + mnesia -> + ok; + khepri -> + Path = rabbit_db_msup:khepri_mirrored_supervisor_path(), + ?assertMatch( + {ok, + #{[rabbit_db_msup, mirrored_supervisor_childspec, + rabbit_shovel_dyn_worker_sup_sup, VHost, ShovelName] := _}}, + rabbit_ct_broker_helpers:rpc( + Config, NewNode, rabbit_khepri, list, + [Path ++ [?KHEPRI_WILDCARD_STAR_STAR]])) + end. diff --git a/deps/rabbitmq_shovel/test/shovel_status_command_SUITE.erl b/deps/rabbitmq_shovel/test/shovel_status_command_SUITE.erl index 8b80d8013b6e..8c8d015c3d3b 100644 --- a/deps/rabbitmq_shovel/test/shovel_status_command_SUITE.erl +++ b/deps/rabbitmq_shovel/test/shovel_status_command_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(shovel_status_command_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). diff --git a/deps/rabbitmq_shovel/test/shovel_test_utils.erl b/deps/rabbitmq_shovel/test/shovel_test_utils.erl index 896fc60bf5e7..f5a9947b300b 100644 --- a/deps/rabbitmq_shovel/test/shovel_test_utils.erl +++ b/deps/rabbitmq_shovel/test/shovel_test_utils.erl @@ -2,47 +2,77 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. %% -module(shovel_test_utils). -include_lib("common_test/include/ct.hrl"). --export([set_param/3, set_param_nowait/3, await_shovel/2, await_shovel1/2, - shovels_from_status/0, get_shovel_status/2, - await/1, await/2, clear_param/2]). +-export([set_param/3, set_param/4, set_param/5, set_param_nowait/3, + await_shovel/2, await_shovel/3, await_shovel/4, await_shovel1/3, + shovels_from_status/0, shovels_from_status/1, + get_shovel_status/2, get_shovel_status/3, + restart_shovel/2, + await/1, await/2, clear_param/2, clear_param/3, make_uri/2]). -make_uri(Config) -> +make_uri(Config, Node) -> Hostname = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + Port = rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_amqp), list_to_binary(lists:flatten(io_lib:format("amqp://~ts:~b", [Hostname, Port]))). + set_param(Config, Name, Value) -> - set_param_nowait(Config, Name, Value), - await_shovel(Config, Name). + set_param_nowait(Config, 0, 0, Name, Value), + await_shovel(Config, 0, Name). + +set_param(Config, Node, Name, Value) -> + set_param(Config, Node, Node, Name, Value). + +set_param(Config, Node, QueueNode, Name, Value) -> + set_param_nowait(Config, Node, QueueNode, Name, Value), + await_shovel(Config, Node, Name). set_param_nowait(Config, Name, Value) -> - Uri = make_uri(Config), - ok = rabbit_ct_broker_helpers:rpc(Config, 0, + set_param_nowait(Config, 0, 0, Name, Value). + +set_param_nowait(Config, Node, QueueNode, Name, Value) -> + Uri = make_uri(Config, QueueNode), + ok = rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_runtime_parameters, set, [ <<"/">>, <<"shovel">>, Name, [{<<"src-uri">>, Uri}, {<<"dest-uri">>, [Uri]} | Value], none]). await_shovel(Config, Name) -> - rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, await_shovel1, [Config, Name]). + await_shovel(Config, 0, Name). + +await_shovel(Config, Node, Name) -> + await_shovel(Config, Node, Name, running). + +await_shovel(Config, Node, Name, ExpectedState) -> + rabbit_ct_broker_helpers:rpc(Config, Node, + ?MODULE, await_shovel1, [Config, Name, ExpectedState]). -await_shovel1(_Config, Name) -> - await(fun () -> lists:member(Name, shovels_from_status()) end). +await_shovel1(_Config, Name, ExpectedState) -> + Ret = await(fun() -> + Status = shovels_from_status(ExpectedState), + lists:member(Name, Status) + end, 30_000), + Ret. -shovels_from_status() -> +shovels_from_status() -> + shovels_from_status(running). + +shovels_from_status(ExpectedState) -> S = rabbit_shovel_status:status(), - [N || {{<<"/">>, N}, dynamic, {running, _}, _} <- S]. + [N || {{<<"/">>, N}, dynamic, {State, _}, _} <- S, State == ExpectedState]. get_shovel_status(Config, Name) -> + get_shovel_status(Config, 0, Name). + +get_shovel_status(Config, Node, Name) -> S = rabbit_ct_broker_helpers:rpc( - Config, 0, rabbit_shovel_status, lookup, [{<<"/">>, Name}]), + Config, Node, rabbit_shovel_status, lookup, [{<<"/">>, Name}]), case S of not_found -> not_found; @@ -70,5 +100,15 @@ await(Pred, Timeout) -> end. clear_param(Config, Name) -> - rabbit_ct_broker_helpers:rpc(Config, 0, + clear_param(Config, 0, Name). + +clear_param(Config, Node, Name) -> + rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_runtime_parameters, clear, [<<"/">>, <<"shovel">>, Name, <<"acting-user">>]). + +restart_shovel(Config, Name) -> + restart_shovel(Config, 0, Name). + +restart_shovel(Config, Node, Name) -> + rabbit_ct_broker_helpers:rpc(Config, + Node, rabbit_shovel_util, restart_shovel, [<<"/">>, Name]). 
\ No newline at end of file diff --git a/deps/rabbitmq_shovel_management/.gitignore b/deps/rabbitmq_shovel_management/.gitignore deleted file mode 100644 index e909ef368916..000000000000 --- a/deps/rabbitmq_shovel_management/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -/rabbitmq_shovel_management.d diff --git a/deps/rabbitmq_shovel_management/BUILD.bazel b/deps/rabbitmq_shovel_management/BUILD.bazel index dafb102efb5e..f92f0c86deef 100644 --- a/deps/rabbitmq_shovel_management/BUILD.bazel +++ b/deps/rabbitmq_shovel_management/BUILD.bazel @@ -65,7 +65,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", ) @@ -85,7 +85,6 @@ rabbitmq_home( name = "broker-for-tests-home", plugins = [ "//deps/rabbit:erlang_app", - "//deps/rabbitmq_amqp1_0:erlang_app", ":erlang_app", ], ) @@ -100,16 +99,7 @@ rabbitmq_integration_suite( ) rabbitmq_suite( - name = "rabbit_shovel_mgmt_SUITE", - deps = [ - "//deps/rabbit_common:erlang_app", - "//deps/rabbitmq_management_agent:erlang_app", - "@meck//:erlang_app", - ], -) - -rabbitmq_suite( - name = "rabbit_shovel_mgmt_util_SUITE", + name = "unit_SUITE", deps = [ "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_shovel:erlang_app", diff --git a/deps/rabbitmq_shovel_management/Makefile b/deps/rabbitmq_shovel_management/Makefile index 8fc599c1e45a..1b5f98d02936 100644 --- a/deps/rabbitmq_shovel_management/Makefile +++ b/deps/rabbitmq_shovel_management/Makefile @@ -6,7 +6,7 @@ define PROJECT_APP_EXTRA_KEYS endef DEPS = rabbit_common rabbit rabbitmq_management rabbitmq_shovel -TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_amqp1_0 meck +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers meck DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_shovel_management/README.md b/deps/rabbitmq_shovel_management/README.md index 8e2bcfc8a433..25ab90cc4582 100644 --- a/deps/rabbitmq_shovel_management/README.md +++ b/deps/rabbitmq_shovel_management/README.md @@ -73,7 +73,7 @@ curl -u guest:guest -v -X PUT -H 'Content-Type: application/json' -d @./shovel.j #### `GET /api/parameters/shovel/{vhost}/{name}` Shows the configurtion parameters for a shovel. -**Example** +**Example** ```bash curl -u guest:guest -v http://localhost:15672/api/parameters/shovel/%2F/my-shovel @@ -83,7 +83,7 @@ curl -u guest:guest -v http://localhost:15672/api/parameters/shovel/%2F/my-shove Deletes a shovel. -**Example** +**Example** ```bash curl -u guest:guest -v -X DELETE http://localhost:15672/api/parameters/shovel/%2F/my-shovel @@ -93,4 +93,4 @@ curl -u guest:guest -v -X DELETE http://localhost:15672/api/parameters/shovel/%2 Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html). -2007-2018 (c) 2007-2020 VMware, Inc. or its affiliates. +2007-2018 (c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
diff --git a/deps/rabbitmq_shovel_management/app.bzl b/deps/rabbitmq_shovel_management/app.bzl index 0e7a6169cbd4..3c338cf4f318 100644 --- a/deps/rabbitmq_shovel_management/app.bzl +++ b/deps/rabbitmq_shovel_management/app.bzl @@ -9,7 +9,8 @@ def all_beam_files(name = "all_beam_files"): erlang_bytecode( name = "other_beam", srcs = [ - "src/rabbit_shovel_mgmt.erl", + "src/rabbit_shovel_mgmt_shovel.erl", + "src/rabbit_shovel_mgmt_shovels.erl", "src/rabbit_shovel_mgmt_util.erl", ], hdrs = [":public_and_private_hdrs"], @@ -33,7 +34,8 @@ def all_test_beam_files(name = "all_test_beam_files"): name = "test_other_beam", testonly = True, srcs = [ - "src/rabbit_shovel_mgmt.erl", + "src/rabbit_shovel_mgmt_shovel.erl", + "src/rabbit_shovel_mgmt_shovels.erl", "src/rabbit_shovel_mgmt_util.erl", ], hdrs = [":public_and_private_hdrs"], @@ -72,7 +74,8 @@ def all_srcs(name = "all_srcs"): filegroup( name = "srcs", srcs = [ - "src/rabbit_shovel_mgmt.erl", + "src/rabbit_shovel_mgmt_shovel.erl", + "src/rabbit_shovel_mgmt_shovels.erl", "src/rabbit_shovel_mgmt_util.erl", ], ) @@ -99,19 +102,10 @@ def test_suite_beam_files(name = "test_suite_beam_files"): deps = ["//deps/rabbit_common:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) erlang_bytecode( - name = "rabbit_shovel_mgmt_SUITE_beam_files", + name = "unit_SUITE_beam_files", testonly = True, - srcs = ["test/rabbit_shovel_mgmt_SUITE.erl"], - outs = ["test/rabbit_shovel_mgmt_SUITE.beam"], - app_name = "rabbitmq_shovel_management", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management_agent:erlang_app"], - ) - erlang_bytecode( - name = "rabbit_shovel_mgmt_util_SUITE_beam_files", - testonly = True, - srcs = ["test/rabbit_shovel_mgmt_util_SUITE.erl"], - outs = ["test/rabbit_shovel_mgmt_util_SUITE.beam"], + srcs = ["test/unit_SUITE.erl"], + outs = ["test/unit_SUITE.beam"], app_name = "rabbitmq_shovel_management", erlc_opts = "//:test_erlc_opts", ) diff --git a/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovels.ejs b/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovels.ejs index 837674f062d5..979bd420bf6f 100644 --- a/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovels.ejs +++ b/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovels.ejs @@ -357,7 +357,7 @@ connect to server-name, with credentials and SSL
    7. - amqps://server-name?cacertfile=/path/to/cacert.pem&certfile=/path/to/cert.pem&keyfile=/path/to/key.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external
      + amqps://server-name?cacertfile=/path/to/cacert.pem&certfile=/path/to/cert.pem&keyfile=/path/to/key.pem&verify=verify_peer&auth_mechanism=external
      connect to server-name, with SSL and EXTERNAL authentication
    8. diff --git a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.hrl b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.hrl index de13c5a61631..10021a277878 100644 --- a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.hrl +++ b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --define(SHOVEL_CALLS_TIMEOUT_MS, 25000). \ No newline at end of file +-define(SHOVEL_CALLS_TIMEOUT_MS, 25000). diff --git a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovel.erl similarity index 64% rename from deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl rename to deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovel.erl index e270baa45800..929743702918 100644 --- a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl +++ b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovel.erl @@ -2,10 +2,10 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(rabbit_shovel_mgmt). +-module(rabbit_shovel_mgmt_shovel). -behaviour(rabbit_mgmt_extension). @@ -19,9 +19,9 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_shovel_mgmt.hrl"). -dispatcher() -> [{"/shovels", ?MODULE, []}, - {"/shovels/:vhost", ?MODULE, []}, - {"/shovels/vhost/:vhost/:name", ?MODULE, []}, +-define(COMPONENT, <<"shovel">>). + +dispatcher() -> [{"/shovels/vhost/:vhost/:name", ?MODULE, []}, {"/shovels/vhost/:vhost/:name/restart", ?MODULE, []}]. web_ui() -> [{javascript, <<"shovel.js">>}]. @@ -42,15 +42,21 @@ resource_exists(ReqData, Context) -> not_found -> false; VHost -> - case rabbit_mgmt_util:id(name, ReqData) of + case name(ReqData) of none -> true; Name -> %% Deleting or restarting a shovel case get_shovel_node(VHost, Name, ReqData, Context) of undefined -> - rabbit_log:error("Shovel with the name '~ts' was not found on virtual host '~ts'", + rabbit_log:error("Shovel with the name '~ts' was not found on virtual host '~ts'. " + "It may be failing to connect and report its status.", [Name, VHost]), - false; + case is_restart(ReqData) of + true -> false; + %% this is a deletion attempt, it can continue and idempotently try to + %% delete the shovel + false -> true + end; _ -> true end @@ -59,8 +65,9 @@ resource_exists(ReqData, Context) -> {Reply, ReqData, Context}. to_json(ReqData, Context) -> - rabbit_mgmt_util:reply_list( - filter_vhost_req(rabbit_shovel_mgmt_util:status(ReqData, Context), ReqData), ReqData, Context). + Shovel = parameter(ReqData), + rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter(Shovel), + ReqData, Context). is_authorized(ReqData, Context) -> rabbit_mgmt_util:is_authorized_monitor(ReqData, Context). 
@@ -73,9 +80,17 @@ delete_resource(ReqData, #context{user = #user{username = Username}}=Context) -> Name -> case get_shovel_node(VHost, Name, ReqData, Context) of undefined -> rabbit_log:error("Could not find shovel data for shovel '~ts' in vhost: '~ts'", [Name, VHost]), - false; + case is_restart(ReqData) of + true -> + false; + %% this is a deletion attempt + false -> + %% if we do not know the node, use the local one + try_delete(node(), VHost, Name, Username), + true + end; Node -> - %% We must distinguish between a delete and restart + %% We must distinguish between a delete and a restart case is_restart(ReqData) of true -> rabbit_log:info("Asked to restart shovel '~ts' in vhost '~ts' on node '~s'", [Name, VHost, Node]), @@ -91,17 +106,8 @@ delete_resource(ReqData, #context{user = #user{username = Username}}=Context) -> end; _ -> - rabbit_log:info("Asked to delete shovel '~ts' in vhost '~ts' on node '~s'", [Name, VHost, Node]), - try erpc:call(Node, rabbit_shovel_util, delete_shovel, [VHost, Name, Username], ?SHOVEL_CALLS_TIMEOUT_MS) of - ok -> true; - {error, not_found} -> - rabbit_log:error("Could not find shovel data for shovel '~s' in vhost: '~s'", [Name, VHost]), - false - catch _:Reason -> - rabbit_log:error("Failed to delete shovel '~s' on vhost '~s', reason: ~p", - [Name, VHost, Reason]), - false - end + try_delete(Node, VHost, Name, Username), + true end end @@ -110,6 +116,19 @@ delete_resource(ReqData, #context{user = #user{username = Username}}=Context) -> %%-------------------------------------------------------------------- +name(ReqData) -> rabbit_mgmt_util:id(name, ReqData). + +parameter(ReqData) -> + VHostName = rabbit_mgmt_util:vhost(ReqData), + Name = name(ReqData), + if + VHostName =/= not_found andalso + Name =/= none -> + rabbit_runtime_parameters:lookup(VHostName, ?COMPONENT, Name); + true -> + not_found + end. + is_restart(ReqData) -> Path = cowboy_req:path(ReqData), case string:find(Path, "/restart", trailing) of @@ -117,13 +136,6 @@ is_restart(ReqData) -> _ -> true end. -filter_vhost_req(List, ReqData) -> - case rabbit_mgmt_util:vhost(ReqData) of - none -> List; - VHost -> [I || I <- List, - pget(vhost, I) =:= VHost] - end. - get_shovel_node(VHost, Name, ReqData, Context) -> Shovels = rabbit_shovel_mgmt_util:status(ReqData, Context), Match = find_matching_shovel(VHost, Name, Shovels), @@ -150,3 +162,18 @@ find_matching_shovel(VHost, Name, Shovels) -> _ -> undefined end. + +-spec try_delete(node(), vhost:name(), any(), rabbit_types:username()) -> boolean(). +try_delete(Node, VHost, Name, Username) -> + rabbit_log:info("Asked to delete shovel '~ts' in vhost '~ts' on node '~s'", [Name, VHost, Node]), + %% this will clear the runtime parameter, the ultimate way of deleting a dynamic Shovel eventually. MK. + try erpc:call(Node, rabbit_shovel_util, delete_shovel, [VHost, Name, Username], ?SHOVEL_CALLS_TIMEOUT_MS) of + ok -> true; + {error, not_found} -> + rabbit_log:error("Could not find shovel data for shovel '~s' in vhost: '~s'", [Name, VHost]), + false + catch _:Reason -> + rabbit_log:error("Failed to delete shovel '~s' on vhost '~s', reason: ~p", + [Name, VHost, Reason]), + false + end. 
diff --git a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovels.erl b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovels.erl new file mode 100644 index 000000000000..ca5a5f528556 --- /dev/null +++ b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovels.erl @@ -0,0 +1,57 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_shovel_mgmt_shovels). + +-behaviour(rabbit_mgmt_extension). + +-export([dispatcher/0, web_ui/0]). +-export([init/2, to_json/2, resource_exists/2, content_types_provided/2, + is_authorized/2, allowed_methods/2]). + +-import(rabbit_misc, [pget/2]). + +-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include("rabbit_shovel_mgmt.hrl"). + +dispatcher() -> [{"/shovels", ?MODULE, []}, + {"/shovels/:vhost", ?MODULE, []}]. + +web_ui() -> [{javascript, <<"shovel.js">>}]. + +%%-------------------------------------------------------------------- + +init(Req, _Opts) -> + {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}. + +content_types_provided(ReqData, Context) -> + {[{<<"application/json">>, to_json}], ReqData, Context}. + +allowed_methods(ReqData, Context) -> + {[<<"HEAD">>, <<"GET">>, <<"OPTIONS">>], ReqData, Context}. + +resource_exists(ReqData, Context) -> + Reply = case rabbit_mgmt_util:vhost(ReqData) of + not_found -> false; + _Found -> true + end, + {Reply, ReqData, Context}. + +to_json(ReqData, Context) -> + rabbit_mgmt_util:reply_list( + filter_vhost_req(rabbit_shovel_mgmt_util:status(ReqData, Context), ReqData), ReqData, Context). + +is_authorized(ReqData, Context) -> + rabbit_mgmt_util:is_authorized_monitor(ReqData, Context). + +filter_vhost_req(List, ReqData) -> + case rabbit_mgmt_util:vhost(ReqData) of + none -> List; + VHost -> [I || I <- List, + pget(vhost, I) =:= VHost] + end. diff --git a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_util.erl b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_util.erl index 705c2e88bc79..0cbfdd1d553a 100644 --- a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_util.erl +++ b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_shovel_mgmt_util). diff --git a/deps/rabbitmq_shovel_management/test/http_SUITE.erl b/deps/rabbitmq_shovel_management/test/http_SUITE.erl index b007bbdef44b..d4e93c91ebf9 100644 --- a/deps/rabbitmq_shovel_management/test/http_SUITE.erl +++ b/deps/rabbitmq_shovel_management/test/http_SUITE.erl @@ -2,69 +2,101 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -module(http_SUITE). -include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). +-import(rabbit_mgmt_test_util, [http_get/3, http_get/5, http_put/4, http_post/4, http_delete/3, http_delete/4, http_get_fails/2]). +-import(rabbit_ct_helpers, [await_condition/2]). + -compile(export_all). all() -> [ - {group, non_parallel_tests} + {group, dynamic_shovels}, + {group, static_shovels}, + {group, plugin_management} ]. groups() -> [ - {non_parallel_tests, [], [ - amqp10_shovels, - shovels, - dynamic_plugin_enable_disable - ]} + {dynamic_shovels, [], [ + start_and_list_a_dynamic_amqp10_shovel, + start_and_get_a_dynamic_amqp10_shovel, + start_and_get_a_dynamic_amqp091_shovel_with_publish_properties, + start_and_get_a_dynamic_amqp091_shovel_with_missing_publish_properties, + start_and_get_a_dynamic_amqp091_shovel_with_empty_publish_properties, + create_and_delete_a_dynamic_shovel_that_successfully_connects, + create_and_delete_a_dynamic_shovel_that_fails_to_connect + ]}, + + {static_shovels, [], [ + start_static_shovels + ]}, + + {plugin_management, [], [ + dynamic_plugin_enable_disable + ]} ]. %% ------------------------------------------------------------------- %% Testsuite setup/teardown. %% ------------------------------------------------------------------- -init_per_suite(Config) -> +init_per_group(static_shovels, Config) -> rabbit_ct_helpers:log_environment(), Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, ?MODULE} - ]), + ]), rabbit_ct_helpers:run_setup_steps(Config1, [ fun configure_shovels/1, fun start_inets/1 + ] ++ rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()); +init_per_group(_Group, Config) -> + rabbit_ct_helpers:log_environment(), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, ?MODULE} + ]), + rabbit_ct_helpers:run_setup_steps(Config1, [ + fun start_inets/1 ] ++ rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). -end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()). +end_per_group(start_static_shovels, Config) -> + http_delete(Config, "/vhosts/v", ?NO_CONTENT), + http_delete(Config, "/users/admin", ?NO_CONTENT), + http_delete(Config, "/users/mon", ?NO_CONTENT), -init_per_group(_, Config) -> - Config. + remove_all_dynamic_shovels(Config, <<"/">>), + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()); end_per_group(_, Config) -> - Config. + remove_all_dynamic_shovels(Config, <<"/">>), + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(create_and_delete_a_dynamic_shovel_that_fails_to_connect = Testcase, Config) -> + case rabbit_ct_helpers:is_mixed_versions() of + true -> + {skip, "not mixed versions compatible"}; + _ -> + rabbit_ct_helpers:testcase_started(Config, Testcase) + end; init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). 
-end_per_testcase(amqp10_shovels = Testcase, Config) -> - http_delete(Config, "/parameters/shovel/%2f/my-dynamic-amqp10", ?NO_CONTENT), - rabbit_ct_helpers:testcase_finished(Config, Testcase); -end_per_testcase(shovels = Testcase, Config) -> - http_delete(Config, "/vhosts/v", ?NO_CONTENT), - http_delete(Config, "/users/admin", ?NO_CONTENT), - http_delete(Config, "/users/mon", ?NO_CONTENT), - rabbit_ct_helpers:testcase_finished(Config, Testcase); end_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_finished(Config, Testcase). @@ -89,49 +121,43 @@ configure_shovels(Config) -> ]}). start_inets(Config) -> - ok = application:start(inets), + _ = application:start(inets), Config. %% ------------------------------------------------------------------- -%% Testcases. +%% Testcases %% ------------------------------------------------------------------- -amqp10_shovels(Config) -> - Port = integer_to_binary( - rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), - http_put(Config, "/parameters/shovel/%2f/my-dynamic-amqp10", - #{value => #{'src-protocol' => <<"amqp10">>, - 'src-uri' => <<"amqp://localhost:", Port/binary>>, - 'src-address' => <<"test">>, - 'dest-protocol' => <<"amqp10">>, - 'dest-uri' => <<"amqp://localhost:", Port/binary>>, - 'dest-address' => <<"test2">>, - 'dest-properties' => #{}, - 'dest-application-properties' => #{}, - 'dest-message-annotations' => #{}}}, ?CREATED), - % sleep to give the shovel time to emit a full report - % that includes the protocols used. - wait_until(fun () -> - case lists:sort(fun(#{name := AName}, #{name := BName}) -> - AName < BName - end, - http_get(Config, "/shovels", "guest", "guest", ?OK)) - of - [#{name := <<"my-dynamic-amqp10">>, - src_protocol := <<"amqp10">>, - dest_protocol := <<"amqp10">>, - type := <<"dynamic">>}, - #{name := <<"my-static">>, - src_protocol := <<"amqp091">>, - dest_protocol := <<"amqp091">>, - type := <<"static">>}] -> - true; - _ -> - false - end - end, 20), +start_and_list_a_dynamic_amqp10_shovel(Config) -> + remove_all_dynamic_shovels(Config, <<"/">>), + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + declare_shovel(Config, Name), + await_shovel_startup(Config, ID), + Shovels = list_shovels(Config), + ?assert(lists:any( + fun(M) -> + maps:get(name, M) =:= Name + end, Shovels)), + delete_shovel(Config, <<"dynamic-amqp10-await-startup-1">>), + ok. +start_and_get_a_dynamic_amqp10_shovel(Config) -> + remove_all_dynamic_shovels(Config, <<"/">>), + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + declare_shovel(Config, Name), + await_shovel_startup(Config, ID), + Sh = get_shovel(Config, Name), + ?assertEqual(Name, maps:get(name, Sh)), + delete_shovel(Config, Name), + + ok. -define(StaticPattern, #{name := <<"my-static">>, type := <<"static">>}). @@ -144,7 +170,49 @@ amqp10_shovels(Config) -> vhost := <<"v">>, type := <<"dynamic">>}). -shovels(Config) -> +start_and_get_a_dynamic_amqp091_shovel_with_publish_properties(Config) -> + remove_all_dynamic_shovels(Config, <<"/">>), + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + declare_amqp091_shovel_with_publish_properties(Config, Name), + await_shovel_startup(Config, ID), + Sh = get_shovel(Config, Name), + ?assertEqual(Name, maps:get(name, Sh)), + delete_shovel(Config, Name), + + ok. 
+ +start_and_get_a_dynamic_amqp091_shovel_with_missing_publish_properties(Config) -> + remove_all_dynamic_shovels(Config, <<"/">>), + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + declare_amqp091_shovel(Config, Name), + await_shovel_startup(Config, ID), + Sh = get_shovel(Config, Name), + ?assertEqual(Name, maps:get(name, Sh)), + delete_shovel(Config, Name), + + ok. + +start_and_get_a_dynamic_amqp091_shovel_with_empty_publish_properties(Config) -> + remove_all_dynamic_shovels(Config, <<"/">>), + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + declare_amqp091_shovel_with_publish_properties(Config, Name, #{}), + await_shovel_startup(Config, ID), + Sh = get_shovel(Config, Name), + ?assertEqual(Name, maps:get(name, Sh)), + delete_shovel(Config, Name), + + ok. + +start_static_shovels(Config) -> http_put(Config, "/users/admin", #{password => <<"admin">>, tags => <<"administrator">>}, ?CREATED), http_put(Config, "/users/mon", @@ -209,9 +277,56 @@ shovels(Config) -> http_get(Config, "/shovels/v", "mon", "mon", ?OK)), ok. -%% It's a bit arbitrary to be testing this here, but we want to be -%% able to test that mgmt extensions can be started and stopped -%% *somewhere*, and here is as good a place as any. +create_and_delete_a_dynamic_shovel_that_successfully_connects(Config) -> + Port = integer_to_binary( + rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), + + remove_all_dynamic_shovels(Config, <<"/">>), + Name = <<"dynamic-amqp10-to-delete-1">>, + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + http_put(Config, "/parameters/shovel/%2f/dynamic-amqp10-to-delete-1", + #{value => #{'src-protocol' => <<"amqp10">>, + 'src-uri' => <<"amqp://localhost:", Port/binary>>, + 'src-address' => <<"test">>, + 'dest-protocol' => <<"amqp10">>, + 'dest-uri' => <<"amqp://localhost:", Port/binary>>, + 'dest-address' => <<"test2">>, + 'dest-properties' => #{}, + 'dest-application-properties' => #{}, + 'dest-message-annotations' => #{}}}, ?CREATED), + + await_shovel_startup(Config, ID), + timer:sleep(3_000), + delete_shovel(Config, Name), + await_shovel_removed(Config, ID). + +create_and_delete_a_dynamic_shovel_that_fails_to_connect(Config) -> + Port = integer_to_binary( + rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), + + remove_all_dynamic_shovels(Config, <<"/">>), + Name = <<"dynamic-amqp10-to-delete-2">>, + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + http_put(Config, "/parameters/shovel/%2f/dynamic-amqp10-to-delete-2", + #{value => #{'src-protocol' => <<"amqp10">>, + 'src-uri' => <<"amqp://non-existing-hostname.lolz.wut:", Port/binary>>, + 'src-address' => <<"test">>, + 'dest-protocol' => <<"amqp10">>, + 'dest-uri' => <<"amqp://non-existing-hostname.lolz.wut:", Port/binary>>, + 'dest-address' => <<"test2">>, + 'dest-properties' => #{}, + 'dest-application-properties' => #{}, + 'dest-message-annotations' => #{}}}, ?CREATED), + + await_shovel_startup(Config, ID), + timer:sleep(3_000), + delete_shovel(Config, Name), + await_shovel_removed(Config, ID). 
+ dynamic_plugin_enable_disable(Config) -> http_get(Config, "/shovels", ?OK), rabbit_ct_broker_helpers:disable_plugin(Config, 0, @@ -220,8 +335,8 @@ dynamic_plugin_enable_disable(Config) -> http_get(Config, "/overview", ?OK), rabbit_ct_broker_helpers:disable_plugin(Config, 0, "rabbitmq_management"), - http_fail(Config, "/shovels"), - http_fail(Config, "/overview"), + http_get_fails(Config, "/shovels"), + http_get_fails(Config, "/overview"), rabbit_ct_broker_helpers:enable_plugin(Config, 0, "rabbitmq_management"), http_get(Config, "/shovels", ?NOT_FOUND), @@ -232,97 +347,9 @@ dynamic_plugin_enable_disable(Config) -> http_get(Config, "/overview", ?OK), passed. -%%--------------------------------------------------------------------------- -%% TODO this is mostly copypasta from the mgmt tests - -http_get(Config, Path) -> - http_get(Config, Path, ?OK). - -http_get(Config, Path, CodeExp) -> - http_get(Config, Path, "guest", "guest", CodeExp). - -http_get(Config, Path, User, Pass, CodeExp) -> - {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} = - req(Config, get, Path, [auth_header(User, Pass)]), - assert_code(CodeExp, CodeAct, "GET", Path, ResBody), - decode(CodeExp, Headers, ResBody). - -http_fail(Config, Path) -> - {error, {failed_connect, _}} = req(Config, get, Path, []). - -http_put(Config, Path, List, CodeExp) -> - http_put_raw(Config, Path, format_for_upload(List), CodeExp). - -http_put(Config, Path, List, User, Pass, CodeExp) -> - http_put_raw(Config, Path, format_for_upload(List), User, Pass, CodeExp). - -http_post(Config, Path, List, CodeExp) -> - http_post_raw(Config, Path, format_for_upload(List), CodeExp). - -http_post(Config, Path, List, User, Pass, CodeExp) -> - http_post_raw(Config, Path, format_for_upload(List), User, Pass, CodeExp). - -format_for_upload(none) -> - <<"">>; -format_for_upload(Map) -> - iolist_to_binary(rabbit_json:encode(convert_keys(Map))). - -convert_keys(Map) -> - maps:fold(fun - (K, V, Acc) when is_map(V) -> - Acc#{atom_to_binary(K, latin1) => convert_keys(V)}; - (K, V, Acc) -> - Acc#{atom_to_binary(K, latin1) => V} - end, #{}, Map). - -http_put_raw(Config, Path, Body, CodeExp) -> - http_upload_raw(Config, put, Path, Body, "guest", "guest", CodeExp). - -http_put_raw(Config, Path, Body, User, Pass, CodeExp) -> - http_upload_raw(Config, put, Path, Body, User, Pass, CodeExp). - -http_post_raw(Config, Path, Body, CodeExp) -> - http_upload_raw(Config, post, Path, Body, "guest", "guest", CodeExp). - -http_post_raw(Config, Path, Body, User, Pass, CodeExp) -> - http_upload_raw(Config, post, Path, Body, User, Pass, CodeExp). - -http_upload_raw(Config, Type, Path, Body, User, Pass, CodeExp) -> - {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} = - req(Config, Type, Path, [auth_header(User, Pass)], Body), - assert_code(CodeExp, CodeAct, Type, Path, ResBody), - decode(CodeExp, Headers, ResBody). - -http_delete(Config, Path, CodeExp) -> - http_delete(Config, Path, "guest", "guest", CodeExp). - -http_delete(Config, Path, User, Pass, CodeExp) -> - {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} = - req(Config, delete, Path, [auth_header(User, Pass)]), - assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody), - decode(CodeExp, Headers, ResBody). - -assert_code(CodeExp, CodeAct, _Type, _Path, _Body) -> - ?assertEqual(CodeExp, CodeAct). - -req_uri(Config, Path) -> - rabbit_misc:format("~ts/api~ts", [ - rabbit_ct_broker_helpers:node_uri(Config, 0, management), - Path - ]). 
- -req(Config, Type, Path, Headers) -> - httpc:request(Type, - {req_uri(Config, Path), Headers}, - ?HTTPC_OPTS, []). - -req(Config, Type, Path, Headers, Body) -> - httpc:request(Type, - {req_uri(Config, Path), Headers, "application/json", Body}, - ?HTTPC_OPTS, []). - -decode(?OK, _Headers, ResBody) -> cleanup(rabbit_json:decode(rabbit_data_coercion:to_binary(ResBody))); -decode(_, Headers, _ResBody) -> Headers. +%% +%% Implementation +%% cleanup(L) when is_list(L) -> [cleanup(I) || I <- L]; @@ -345,13 +372,112 @@ assert_item(ExpI, ActI) -> ExpI = maps:with(maps:keys(ExpI), ActI), ok. -wait_until(_Fun, 0) -> - ?assert(wait_failed); -wait_until(Fun, N) -> - case Fun() of - true -> - ok; - false -> - timer:sleep(500), - wait_until(Fun, N - 1) - end. +list_shovels(Config) -> + list_shovels(Config, "%2F"). + +list_shovels(Config, VirtualHost) -> + Path = io_lib:format("/shovels/~s", [VirtualHost]), + http_get(Config, Path, ?OK). + +get_shovel(Config, Name) -> + get_shovel(Config, "%2F", Name). + +get_shovel(Config, VirtualHost, Name) -> + Path = io_lib:format("/shovels/vhost/~s/~s", [VirtualHost, Name]), + http_get(Config, Path, ?OK). + +delete_shovel(Config, Name) -> + delete_shovel(Config, "%2F", Name). + +delete_shovel(Config, VirtualHost, Name) -> + Path = io_lib:format("/shovels/vhost/~s/~s", [VirtualHost, Name]), + http_delete(Config, Path, ?NO_CONTENT). + +remove_all_dynamic_shovels(Config, VHost) -> + rabbit_ct_broker_helpers:rpc(Config, 0, + rabbit_runtime_parameters, clear_vhost, [VHost, <<"CT tests">>]). + +declare_shovel(Config, Name) -> + Port = integer_to_binary( + rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), + http_put(Config, io_lib:format("/parameters/shovel/%2f/~ts", [Name]), + #{ + value => #{ + 'src-protocol' => <<"amqp10">>, + 'src-uri' => <<"amqp://localhost:", Port/binary>>, + 'src-address' => <<"test">>, + 'dest-protocol' => <<"amqp10">>, + 'dest-uri' => <<"amqp://localhost:", Port/binary>>, + 'dest-address' => <<"test2">>, + 'dest-properties' => #{}, + 'dest-application-properties' => #{}, + 'dest-message-annotations' => #{} + } + }, ?CREATED). + +declare_amqp091_shovel(Config, Name) -> + Port = integer_to_binary( + rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), + http_put(Config, io_lib:format("/parameters/shovel/%2f/~ts", [Name]), + #{ + value => #{ + <<"src-protocol">> => <<"amqp091">>, + <<"src-uri">> => <<"amqp://localhost:", Port/binary>>, + <<"src-queue">> => <<"amqp091.src.test">>, + <<"src-delete-after">> => <<"never">>, + <<"dest-protocol">> => <<"amqp091">>, + <<"dest-uri">> => <<"amqp://localhost:", Port/binary>>, + <<"dest-queue">> => <<"amqp091.dest.test">> + } + }, ?CREATED). + +declare_amqp091_shovel_with_publish_properties(Config, Name) -> + Props = #{ + <<"delivery_mode">> => 2, + <<"app_id">> => <<"shovel_management:http_SUITE">> + }, + declare_amqp091_shovel_with_publish_properties(Config, Name, Props). 
+ +declare_amqp091_shovel_with_publish_properties(Config, Name, Props) -> + Port = integer_to_binary( + rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), + http_put(Config, io_lib:format("/parameters/shovel/%2f/~ts", [Name]), + #{ + value => #{ + <<"src-protocol">> => <<"amqp091">>, + <<"src-uri">> => <<"amqp://localhost:", Port/binary>>, + <<"src-queue">> => <<"amqp091.src.test">>, + <<"src-delete-after">> => <<"never">>, + <<"dest-protocol">> => <<"amqp091">>, + <<"dest-uri">> => <<"amqp://localhost:", Port/binary>>, + <<"dest-queue">> => <<"amqp091.dest.test">>, + <<"dest-publish-properties">> => Props + } + }, ?CREATED). + +await_shovel_startup(Config, Name) -> + await_shovel_startup(Config, Name, 10_000). + +await_shovel_startup(Config, Name, Timeout) -> + await_condition( + fun() -> + does_shovel_exist(Config, Name) + end, Timeout). + +await_shovel_removed(Config, Name) -> + await_shovel_removed(Config, Name, 10_000). + +await_shovel_removed(Config, Name, Timeout) -> + await_condition( + fun() -> + not does_shovel_exist(Config, Name) + end, Timeout). + +lookup_shovel_status(Config, Name) -> + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_shovel_status, lookup, [Name]). + +does_shovel_exist(Config, Name) -> + case lookup_shovel_status(Config, Name) of + not_found -> false; + _Found -> true + end. \ No newline at end of file diff --git a/deps/rabbitmq_shovel_management/test/rabbit_shovel_mgmt_SUITE.erl b/deps/rabbitmq_shovel_management/test/rabbit_shovel_mgmt_SUITE.erl deleted file mode 100644 index 1953aa35851c..000000000000 --- a/deps/rabbitmq_shovel_management/test/rabbit_shovel_mgmt_SUITE.erl +++ /dev/null @@ -1,103 +0,0 @@ --module(rabbit_shovel_mgmt_SUITE). - --include_lib("common_test/include/ct.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). - --compile(export_all). - --define(MOCK_SHOVELS, - [[ - {node,node()}, - {name,<<"shovel1">>}, - {vhost,<<"/">>}, - {type,dynamic}, - {state,running}, - {src_uri,<<"amqp://">>}, - {src_protocol,<<"amqp091">>}, - {dest_protocol,<<"amqp091">>}, - {dest_uri,<<"amqp://">>}, - {src_queue,<<"q1">>}, - {dest_queue,<<"q2">>} - ], - [ - {node,'node2'}, - {name,<<"shovel2">>}, - {vhost,<<"otherVhost">>}, - {type,dynamic}, - {state,running}, - {src_uri,<<"amqp://">>}, - {src_protocol,<<"amqp091">>}, - {dest_protocol,<<"amqp091">>}, - {dest_uri,<<"amqp://">>}, - {src_queue,<<"q1">>}, - {dest_queue,<<"q2">>} - ]]). - -all() -> - [ - get_shovel_node_shovel_different_name, - get_shovel_node_shovel_different_vhost_name, - get_shovel_node_shovel_found, - delete_resource_badrpc - ]. - -init_per_testcase(delete_resource_badrpc, _Config) -> - meck:expect(rabbit_shovel_mgmt_util, status, fun(_,_) -> ?MOCK_SHOVELS end), - meck:expect(rabbit_shovel_status, lookup, - fun({_, Name}) -> - case [S || S <- ?MOCK_SHOVELS, proplists:get_value(name, S) =:= Name] of - [Obj] -> Obj; - [] -> not_found - end - end), - _Config; -init_per_testcase(_, _Config) -> - meck:new(rabbit_shovel_mgmt_util), - meck:expect(rabbit_shovel_mgmt_util, status, fun(_,_) -> ?MOCK_SHOVELS end), - _Config. - -end_per_testcase(delete_resource_badrpc, _Config) -> - meck:unload(rabbit_shovel_mgmt_util), - meck:unload(rabbit_shovel_status), - _Config; -end_per_testcase(_, _Config) -> - meck:unload(rabbit_shovel_mgmt_util), - _Config. 
- -get_shovel_node_shovel_different_name(_Config) -> - VHost = <<"otherVhost">>, - Name= <<"shovelThatDoesntExist">>, - User = #user{username="admin",tags = [administrator]}, - Node = rabbit_shovel_mgmt:get_shovel_node(VHost, Name, {}, #context{user = User}), - ?assertEqual(undefined, Node). - -get_shovel_node_shovel_different_vhost_name(_Config) -> - VHost = <<"VHostThatDoesntExist">>, - Name= <<"shovel1">>, - User = #user{username="admin",tags = [administrator]}, - Node = rabbit_shovel_mgmt:get_shovel_node(VHost, Name, {}, #context{user = User}), - ?assertEqual(undefined, Node). - -get_shovel_node_shovel_found(_Config) -> - VHost = <<"otherVhost">>, - Name= <<"shovel2">>, - User = #user{username="admin",tags = [administrator]}, - Node = rabbit_shovel_mgmt:get_shovel_node(VHost, Name, {}, #context{user = User}), - ?assertEqual('node2', Node). - -delete_resource_badrpc(_Config) -> - VHost = <<"/">>, - Name= <<"shovel1">>, - User = #user{username="admin",tags = [administrator]}, - Context = #context{user = User}, - ReqData = #{path => <<"/shovels/vhost/././restart">>, - bindings => #{vhost => VHost, name => Name}}, - {Reply, ReqData, Context} = rabbit_shovel_mgmt:delete_resource(ReqData, Context), - ?assertEqual(false, Reply), - - ReqData2 = #{path => <<"/shovels/vhost/./.">>, - bindings => #{vhost => VHost, name => Name}}, - {Reply, ReqData2, Context} = rabbit_shovel_mgmt:delete_resource(ReqData2, Context), - ?assertEqual(false, Reply). diff --git a/deps/rabbitmq_shovel_management/test/rabbit_shovel_mgmt_util_SUITE.erl b/deps/rabbitmq_shovel_management/test/unit_SUITE.erl similarity index 98% rename from deps/rabbitmq_shovel_management/test/rabbit_shovel_mgmt_util_SUITE.erl rename to deps/rabbitmq_shovel_management/test/unit_SUITE.erl index f4426c875792..b7ea700ed2ad 100644 --- a/deps/rabbitmq_shovel_management/test/rabbit_shovel_mgmt_util_SUITE.erl +++ b/deps/rabbitmq_shovel_management/test/unit_SUITE.erl @@ -1,5 +1,5 @@ %%% @doc Unit tests of rabbit_shovel_mgmt_util --module(rabbit_shovel_mgmt_util_SUITE). +-module(unit_SUITE). -compile([export_all, nowarn_export_all]). 
diff --git a/deps/rabbitmq_shovel_prometheus/BUILD.bazel b/deps/rabbitmq_shovel_prometheus/BUILD.bazel new file mode 100644 index 000000000000..d34bd895525a --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/BUILD.bazel @@ -0,0 +1,115 @@ +load("@rules_erlang//:eunit2.bzl", "eunit") +load("@rules_erlang//:xref2.bzl", "xref") +load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") +load("//:rabbitmq_home.bzl", "rabbitmq_home") +load("//:rabbitmq_run.bzl", "rabbitmq_run") +load( + "//:rabbitmq.bzl", + "BROKER_VERSION_REQUIREMENTS_ANY", + "RABBITMQ_DIALYZER_OPTS", + "assert_suites", + "rabbitmq_app", + "rabbitmq_integration_suite", +) +load( + ":app.bzl", + "all_beam_files", + "all_srcs", + "all_test_beam_files", + "test_suite_beam_files", +) + +APP_NAME = "rabbitmq_shovel_prometheus" + +APP_DESCRIPTION = "Prometheus extension for the Shovel plugin" + +APP_ENV = """[ +]""" + +all_srcs(name = "all_srcs") + +all_beam_files(name = "all_beam_files") + +all_test_beam_files(name = "all_test_beam_files") + +test_suite_beam_files(name = "test_suite_beam_files") + +# gazelle:erlang_app_extra_app crypto +# gazelle:erlang_app_dep rabbit +# gazelle:erlang_app_dep rabbitmq_prometheus +# gazelle:erlang_app_dep_exclude prometheus + +rabbitmq_app( + name = "erlang_app", + srcs = [":all_srcs"], + hdrs = [":public_hdrs"], + app_description = APP_DESCRIPTION, + app_env = APP_ENV, + app_extra_keys = BROKER_VERSION_REQUIREMENTS_ANY, + app_module = "rabbit_shovel_prometheus_app", + app_name = APP_NAME, + beam_files = [":beam_files"], + extra_apps = [ + "crypto", + ], + license_files = [":license_files"], + priv = [":priv"], + deps = [ + "//deps/rabbit:erlang_app", + "//deps/rabbitmq_prometheus:erlang_app", + "//deps/rabbitmq_shovel:erlang_app", + ], +) + +xref( + name = "xref", + target = ":erlang_app", +) + +plt( + name = "deps_plt", + for_target = ":erlang_app", + ignore_warnings = True, + libs = ["@rules_elixir//elixir"], # keep + plt = "//:base_plt", +) + +dialyze( + name = "dialyze", + dialyzer_opts = RABBITMQ_DIALYZER_OPTS, + plt = ":deps_plt", + target = ":erlang_app", +) + +eunit( + name = "eunit", + target = ":test_erlang_app", +) + +rabbitmq_home( + name = "broker-for-tests-home", + plugins = [ + "//deps/rabbit:erlang_app", + ":erlang_app", + ], +) + +rabbitmq_run( + name = "rabbitmq-for-tests-run", + home = ":broker-for-tests-home", +) + +rabbitmq_integration_suite( + name = "prometheus_rabbitmq_shovel_collector_SUITE", + size = "small", + additional_beam = [ + ], +) + +assert_suites() + +alias( + name = "rabbitmq_shovel_prometheus", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) diff --git a/deps/rabbitmq_shovel_prometheus/CODE_OF_CONDUCT.md b/deps/rabbitmq_shovel_prometheus/CODE_OF_CONDUCT.md new file mode 120000 index 000000000000..a3613c99f0b0 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +../../CODE_OF_CONDUCT.md \ No newline at end of file diff --git a/deps/rabbitmq_shovel_prometheus/CONTRIBUTING.md b/deps/rabbitmq_shovel_prometheus/CONTRIBUTING.md new file mode 120000 index 000000000000..f939e75f21a8 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/CONTRIBUTING.md @@ -0,0 +1 @@ +../../CONTRIBUTING.md \ No newline at end of file diff --git a/deps/rabbitmq_shovel_prometheus/LICENSE b/deps/rabbitmq_shovel_prometheus/LICENSE new file mode 100644 index 000000000000..46e08bb41d0b --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/LICENSE @@ -0,0 +1 @@ +This package is licensed under the MPL 2.0. 
For the MPL 2.0, please see LICENSE-MPL-RabbitMQ. \ No newline at end of file diff --git a/deps/rabbitmq_shovel_prometheus/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_shovel_prometheus/LICENSE-MPL-RabbitMQ new file mode 100644 index 000000000000..14e2f777f6c3 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/LICENSE-MPL-RabbitMQ @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/deps/rabbitmq_shovel_prometheus/Makefile b/deps/rabbitmq_shovel_prometheus/Makefile new file mode 100644 index 000000000000..f448bde8c6ca --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/Makefile @@ -0,0 +1,16 @@ +PROJECT = rabbitmq_shovel_prometheus +PROJECT_DESCRIPTION = Exposes rabbitmq_shovel metrics to Prometheus +PROJECT_MOD = rabbit_shovel_prometheus_app + +define PROJECT_APP_EXTRA_KEYS + {broker_version_requirements, []} +endef + +DEPS = rabbit_common rabbit rabbitmq_shovel rabbitmq_prometheus +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers eunit_formatters + +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk + +include ../../rabbitmq-components.mk +include ../../erlang.mk diff --git a/deps/rabbitmq_shovel_prometheus/README.md b/deps/rabbitmq_shovel_prometheus/README.md new file mode 100644 index 000000000000..0a1b6882f9e3 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/README.md @@ -0,0 +1,16 @@ +# RabbitMQ Shovel Prometheus + +This plugin adds Shovel metrics to prometheus + +## Installation + +This plugin ships with RabbitMQ. Like all other plugins, it must be enabled +before it can be used: + +```bash +[sudo] rabbitmq-plugins enable rabbitmq_shovel_prometheus +``` + +## License + +See [LICENSE](./LICENSE). diff --git a/deps/rabbitmq_shovel_prometheus/app.bzl b/deps/rabbitmq_shovel_prometheus/app.bzl new file mode 100644 index 000000000000..b79594dc27a4 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/app.bzl @@ -0,0 +1,89 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlang_bytecode") +load("@rules_erlang//:filegroup.bzl", "filegroup") + +def all_beam_files(name = "all_beam_files"): + filegroup( + name = "beam_files", + srcs = [":other_beam"], + ) + erlang_bytecode( + name = "other_beam", + srcs = [ + "src/rabbit_shovel_prometheus_app.erl", + "src/rabbit_shovel_prometheus_collector.erl", + "src/rabbit_shovel_prometheus_sup.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_shovel_prometheus", + dest = "ebin", + erlc_opts = "//:erlc_opts", + deps = ["@prometheus//:erlang_app"], + ) + +def all_srcs(name = "all_srcs"): + filegroup( + name = "all_srcs", + srcs = [":public_and_private_hdrs", ":srcs"], + ) + filegroup( + name = "public_and_private_hdrs", + srcs = [":private_hdrs", ":public_hdrs"], + ) + + filegroup( + name = "priv", + ) + + filegroup( + name = "srcs", + srcs = [ + "src/rabbit_shovel_prometheus_app.erl", + "src/rabbit_shovel_prometheus_collector.erl", + "src/rabbit_shovel_prometheus_sup.erl", + ], + ) + filegroup( + name = "private_hdrs", + ) + filegroup( + name = "public_hdrs", + ) + filegroup( + name = "license_files", + srcs = [ + "LICENSE", + "LICENSE-MPL-RabbitMQ", + ], + ) + +def all_test_beam_files(name = "all_test_beam_files"): + filegroup( + name = "test_beam_files", + testonly = True, + srcs = [":test_other_beam"], + ) + erlang_bytecode( + name = "test_other_beam", + testonly = True, + srcs = [ + "src/rabbit_shovel_prometheus_app.erl", + "src/rabbit_shovel_prometheus_collector.erl", + "src/rabbit_shovel_prometheus_sup.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_shovel_prometheus", + dest = "test", + erlc_opts = "//:test_erlc_opts", + deps = ["@prometheus//:erlang_app"], + ) + +def test_suite_beam_files(name = "test_suite_beam_files"): + erlang_bytecode( + name = "prometheus_rabbitmq_shovel_collector_SUITE_beam_files", + testonly = True, + srcs = ["test/prometheus_rabbitmq_shovel_collector_SUITE.erl"], + outs 
= ["test/prometheus_rabbitmq_shovel_collector_SUITE.beam"], + app_name = "rabbitmq_shovel_prometheus", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", "@prometheus//:erlang_app"], + ) diff --git a/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_app.erl b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_app.erl new file mode 100644 index 000000000000..662ff4a73b30 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_app.erl @@ -0,0 +1,27 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_shovel_prometheus_app). + +-behavior(application). + +-export([start/0, stop/0, start/2, stop/1]). + +start(normal, []) -> + {ok, _} = application:ensure_all_started(prometheus), + _ = rabbit_shovel_prometheus_collector:start(), + rabbit_shovel_prometheus_sup:start_link(). + +stop(_State) -> + _ = rabbit_shovel_prometheus_collector:stop(), + ok. + + +start() -> + _ = rabbit_shovel_prometheus_collector:start(). + +stop() -> ok. + diff --git a/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_collector.erl b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_collector.erl new file mode 100644 index 000000000000..acdc6d9df736 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_collector.erl @@ -0,0 +1,51 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_shovel_prometheus_collector). + +-behaviour(prometheus_collector). + +-export([start/0, stop/0]). +-export([deregister_cleanup/1, + collect_mf/2]). + +-import(prometheus_model_helpers, [create_mf/4]). + +%%==================================================================== +%% Collector API +%%==================================================================== + +start() -> + {ok, _} = application:ensure_all_started(prometheus), + prometheus_registry:register_collector(?MODULE). + +stop() -> + prometheus_registry:deregister_collector(?MODULE). + +deregister_cleanup(_) -> ok. + +collect_mf(_Registry, Callback) -> + Status = rabbit_shovel_status:status(500), + {StaticStatusGroups, DynamicStatusGroups} = lists:foldl(fun({_,static,{S, _}, _}, {SMap, DMap}) -> + {maps:update_with(S, fun(C) -> C + 1 end, 1, SMap), DMap}; + ({_,dynamic,{S, _}, _}, {SMap, DMap}) -> + {SMap, maps:update_with(S, fun(C) -> C + 1 end, 1, DMap)} + end, {#{}, #{}}, Status), + + Metrics = [{rabbitmq_shovel_dynamic, gauge, "Number of dynamic shovels", + [{[{status, S}], C} || {S, C} <- maps:to_list(DynamicStatusGroups)]}, + {rabbitmq_shovel_static, gauge, "Number of static shovels", + [{[{status, S}], C} || {S, C} <- maps:to_list(StaticStatusGroups)]} + ], + _ = [add_metric_family(Metric, Callback) || Metric <- Metrics], + ok. + +add_metric_family({Name, Type, Help, Metrics}, Callback) -> + Callback(create_mf(Name, Help, Type, Metrics)). 
+ +%%==================================================================== +%% Private Parts +%%==================================================================== diff --git a/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_sup.erl b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_sup.erl new file mode 100644 index 000000000000..433c016af9f7 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_sup.erl @@ -0,0 +1,20 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_shovel_prometheus_sup). + +-behaviour(supervisor). + +-export([start_link/0]). +-export([init/1]). + +start_link() -> + supervisor:start_link(?MODULE, []). + +init(_Args) -> + SupFlags = #{strategy => one_for_one, intensity => 1, period => 5}, + ChildSpecs = [], + {ok, {SupFlags, ChildSpecs}}. diff --git a/deps/rabbitmq_shovel_prometheus/test/prometheus_rabbitmq_shovel_collector_SUITE.erl b/deps/rabbitmq_shovel_prometheus/test/prometheus_rabbitmq_shovel_collector_SUITE.erl new file mode 100644 index 000000000000..3aa9efe93168 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/test/prometheus_rabbitmq_shovel_collector_SUITE.erl @@ -0,0 +1,279 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(prometheus_rabbitmq_shovel_collector_SUITE). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("prometheus/include/prometheus_model.hrl"). + +-compile(export_all). + +-define(DYN_RUNNING_METRIC(Gauge), + #'MetricFamily'{name = <<"rabbitmq_shovel_dynamic">>, + help = "Number of dynamic shovels",type = 'GAUGE', + metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, + value = <<"running">>}], + gauge = #'Gauge'{value = Gauge}, + counter = undefined,summary = undefined,untyped = undefined, + histogram = undefined,timestamp_ms = undefined}]}). + +-define(STAT_RUNNING_METRIC(Gauge), + #'MetricFamily'{name = <<"rabbitmq_shovel_static">>, + help = "Number of static shovels",type = 'GAUGE', + metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, + value = <<"running">>}], + gauge = #'Gauge'{value = Gauge}, + counter = undefined,summary = undefined,untyped = undefined, + histogram = undefined,timestamp_ms = undefined}]}). + +-define(EMPTY_DYN_METRIC, #'MetricFamily'{name = <<"rabbitmq_shovel_dynamic">>, + help = "Number of dynamic shovels",type = 'GAUGE', + metric = []}). + +-define(EMPTY_STAT_METRIC, #'MetricFamily'{name = <<"rabbitmq_shovel_static">>, + help = "Number of static shovels",type = 'GAUGE', + metric = []}). + + +all() -> + [ + {group, non_parallel_tests} + ]. + +groups() -> + [ + {non_parallel_tests, [], [ + dynamic, + static, + mix + ]} + ]. + +suite() -> + [{timetrap, {minutes, 5}}]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. 
+%% ------------------------------------------------------------------- +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, ?MODULE}, + {ignored_crashes, [ + "server_initiated_close,404", + "writer,send_failed,closed" + ]} + ]), + rabbit_ct_helpers:run_setup_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Test cases +%% ------------------------------------------------------------------- + +dynamic(Config) -> + create_dynamic_shovel(Config, <<"test">>), + running = get_shovel_status(Config, <<"test">>), + [?DYN_RUNNING_METRIC(1), ?EMPTY_STAT_METRIC] = get_metrics(Config), + create_dynamic_shovel(Config, <<"test2">>), + running = get_shovel_status(Config, <<"test2">>), + [?DYN_RUNNING_METRIC(2), ?EMPTY_STAT_METRIC] = get_metrics(Config), + clear_param(Config, <<"test">>), + clear_param(Config, <<"test2">>), + [?EMPTY_DYN_METRIC, ?EMPTY_STAT_METRIC] = get_metrics(Config), + ok. + +static(Config) -> + create_static_shovel(Config, static_shovel), + [?EMPTY_DYN_METRIC, ?STAT_RUNNING_METRIC(1)] = get_metrics(Config), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, clear_shovel, + []), + [?EMPTY_DYN_METRIC, ?EMPTY_STAT_METRIC] = get_metrics(Config), + ok. + + +mix(Config) -> + create_dynamic_shovel(Config, <<"test">>), + running = get_shovel_status(Config, <<"test">>), + create_static_shovel(Config, static_shovel), + + [?DYN_RUNNING_METRIC(1), ?STAT_RUNNING_METRIC(1)] = get_metrics(Config), + + clear_param(Config, <<"test">>), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, clear_shovel, + []), + [?EMPTY_DYN_METRIC, ?EMPTY_STAT_METRIC] = get_metrics(Config), + ok. + +%% ------------------------------------------------------------------- +%% Internal +%% ------------------------------------------------------------------- + +get_metrics(Config) -> + rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, collect_mf, + [default, rabbit_shovel_prometheus_collector]). 
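For context on the `static/1` and `mix/1` cases above: they install a static shovel through the `rabbitmq_shovel` application environment (see `create_static_shovel/2` further down), which is the same `shovels` structure an operator would put in `advanced.config`. A trimmed, hypothetical example of that shape:

```erlang
%% Hypothetical advanced.config entry mirroring the proplist built in
%% create_static_shovel/2 below; hosts, ports and names are placeholders.
[
 {rabbitmq_shovel,
  [{shovels,
    [{my_static_shovel,
      [{source,      [{protocol, amqp091},
                      {uris, ["amqp://source-host:5672"]},
                      {queue, <<"src">>}]},
       {destination, [{protocol, amqp091},
                      {uris, ["amqp://dest-host:5672"]},
                      {publish_fields, [{exchange, <<"my_direct">>},
                                        {routing_key, <<"from_shovel">>}]}]},
       {ack_mode, on_confirm},
       {reconnect_delay, 5}]}]}]}
].
```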
+ +create_static_shovel(Config, Name) -> + Hostname = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + Shovel = [{Name, + [{source, + [{uris, [rabbit_misc:format("amqp://~ts:~b", + [Hostname, Port])]}, + + {declarations, [ {'exchange.declare', + [ {exchange, <<"my_fanout">>}, + {type, <<"fanout">>}, + durable + ]}, + {'queue.declare', + [{arguments, + [{<<"x-message-ttl">>, long, 60000}]}]}, + {'queue.bind', + [ {exchange, <<"my_fanout">>}, + {queue, <<>>} + ]} + ]}, + {queue, <<>>}] + }, + {destination, + [ {protocol, amqp091}, + {uris, ["amqp://"]}, + {declarations, [ {'exchange.declare', + [ {exchange, <<"my_direct">>}, + {type, <<"direct">>}, + durable + ]} + ]}, + {publish_properties, [ {delivery_mode, 2} ]}, + {add_forward_headers, true}, + {publish_fields, [ {exchange, <<"my_direct">>}, + {routing_key, <<"from_shovel">>} + ]} + ]}, + {ack_mode, on_confirm}, + {reconnect_delay, 5} + + ]}], + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, setup_shovel, + [Shovel, Name]). + +setup_shovel(ShovelConfig, Name) -> + _ = application:stop(rabbitmq_shovel), + application:set_env(rabbitmq_shovel, shovels, ShovelConfig, infinity), + ok = application:start(rabbitmq_shovel), + await_shovel(Name, static). + +clear_shovel() -> + _ = application:stop(rabbitmq_shovel), + application:unset_env(rabbitmq_shovel, shovels, infinity), + ok = application:start(rabbitmq_shovel). + +make_uri(Config, Node) -> + Hostname = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_amqp), + list_to_binary(lists:flatten(io_lib:format("amqp://~ts:~b", + [Hostname, Port]))). + +create_dynamic_shovel(Config, Name) -> + Node = 0, + QueueNode = 0, + Uri = make_uri(Config, QueueNode), + Value = [{<<"src-queue">>, <<"src">>}, + {<<"dest-queue">>, <<"dest">>}], + ok = rabbit_ct_broker_helpers:rpc( + Config, + Node, + rabbit_runtime_parameters, + set, [ + <<"/">>, <<"shovel">>, Name, [{<<"src-uri">>, Uri}, + {<<"dest-uri">>, [Uri]} | + Value], none]), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, await_shovel, + [Name, dynamic]). + +await_shovel(Name, Type) -> + Ret = await(fun() -> + Status = shovels_from_status(running, Type), + lists:member(Name, Status) + end, 30_000), + Ret. + +shovels_from_status(ExpectedState, dynamic) -> + S = rabbit_shovel_status:status(), + [N || {{<<"/">>, N}, dynamic, {State, _}, _} <- S, State == ExpectedState]; +shovels_from_status(ExpectedState, static) -> + S = rabbit_shovel_status:status(), + [N || {N, static, {State, _}, _} <- S, State == ExpectedState]. + +get_shovel_status(Config, Name) -> + get_shovel_status(Config, 0, Name). + +get_shovel_status(Config, Node, Name) -> + S = rabbit_ct_broker_helpers:rpc( + Config, Node, rabbit_shovel_status, lookup, [{<<"/">>, Name}]), + case S of + not_found -> + not_found; + _ -> + {Status, Info} = proplists:get_value(info, S), + proplists:get_value(blocked_status, Info, Status) + end. + +await(Pred) -> + case Pred() of + true -> ok; + false -> timer:sleep(100), + await(Pred) + end. + +await(_Pred, Timeout) when Timeout =< 0 -> + error(await_timeout); +await(Pred, Timeout) -> + case Pred() of + true -> ok; + Other when Timeout =< 100 -> + error({await_timeout, Other}); + _ -> timer:sleep(100), + await(Pred, Timeout - 100) + end. + +clear_param(Config, Name) -> + clear_param(Config, 0, Name). 
+ +clear_param(Config, Node, Name) -> + rabbit_ct_broker_helpers:rpc(Config, Node, + rabbit_runtime_parameters, clear, [<<"/">>, <<"shovel">>, Name, <<"acting-user">>]). + +-define(PD_KEY, metric_families). +collect_mf(Registry, Collector) -> + put(?PD_KEY, []), + Collector:collect_mf(Registry, fun(MF) -> put(?PD_KEY, [MF | get(?PD_KEY)]) end), + MFs = lists:reverse(get(?PD_KEY)), + erase(?PD_KEY), + MFs. diff --git a/deps/rabbitmq_stomp/.gitignore b/deps/rabbitmq_stomp/.gitignore index 14dbfd18d369..1ef1becabe4e 100644 --- a/deps/rabbitmq_stomp/.gitignore +++ b/deps/rabbitmq_stomp/.gitignore @@ -1,23 +1,3 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/debug/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr - -rabbitmq_stomp.d - # Python testsuite. .python-version *.pyc diff --git a/deps/rabbitmq_stomp/BUILD.bazel b/deps/rabbitmq_stomp/BUILD.bazel index 4e0e6c1411e8..e8193b124257 100644 --- a/deps/rabbitmq_stomp/BUILD.bazel +++ b/deps/rabbitmq_stomp/BUILD.bazel @@ -89,7 +89,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) diff --git a/deps/rabbitmq_stomp/Makefile b/deps/rabbitmq_stomp/Makefile index 753ffcf48981..0b14a1f95ab3 100644 --- a/deps/rabbitmq_stomp/Makefile +++ b/deps/rabbitmq_stomp/Makefile @@ -33,6 +33,8 @@ endef DEPS = ranch rabbit_common rabbit amqp_client TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers +PLT_APPS += rabbitmqctl elixir + DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_stomp/README.md b/deps/rabbitmq_stomp/README.md index 922793ba6611..31dd46034114 100644 --- a/deps/rabbitmq_stomp/README.md +++ b/deps/rabbitmq_stomp/README.md @@ -12,7 +12,3 @@ it, use [rabbitmq-plugins](https://www.rabbitmq.com/man/rabbitmq-plugins.1.man.h ## Documentation [RabbitMQ STOMP plugin documentation](https://www.rabbitmq.com/stomp.html). - -## Continuous Integration - -[![Build Status](https://travis-ci.org/rabbitmq/rabbitmq-stomp.svg?branch=master)](https://travis-ci.org/rabbitmq/rabbitmq-stomp) diff --git a/deps/rabbitmq_stomp/app.bzl b/deps/rabbitmq_stomp/app.bzl index 03e7b28eb2d5..90c3f0da04a1 100644 --- a/deps/rabbitmq_stomp/app.bzl +++ b/deps/rabbitmq_stomp/app.bzl @@ -201,7 +201,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): testonly = True, srcs = ["test/topic_SUITE.erl"], outs = ["test/topic_SUITE.beam"], - hdrs = ["include/rabbit_stomp.hrl", "include/rabbit_stomp_frame.hrl", "include/rabbit_stomp_headers.hrl"], + hdrs = ["include/rabbit_stomp.hrl", "include/rabbit_stomp_frame.hrl"], app_name = "rabbitmq_stomp", erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app"], diff --git a/deps/rabbitmq_stomp/include/rabbit_stomp.hrl b/deps/rabbitmq_stomp/include/rabbit_stomp.hrl index dbafd518f9a2..c7e89ddf8cc1 100644 --- a/deps/rabbitmq_stomp/include/rabbit_stomp.hrl +++ b/deps/rabbitmq_stomp/include/rabbit_stomp.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% -record(stomp_configuration, {default_login, @@ -39,6 +39,6 @@ ssl_cipher, ssl_hash]). --define(STOMP_GUIDE_URL, <<"https://rabbitmq.com/stomp.html">>). +-define(STOMP_GUIDE_URL, <<"https://rabbitmq.com/docs/stomp">>). -define(DEFAULT_MAX_FRAME_SIZE, 4 * 1024 * 1024). diff --git a/deps/rabbitmq_stomp/include/rabbit_stomp_frame.hrl b/deps/rabbitmq_stomp/include/rabbit_stomp_frame.hrl index 47e1f9c977b0..5859ef662ef2 100644 --- a/deps/rabbitmq_stomp/include/rabbit_stomp_frame.hrl +++ b/deps/rabbitmq_stomp/include/rabbit_stomp_frame.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -record(stomp_frame, {command, headers, body_iolist}). diff --git a/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl b/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl index 1fe8d382acc1..a0283dea2044 100644 --- a/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl +++ b/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -define(HEADER_ACCEPT_VERSION, "accept-version"). @@ -27,7 +27,10 @@ -define(HEADER_PERSISTENT, "persistent"). -define(HEADER_PREFETCH_COUNT, "prefetch-count"). -define(HEADER_X_STREAM_OFFSET, "x-stream-offset"). +-define(HEADER_X_STREAM_FILTER, "x-stream-filter"). +-define(HEADER_X_STREAM_MATCH_UNFILTERED, "x-stream-match-unfiltered"). -define(HEADER_PRIORITY, "priority"). +-define(HEADER_X_PRIORITY, "x-priority"). -define(HEADER_RECEIPT, "receipt"). -define(HEADER_REDELIVERED, "redelivered"). -define(HEADER_REPLY_TO, "reply-to"). @@ -50,6 +53,7 @@ -define(HEADER_X_MESSAGE_TTL, "x-message-ttl"). -define(HEADER_X_QUEUE_NAME, "x-queue-name"). -define(HEADER_X_QUEUE_TYPE, "x-queue-type"). +-define(HEADER_X_STREAM_FILTER_SIZE_BYTES, "x-stream-filter-size-bytes"). -define(MESSAGE_ID_SEPARATOR, "@@"). @@ -67,7 +71,8 @@ ?HEADER_X_MAX_LENGTH_BYTES, ?HEADER_X_MAX_PRIORITY, ?HEADER_X_MESSAGE_TTL, - ?HEADER_X_QUEUE_TYPE + ?HEADER_X_QUEUE_TYPE, + ?HEADER_X_STREAM_FILTER_SIZE_BYTES ]). -define(HEADER_PARAMS, [ diff --git a/deps/rabbitmq_stomp/priv/schema/rabbitmq_stomp.schema b/deps/rabbitmq_stomp/priv/schema/rabbitmq_stomp.schema index 341ee26ba954..3bb653224f8d 100644 --- a/deps/rabbitmq_stomp/priv/schema/rabbitmq_stomp.schema +++ b/deps/rabbitmq_stomp/priv/schema/rabbitmq_stomp.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% ========================================================================== diff --git a/deps/rabbitmq_stomp/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand.erl b/deps/rabbitmq_stomp/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand.erl index c67524712619..c676638a92db 100644 --- a/deps/rabbitmq_stomp/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand.erl +++ b/deps/rabbitmq_stomp/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand'). diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp.erl b/deps/rabbitmq_stomp/src/rabbit_stomp.erl index 2ab1218b2a39..c514e5cab100 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp). diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_client_sup.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_client_sup.erl index e50989d60012..b039030b11a1 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_client_sup.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_client_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp_client_sup). @@ -15,12 +15,16 @@ start_link(Ref, _Transport, Configuration) -> {ok, SupPid} = supervisor:start_link(?MODULE, []), + ConnectionHelperSupFlags = #{strategy => one_for_one, + intensity => 10, + period => 10, + auto_shutdown => any_significant}, {ok, HelperPid} = supervisor:start_child( SupPid, #{ id => rabbit_stomp_heartbeat_sup, - start => {rabbit_connection_helper_sup, start_link, []}, + start => {rabbit_connection_helper_sup, start_link, [ConnectionHelperSupFlags]}, restart => transient, significant => true, shutdown => infinity, diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_connection_info.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_connection_info.erl index ce5ffe4477d1..c8632419e522 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_connection_info.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_connection_info.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp_connection_info). 
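The `rabbit_stomp_client_sup` change above leans on OTP 24+ supervisor features: the connection helper supervisor is started as a *significant*, transient child, and the parent supervisor uses `auto_shutdown => any_significant`, so the whole per-connection supervision tree winds down once that child terminates normally. A minimal sketch of the pattern, with hypothetical module and child names:

```erlang
%% Sketch of the auto_shutdown/significant-child pattern used above (OTP 24+).
%% helper_sup is a hypothetical child module; starting it for real would
%% require that module to exist.
-module(conn_sup_sketch).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link(?MODULE, []).

init([]) ->
    SupFlags = #{strategy      => one_for_one,
                 intensity     => 10,
                 period        => 10,
                 %% shut this supervisor down when any significant child exits
                 auto_shutdown => any_significant},
    Helper = #{id          => helper_sup,
               start       => {helper_sup, start_link, []},
               restart     => transient,    %% significant is only valid for
               significant => true,         %% transient/temporary children
               shutdown    => infinity,
               type        => supervisor},
    {ok, {SupFlags, [Helper]}}.
```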
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_frame.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_frame.erl index 5fe08e81ef84..c3c92b90cd97 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_frame.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_frame.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp_frame). @@ -15,7 +15,7 @@ boolean_header/2, boolean_header/3, integer_header/2, integer_header/3, binary_header/2, binary_header/3]). --export([stream_offset_header/2]). +-export([stream_offset_header/1, stream_filter_header/1]). -export([serialize/1, serialize/2]). initial_state() -> none. @@ -211,20 +211,33 @@ binary_header(F, K) -> binary_header(F, K, D) -> default_value(binary_header(F, K), D). -stream_offset_header(F, D) -> - case binary_header(F, ?HEADER_X_STREAM_OFFSET, D) of - <<"first">> -> +stream_offset_header(F) -> + case binary_header(F, ?HEADER_X_STREAM_OFFSET) of + {ok, <<"first">>} -> {longstr, <<"first">>}; - <<"last">> -> + {ok, <<"last">>} -> {longstr, <<"last">>}; - <<"next">> -> + {ok, <<"next">>} -> {longstr, <<"next">>}; - <<"offset=", OffsetValue/binary>> -> + {ok, <<"offset=", OffsetValue/binary>>} -> {long, binary_to_integer(OffsetValue)}; - <<"timestamp=", TimestampValue/binary>> -> + {ok, <<"timestamp=", TimestampValue/binary>>} -> {timestamp, binary_to_integer(TimestampValue)}; _ -> - D + not_found + end. + +stream_filter_header(F) -> + case binary_header(F, ?HEADER_X_STREAM_FILTER) of + {ok, Str} -> + {array, lists:reverse( + lists:foldl(fun(V, Acc) -> + [{longstr, V}] ++ Acc + end, + [], + binary:split(Str, <<",">>, [global])))}; + not_found -> + not_found end. serialize(Frame) -> diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_internal_event_handler.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_internal_event_handler.erl index 7aaa06ee6f67..5de1b3730b04 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_internal_event_handler.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_internal_event_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp_internal_event_handler). diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl index 524d427ffe12..50a1b68fabf8 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp_processor). @@ -19,7 +19,6 @@ -export([info/2]). -include_lib("amqp_client/include/amqp_client.hrl"). 
--include_lib("amqp_client/include/rabbit_routing_prefixes.hrl"). -include("rabbit_stomp_frame.hrl"). -include("rabbit_stomp.hrl"). -include("rabbit_stomp_headers.hrl"). @@ -46,7 +45,7 @@ adapter_name(State) -> #stomp_configuration{}, {SendFun, AdapterInfo, SSLLoginName, PeerAddr}) -> #proc_state{} - when SendFun :: fun((atom(), binary()) -> term()), + when SendFun :: fun((binary()) -> term()), AdapterInfo :: #amqp_adapter_info{}, SSLLoginName :: atom() | binary(), PeerAddr :: inet:ip_address(). @@ -506,7 +505,7 @@ tidy_canceled_subscription(ConsumerTag, _Subscription, tidy_canceled_subscription(ConsumerTag, #subscription{dest_hdr = DestHdr}, Frame, State = #proc_state{subscriptions = Subs}) -> Subs1 = maps:remove(ConsumerTag, Subs), - {ok, Dest} = rabbit_routing_util:parse_endpoint(DestHdr), + {ok, Dest} = rabbit_routing_parser:parse_endpoint(DestHdr), maybe_delete_durable_sub(Dest, Frame, State#proc_state{subscriptions = Subs1}). maybe_delete_durable_sub({topic, Name}, Frame, @@ -528,7 +527,7 @@ maybe_delete_durable_sub(_Destination, _Frame, State) -> with_destination(Command, Frame, State, Fun) -> case rabbit_stomp_frame:header(Frame, ?HEADER_DESTINATION) of {ok, DestHdr} -> - case rabbit_routing_util:parse_endpoint(DestHdr) of + case rabbit_routing_parser:parse_endpoint(DestHdr) of {ok, Destination} -> case Fun(Destination, DestHdr, Frame, State) of {error, invalid_endpoint} -> @@ -676,13 +675,7 @@ do_subscribe(Destination, DestHdr, Frame, {stop, normal, close_connection(State)}; error -> ExchangeAndKey = parse_routing(Destination, DfltTopicEx), - StreamOffset = rabbit_stomp_frame:stream_offset_header(Frame, undefined), - Arguments = case StreamOffset of - undefined -> - []; - {Type, Value} -> - [{<<"x-stream-offset">>, Type, Value}] - end, + Arguments = subscribe_arguments(Frame), try amqp_channel:subscribe(Channel, #'basic.consume'{ @@ -722,6 +715,51 @@ do_subscribe(Destination, DestHdr, Frame, Err end. +subscribe_arguments(Frame) -> + subscribe_arguments([?HEADER_X_STREAM_OFFSET, + ?HEADER_X_STREAM_FILTER, + ?HEADER_X_STREAM_MATCH_UNFILTERED, + ?HEADER_X_PRIORITY], Frame, []). + +subscribe_arguments([], _Frame , Acc) -> + Acc; +subscribe_arguments([K | T], Frame, Acc0) -> + Acc1 = subscribe_argument(K, Frame, Acc0), + subscribe_arguments(T, Frame, Acc1). + +subscribe_argument(?HEADER_X_STREAM_OFFSET, Frame, Acc) -> + StreamOffset = rabbit_stomp_frame:stream_offset_header(Frame), + case StreamOffset of + not_found -> + Acc; + {OffsetType, OffsetValue} -> + [{list_to_binary(?HEADER_X_STREAM_OFFSET), OffsetType, OffsetValue}] ++ Acc + end; +subscribe_argument(?HEADER_X_STREAM_FILTER, Frame, Acc) -> + StreamFilter = rabbit_stomp_frame:stream_filter_header(Frame), + case StreamFilter of + not_found -> + Acc; + {FilterType, FilterValue} -> + [{list_to_binary(?HEADER_X_STREAM_FILTER), FilterType, FilterValue}] ++ Acc + end; +subscribe_argument(?HEADER_X_STREAM_MATCH_UNFILTERED, Frame, Acc) -> + MatchUnfiltered = rabbit_stomp_frame:boolean_header(Frame, ?HEADER_X_STREAM_MATCH_UNFILTERED), + case MatchUnfiltered of + {ok, MU} -> + [{list_to_binary(?HEADER_X_STREAM_MATCH_UNFILTERED), bool, MU}] ++ Acc; + not_found -> + Acc + end; +subscribe_argument(?HEADER_X_PRIORITY, Frame, Acc) -> + Priority = rabbit_stomp_frame:integer_header(Frame, ?HEADER_X_PRIORITY), + case Priority of + {ok, P} -> + [{list_to_binary(?HEADER_X_PRIORITY), byte, P}] ++ Acc; + not_found -> + Acc + end. 
+ check_subscription_access(Destination = {topic, _Topic}, #proc_state{auth_login = _User, connection = Connection, @@ -872,7 +910,7 @@ ensure_reply_to(Frame = #stomp_frame{headers = Headers}, State) -> not_found -> {Frame, State}; {ok, ReplyTo} -> - {ok, Destination} = rabbit_routing_util:parse_endpoint(ReplyTo), + {ok, Destination} = rabbit_routing_parser:parse_endpoint(ReplyTo), case rabbit_routing_util:dest_temp_queue(Destination) of none -> {Frame, State}; @@ -1096,7 +1134,7 @@ ensure_endpoint(source, EndPoint, {_, _, Headers, _} = Frame, Channel, State) -> Id = build_subscription_id(Frame), % Note: we discard the exchange here so there's no need to use % the default_topic_exchange configuration key - {_, Name} = rabbit_routing_util:parse_routing(EndPoint), + {_, Name} = rabbit_routing_parser:parse_routing(EndPoint), list_to_binary(rabbit_stomp_util:subscription_queue_name(Name, Id, Frame)) end }] ++ rabbit_stomp_util:build_params(EndPoint, Headers), @@ -1174,7 +1212,7 @@ send_frame(Command, Headers, BodyFragments, State) -> send_frame(Frame, State = #proc_state{send_fun = SendFun, trailing_lf = TrailingLF}) -> - SendFun(async, rabbit_stomp_frame:serialize(Frame, TrailingLF)), + SendFun(rabbit_stomp_frame:serialize(Frame, TrailingLF)), State. send_error_frame(Message, ExtraHeaders, Format, Args, State) -> @@ -1200,7 +1238,7 @@ additional_info(Key, proplists:get_value(Key, AddInfo). parse_routing(Destination, DefaultTopicExchange) -> - {Exchange0, RoutingKey} = rabbit_routing_util:parse_routing(Destination), + {Exchange0, RoutingKey} = rabbit_routing_parser:parse_routing(Destination), Exchange1 = maybe_apply_default_topic_exchange(Exchange0, DefaultTopicExchange), {Exchange1, RoutingKey}. diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl index b5f3e4c04625..ccf7af95f24a 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp_reader). 
@@ -63,51 +63,55 @@ close_connection(Pid, Reason) -> init([SupHelperPid, Ref, Configuration]) -> process_flag(trap_exit, true), - {ok, Sock} = rabbit_networking:handshake(Ref, - application:get_env(rabbitmq_stomp, proxy_protocol, false)), - RealSocket = rabbit_net:unwrap_socket(Sock), - - case rabbit_net:connection_string(Sock, inbound) of - {ok, ConnStr} -> - ConnName = rabbit_data_coercion:to_binary(ConnStr), - ProcInitArgs = processor_args(Configuration, Sock), - ProcState = rabbit_stomp_processor:initial_state(Configuration, - ProcInitArgs), - - rabbit_log_connection:info("accepting STOMP connection ~tp (~ts)", - [self(), ConnName]), - - ParseState = rabbit_stomp_frame:initial_state(), - _ = register_resource_alarm(), - - LoginTimeout = application:get_env(rabbitmq_stomp, login_timeout, 10_000), - MaxFrameSize = application:get_env(rabbitmq_stomp, max_frame_size, ?DEFAULT_MAX_FRAME_SIZE), - erlang:send_after(LoginTimeout, self(), login_timeout), - - gen_server2:enter_loop(?MODULE, [], - rabbit_event:init_stats_timer( - run_socket(control_throttle( - #reader_state{socket = RealSocket, - conn_name = ConnName, - parse_state = ParseState, - processor_state = ProcState, - heartbeat_sup = SupHelperPid, - heartbeat = {none, none}, - max_frame_size = MaxFrameSize, - current_frame_size = 0, - state = running, - conserve_resources = false, - recv_outstanding = false})), #reader_state.stats_timer), - {backoff, 1000, 1000, 10000}); - {error, enotconn} -> - rabbit_net:fast_close(RealSocket), - terminate(shutdown, undefined); + ProxyProtocolEnabled = application:get_env(rabbitmq_stomp, proxy_protocol, false), + case rabbit_networking:handshake(Ref, ProxyProtocolEnabled) of {error, Reason} -> - rabbit_net:fast_close(RealSocket), - terminate({network_error, Reason}, undefined) + rabbit_log_connection:error( + "STOMP could not establish connection: ~s", [Reason]), + {stop, Reason}; + {ok, Sock} -> + RealSocket = rabbit_net:unwrap_socket(Sock), + case rabbit_net:connection_string(Sock, inbound) of + {ok, ConnStr} -> + ConnName = rabbit_data_coercion:to_binary(ConnStr), + ProcInitArgs = processor_args(Configuration, Sock), + ProcState = rabbit_stomp_processor:initial_state(Configuration, + ProcInitArgs), + + rabbit_log_connection:info("accepting STOMP connection ~tp (~ts)", + [self(), ConnName]), + + ParseState = rabbit_stomp_frame:initial_state(), + _ = register_resource_alarm(), + + LoginTimeout = application:get_env(rabbitmq_stomp, login_timeout, 10_000), + MaxFrameSize = application:get_env(rabbitmq_stomp, max_frame_size, ?DEFAULT_MAX_FRAME_SIZE), + erlang:send_after(LoginTimeout, self(), login_timeout), + + gen_server2:enter_loop(?MODULE, [], + rabbit_event:init_stats_timer( + run_socket(control_throttle( + #reader_state{socket = RealSocket, + conn_name = ConnName, + parse_state = ParseState, + processor_state = ProcState, + heartbeat_sup = SupHelperPid, + heartbeat = {none, none}, + max_frame_size = MaxFrameSize, + current_frame_size = 0, + state = running, + conserve_resources = false, + recv_outstanding = false})), #reader_state.stats_timer), + {backoff, 1000, 1000, 10000}); + {error, enotconn} -> + rabbit_net:fast_close(RealSocket), + terminate(shutdown, undefined); + {error, Reason} -> + rabbit_net:fast_close(RealSocket), + terminate({network_error, Reason}, undefined) + end end. 
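The rewritten `init/1` above reads its knobs from the `rabbitmq_stomp` application environment: `proxy_protocol` (default `false`), `login_timeout` (default 10000 ms), and `max_frame_size` (default `?DEFAULT_MAX_FRAME_SIZE`, 4 MiB per the header change earlier in this patch). A hypothetical `advanced.config` fragment exercising those keys might look like:

```erlang
%% Hypothetical advanced.config fragment; values are illustrative, the key
%% names and defaults come from the application:get_env/3 calls in init/1 above.
[
 {rabbitmq_stomp,
  [{proxy_protocol, true},
   {login_timeout,  15000},                 %% milliseconds
   {max_frame_size, 8 * 1024 * 1024}]}      %% bytes
].
```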
- handle_call({info, InfoItems}, _From, State) -> Infos = lists:map( fun(InfoItem) -> @@ -140,12 +144,6 @@ handle_info({Tag, Sock}, State=#reader_state{socket=Sock}) handle_info({Tag, Sock, Reason}, State=#reader_state{socket=Sock}) when Tag =:= tcp_error; Tag =:= ssl_error -> {stop, {inet_error, Reason}, State}; -handle_info({inet_reply, _Sock, {error, closed}}, State) -> - {stop, normal, State}; -handle_info({inet_reply, _, ok}, State) -> - {noreply, State, hibernate}; -handle_info({inet_reply, _, Status}, State) -> - {stop, Status, State}; handle_info(emit_stats, State) -> {noreply, emit_stats(State), hibernate}; handle_info({conserve_resources, Conserve}, State) -> @@ -259,7 +257,7 @@ process_received_bytes(Bytes, log_reason({network_error, {frame_too_big, {FrameLength1, MaxFrameSize}}}, State), {stop, normal, State}; false -> - case rabbit_stomp_processor:process_frame(Frame, ProcState) of + try rabbit_stomp_processor:process_frame(Frame, ProcState) of {ok, NewProcState, Conn} -> PS = rabbit_stomp_frame:initial_state(), NextState = maybe_block(State, Frame), @@ -271,6 +269,10 @@ process_received_bytes(Bytes, {stop, Reason, NewProcState} -> {stop, Reason, processor_state(NewProcState, State)} + catch exit:{send_failed, closed} -> + {stop, normal, State}; + exit:{send_failed, Reason} -> + {stop, Reason, State} end end; {error, Reason} -> @@ -404,16 +406,13 @@ log_tls_alert(Alert, ConnName) -> processor_args(Configuration, Sock) -> RealSocket = rabbit_net:unwrap_socket(Sock), - SendFun = fun (sync, IoData) -> - %% no messages emitted - catch rabbit_net:send(RealSocket, IoData); - (async, IoData) -> - %% {inet_reply, _, _} will appear soon - %% We ignore certain errors here, as we will be - %% receiving an asynchronous notification of the - %% same (or a related) fault shortly anyway. See - %% bug 21365. - catch rabbit_net:port_command(RealSocket, IoData) + SendFun = fun(IoData) -> + case rabbit_net:send(RealSocket, IoData) of + ok -> + ok; + {error, Reason} -> + exit({send_failed, Reason}) + end end, {ok, {PeerAddr, _PeerPort}} = rabbit_net:sockname(RealSocket), {SendFun, adapter_info(Sock), diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_sup.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_sup.erl index ad016664c472..d77073efae73 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_sup.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp_sup). @@ -26,10 +26,7 @@ init([{Listeners, SslListeners0}, Configuration]) -> [] -> {none, 0, []}; _ -> {rabbit_networking:ensure_ssl(), application:get_env(rabbitmq_stomp, num_ssl_acceptors, 10), - case rabbit_networking:poodle_check('STOMP') of - ok -> SslListeners0; - danger -> [] - end} + SslListeners0} end, Flags = #{ strategy => one_for_all, diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_util.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_util.erl index 25c818a93123..5c20490091d1 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_util.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp_util). @@ -296,8 +296,12 @@ build_argument(?HEADER_X_STREAM_MAX_SEGMENT_SIZE_BYTES, Val) -> {list_to_binary(?HEADER_X_STREAM_MAX_SEGMENT_SIZE_BYTES), long, list_to_integer(string:strip(Val))}; build_argument(?HEADER_X_QUEUE_TYPE, Val) -> - {list_to_binary(?HEADER_X_QUEUE_TYPE), longstr, - list_to_binary(string:strip(Val))}. + {list_to_binary(?HEADER_X_QUEUE_TYPE), longstr, + list_to_binary(string:strip(Val))}; +build_argument(?HEADER_X_STREAM_FILTER_SIZE_BYTES, Val) -> + {list_to_binary(?HEADER_X_STREAM_FILTER_SIZE_BYTES), long, + list_to_integer(string:strip(Val))}. + build_params(EndPoint, Headers) -> Params = lists:foldl(fun({K, V}, Acc) -> diff --git a/deps/rabbitmq_stomp/test/command_SUITE.erl b/deps/rabbitmq_stomp/test/command_SUITE.erl index f736593d648b..5b39aeab8d6c 100644 --- a/deps/rabbitmq_stomp/test/command_SUITE.erl +++ b/deps/rabbitmq_stomp/test/command_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(command_SUITE). diff --git a/deps/rabbitmq_stomp/test/config_schema_SUITE.erl b/deps/rabbitmq_stomp/test/config_schema_SUITE.erl index 2565be2b2660..32259b552eb8 100644 --- a/deps/rabbitmq_stomp/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_stomp/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_stomp/test/connections_SUITE.erl b/deps/rabbitmq_stomp/test/connections_SUITE.erl index 19fe5c2aaa41..3bcac3e5a7a7 100644 --- a/deps/rabbitmq_stomp/test/connections_SUITE.erl +++ b/deps/rabbitmq_stomp/test/connections_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(connections_SUITE). @@ -10,7 +10,6 @@ -import(rabbit_misc, [pget/2]). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_stomp_frame.hrl"). -define(DESTINATION, "/queue/bulk-test"). diff --git a/deps/rabbitmq_stomp/test/frame_SUITE.erl b/deps/rabbitmq_stomp/test/frame_SUITE.erl index 1f5470c2c641..2688fec246ee 100644 --- a/deps/rabbitmq_stomp/test/frame_SUITE.erl +++ b/deps/rabbitmq_stomp/test/frame_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. 
or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(frame_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_stomp_frame.hrl"). @@ -39,7 +38,8 @@ all() -> header_value_with_colon, headers_escaping_roundtrip, headers_escaping_roundtrip_without_trailing_lf, - stream_offset_header + stream_offset_header, + stream_filter_header ]. parse_simple_frame(_) -> @@ -170,17 +170,35 @@ stream_offset_header(_) -> {{"x-stream-offset", "next"}, {longstr, <<"next">>}}, {{"x-stream-offset", "offset=5000"}, {long, 5000}}, {{"x-stream-offset", "timestamp=1000"}, {timestamp, 1000}}, - {{"x-stream-offset", "foo"}, undefined}, - {{"some-header", "some value"}, undefined} + {{"x-stream-offset", "foo"}, not_found}, + {{"some-header", "some value"}, not_found} ], lists:foreach(fun({Header, Expected}) -> ?assertEqual( Expected, - rabbit_stomp_frame:stream_offset_header(#stomp_frame{headers = [Header]}, undefined) + rabbit_stomp_frame:stream_offset_header(#stomp_frame{headers = [Header]}) ) end, TestCases). +stream_filter_header(_) -> + TestCases = [ + {{"x-stream-filter", "banana"}, {array, [{longstr, <<"banana">>}]}}, + {{"x-stream-filter", "banana,apple"}, {array, [{longstr, <<"banana">>}, + {longstr, <<"apple">>}]}}, + {{"x-stream-filter", "banana,apple,orange"}, {array, [{longstr, <<"banana">>}, + {longstr, <<"apple">>}, + {longstr, <<"orange">>}]}}, + {{"some-header", "some value"}, not_found} + ], + + lists:foreach(fun({Header, Expected}) -> + ?assertEqual( + Expected, + rabbit_stomp_frame:stream_filter_header(#stomp_frame{headers = [Header]}) + ) + end, TestCases). + test_frame_serialization(Expected, TrailingLF) -> {ok, Frame, _} = parse(Expected), {ok, Val} = rabbit_stomp_frame:header(Frame, "head\r:\ner"), diff --git a/deps/rabbitmq_stomp/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_stomp/test/proxy_protocol_SUITE.erl index b71d6c4c2a80..8e72442355a4 100644 --- a/deps/rabbitmq_stomp/test/proxy_protocol_SUITE.erl +++ b/deps/rabbitmq_stomp/test/proxy_protocol_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(proxy_protocol_SUITE). @@ -35,9 +35,9 @@ init_per_suite(Config) -> {rmq_certspwd, "bunnychow"}, {rabbitmq_ct_tls_verify, verify_none} ]), - MqttConfig = stomp_config(), + StompConfig = stomp_config(), rabbit_ct_helpers:run_setup_steps(Config1, - [ fun(Conf) -> merge_app_env(MqttConfig, Conf) end ] ++ + [ fun(Conf) -> merge_app_env(StompConfig, Conf) end ] ++ rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). @@ -111,8 +111,8 @@ connection_name() -> {_, Name} = lists:keyfind(name, 1, Values), Name. -merge_app_env(MqttConfig, Config) -> - rabbit_ct_helpers:merge_app_env(Config, MqttConfig). +merge_app_env(StompConfig, Config) -> + rabbit_ct_helpers:merge_app_env(Config, StompConfig). 
stomp_connect_frame() -> <<"CONNECT\n", diff --git a/deps/rabbitmq_stomp/test/python_SUITE.erl b/deps/rabbitmq_stomp/test/python_SUITE.erl index 6409c402490f..945421cf0a31 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE.erl +++ b/deps/rabbitmq_stomp/test/python_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(python_SUITE). diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py index d21c3c432d38..f138bb0103eb 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/amqp_headers.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/amqp_headers.py index 0a161164f16d..92a0af0d4347 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/amqp_headers.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/amqp_headers.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import pika diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py index 6ea54de275b6..4b13463093f2 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py @@ -2,7 +2,7 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -# Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +# Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_disconnect.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_disconnect.py index b1f0c49f50d9..78b6ff501527 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_disconnect.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_disconnect.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/destinations.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/destinations.py index 256ebba1b6cb..727307bfee32 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/destinations.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/destinations.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/errors.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/errors.py index 172ce43d5f7a..d066ca221c16 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/errors.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/errors.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/implicit_connect.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/implicit_connect.py index 4fce35c1856c..ed0987930adc 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/implicit_connect.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/implicit_connect.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py index 99947f58e320..2ae6569182d2 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py index 4826b828df84..dbd5562f64c6 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/redelivered.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/redelivered.py index 4adcfe8a2eaa..71f5c1009531 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/redelivered.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/redelivered.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/reliability.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/reliability.py index e4396b3249e1..a2153a4da173 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/reliability.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/reliability.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import base diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_runner.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_runner.py index 804e6b17e55d..e43a2a7cf4f1 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_runner.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_runner.py @@ -4,7 +4,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_util.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_util.py index e0b8171e6558..5085a2251883 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_util.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_util.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import subprocess diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/tls_connect_disconnect.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/tls_connect_disconnect.py index 883ae1626f85..357e94d8d2fa 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/tls_connect_disconnect.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/tls_connect_disconnect.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. 
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/tls_runner.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/tls_runner.py index 10e829667d61..6bec8feb67ac 100755 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/tls_runner.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/tls_runner.py @@ -4,7 +4,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import test_runner diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/topic_permissions.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/topic_permissions.py index ef39c3254acb..5141658e5e15 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/topic_permissions.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/topic_permissions.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import base diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/transactions.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/transactions.py index 43f9f88f4025..20e23bc0c584 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/transactions.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/transactions.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/unsubscribe.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/unsubscribe.py index 5388e44d20a0..bca4a02f986d 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/unsubscribe.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/unsubscribe.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py index 5f0d92bd5cda..be9f43cb0d79 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. 
All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import unittest diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_quorum.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_quorum.py index 2dcf3094b2da..f808bd217c05 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_quorum.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_quorum.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import pika diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_stream.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_stream.py index eb626aeef6be..f10334411f44 100644 --- a/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_stream.py +++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_stream.py @@ -2,7 +2,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## import pika @@ -27,6 +27,8 @@ def test_stream_queue(self): 'x-queue-type': 'stream', 'x-max-age' : '10h', 'x-stream-max-segment-size-bytes' : 1048576, + 'x-stream-filter-size-bytes' : 32, + 'x-stream-match-unfiltered' : True, 'durable': True, 'auto-delete': False, 'id': 1234, diff --git a/deps/rabbitmq_stomp/test/src/rabbit_stomp_client.erl b/deps/rabbitmq_stomp/test/src/rabbit_stomp_client.erl index e1c1b44137dc..102d52b6a615 100644 --- a/deps/rabbitmq_stomp/test/src/rabbit_stomp_client.erl +++ b/deps/rabbitmq_stomp/test/src/rabbit_stomp_client.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% The stupidest client imaginable, just for testing. diff --git a/deps/rabbitmq_stomp/test/src/rabbit_stomp_publish_test.erl b/deps/rabbitmq_stomp/test/src/rabbit_stomp_publish_test.erl index bf6c7ebb398a..ac5e1e17d2a8 100644 --- a/deps/rabbitmq_stomp/test/src/rabbit_stomp_publish_test.erl +++ b/deps/rabbitmq_stomp/test/src/rabbit_stomp_publish_test.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stomp_publish_test). 
diff --git a/deps/rabbitmq_stomp/test/system_SUITE.erl b/deps/rabbitmq_stomp/test/system_SUITE.erl index d38824af43c2..c583f2102d1b 100644 --- a/deps/rabbitmq_stomp/test/system_SUITE.erl +++ b/deps/rabbitmq_stomp/test/system_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(system_SUITE). @@ -17,7 +17,9 @@ -include("rabbit_stomp_headers.hrl"). -define(QUEUE, <<"TestQueue">>). +-define(QUEUE_QQ, <<"TestQueueQQ">>). -define(DESTINATION, "/amq/queue/TestQueue"). +-define(DESTINATION_QQ, "/amq/queue/TestQueueQQ"). all() -> [{group, version_to_group_name(V)} || V <- ?SUPPORTED_VERSIONS]. @@ -28,13 +30,15 @@ groups() -> publish_unauthorized_error, subscribe_error, subscribe, + subscribe_with_x_priority, unsubscribe_ack, subscribe_ack, send, delete_queue_subscribe, temp_destination_queue, temp_destination_in_send, - blank_destination_in_send + blank_destination_in_send, + stream_filtering ], [{version_to_group_name(V), [sequence], Tests} @@ -160,6 +164,44 @@ subscribe(Config) -> {ok, _Client2, _, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"), ok. +subscribe_with_x_priority(Config) -> + Version = ?config(version, Config), + StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp), + Channel = ?config(amqp_channel, Config), + ClientA = ?config(stomp_client, Config), + #'queue.declare_ok'{} = + amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE_QQ, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-single-active-consumer">>, bool, true} + ]}), + + %% subscribe and wait for receipt + rabbit_stomp_client:send( + ClientA, "SUBSCRIBE", [{"destination", ?DESTINATION_QQ}, {"receipt", "foo"}]), + {ok, _ClientA1, _, _} = stomp_receive(ClientA, "RECEIPT"), + + %% subscribe with a higher priority and wait for receipt + {ok, ClientB} = rabbit_stomp_client:connect(Version, StompPort), + rabbit_stomp_client:send( + ClientB, "SUBSCRIBE", [{"destination", ?DESTINATION_QQ}, + {"receipt", "foo"}, + {"x-priority", 10} + ]), + {ok, ClientB1, _, _} = stomp_receive(ClientB, "RECEIPT"), + + %% send from amqp + Method = #'basic.publish'{exchange = <<"">>, routing_key = ?QUEUE_QQ}, + + amqp_channel:call(Channel, Method, #amqp_msg{props = #'P_basic'{}, + payload = <<"hello">>}), + + %% ClientB should receive the message since it has a higher priority + {ok, _ClientB2, _, [<<"hello">>]} = stomp_receive(ClientB1, "MESSAGE"), + #'queue.delete_ok'{} = + amqp_channel:call(Channel, #'queue.delete'{queue = ?QUEUE_QQ}), + ok. + unsubscribe_ack(Config) -> Channel = ?config(amqp_channel, Config), Client = ?config(stomp_client, Config), @@ -310,6 +352,162 @@ blank_destination_in_send(Config) -> "Invalid destination" = proplists:get_value("message", Hdrs), ok. 
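Editor's note: the new `subscribe_with_x_priority` test above exercises consumer priorities on a single-active-consumer quorum queue: the STOMP subscriber that sends the higher `x-priority` header becomes the active consumer and receives the published message. For comparison, here is a minimal sketch (an illustration only, not part of this patch) of expressing the same priority with the Erlang AMQP 0-9-1 client:

```erlang
%% Sketch only: assumes amqp_client.hrl is included for the record definition.
%% On a queue declared with x-single-active-consumer, the consumer carrying
%% the highest x-priority argument becomes the active one.
subscribe_with_priority(Channel, Queue, Priority) ->
    amqp_channel:subscribe(
      Channel,
      #'basic.consume'{queue     = Queue,
                       arguments = [{<<"x-priority">>, long, Priority}]},
      self()).
```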
+stream_filtering(Config) -> + Version = ?config(version, Config), + Client = ?config(stomp_client, Config), + Stream = atom_to_list(?FUNCTION_NAME) ++ "-" ++ integer_to_list(rand:uniform(10000)), + %% subscription just to create the stream from STOMP + SubDestination = "/topic/stream-queue-test", + rabbit_stomp_client:send( + Client, "SUBSCRIBE", + [{"destination", SubDestination}, + {"receipt", "foo"}, + {"x-queue-name", Stream}, + {"x-queue-type", "stream"}, + {?HEADER_X_STREAM_FILTER_SIZE_BYTES, "32"}, + {"durable", "true"}, + {"auto-delete", "false"}, + {"id", "1234"}, + {"prefetch-count", "1"}, + {"ack", "client"}]), + {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"), + rabbit_stomp_client:send( + Client1, "UNSUBSCRIBE", [{"destination", SubDestination}, + {"id", "1234"}, + {"receipt", "bar"}]), + {ok, Client2, _, _} = stomp_receive(Client1, "RECEIPT"), + + %% we are going to publish several waves of messages with and without filter values. + %% we will then create subscriptions with various filter options + %% and make sure we receive only what we asked for and not all the messages. + + StreamDestination = "/amq/queue/" ++ Stream, + %% logic to publish a wave of messages with or without a filter value + WaveCount = 1000, + Publish = + fun(C, FilterValue) -> + lists:foldl(fun(Seq, C0) -> + Headers0 = [{"destination", StreamDestination}, + {"receipt", integer_to_list(Seq)}], + Headers = case FilterValue of + undefined -> + Headers0; + _ -> + [{"x-stream-filter-value", FilterValue}] ++ Headers0 + end, + rabbit_stomp_client:send( + C0, "SEND", Headers, ["hello"]), + {ok, C1, _, _} = stomp_receive(C0, "RECEIPT"), + C1 + end, C, lists:seq(1, WaveCount)) + end, + %% publishing messages with the "apple" filter value + Client3 = Publish(Client2, "apple"), + %% publishing messages with no filter value + Client4 = Publish(Client3, undefined), + %% publishing messages with the "orange" filter value + Client5 = Publish(Client4, "orange"), + + %% filtering on "apple" + rabbit_stomp_client:send( + Client5, "SUBSCRIBE", + [{"destination", StreamDestination}, + {"id", "0"}, + {"ack", "client"}, + {"prefetch-count", "1"}, + {"x-stream-filter", "apple"}, + {"x-stream-offset", "first"}]), + {Client6, AppleMessages} = stomp_receive_messages(Client5, Version), + %% we should get less than all the waves combined + ?assert(length(AppleMessages) < WaveCount * 3), + %% client-side filtering + AppleFilteredMessages = + lists:filter(fun(H) -> + proplists:get_value("x-stream-filter-value", H) =:= "apple" + end, AppleMessages), + %% we should have only the "apple" messages + ?assert(length(AppleFilteredMessages) =:= WaveCount), + rabbit_stomp_client:send( + Client6, "UNSUBSCRIBE", [{"destination", StreamDestination}, + {"id", "0"}, + {"receipt", "bar"}]), + {ok, Client7, _, _} = stomp_receive(Client6, "RECEIPT"), + + %% filtering on "apple" and "orange" + rabbit_stomp_client:send( + Client7, "SUBSCRIBE", + [{"destination", StreamDestination}, + {"id", "0"}, + {"ack", "client"}, + {"prefetch-count", "1"}, + {"x-stream-filter", "apple,orange"}, + {"x-stream-offset", "first"}]), + {Client8, AppleOrangeMessages} = stomp_receive_messages(Client7, Version), + %% we should get less than all the waves combined + ?assert(length(AppleOrangeMessages) < WaveCount * 3), + %% client-side filtering + AppleOrangeFilteredMessages = + lists:filter(fun(H) -> + proplists:get_value("x-stream-filter-value", H) =:= "apple" orelse + proplists:get_value("x-stream-filter-value", H) =:= "orange" + end, AppleOrangeMessages), + %% we 
should have only the "apple" and "orange" messages + ?assert(length(AppleOrangeFilteredMessages) =:= WaveCount * 2), + rabbit_stomp_client:send( + Client8, "UNSUBSCRIBE", [{"destination", StreamDestination}, + {"id", "0"}, + {"receipt", "bar"}]), + {ok, Client9, _, _} = stomp_receive(Client8, "RECEIPT"), + + %% filtering on "apple" and messages without a filter value + rabbit_stomp_client:send( + Client9, "SUBSCRIBE", + [{"destination", StreamDestination}, + {"id", "0"}, + {"ack", "client"}, + {"prefetch-count", "1"}, + {"x-stream-filter", "apple"}, + {"x-stream-match-unfiltered", "true"}, + {"x-stream-offset", "first"}]), + {Client10, AppleUnfilteredMessages} = stomp_receive_messages(Client9, Version), + %% we should get less than all the waves combined + ?assert(length(AppleUnfilteredMessages) < WaveCount * 3), + %% client-side filtering + AppleUnfilteredFilteredMessages = + lists:filter(fun(H) -> + proplists:get_value("x-stream-filter-value", H) =:= "apple" orelse + proplists:get_value("x-stream-filter-value", H) =:= undefined + end, AppleUnfilteredMessages), + %% we should have only the "apple" messages and messages without a filter value + ?assert(length(AppleUnfilteredFilteredMessages) =:= WaveCount * 2), + rabbit_stomp_client:send( + Client10, "UNSUBSCRIBE", [{"destination", StreamDestination}, + {"id", "0"}, + {"receipt", "bar"}]), + {ok, _, _, _} = stomp_receive(Client10, "RECEIPT"), + + Channel = ?config(amqp_channel, Config), + #'queue.delete_ok'{} = amqp_channel:call(Channel, + #'queue.delete'{queue = list_to_binary(Stream)}), + ok. + +stomp_receive_messages(Client, Version) -> + stomp_receive_messages(Client, [], Version). + +stomp_receive_messages(Client, Acc, Version) -> + try rabbit_stomp_client:recv(Client) of + {#stomp_frame{command = "MESSAGE", + headers = Headers}, Client1} -> + MsgHeader = rabbit_stomp_util:msg_header_name(Version), + AckValue = proplists:get_value(MsgHeader, Headers), + AckHeader = rabbit_stomp_util:ack_header_name(Version), + rabbit_stomp_client:send(Client1, "ACK", [{AckHeader, AckValue}]), + stomp_receive_messages(Client1, [Headers] ++ Acc, Version) + catch + error:{badmatch, {error, timeout}} -> + {Client, Acc} + end. + stomp_receive(Client, Command) -> {#stomp_frame{command = Command, headers = Hdrs, diff --git a/deps/rabbitmq_stomp/test/topic_SUITE.erl b/deps/rabbitmq_stomp/test/topic_SUITE.erl index 9a397be7b080..ed7c5790d95b 100644 --- a/deps/rabbitmq_stomp/test/topic_SUITE.erl +++ b/deps/rabbitmq_stomp/test/topic_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(topic_SUITE). @@ -14,8 +14,6 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_stomp.hrl"). -include("rabbit_stomp_frame.hrl"). --include("rabbit_stomp_headers.hrl"). - all() -> [{group, list_to_atom("version_" ++ V)} || V <- ?SUPPORTED_VERSIONS]. diff --git a/deps/rabbitmq_stomp/test/util_SUITE.erl b/deps/rabbitmq_stomp/test/util_SUITE.erl index ca97486ff7a0..9e9cbe28514e 100644 --- a/deps/rabbitmq_stomp/test/util_SUITE.erl +++ b/deps/rabbitmq_stomp/test/util_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(util_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("amqp_client/include/rabbit_routing_prefixes.hrl"). diff --git a/deps/rabbitmq_stream/.gitignore b/deps/rabbitmq_stream/.gitignore index 77fa2c1962be..eee53b11a899 100644 --- a/deps/rabbitmq_stream/.gitignore +++ b/deps/rabbitmq_stream/.gitignore @@ -1,58 +1,11 @@ .eunit -*.o -*.beam -*.plt -erl_crash.dump .concrete/DEV_MODE -# rebar 2.x -.rebar -rel/example_project -ebin/*.beam -deps - -# rebar 3 -.rebar3 -_build/ -_checkouts/ - -erl_crash.dump -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/ebin/ -/escript/ -/logs/ -/plugins/ -/sbin/ -/xrefr -elvis -callgrind* -ct.coverdata -test/ct.cover.spec -_build - -rabbitmq_stream.d -*.plt -*.d - *.jar - -*~ -.sw? -.*.sw? -*.beam *.class *.dat *.dump *.iml *.ipr *.iws -.DS_Store -\#~ -/.idea/ -/deps/ diff --git a/deps/rabbitmq_stream/BUILD.bazel b/deps/rabbitmq_stream/BUILD.bazel index 2018bef39ceb..c6534a375081 100644 --- a/deps/rabbitmq_stream/BUILD.bazel +++ b/deps/rabbitmq_stream/BUILD.bazel @@ -83,7 +83,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) @@ -100,7 +100,8 @@ eunit( target = ":test_erlang_app", ) -broker_for_integration_suites() +broker_for_integration_suites( +) rabbitmq_integration_suite( name = "commands_SUITE", @@ -126,6 +127,13 @@ rabbitmq_integration_suite( name = "rabbit_stream_manager_SUITE", ) +rabbitmq_integration_suite( + name = "rabbit_stream_reader_SUITE", + deps = [ + "//deps/rabbitmq_stream_common:erlang_app", + ], +) + rabbitmq_integration_suite( name = "rabbit_stream_SUITE", shard_count = 3, @@ -135,6 +143,14 @@ rabbitmq_integration_suite( ], ) +rabbitmq_integration_suite( + name = "protocol_interop_SUITE", + size = "medium", + runtime_deps = [ + "//deps/amqp10_client:erlang_app", + ], +) + assert_suites() alias( diff --git a/deps/rabbitmq_stream/Makefile b/deps/rabbitmq_stream/Makefile index e63a7ab94733..54b1237a589a 100644 --- a/deps/rabbitmq_stream/Makefile +++ b/deps/rabbitmq_stream/Makefile @@ -23,7 +23,9 @@ endef LOCAL_DEPS = ssl DEPS = rabbit rabbitmq_stream_common osiris ranch -TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client amqp10_client + +PLT_APPS += rabbitmqctl elixir DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_stream/README.adoc b/deps/rabbitmq_stream/README.adoc index 3da81ce0589b..a99967e7f0b3 100644 --- a/deps/rabbitmq_stream/README.adoc +++ b/deps/rabbitmq_stream/README.adoc @@ -27,4 +27,4 @@ Released under the link:LICENSE-MPL-RabbitMQ[MPL 2.0]. == Copyright -(c) 2020-2023 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
diff --git a/deps/rabbitmq_stream/app.bzl b/deps/rabbitmq_stream/app.bzl index fa4c8e801491..0f0b0b5153b3 100644 --- a/deps/rabbitmq_stream/app.bzl +++ b/deps/rabbitmq_stream/app.bzl @@ -16,6 +16,7 @@ def all_beam_files(name = "all_beam_files"): "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand.erl", + "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand.erl", "src/rabbit_stream.erl", "src/rabbit_stream_connection_sup.erl", "src/rabbit_stream_manager.erl", @@ -55,6 +56,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand.erl", + "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand.erl", "src/rabbit_stream.erl", "src/rabbit_stream_connection_sup.erl", "src/rabbit_stream_manager.erl", @@ -93,6 +95,7 @@ def all_srcs(name = "all_srcs"): ) filegroup( name = "private_hdrs", + srcs = ["src/rabbit_stream_reader.hrl"], ) filegroup( name = "srcs", @@ -104,6 +107,7 @@ def all_srcs(name = "all_srcs"): "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand.erl", "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand.erl", + "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand.erl", "src/rabbit_stream.erl", "src/rabbit_stream_connection_sup.erl", "src/rabbit_stream_manager.erl", @@ -172,3 +176,25 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/rabbit_common:erlang_app"], ) + erlang_bytecode( + name = "rabbit_stream_reader_SUITE_beam_files", + testonly = True, + srcs = ["test/rabbit_stream_reader_SUITE.erl"], + outs = ["test/rabbit_stream_reader_SUITE.beam"], + hdrs = ["src/rabbit_stream_reader.hrl"], + app_name = "rabbitmq_stream", + erlc_opts = "//:test_erlc_opts", + deps = [ + "//deps/rabbit_common:erlang_app", #keep + "//deps/rabbitmq_stream_common:erlang_app", + ], + ) + erlang_bytecode( + name = "protocol_interop_SUITE_beam_files", + testonly = True, + srcs = ["test/protocol_interop_SUITE.erl"], + outs = ["test/protocol_interop_SUITE.beam"], + app_name = "rabbitmq_stream", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp10_common:erlang_app", "//deps/amqp_client:erlang_app"], + ) diff --git a/deps/rabbitmq_stream/docs/PROTOCOL.adoc b/deps/rabbitmq_stream/docs/PROTOCOL.adoc index 581aad3270a9..c4f2f12f05c3 100644 --- a/deps/rabbitmq_stream/docs/PROTOCOL.adoc +++ b/deps/rabbitmq_stream/docs/PROTOCOL.adoc @@ -230,6 +230,16 @@ used to make the difference between a request (0) and a response (1). Example fo |0x001c |Yes +|<> +|Client +|0x001d +|Yes + +|<> +|Client +|0x001e +|Yes + |=== === DeclarePublisher @@ -279,6 +289,9 @@ Publish => Key Version PublisherId PublishedMessages Message => bytes ``` +1. Use version 1 if there is no filter value. +2. Use version 2 if there is a filter value. 
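Editor's note: the two notes added above state when a publisher should use Publish version 1 versus version 2. A small sketch of that decision in Erlang (illustrative only; the map shape of an outgoing message is a hypothetical example, not taken from the patch):

```erlang
%% Pick the Publish frame version for a batch of outgoing messages, following
%% the rule documented above: version 2 as soon as any message carries a
%% filter value, version 1 otherwise.
publish_frame_version(Messages) ->
    HasFilterValue =
        lists:any(fun(#{filter_value := FV}) -> FV =/= undefined;
                     (#{}) -> false
                  end, Messages),
    case HasFilterValue of
        true  -> 2;
        false -> 1
    end.
```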
+ === PublishConfirm ``` @@ -373,7 +386,7 @@ Deliver => Key Version SubscriptionId OsirisChunk Key => uint16 // 0x0008 Version => uint16 SubscriptionId => uint8 - OsirisChunk => MagicVersion NumEntries NumRecords Epoch ChunkFirstOffset ChunkCrc DataLength Messages + OsirisChunk => MagicVersion ChunkType NumEntries NumRecords Timestamp Epoch ChunkFirstOffset ChunkCrc DataLength TrailerLength BloomSize Reserved Messages MagicVersion => int8 ChunkType => int8 // 0: user, 1: tracking delta, 2: tracking snapshot NumEntries => uint16 @@ -384,8 +397,9 @@ Deliver => Key Version SubscriptionId OsirisChunk ChunkCrc => int32 DataLength => uint32 TrailerLength => uint32 - Reserved => unit32 // unused 4 bytes - Messages => [Message] // no int32 for the size for this array; the size is defined by NumEntries field above + BloomSize => uint8 // size of bloom filter data, ignored at the moment + Reserved => uint24 // 24 bits reserved for future use + Messages => [Message] // a continous collection of messages, the size of the array is defined by NumEntries Message => EntryTypeAndSize Data => bytes ``` @@ -398,7 +412,7 @@ Deliver => Key Version SubscriptionId CommittedOffset OsirisChunk Version => uint16 SubscriptionId => uint8 CommittedChunkId => uint64 - OsirisChunk => MagicVersion NumEntries NumRecords Epoch ChunkFirstOffset ChunkCrc DataLength Messages + OsirisChunk => MagicVersion ChunkType NumEntries NumRecords Timestamp Epoch ChunkFirstOffset ChunkCrc DataLength TrailerLength BloomSize Reserved Messages MagicVersion => int8 ChunkType => int8 // 0: user, 1: tracking delta, 2: tracking snapshot NumEntries => uint16 @@ -409,14 +423,15 @@ Deliver => Key Version SubscriptionId CommittedOffset OsirisChunk ChunkCrc => int32 DataLength => uint32 TrailerLength => uint32 - Reserved => unit32 // unused 4 bytes - Messages => [Message] // no int32 for the size for this array; the size is defined by NumEntries field above + BloomSize => uint8 // size of bloom filter data, ignored at the moment + Reserved => uint24 // 24 bits reserved for future use + Messages => [Message] // a continous collection of messages, the size of the array is defined by NumEntries Message => EntryTypeAndSize Data => bytes ``` -NB: See the https://github.com/rabbitmq/osiris/blob/f32df7563a036b1687c0208a3cb5f9e8f5cee937/src/osiris_log.erl#L101[Osiris project] +NB: See the https://github.com/rabbitmq/osiris/blob/12a430b11be2c2be3f26ce4f2d7268954c7ec02b/src/osiris_log.erl#L126-L195[Osiris project] for details on the structure of messages. === Credit @@ -754,6 +769,31 @@ StreamStatsResponse => Key Version CorrelationId ResponseCode Stats Value => int64 ``` +=== CreateSuperStream + +``` +CreateSuperStream => Key Version CorrelationId Name [Partition] [BindingKey] Arguments + Key => uint16 // 0x001d + Version => uint16 + CorrelationId => uint32 + Name => string + Partition => string + BindingKey => string + Arguments => [Argument] + Argument => Key Value + Key => string + Value => string +``` + +=== DeleteSuperStream + +``` +Delete => Key Version CorrelationId Name + Key => uint16 // 0x001e + Version => uint16 + CorrelationId => uint32 + Name => string +``` == Authentication @@ -798,8 +838,3 @@ The client answers with a `Tune` frame with the settings he agrees on, possibly from the server's suggestions. * Open: the client sends an `Open` frame to pick a virtual host to connect to. The server answers whether it accepts the access or not. 
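Editor's note: the `CreateSuperStream` and `DeleteSuperStream` definitions above give the field layout but not a worked encoding. Below is a rough sketch of building a `CreateSuperStream` request body in Erlang. It assumes the stream protocol's usual conventions elsewhere in this document (strings as int16-length-prefixed bytes, arrays as int32-count-prefixed sequences) and omits the outer frame size prefix; it is an illustration, not code from the patch:

```erlang
-module(super_stream_frame_sketch).
-export([create_super_stream/5]).

%% int16 length-prefixed string (assumed encoding convention)
str(Bin) -> <<(byte_size(Bin)):16, Bin/binary>>.

%% int32 count-prefixed array of already-encoded elements (assumed convention)
arr(Elems) -> [<<(length(Elems)):32>> | Elems].

create_super_stream(CorrelationId, Name, Partitions, BindingKeys, Arguments) ->
    iolist_to_binary(
      [<<16#001d:16,            %% Key, per the table above
         1:16,                  %% Version
         CorrelationId:32>>,
       str(Name),
       arr([str(P) || P <- Partitions]),
       arr([str(K) || K <- BindingKeys]),
       arr([[str(K), str(V)] || {K, V} <- Arguments])]).
```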
- -== Resources - -- https://docs.google.com/presentation/d/1Hlv4qaWm2PRU04dVPmShP9wU7TEQEttXdsbV8P54Uvw/edit#slide=id.gdbeadf9676_0_37[RabbitMQ Streams client] : a general guide line to write a streams client -- https://docs.google.com/presentation/d/1BFwf01LcicZ-SyxE1CycZv2gUQMPFGdtFkVuXhgkoTE/edit#slide=id.p1[RabbitMQ Streams Internals]: how the streams work internally diff --git a/deps/rabbitmq_stream/include/rabbit_stream_metrics.hrl b/deps/rabbitmq_stream/include/rabbit_stream_metrics.hrl index 066962c4def3..ad788edd6063 100644 --- a/deps/rabbitmq_stream/include/rabbit_stream_metrics.hrl +++ b/deps/rabbitmq_stream/include/rabbit_stream_metrics.hrl @@ -1,3 +1,19 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 2.0 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at https://www.mozilla.org/en-US/MPL/2.0/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is Pivotal Software, Inc. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + -include_lib("rabbit/include/rabbit_global_counters.hrl"). -define(TABLE_CONSUMER, rabbit_stream_consumer_created). diff --git a/deps/rabbitmq_stream/priv/schema/rabbitmq_stream.schema b/deps/rabbitmq_stream/priv/schema/rabbitmq_stream.schema index 0a4fc277accb..1dfbdf259a51 100644 --- a/deps/rabbitmq_stream/priv/schema/rabbitmq_stream.schema +++ b/deps/rabbitmq_stream/priv/schema/rabbitmq_stream.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% ========================================================================== diff --git a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSuperStreamCommand.erl b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSuperStreamCommand.erl index 92a80458f64c..a7d1a4f251bb 100644 --- a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSuperStreamCommand.erl +++ b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddSuperStreamCommand.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module('Elixir.RabbitMQ.CLI.Ctl.Commands.AddSuperStreamCommand'). 
@@ -40,6 +40,7 @@ description() -> switches() -> [{partitions, integer}, + {binding_keys, string}, {routing_keys, string}, {max_length_bytes, string}, {max_age, string}, @@ -52,9 +53,15 @@ help_section() -> validate([], _Opts) -> {validation_failure, not_enough_args}; +validate([_Name], #{routing_keys := _, binding_keys := _}) -> + {validation_failure, + "Specify --binding-keys only."}; +validate([_Name], #{partitions := _, binding_keys := _}) -> + {validation_failure, + "Specify --partitions or --binding-keys, not both."}; validate([_Name], #{partitions := _, routing_keys := _}) -> {validation_failure, - "Specify --partitions or routing-keys, not both."}; + "Specify --partitions or --binding-keys, not both."}; validate([_Name], #{partitions := Partitions}) when Partitions < 1 -> {validation_failure, "The partition number must be greater than 0"}; validate([_Name], Opts) -> @@ -128,14 +135,17 @@ validate_stream_arguments(#{initial_cluster_size := Value} = Opts) -> validate_stream_arguments(_) -> ok. -merge_defaults(_Args, #{routing_keys := _V} = Opts) -> +merge_defaults(_Args, #{binding_keys := _V} = Opts) -> {_Args, maps:merge(#{vhost => <<"/">>}, Opts)}; +merge_defaults(_Args, #{routing_keys := RKs} = Opts) -> + {_Args, maps:merge(#{vhost => <<"/">>, binding_keys => RKs}, + maps:remove(routing_keys, Opts))}; merge_defaults(_Args, Opts) -> {_Args, maps:merge(#{partitions => 3, vhost => <<"/">>}, Opts)}. usage() -> <<"add_super_stream [--vhost ] [--partition" - "s ] [--routing-keys ]">>. + "s ] [--binding-keys ]">>. usage_additional() -> [[<<"">>, @@ -144,8 +154,8 @@ usage_additional() -> <<"The virtual host the super stream is added to.">>], [<<"--partitions ">>, <<"The number of partitions, default is 3. Mutually exclusive with --routing-keys.">>], - [<<"--routing-keys ">>, - <<"Comma-separated list of routing keys. Mutually exclusive with --partitions.">>], + [<<"--binding-keys ">>, + <<"Comma-separated list of binding keys. Mutually exclusive with --partitions.">>], [<<"--max-length-bytes ">>, <<"The maximum size of partition streams, example values: 20gb, 500mb.">>], [<<"--max-age ">>, @@ -158,7 +168,7 @@ usage_additional() -> <<"The initial cluster size of partition streams.">>]]. usage_doc_guides() -> - [?STREAM_GUIDE_URL]. + [?STREAMS_GUIDE_URL]. run([SuperStream], #{node := NodeName, @@ -184,26 +194,26 @@ run([SuperStream], #{node := NodeName, vhost := VHost, timeout := Timeout, - routing_keys := RoutingKeysStr} = + binding_keys := BindingKeysStr} = Opts) -> - RoutingKeys = + BindingKeys = [rabbit_data_coercion:to_binary( string:strip(K)) || K <- string:tokens( - rabbit_data_coercion:to_list(RoutingKeysStr), ",")], + rabbit_data_coercion:to_list(BindingKeysStr), ",")], Streams = [list_to_binary(binary_to_list(SuperStream) ++ "-" ++ binary_to_list(K)) - || K <- RoutingKeys], + || K <- BindingKeys], create_super_stream(NodeName, Timeout, VHost, SuperStream, Streams, stream_arguments(Opts), - RoutingKeys). + BindingKeys). stream_arguments(Opts) -> stream_arguments(#{}, Opts). diff --git a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteSuperStreamCommand.erl b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteSuperStreamCommand.erl index e7a5e7545b4c..be046ddc0790 100644 --- a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteSuperStreamCommand.erl +++ b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteSuperStreamCommand.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. 
%% %% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module('Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteSuperStreamCommand'). @@ -58,7 +58,7 @@ usage_additional() -> [<<"--vhost ">>, <<"The virtual host of the super stream.">>]]. usage_doc_guides() -> - [?STREAM_GUIDE_URL]. + [?STREAMS_GUIDE_URL]. run([SuperStream], #{node := NodeName, diff --git a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand.erl b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand.erl index 0bfb90c71124..79eae6c0629b 100644 --- a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand.erl +++ b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand'). @@ -64,7 +64,7 @@ validate(Args, _) -> end. merge_defaults([], Opts) -> - merge_defaults([<<"conn_name">>], Opts); + merge_defaults([<<"node">>, <<"conn_name">>], Opts); merge_defaults(Args, Opts) -> {Args, maps:merge(#{verbose => false}, Opts)}. @@ -79,7 +79,7 @@ usage_additional() -> [{<<"">>, <>}]. usage_doc_guides() -> - [?STREAM_GUIDE_URL]. + [?STREAMS_GUIDE_URL]. run(Args, #{node := NodeName, diff --git a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumerGroupsCommand.erl b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumerGroupsCommand.erl index 603f9998b274..699b3db75d65 100644 --- a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumerGroupsCommand.erl +++ b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumerGroupsCommand.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumerGroupsCommand'). @@ -54,7 +54,7 @@ help_section() -> {plugin, stream}. validate(Args, _) -> - ValidKeys = lists:map(fun atom_to_list/1, ?CONSUMER_INFO_ITEMS), + ValidKeys = lists:map(fun atom_to_list/1, ?CONSUMER_GROUP_INFO_ITEMS), case 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':validate_info_keys(Args, ValidKeys) of @@ -83,7 +83,7 @@ usage_additional() -> [{<<"">>, <>}]. usage_doc_guides() -> - [?STREAM_GUIDE_URL]. + [?STREAMS_GUIDE_URL]. 
run(Args, #{node := NodeName, diff --git a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand.erl b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand.erl index 924ebbc6424c..155744fa5352 100644 --- a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand.erl +++ b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand'). @@ -65,7 +65,7 @@ validate(Args, _) -> merge_defaults([], Opts) -> merge_defaults([rabbit_data_coercion:to_binary(Item) - || Item <- ?CONSUMER_INFO_ITEMS], + || Item <- ?CONSUMER_INFO_ITEMS -- [connection_pid, node]], Opts); merge_defaults(Args, Opts) -> {Args, maps:merge(#{verbose => false, vhost => <<"/">>}, Opts)}. @@ -82,7 +82,7 @@ usage_additional() -> [{<<"">>, <>}]. usage_doc_guides() -> - [?STREAM_GUIDE_URL]. + [?STREAMS_GUIDE_URL]. run(Args, #{node := NodeName, diff --git a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand.erl b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand.erl index c2b20444aa0c..4f3c7e299ade 100644 --- a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand.erl +++ b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand'). @@ -86,7 +86,7 @@ usage_additional() -> [{<<"">>, <>}]. usage_doc_guides() -> - [?STREAM_GUIDE_URL]. + [?STREAMS_GUIDE_URL]. run(Args, #{node := NodeName, diff --git a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand.erl b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand.erl index 052112832661..b9791c0b3769 100644 --- a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand.erl +++ b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is GoPivotal, Inc. -%% Copyright (c) 2021 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand'). @@ -65,7 +65,7 @@ validate(Args, _) -> merge_defaults([], Opts) -> merge_defaults([rabbit_data_coercion:to_binary(Item) - || Item <- ?PUBLISHER_INFO_ITEMS], + || Item <- ?PUBLISHER_INFO_ITEMS -- [connection_pid, node]], Opts); merge_defaults(Args, Opts) -> {Args, maps:merge(#{verbose => false, vhost => <<"/">>}, Opts)}. 
@@ -82,7 +82,7 @@ usage_additional() -> [{<<"">>, <>}]. usage_doc_guides() -> - [?STREAM_GUIDE_URL]. + [?STREAMS_GUIDE_URL]. run(Args, #{node := NodeName, diff --git a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand.erl b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand.erl new file mode 100644 index 000000000000..3ae3d7e19ebf --- /dev/null +++ b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand.erl @@ -0,0 +1,162 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 2.0 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at https://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is GoPivotal, Inc. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +-module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand'). + +-include_lib("rabbitmq_stream_common/include/rabbit_stream.hrl"). + +-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour'). + +-export([formatter/0, + scopes/0, + switches/0, + aliases/0, + usage/0, + usage_additional/0, + usage_doc_guides/0, + banner/2, + validate/2, + merge_defaults/2, + run/2, + output/2, + description/0, + help_section/0, + tracking_info/3]). + +formatter() -> + 'Elixir.RabbitMQ.CLI.Formatters.PrettyTable'. + +scopes() -> + [streams]. + +switches() -> + [{stream, string}, {all, boolean}, {offset, boolean}, {writer, boolean}]. + +aliases() -> + []. + +description() -> + <<"Lists tracking information for a stream">>. + +help_section() -> + {plugin, stream}. + +validate([], _Opts) -> + {validation_failure, not_enough_args}; +validate([_Stream], Opts) -> + case maps:with([all, writer, offset], Opts) of + M when map_size(M) > 1 -> + {validation_failure, + "Specify only one of --all, --offset, --writer."}; + _ -> + ok + end; +validate(_, _Opts) -> + {validation_failure, too_many_args}. + +merge_defaults(Args, Opts) -> + case maps:with([all, writer, offset], Opts) of + M when map_size(M) =:= 0 -> + {Args, maps:merge(#{all => true, vhost => <<"/">>}, Opts)}; + _ -> + {Args, maps:merge(#{vhost => <<"/">>}, Opts)} + end. + +usage() -> + <<"list_stream_tracking [--all | --offset | --writer] " + "[--vhost ]">>. + +usage_additional() -> + [[<<"">>, + <<"The name of the stream.">>], + [<<"--all">>, + <<"List offset and writer tracking information.">>], + [<<"--offset">>, + <<"List only offset tracking information.">>], + [<<"--writer">>, + <<"List only writer deduplication tracking information.">>], + [<<"--vhost ">>, + <<"The virtual host of the stream.">>]]. + +usage_doc_guides() -> + [?STREAMS_GUIDE_URL]. 
+ +run([Stream], + #{node := NodeName, + vhost := VHost, + timeout := Timeout} = Opts) -> + + TrackingType = case Opts of + #{all := true} -> + all; + #{offset := true} -> + offset; + #{writer := true} -> + writer + end, + case rabbit_misc:rpc_call(NodeName, + ?MODULE, + tracking_info, + [VHost, Stream, TrackingType], + Timeout) of + {error, not_found} -> + {error, "The stream does not exist."}; + {error, not_available} -> + {error, "The stream is not available."}; + {error, _} = E -> + E; + R -> + R + end. + +banner([Stream], _) -> + <<"Listing tracking information for stream ", Stream/binary, <<" ...">>/binary>>. + +output({ok, []}, _Opts) -> + ok; +output([], _Opts) -> + ok; +output(Result, _Opts) -> + 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Result). + +tracking_info(VHost, Stream, TrackingType) -> + case rabbit_stream_manager:lookup_leader(VHost, Stream) of + {ok, Leader} -> + TrackingInfo = osiris:read_tracking(Leader), + FieldsLabels = case TrackingType of + all -> + [{offsets, offset}, {sequences, writer}]; + offset -> + [{offsets, offset}]; + writer -> + [{sequences, writer}] + end, + lists:foldl(fun({F, L}, Acc) -> + Tracking = maps:get(F, TrackingInfo, #{}), + maps:fold(fun(Reference, {_, Sequence}, AccType) -> + [[{type, L}, + {name, Reference}, + {tracking_value, Sequence} + ] | AccType]; + (Reference, Offset, AccType) -> + [[{type, L}, + {name, Reference}, + {tracking_value, Offset} + ] | AccType] + end, Acc, Tracking) + end, [], FieldsLabels); + {error, _} = E -> + E + end. diff --git a/deps/rabbitmq_stream/src/rabbit_stream.erl b/deps/rabbitmq_stream/src/rabbit_stream.erl index 40c612bc8d83..3b20b8dcf519 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream). @@ -87,15 +87,19 @@ port() -> end. port_from_listener() -> - Listeners = rabbit_networking:node_listeners(node()), - Port = + try + Listeners = rabbit_networking:node_listeners(node()), lists:foldl(fun (#listener{port = Port, protocol = stream}, _Acc) -> Port; (_, Acc) -> Acc end, - undefined, Listeners), - Port. + undefined, Listeners) + catch error:Reason -> + %% can happen if a remote node calls and the current has not fully started yet + rabbit_log:info("Error while retrieving stream plugin port: ~tp", [Reason]), + {error, Reason} + end. tls_port() -> case application:get_env(rabbitmq_stream, advertised_tls_port, @@ -108,16 +112,20 @@ tls_port() -> end. tls_port_from_listener() -> - Listeners = rabbit_networking:node_listeners(node()), - Port = + try + Listeners = rabbit_networking:node_listeners(node()), lists:foldl(fun (#listener{port = Port, protocol = 'stream/ssl'}, _Acc) -> Port; (_, Acc) -> Acc end, - undefined, Listeners), - Port. + undefined, Listeners) + catch error:Reason -> + %% can happen if a remote node calls and the current has not fully started yet + rabbit_log:info("Error while retrieving stream plugin port: ~tp", [Reason]), + {error, Reason} + end. stop(_State) -> ok. 
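For illustration only (not part of the patch): a minimal, self-contained sketch of the flattening that tracking_info/3 above performs. The input map shape — an offsets map and a sequences map whose writer values are {Epoch, Sequence} tuples — is assumed from the clauses the fold handles; osiris is not called here and the module name is invented.

%% Sketch: flatten a tracking map into the proplist rows that the
%% list_stream_tracking PrettyTable formatter prints.
-module(tracking_rows_sketch).
-export([rows/1]).

rows(TrackingInfo) ->
    FieldsLabels = [{offsets, offset}, {sequences, writer}],
    lists:foldl(
      fun({Field, Label}, Acc) ->
              Tracking = maps:get(Field, TrackingInfo, #{}),
              maps:fold(
                %% writer (deduplication) entries carry {Epoch, Sequence}
                fun(Reference, {_Epoch, Sequence}, AccType) ->
                        [[{type, Label},
                          {name, Reference},
                          {tracking_value, Sequence}] | AccType];
                   %% offset entries are plain integers
                   (Reference, Offset, AccType) ->
                        [[{type, Label},
                          {name, Reference},
                          {tracking_value, Offset}] | AccType]
                end,
                Acc, Tracking)
      end,
      [], FieldsLabels).

For example, rows(#{offsets => #{<<"app-1">> => 42}, sequences => #{<<"app-1">> => {0, 500}}}) yields one offset row and one writer row.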
diff --git a/deps/rabbitmq_stream/src/rabbit_stream_connection_sup.erl b/deps/rabbitmq_stream/src/rabbit_stream_connection_sup.erl index 9b18a5f51416..6ce36c5d3167 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream_connection_sup.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream_connection_sup.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_connection_sup). diff --git a/deps/rabbitmq_stream/src/rabbit_stream_manager.erl b/deps/rabbitmq_stream/src/rabbit_stream_manager.erl index f1d36b2033ff..51257fe64a90 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream_manager.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream_manager.erl @@ -11,11 +11,13 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_manager). +-feature(maybe_expr, enable). + -behaviour(gen_server). -include_lib("rabbit_common/include/rabbit_framing.hrl"). @@ -73,7 +75,7 @@ create_super_stream(VirtualHost, Name, Partitions, Arguments, - RoutingKeys, + BindingKeys, Username) -> gen_server:call(?MODULE, {create_super_stream, @@ -81,7 +83,7 @@ create_super_stream(VirtualHost, Name, Partitions, Arguments, - RoutingKeys, + BindingKeys, Username}). -spec delete_super_stream(binary(), binary(), binary()) -> @@ -226,10 +228,10 @@ handle_call({create_super_stream, Name, Partitions, Arguments, - RoutingKeys, + BindingKeys, Username}, _From, State) -> - case validate_super_stream_creation(VirtualHost, Name, Partitions) of + case validate_super_stream_creation(VirtualHost, Name, Partitions, BindingKeys) of {error, Reason} -> {reply, {error, Reason}, State}; ok -> @@ -273,7 +275,7 @@ handle_call({create_super_stream, add_super_stream_bindings(VirtualHost, Name, Partitions, - RoutingKeys, + BindingKeys, Username), case BindingsResult of ok -> @@ -416,8 +418,10 @@ handle_call({topology, VirtualHost, Stream}, _From, State) -> #{leader_node => undefined, replica_nodes => []}, Members)}; - _ -> - {error, not_available} + Err -> + rabbit_log:info("Error locating ~tp stream members: ~tp", + [StreamName, Err]), + {error, stream_not_available} end; {error, not_found} -> {error, stream_not_found}; @@ -431,9 +435,9 @@ handle_call({route, RoutingKey, VirtualHost, SuperStream}, _From, Res = try Exchange = rabbit_exchange:lookup_or_die(ExchangeName), Content = #content{properties = #'P_basic'{}}, - DummyMsg = mc_amqpl:message(ExchangeName, - RoutingKey, - Content), + {ok, DummyMsg} = mc_amqpl:message(ExchangeName, + RoutingKey, + Content), case rabbit_exchange:route(Exchange, DummyMsg) of [] -> {ok, no_route}; @@ -445,8 +449,8 @@ handle_call({route, RoutingKey, VirtualHost, SuperStream}, _From, end catch exit:Error -> - rabbit_log:error("Error while looking up exchange ~tp, ~tp", - [rabbit_misc:rs(ExchangeName), Error]), + rabbit_log:warning("Error while looking up exchange ~tp, ~tp", + [rabbit_misc:rs(ExchangeName), Error]), {error, stream_not_found} end, {reply, Res, State}; @@ -518,85 +522,96 @@ handle_info(Info, 
State) -> {noreply, State}. create_stream(VirtualHost, Reference, Arguments, Username) -> - Name = - #resource{virtual_host = VirtualHost, - kind = queue, - name = Reference}, StreamQueueArguments = stream_queue_arguments(Arguments), - case validate_stream_queue_arguments(StreamQueueArguments) of - ok -> - Q0 = amqqueue:new(Name, - none, - true, - false, - none, - StreamQueueArguments, - VirtualHost, - #{user => Username}, - rabbit_stream_queue), - try - QueueLookup = - rabbit_amqqueue:with(Name, - fun(Q) -> - ok = - rabbit_amqqueue:assert_equivalence(Q, - true, - false, - StreamQueueArguments, - none) - end), - - case QueueLookup of - ok -> - {error, reference_already_exists}; - {error, not_found} -> - try - case rabbit_queue_type:declare(Q0, node()) of - {new, Q} -> - {ok, amqqueue:get_type_state(Q)}; - {existing, _} -> - {error, reference_already_exists}; - {error, Err} -> - rabbit_log:warning("Error while creating ~tp stream, ~tp", - [Reference, Err]), - {error, internal_error}; - {protocol_error, - precondition_failed, - Msg, - Args} -> - rabbit_log:warning("Error while creating ~tp stream, " - ++ Msg, - [Reference] ++ Args), - {error, validation_failed} - end - catch - exit:Error -> - rabbit_log:error("Error while creating ~tp stream, ~tp", - [Reference, Error]), - {error, internal_error} - end; - {error, {absent, _, Reason}} -> - rabbit_log:error("Error while creating ~tp stream, ~tp", - [Reference, Reason]), - {error, internal_error} - end - catch - exit:ExitError -> - case ExitError of - % likely a problem of inequivalent args on an existing stream - {amqp_error, precondition_failed, M, _} -> - rabbit_log:info("Error while creating ~tp stream, " - ++ M, - [Reference]), - {error, validation_failed}; - E -> + maybe + ok ?= validate_stream_queue_arguments(StreamQueueArguments), + do_create_stream(VirtualHost, Reference, StreamQueueArguments, Username) + else + error -> + {error, validation_failed}; + {error, _} = Err -> + Err + end. 
+ +do_create_stream(VirtualHost, Reference, StreamQueueArguments, Username) -> + Name = #resource{virtual_host = VirtualHost, + kind = queue, + name = Reference}, + Q0 = amqqueue:new(Name, + none, + true, + false, + none, + StreamQueueArguments, + VirtualHost, + #{user => Username}, + rabbit_stream_queue), + try + QueueLookup = + rabbit_amqqueue:with(Name, + fun(Q) -> + ok = + rabbit_amqqueue:assert_equivalence(Q, + true, + false, + StreamQueueArguments, + none) + end), + + case QueueLookup of + ok -> + {error, reference_already_exists}; + {error, not_found} -> + try + case rabbit_queue_type:declare(Q0, node()) of + {new, Q} -> + {ok, amqqueue:get_type_state(Q)}; + {existing, _} -> + {error, reference_already_exists}; + {error, Err} -> rabbit_log:warning("Error while creating ~tp stream, ~tp", - [Reference, E]), + [Reference, Err]), + {error, internal_error}; + {error, + queue_limit_exceeded, Reason, ReasonArg} -> + rabbit_log:warning("Cannot declare stream ~tp because, " + ++ Reason, + [Reference] ++ ReasonArg), + {error, validation_failed}; + {protocol_error, + precondition_failed, + Msg, + Args} -> + rabbit_log:warning("Error while creating ~tp stream, " + ++ Msg, + [Reference] ++ Args), {error, validation_failed} end - end; - error -> - {error, validation_failed} + catch + exit:Error -> + rabbit_log:error("Error while creating ~tp stream, ~tp", + [Reference, Error]), + {error, internal_error} + end; + {error, {absent, _, Reason}} -> + rabbit_log:error("Error while creating ~tp stream, ~tp", + [Reference, Reason]), + {error, internal_error} + end + catch + exit:ExitError -> + case ExitError of + % likely a problem of inequivalent args on an existing stream + {amqp_error, precondition_failed, M, _} -> + rabbit_log:info("Error while creating ~tp stream, " + ++ M, + [Reference]), + {error, validation_failed}; + E -> + rabbit_log:warning("Error while creating ~tp stream, ~tp", + [Reference, E]), + {error, validation_failed} + end end. delete_stream(VirtualHost, Reference, Username) -> @@ -655,25 +670,56 @@ super_stream_partitions(VirtualHost, SuperStream) -> {error, stream_not_found} end. 
-validate_super_stream_creation(VirtualHost, Name, Partitions) -> - case exchange_exists(VirtualHost, Name) of - {error, validation_failed} -> - {error, - {validation_failed, - rabbit_misc:format("~ts is not a correct name for a super stream", - [Name])}}; - {ok, true} -> - {error, - {reference_already_exists, - rabbit_misc:format("there is already an exchange named ~ts", - [Name])}}; - {ok, false} -> - case check_already_existing_queue(VirtualHost, Partitions) of - {error, Reason} -> - {error, Reason}; - ok -> - ok - end +validate_super_stream_creation(_VirtualHost, _Name, Partitions, BindingKeys) + when length(Partitions) =/= length(BindingKeys) -> + {error, {validation_failed, "There must be the same number of partitions and binding keys"}}; +validate_super_stream_creation(VirtualHost, Name, Partitions, _BindingKeys) -> + maybe + ok ?= validate_super_stream_partitions(Partitions), + ok ?= case rabbit_vhost_limit:would_exceed_queue_limit(length(Partitions), VirtualHost) of + false -> + ok; + {true, Limit, _} -> + {error, {validation_failed, + rabbit_misc:format("Cannot declare super stream ~tp with ~tp partition(s) " + "because queue limit ~tp in vhost '~tp' is reached", + [Name, length(Partitions), Limit, VirtualHost])}} + end, + ok ?= case exchange_exists(VirtualHost, Name) of + {error, validation_failed} -> + {error, + {validation_failed, + rabbit_misc:format("~ts is not a correct name for a super stream", + [Name])}}; + {ok, true} -> + {error, + {reference_already_exists, + rabbit_misc:format("there is already an exchange named ~ts", + [Name])}}; + {ok, false} -> + ok + end, + ok ?= check_already_existing_queue(VirtualHost, Partitions) + end. + +validate_super_stream_partitions(Partitions) -> + case erlang:length(Partitions) == sets:size(sets:from_list(Partitions)) of + true -> + case lists:dropwhile(fun(Partition) -> + case rabbit_stream_utils:enforce_correct_name(Partition) of + {ok, _} -> true; + _ -> false + end + end, Partitions) of + [] -> + ok; + InvalidPartitions -> {error, {validation_failed, + {rabbit_misc:format("~ts is not a correct partition names", + [InvalidPartitions])}}} + end; + _ -> {error, {validation_failed, + {rabbit_misc:format("Duplicate partition names found ~ts", + [Partitions])}}} end. 
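The rewritten validate_super_stream_creation/4 above chains its checks with the maybe expression, enabled earlier in this file via -feature(maybe_expr, enable). A minimal sketch of that pattern follows; check_name/1 and check_limit/1 are hypothetical helpers invented for illustration, not functions from the patch.

%% Sketch: maybe/?= validation chain (OTP 25+; the feature directive is
%% required before OTP 27). The first failing check short-circuits.
-module(maybe_chain_sketch).
-feature(maybe_expr, enable).
-export([validate/2]).

validate(Name, PartitionCount) ->
    maybe
        ok ?= check_name(Name),
        ok ?= check_limit(PartitionCount),
        ok
    else
        {error, _} = Err ->
            %% any non-ok check result falls through to here unchanged
            Err
    end.

check_name(<<>>) -> {error, {validation_failed, "name must not be empty"}};
check_name(_)    -> ok.

check_limit(N) when N > 255 -> {error, {validation_failed, "too many partitions"}};
check_limit(_)              -> ok.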
exchange_exists(VirtualHost, Name) -> @@ -723,33 +769,38 @@ declare_super_stream_exchange(VirtualHost, Name, Username) -> true), CheckedType = rabbit_exchange:check_type(<<"direct">>), ExchangeName = rabbit_misc:r(VirtualHost, exchange, CorrectName), - X = case rabbit_exchange:lookup(ExchangeName) of - {ok, FoundX} -> - FoundX; - {error, not_found} -> - rabbit_exchange:declare(ExchangeName, - CheckedType, - true, - false, - false, - Args, - Username) - end, - try - ok = - rabbit_exchange:assert_equivalence(X, - CheckedType, - true, - false, - false, - Args) - catch - exit:ExitError -> - % likely to be a problem of inequivalent args on an existing stream - rabbit_log:error("Error while creating ~tp super stream exchange: " - "~tp", - [Name, ExitError]), - {error, validation_failed} + XResult = case rabbit_exchange:lookup(ExchangeName) of + {ok, FoundX} -> + {ok, FoundX}; + {error, not_found} -> + rabbit_exchange:declare(ExchangeName, + CheckedType, + true, + false, + false, + Args, + Username) + end, + case XResult of + {ok, X} -> + try + ok = + rabbit_exchange:assert_equivalence(X, + CheckedType, + true, + false, + false, + Args) + catch + exit:ExitError -> + % likely to be a problem of inequivalent args on an existing stream + rabbit_log:error("Error while creating ~tp super stream exchange: " + "~tp", + [Name, ExitError]), + {error, validation_failed} + end; + {error, timeout} = Err -> + Err end; error -> {error, validation_failed} @@ -758,15 +809,15 @@ declare_super_stream_exchange(VirtualHost, Name, Username) -> add_super_stream_bindings(VirtualHost, Name, Partitions, - RoutingKeys, + BindingKeys, Username) -> - PartitionsRoutingKeys = lists:zip(Partitions, RoutingKeys), + PartitionsBindingKeys = lists:zip(Partitions, BindingKeys), BindingsResult = - lists:foldl(fun ({Partition, RoutingKey}, {ok, Order}) -> + lists:foldl(fun ({Partition, BindingKey}, {ok, Order}) -> case add_super_stream_binding(VirtualHost, Name, Partition, - RoutingKey, + BindingKey, Order, Username) of @@ -778,7 +829,7 @@ add_super_stream_bindings(VirtualHost, (_, {{error, _Reason}, _Order} = Acc) -> Acc end, - {ok, 0}, PartitionsRoutingKeys), + {ok, 0}, PartitionsBindingKeys), case BindingsResult of {ok, _} -> ok; @@ -789,7 +840,7 @@ add_super_stream_bindings(VirtualHost, add_super_stream_binding(VirtualHost, SuperStream, Partition, - RoutingKey, + BindingKey, Order, Username) -> {ok, ExchangeNameBin} = @@ -806,7 +857,7 @@ add_super_stream_binding(VirtualHost, Order), case rabbit_binding:add(#binding{source = ExchangeName, destination = QueueName, - key = RoutingKey, + key = BindingKey, args = Arguments}, fun (_X, Q) when ?is_amqqueue(Q) -> try @@ -833,6 +884,8 @@ add_super_stream_binding(VirtualHost, {error, {binding_invalid, rabbit_misc:format(Fmt, Args)}}; {error, #amqp_error{} = Error} -> {error, {internal_error, rabbit_misc:format("~tp", [Error])}}; + {error, timeout} -> + {error, {internal_error, "failed to add binding due to a timeout"}}; ok -> ok end. 
@@ -841,11 +894,12 @@ delete_super_stream_exchange(VirtualHost, Name, Username) -> case rabbit_stream_utils:enforce_correct_name(Name) of {ok, CorrectName} -> ExchangeName = rabbit_misc:r(VirtualHost, exchange, CorrectName), - case rabbit_exchange:delete(ExchangeName, false, Username) of - {error, not_found} -> - ok; + case rabbit_exchange:ensure_deleted( + ExchangeName, false, Username) of ok -> - ok + ok; + {error, timeout} = Err -> + Err end; error -> {error, validation_failed} diff --git a/deps/rabbitmq_stream/src/rabbit_stream_metrics.erl b/deps/rabbitmq_stream/src/rabbit_stream_metrics.erl index 73dd1535d01a..c52d2353bb3b 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream_metrics.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream_metrics.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_metrics). diff --git a/deps/rabbitmq_stream/src/rabbit_stream_metrics_gc.erl b/deps/rabbitmq_stream/src/rabbit_stream_metrics_gc.erl index e5f688b3e8ab..e36d735f4a59 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream_metrics_gc.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream_metrics_gc.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_metrics_gc). diff --git a/deps/rabbitmq_stream/src/rabbit_stream_reader.erl b/deps/rabbitmq_stream/src/rabbit_stream_reader.erl index bb53e5c70efe..d736b35212fd 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream_reader.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream_reader.erl @@ -9,100 +9,21 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2020-2024 Broadcom. All Rights Reserved. +%% The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_reader). --behaviour(gen_statem). +-feature(maybe_expr, enable). --include_lib("rabbit_common/include/rabbit.hrl"). --include_lib("rabbitmq_stream_common/include/rabbit_stream.hrl"). +-behaviour(gen_statem). +-include("rabbit_stream_reader.hrl"). -include("rabbit_stream_metrics.hrl"). --type stream() :: binary(). --type publisher_id() :: byte(). --type publisher_reference() :: binary(). --type subscription_id() :: byte(). - --record(publisher, - {publisher_id :: publisher_id(), - stream :: stream(), - reference :: undefined | publisher_reference(), - leader :: pid(), - message_counters :: atomics:atomics_ref()}). --record(consumer_configuration, - {socket :: rabbit_net:socket(), %% ranch_transport:socket(), - member_pid :: pid(), - subscription_id :: subscription_id(), - stream :: stream(), - offset :: osiris:offset(), - counters :: atomics:atomics_ref(), - properties :: map(), - active :: boolean()}). 
--record(consumer, - {configuration :: #consumer_configuration{}, - credit :: non_neg_integer(), - send_limit :: non_neg_integer(), - log :: undefined | osiris_log:state(), - last_listener_offset = undefined :: undefined | osiris:offset()}). --record(request, - {start :: integer(), - content :: term()}). --record(stream_connection_state, - {data :: rabbit_stream_core:state(), blocked :: boolean(), - consumers :: #{subscription_id() => #consumer{}}}). --record(stream_connection, - {name :: binary(), - %% server host - host, - %% client host - peer_host, - %% server port - port, - %% client port - peer_port, - auth_mechanism, - authentication_state :: any(), - connected_at :: integer(), - helper_sup :: pid(), - socket :: rabbit_net:socket(), - publishers :: - #{publisher_id() => - #publisher{}}, %% FIXME replace with a list (0-255 lookup faster?) - publisher_to_ids :: - #{{stream(), publisher_reference()} => publisher_id()}, - stream_leaders :: #{stream() => pid()}, - stream_subscriptions :: #{stream() => [subscription_id()]}, - credits :: atomics:atomics_ref(), - user :: undefined | #user{}, - virtual_host :: undefined | binary(), - connection_step :: - atom(), % tcp_connected, peer_properties_exchanged, authenticating, authenticated, tuning, tuned, opened, failure, closing, closing_done - frame_max :: integer(), - heartbeat :: undefined | integer(), - heartbeater :: any(), - client_properties = #{} :: #{binary() => binary()}, - monitors = #{} :: #{reference() => stream()}, - stats_timer :: undefined | rabbit_event:state(), - resource_alarm :: boolean(), - send_file_oct :: - atomics:atomics_ref(), % number of bytes sent with send_file (for metrics) - transport :: tcp | ssl, - proxy_socket :: undefined | ranch_transport:socket(), - correlation_id_sequence :: integer(), - outstanding_requests :: #{integer() => #request{}}, - deliver_version :: rabbit_stream_core:command_version(), - request_timeout :: pos_integer(), - outstanding_requests_timer :: undefined | erlang:reference(), - filtering_supported :: boolean()}). --record(configuration, - {initial_credits :: integer(), - credits_required_for_unblocking :: integer(), - frame_max :: integer(), - heartbeat :: integer(), - connection_negotiation_step_timeout :: integer()}). +-include_lib("rabbitmq_stream_common/include/rabbit_stream.hrl"). + -record(statem_data, {transport :: module(), connection :: #stream_connection{}, @@ -158,6 +79,7 @@ peer_cert_subject, peer_cert_validity]). -define(UNKNOWN_FIELD, unknown_field). +-define(SILENT_CLOSE_DELAY, 3_000). %% client API -export([start_link/4, @@ -178,10 +100,10 @@ tuned/3, open/3, close_sent/3]). - - %% not called by gen_statem since gen_statem:enter_loop/4 is used - - %% states +-ifdef(TEST). +-export([ensure_token_expiry_timer/2, + evaluate_state_after_secret_update/4]). +-endif. callback_mode() -> [state_functions, state_enter]. @@ -214,10 +136,13 @@ init([KeepaliveSup, heartbeat := Heartbeat, transport := ConnTransport}]) -> process_flag(trap_exit, true), - {ok, Sock} = - rabbit_networking:handshake(Ref, - application:get_env(rabbitmq_stream, - proxy_protocol, false)), + ProxyProtocolEnabled = + application:get_env(rabbitmq_stream, proxy_protocol, false), + %% Note: + %% This function could return an error if the handshake times out. + %% It is less likely to happen here as compared to MQTT, so + %% crashing with a `badmatch` seems appropriate. 
+ {ok, Sock} = rabbit_networking:handshake(Ref, ProxyProtocolEnabled), RealSocket = rabbit_net:unwrap_socket(Sock), case rabbit_net:connection_string(Sock, inbound) of {ok, ConnStr} -> @@ -258,8 +183,7 @@ init([KeepaliveSup, correlation_id_sequence = 0, outstanding_requests = #{}, request_timeout = RequestTimeout, - deliver_version = DeliverVersion, - filtering_supported = rabbit_stream_utils:filtering_supported()}, + deliver_version = DeliverVersion}, State = #stream_connection_state{consumers = #{}, blocked = false, @@ -625,9 +549,6 @@ increase_messages_confirmed(Counters, Count) -> rabbit_global_counters:messages_confirmed(stream, Count), atomics:add(Counters, 2, Count). -increase_messages_errored(Counters, Count) -> - atomics:add(Counters, 3, Count). - messages_consumed(Counters) -> atomics:get(Counters, 1). @@ -790,19 +711,6 @@ open(info, {OK, S, Data}, StatemData#statem_data{connection = Connection1, connection_state = State2}} end; -open(info, - {sac, {{subscription_id, SubId}, - {active, Active}, {extra, Extra}}}, - State) -> - Msg0 = #{subscription_id => SubId, - active => Active}, - Msg1 = case Extra of - [{stepping_down, true}] -> - Msg0#{stepping_down => true}; - _ -> - Msg0 - end, - open(info, {sac, Msg1}, State); open(info, {sac, #{subscription_id := SubId, active := Active} = Msg}, @@ -912,10 +820,10 @@ open(info, {'DOWN', MonitorRef, process, _OsirisPid, _Reason}, StatemData) -> {Connection1, State1} = case Monitors of - #{MonitorRef := Stream} -> + #{MonitorRef := {MemberPid, Stream}} -> Monitors1 = maps:remove(MonitorRef, Monitors), C = Connection#stream_connection{monitors = Monitors1}, - case clean_state_after_stream_deletion_or_failure(Stream, C, + case clean_state_after_stream_deletion_or_failure(MemberPid, Stream, C, State) of {cleaned, NewConnection, NewState} -> @@ -997,6 +905,11 @@ open(info, check_outstanding_requests, ), {keep_state, StatemData#statem_data{connection = Connection1}} end; +open(info, token_expired, #statem_data{connection = Connection}) -> + _ = demonitor_all_streams(Connection), + rabbit_log_connection:info("Forcing stream connection ~tp closing because token expired", + [self()]), + {stop, {shutdown, <<"Token expired">>}}; open(info, {shutdown, Explanation} = Reason, #statem_data{connection = Connection}) -> %% rabbitmq_management or rabbitmq_stream_management plugin @@ -1038,16 +951,17 @@ open(cast, config = Configuration} = StatemData) -> ByPublisher = - lists:foldr(fun({PublisherId, PublishingId}, Acc) -> - case maps:is_key(PublisherId, Publishers) of - true -> + lists:foldr(fun({PublisherId, InternalId, PublishingId}, Acc) -> + case Publishers of + #{PublisherId := #publisher{internal_id = InternalId}} -> case maps:get(PublisherId, Acc, undefined) of undefined -> Acc#{PublisherId => [PublishingId]}; Ids -> Acc#{PublisherId => [PublishingId | Ids]} end; - false -> Acc + _ -> + Acc end end, #{}, CorrelationList), @@ -1412,6 +1326,7 @@ handle_frame_pre_auth(Transport, stream), auth_fail(Username, Msg, Args, C1, State), rabbit_log_connection:warning(Msg, Args), + silent_close_delay(), {C1#stream_connection{connection_step = failure}, {sasl_authenticate, ?RESPONSE_AUTHENTICATION_FAILURE, <<>>}}; @@ -1461,8 +1376,7 @@ handle_frame_pre_auth(Transport, [Username]), {C1#stream_connection{connection_step = failure}, - {sasl_authenticate, - ?RESPONSE_SASL_AUTHENTICATION_FAILURE_LOOPBACK, + {sasl_authenticate, ?RESPONSE_SASL_AUTHENTICATION_FAILURE_LOOPBACK, <<>>}} end end, @@ -1571,16 +1485,20 @@ handle_frame_pre_auth(Transport, send(Transport, S, 
Frame), %% FIXME check if vhost is alive (see rabbit_reader:is_vhost_alive/2) - Connection#stream_connection{connection_step = opened, - virtual_host = VirtualHost} - catch - exit:_ -> - F = rabbit_stream_core:frame({response, CorrelationId, - {open, - ?RESPONSE_VHOST_ACCESS_FAILURE, - #{}}}), - send(Transport, S, F), - Connection#stream_connection{connection_step = failure} + + {_, Conn} = ensure_token_expiry_timer(User, + Connection#stream_connection{connection_step = opened, + virtual_host = VirtualHost}), + Conn + catch exit:#amqp_error{explanation = Explanation} -> + rabbit_log:warning("Opening connection failed: ~ts", [Explanation]), + silent_close_delay(), + F = rabbit_stream_core:frame({response, CorrelationId, + {open, + ?RESPONSE_VHOST_ACCESS_FAILURE, + #{}}}), + send(Transport, S, F), + Connection#stream_connection{connection_step = failure} end, {Connection1, State}; @@ -1643,6 +1561,103 @@ handle_frame_post_auth(Transport, rabbit_global_counters:increase_protocol_counter(stream, ?PRECONDITION_FAILED, 1), {Connection0, State}; + +handle_frame_post_auth(Transport, + #stream_connection{user = #user{username = Username} = _User, + socket = Socket, + host = Host, + auth_mechanism = Auth_Mechanism, + authentication_state = AuthState, + resource_alarm = false} = C1, + S1, + {request, CorrelationId, + {sasl_authenticate, NewMechanism, NewSaslBin}}) -> + rabbit_log:debug("Open frame received sasl_authenticate for username '~ts'", [Username]), + + {Connection1, State1} = + case Auth_Mechanism of + {NewMechanism, AuthMechanism} -> %% Mechanism is the same used during the pre-auth phase + {C2, CmdBody} = + case AuthMechanism:handle_response(NewSaslBin, AuthState) of + {refused, NewUsername, Msg, Args} -> + rabbit_core_metrics:auth_attempt_failed(Host, + NewUsername, + stream), + auth_fail(NewUsername, Msg, Args, C1, S1), + rabbit_log_connection:warning(Msg, Args), + {C1#stream_connection{connection_step = failure}, + {sasl_authenticate, + ?RESPONSE_AUTHENTICATION_FAILURE, <<>>}}; + {protocol_error, Msg, Args} -> + rabbit_core_metrics:auth_attempt_failed(Host, + <<>>, + stream), + notify_auth_result(none, + user_authentication_failure, + [{error, + rabbit_misc:format(Msg, + Args)}], + C1, + S1), + rabbit_log_connection:warning(Msg, Args), + {C1#stream_connection{connection_step = failure}, + {sasl_authenticate, ?RESPONSE_SASL_ERROR, <<>>}}; + {challenge, Challenge, AuthState1} -> + {C1#stream_connection{authentication_state = AuthState1, + connection_step = authenticating}, + {sasl_authenticate, ?RESPONSE_SASL_CHALLENGE, + Challenge}}; + {ok, NewUser = #user{username = NewUsername}} -> + case NewUsername of + Username -> + rabbit_core_metrics:auth_attempt_succeeded(Host, + Username, + stream), + notify_auth_result(Username, + user_authentication_success, + [], + C1, + S1), + rabbit_log:debug("Successfully updated secret for username '~ts'", [Username]), + {C1#stream_connection{user = NewUser, + authentication_state = done, + connection_step = authenticated}, + {sasl_authenticate, ?RESPONSE_CODE_OK, + <<>>}}; + _ -> + rabbit_core_metrics:auth_attempt_failed(Host, + Username, + stream), + rabbit_log_connection:warning("Not allowed to change username '~ts'. 
Only password", + [Username]), + {C1#stream_connection{connection_step = + failure}, + {sasl_authenticate, + ?RESPONSE_SASL_CANNOT_CHANGE_USERNAME, + <<>>}} + end + end, + Frame = + rabbit_stream_core:frame({response, CorrelationId, + CmdBody}), + send(Transport, Socket, Frame), + case CmdBody of + {sasl_authenticate, ?RESPONSE_CODE_OK, _} -> + #stream_connection{user = NewUsr} = C2, + evaluate_state_after_secret_update(Transport, NewUsr, C2, S1); + _ -> + {C2, S1} + end; + {OtherMechanism, _} -> + rabbit_log_connection:warning("User '~ts' cannot change initial auth mechanism '~ts' for '~ts'", + [Username, NewMechanism, OtherMechanism]), + CmdBody = + {sasl_authenticate, ?RESPONSE_SASL_CANNOT_CHANGE_MECHANISM, <<>>}, + Frame = rabbit_stream_core:frame({response, CorrelationId, CmdBody}), + send(Transport, Socket, Frame), + {C1#stream_connection{connection_step = failure}, S1} + end, + {Connection1, State1}; handle_frame_post_auth(Transport, #stream_connection{user = User, publishers = Publishers0, @@ -1654,7 +1669,7 @@ handle_frame_post_auth(Transport, {declare_publisher, PublisherId, WriterRef, Stream}}) -> case rabbit_stream_utils:check_write_permitted(stream_r(Stream, Connection0), - User, #{}) + User) of ok -> case {maps:is_key(PublisherId, Publishers0), @@ -1684,7 +1699,8 @@ handle_frame_post_auth(Transport, {Connection0, State}; {ClusterLeader, #stream_connection{publishers = Publishers0, - publisher_to_ids = RefIds0} = + publisher_to_ids = RefIds0, + internal_sequence = InternalSequence} = Connection1} -> {PublisherReference, RefIds1} = case WriterRef of @@ -1702,7 +1718,8 @@ handle_frame_post_auth(Transport, leader = ClusterLeader, message_counters = atomics:new(3, - [{signed, false}])}, + [{signed, false}]), + internal_id = InternalSequence}, response(Transport, Connection0, declare_publisher, @@ -1713,15 +1730,18 @@ handle_frame_post_auth(Transport, Connection1), PublisherId, PublisherReference), - {Connection1#stream_connection{publishers = - Publishers0#{PublisherId - => - Publisher}, - publisher_to_ids = - RefIds1}, + {Connection1#stream_connection{ + publishers = Publishers0#{PublisherId => Publisher}, + publisher_to_ids = RefIds1, + internal_sequence = InternalSequence + 1}, State} end; - {_, _} -> + {PublisherIdTaken, ReferenceTaken} -> + rabbit_log:warning("Error while declaring publisher ~tp for stream '~ts', " + "with reference '~ts'. ID already taken: ~tp. 
" + "Reference already taken: ~tp.", + [PublisherId, Stream, WriterRef, + PublisherIdTaken, ReferenceTaken]), response(Transport, Connection0, declare_publisher, @@ -1749,31 +1769,6 @@ handle_frame_post_auth(Transport, {publish, PublisherId, MessageCount, Messages}) -> handle_frame_post_auth(Transport, Connection, State, {publish, ?VERSION_1, PublisherId, MessageCount, Messages}); -handle_frame_post_auth(Transport, - #stream_connection{filtering_supported = false, - publishers = Publishers, - socket = S} = Connection, - State, - {publish_v2, PublisherId, MessageCount, Messages}) -> - case Publishers of - #{PublisherId := #publisher{message_counters = Counters}} -> - increase_messages_received(Counters, MessageCount), - increase_messages_errored(Counters, MessageCount), - ok; - _ -> - ok - end, - rabbit_global_counters:increase_protocol_counter(stream, - ?PRECONDITION_FAILED, - 1), - PublishingIds = publishing_ids_from_messages(?VERSION_2, Messages), - Command = {publish_error, - PublisherId, - ?RESPONSE_CODE_PRECONDITION_FAILED, - PublishingIds}, - Frame = rabbit_stream_core:frame(Command), - send(Transport, S, Frame), - {Connection, State}; handle_frame_post_auth(Transport, Connection, State, @@ -1783,51 +1778,25 @@ handle_frame_post_auth(Transport, handle_frame_post_auth(Transport, #stream_connection{socket = S, credits = Credits, - virtual_host = VirtualHost, - user = User, publishers = Publishers} = Connection, State, {publish, Version, PublisherId, MessageCount, Messages}) -> case Publishers of #{PublisherId := Publisher} -> - #publisher{stream = Stream, - reference = Reference, + #publisher{reference = Reference, + internal_id = InternalId, leader = Leader, message_counters = Counters} = Publisher, - increase_messages_received(Counters, MessageCount), - case rabbit_stream_utils:check_write_permitted(#resource{name = - Stream, - kind = - queue, - virtual_host - = - VirtualHost}, - User, #{}) - of - ok -> - rabbit_stream_utils:write_messages(Version, Leader, - Reference, - PublisherId, - Messages), - sub_credits(Credits, MessageCount), - {Connection, State}; - error -> - PublishingIds = publishing_ids_from_messages(Version, Messages), - Command = - {publish_error, - PublisherId, - ?RESPONSE_CODE_ACCESS_REFUSED, - PublishingIds}, - Frame = rabbit_stream_core:frame(Command), - send(Transport, S, Frame), - rabbit_global_counters:increase_protocol_counter(stream, - ?ACCESS_REFUSED, - 1), - increase_messages_errored(Counters, MessageCount), - {Connection, State} - end; + increase_messages_received(Counters, MessageCount), + rabbit_stream_utils:write_messages(Version, Leader, + Reference, + PublisherId, + InternalId, + Messages), + sub_credits(Credits, MessageCount), + {Connection, State}; _ -> PublishingIds = publishing_ids_from_messages(Version, Messages), Command = @@ -1893,7 +1862,7 @@ handle_frame_post_auth(Transport, {request, CorrelationId, {delete_publisher, PublisherId}}) -> case Publishers of - #{PublisherId := #publisher{stream = Stream, reference = Ref}} -> + #{PublisherId := #publisher{stream = Stream, reference = Ref, leader = LeaderPid}} -> Connection1 = Connection0#stream_connection{publishers = maps:remove(PublisherId, @@ -1902,7 +1871,7 @@ handle_frame_post_auth(Transport, maps:remove({Stream, Ref}, PubToIds)}, Connection2 = - maybe_clean_connection_from_stream(Stream, Connection1), + maybe_clean_connection_from_stream(LeaderPid, Stream, Connection1), response(Transport, Connection1, delete_publisher, @@ -1924,29 +1893,6 @@ handle_frame_post_auth(Transport, 1), 
{Connection0, State} end; -handle_frame_post_auth(Transport, - #stream_connection{filtering_supported = false} = Connection, - State, - {request, CorrelationId, - {subscribe, - SubscriptionId, _, _, _, Properties}} = Request) -> - case rabbit_stream_utils:filter_defined(Properties) of - true -> - rabbit_log:warning("Cannot create subcription ~tp, it defines a filter " - "and filtering is not active", - [SubscriptionId]), - response(Transport, - Connection, - subscribe, - CorrelationId, - ?RESPONSE_CODE_PRECONDITION_FAILED), - rabbit_global_counters:increase_protocol_counter(stream, - ?PRECONDITION_FAILED, - 1), - {Connection, State}; - false -> - handle_frame_post_auth(Transport, {ok, Connection}, State, Request) - end; handle_frame_post_auth(Transport, #stream_connection{} = Connection, State, {request, _, {subscribe, @@ -2201,36 +2147,25 @@ handle_frame_post_auth(Transport, {Connection, State} end; handle_frame_post_auth(_Transport, - #stream_connection{virtual_host = VirtualHost, - user = User} = - Connection, + #stream_connection{stream_subscriptions = Subscriptions, + user = User} = Connection0, State, {store_offset, Reference, Stream, Offset}) -> - case rabbit_stream_utils:check_write_permitted(#resource{name = - Stream, - kind = queue, - virtual_host = - VirtualHost}, - User, #{}) - of - ok -> - case lookup_leader(Stream, Connection) of - {error, Error} -> - rabbit_log:warning("Could not find leader to store offset on ~tp: " - "~tp", - [Stream, Error]), - %% FIXME store offset is fire-and-forget, so no response even if error, change this? - {Connection, State}; - {ClusterLeader, Connection1} -> - osiris:write_tracking(ClusterLeader, Reference, Offset), - {Connection1, State} - end; - error -> - %% FIXME store offset is fire-and-forget, so no response even if error, change this? 
- rabbit_log:warning("Not authorized to store offset on stream ~tp", - [Stream]), - {Connection, State} - end; + Connection1 = + case Subscriptions of + #{Stream := _} -> + store_offset(Reference, Stream, Offset, Connection0); + _ -> + case rabbit_stream_utils:check_read_permitted(stream_r(Stream, Connection0), User, #{}) of + ok -> + store_offset(Reference, Stream, Offset, Connection0); + _ -> + rabbit_log:warning("Not authorized to store offset on stream ~tp", + [Stream]), + Connection0 + end + end, + {Connection1, State}; handle_frame_post_auth(Transport, #stream_connection{socket = S, virtual_host = VirtualHost, @@ -2305,24 +2240,13 @@ handle_frame_post_auth(Transport, end; handle_frame_post_auth(Transport, #stream_connection{virtual_host = VirtualHost, - user = - #user{username = Username} = - User} = - Connection, + user = #user{username = Username} = User} = Connection, State, {request, CorrelationId, {create_stream, Stream, Arguments}}) -> case rabbit_stream_utils:enforce_correct_name(Stream) of {ok, StreamName} -> - case rabbit_stream_utils:check_configure_permitted(#resource{name = - StreamName, - kind = - queue, - virtual_host - = - VirtualHost}, - User, #{}) - of + case rabbit_stream_utils:check_configure_permitted(stream_r(StreamName, Connection), User) of ok -> case rabbit_stream_manager:create(VirtualHost, StreamName, @@ -2396,19 +2320,10 @@ handle_frame_post_auth(Transport, handle_frame_post_auth(Transport, #stream_connection{socket = S, virtual_host = VirtualHost, - user = - #user{username = Username} = - User} = - Connection, + user = #user{username = Username} = User} = Connection, State, {request, CorrelationId, {delete_stream, Stream}}) -> - case rabbit_stream_utils:check_configure_permitted(#resource{name = - Stream, - kind = queue, - virtual_host = - VirtualHost}, - User, #{}) - of + case rabbit_stream_utils:check_configure_permitted(stream_r(Stream, Connection), User) of ok -> case rabbit_stream_manager:delete(VirtualHost, Stream, Username) of {ok, deleted} -> @@ -2418,7 +2333,7 @@ handle_frame_post_auth(Transport, CorrelationId), {Connection1, State1} = case - clean_state_after_stream_deletion_or_failure(Stream, + clean_state_after_stream_deletion_or_failure(undefined, Stream, Connection, State) of @@ -2824,6 +2739,154 @@ handle_frame_post_auth(Transport, Frame = rabbit_stream_core:frame({response, CorrelationId, Response}), send(Transport, S, Frame), {Connection, State}; +handle_frame_post_auth(Transport, + #stream_connection{virtual_host = VirtualHost, + user = #user{username = Username} = User} = Connection, + State, + {request, CorrelationId, + {create_super_stream, SuperStream, Partitions, BindingKeys, Arguments}}) -> + case rabbit_stream_utils:enforce_correct_name(SuperStream) of + {ok, SuperStreamName} -> + case rabbit_stream_utils:check_super_stream_management_permitted(VirtualHost, + SuperStreamName, + Partitions, + User) of + ok -> + case rabbit_stream_manager:create_super_stream(VirtualHost, + SuperStreamName, + Partitions, + Arguments, + BindingKeys, + Username) of + ok -> + rabbit_log:debug("Created super stream ~tp", [SuperStreamName]), + response_ok(Transport, + Connection, + create_super_stream, + CorrelationId), + {Connection, State}; + {error, {validation_failed, Msg}} -> + rabbit_log:warning("Error while trying to create super stream ~tp: ~tp", + [SuperStreamName, Msg]), + response(Transport, + Connection, + create_super_stream, + CorrelationId, + ?RESPONSE_CODE_PRECONDITION_FAILED), + rabbit_global_counters:increase_protocol_counter(stream, + 
?PRECONDITION_FAILED, + 1), + {Connection, State}; + {error, {reference_already_exists, Msg}} -> + rabbit_log:warning("Error while trying to create super stream ~tp: ~tp", + [SuperStreamName, Msg]), + response(Transport, + Connection, + create_super_stream, + CorrelationId, + ?RESPONSE_CODE_STREAM_ALREADY_EXISTS), + rabbit_global_counters:increase_protocol_counter(stream, + ?STREAM_ALREADY_EXISTS, + 1), + {Connection, State}; + {error, Error} -> + rabbit_log:warning("Error while trying to create super stream ~tp: ~tp", + [SuperStreamName, Error]), + response(Transport, + Connection, + create_super_stream, + CorrelationId, + ?RESPONSE_CODE_INTERNAL_ERROR), + rabbit_global_counters:increase_protocol_counter(stream, + ?INTERNAL_ERROR, + 1), + {Connection, State} + end; + error -> + response(Transport, + Connection, + create_super_stream, + CorrelationId, + ?RESPONSE_CODE_ACCESS_REFUSED), + rabbit_global_counters:increase_protocol_counter(stream, + ?ACCESS_REFUSED, + 1), + {Connection, State} + end; + _ -> + response(Transport, + Connection, + create_super_stream, + CorrelationId, + ?RESPONSE_CODE_PRECONDITION_FAILED), + rabbit_global_counters:increase_protocol_counter(stream, + ?PRECONDITION_FAILED, + 1), + {Connection, State} + end; +handle_frame_post_auth(Transport, + #stream_connection{socket = S, + virtual_host = VirtualHost, + user = #user{username = Username} = User} = Connection, + State, + {request, CorrelationId, {delete_super_stream, SuperStream}}) -> + Partitions = case rabbit_stream_manager:partitions(VirtualHost, SuperStream) of + {ok, Ps} -> + Ps; + _ -> + [] + end, + case rabbit_stream_utils:check_super_stream_management_permitted(VirtualHost, + SuperStream, + Partitions, + User) of + ok -> + case rabbit_stream_manager:delete_super_stream(VirtualHost, SuperStream, Username) of + ok -> + response_ok(Transport, + Connection, + delete_super_stream, + CorrelationId), + {Connection1, State1} = clean_state_after_super_stream_deletion(Partitions, + Connection, + State, + Transport, S), + {Connection1, State1}; + {error, stream_not_found} -> + response(Transport, + Connection, + delete_super_stream, + CorrelationId, + ?RESPONSE_CODE_STREAM_DOES_NOT_EXIST), + rabbit_global_counters:increase_protocol_counter(stream, + ?STREAM_DOES_NOT_EXIST, + 1), + {Connection, State}; + {error, Error} -> + rabbit_log:warning("Error while trying to delete super stream ~tp: ~tp", + [SuperStream, Error]), + response(Transport, + Connection, + delete_super_stream, + CorrelationId, + ?RESPONSE_CODE_PRECONDITION_FAILED), + rabbit_global_counters:increase_protocol_counter(stream, + ?PRECONDITION_FAILED, + 1), + {Connection, State} + + end; + error -> + response(Transport, + Connection, + delete_super_stream, + CorrelationId, + ?RESPONSE_CODE_ACCESS_REFUSED), + rabbit_global_counters:increase_protocol_counter(stream, + ?ACCESS_REFUSED, + 1), + {Connection, State} + end; handle_frame_post_auth(Transport, #stream_connection{socket = S} = Connection, State, @@ -3033,6 +3096,56 @@ request(Content) -> #request{start = erlang:monotonic_time(millisecond), content = Content}. 
+evaluate_state_after_secret_update(Transport, + User, + #stream_connection{socket = Socket, + publishers = Publishers, + stream_subscriptions = Subscriptions} = Conn0, + State0) -> + {_, Conn1} = ensure_token_expiry_timer(User, Conn0), + PublisherStreams = + lists:foldl(fun(#publisher{stream = Str}, Acc) -> + case rabbit_stream_utils:check_write_permitted(stream_r(Str, Conn0), User) of + ok -> + Acc; + _ -> + Acc#{Str => ok} + end + end, #{}, maps:values(Publishers)), + {SubscriptionStreams, Conn2, State1} = + maps:fold(fun(Str, Subs, {Acc, C0, S0}) -> + case rabbit_stream_utils:check_read_permitted(stream_r(Str, Conn0), User, #{}) of + ok -> + {Acc, C0, S0}; + _ -> + {C1, S1} = + lists:foldl(fun(SubId, {Conn, St}) -> + remove_subscription(SubId, Conn, St) + end, {C0, S0}, Subs), + {Acc#{Str => ok}, C1, S1} + end + end, {#{}, Conn1, State0}, Subscriptions), + Streams = maps:merge(PublisherStreams, SubscriptionStreams), + {Conn3, State2} = + case maps:size(Streams) of + 0 -> + {Conn2, State1}; + _ -> + maps:fold(fun(Str, _, {C0, S0}) -> + {_, C1, S1} = clean_state_after_stream_deletion_or_failure( + undefined, Str, C0, S0), + Command = {metadata_update, Str, + ?RESPONSE_CODE_STREAM_NOT_AVAILABLE}, + Frame = rabbit_stream_core:frame(Command), + send(Transport, Socket, Frame), + rabbit_global_counters:increase_protocol_counter(stream, + ?STREAM_NOT_AVAILABLE, + 1), + {C1, S1} + end, {Conn2, State1}, Streams) + end, + {Conn3, State2}. + ensure_outstanding_requests_timer(#stream_connection{ outstanding_requests = Requests, outstanding_requests_timer = undefined @@ -3054,6 +3167,33 @@ ensure_outstanding_requests_timer(#stream_connection{ ensure_outstanding_requests_timer(C) -> C. +ensure_token_expiry_timer(User, #stream_connection{token_expiry_timer = Timer} = Conn) -> + TimerRef = + maybe + rabbit_log:debug("Checking token expiry"), + true ?= rabbit_access_control:permission_cache_can_expire(User), + rabbit_log:debug("Token can expire"), + Ts = rabbit_access_control:expiry_timestamp(User), + rabbit_log:debug("Token expiry timestamp: ~tp", [Ts]), + true ?= is_integer(Ts), + Time = (Ts - os:system_time(second)) * 1000, + rabbit_log:debug("Token expires in ~tp ms, setting timer to close connection", [Time]), + true ?= Time > 0, + erlang:send_after(Time, self(), token_expired) + else + false -> + undefined; + {error, _} -> + undefined + end, + Cancel = case Timer of + undefined -> + ok; + _ -> + erlang:cancel_timer(Timer, [{async, false}, {info, true}]) + end, + {Cancel, Conn#stream_connection{token_expiry_timer = TimerRef}}. + maybe_unregister_consumer(_, _, false = _Sac, Requests) -> Requests; maybe_unregister_consumer(VirtualHost, @@ -3155,7 +3295,28 @@ stream_r(Stream, #stream_connection{virtual_host = VHost}) -> kind = queue, virtual_host = VHost}. -clean_state_after_stream_deletion_or_failure(Stream, +clean_state_after_super_stream_deletion(Partitions, Connection, State, Transport, S) -> + lists:foldl(fun(Partition, {Conn, St}) -> + case + clean_state_after_stream_deletion_or_failure(undefined, Partition, + Conn, + St) + of + {cleaned, NewConnection, NewState} -> + Command = {metadata_update, Partition, + ?RESPONSE_CODE_STREAM_NOT_AVAILABLE}, + Frame = rabbit_stream_core:frame(Command), + send(Transport, S, Frame), + rabbit_global_counters:increase_protocol_counter(stream, + ?STREAM_NOT_AVAILABLE, + 1), + {NewConnection, NewState}; + {not_cleaned, SameConnection, SameState} -> + {SameConnection, SameState} + end + end, {Connection, State}, Partitions). 
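A minimal sketch of the timer arithmetic behind ensure_token_expiry_timer/2 above: the credential's expiry timestamp is turned into a one-shot token_expired message to the connection process. Here the timestamp is passed in directly instead of being read through rabbit_access_control, and the module name is invented for illustration.

%% Sketch: arm a one-shot timer that fires when the token expires.
%% ExpiryTs is a Unix timestamp in seconds.
-module(token_timer_sketch).
-export([arm/1]).

arm(ExpiryTs) when is_integer(ExpiryTs) ->
    MsLeft = (ExpiryTs - os:system_time(second)) * 1000,
    case MsLeft > 0 of
        true ->
            %% the connection process later receives 'token_expired' and shuts down
            {ok, erlang:send_after(MsLeft, self(), token_expired)};
        false ->
            %% already expired; the caller can close immediately
            expired
    end;
arm(_) ->
    %% no integer expiry: the token never expires, no timer needed
    undefined.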
+ +clean_state_after_stream_deletion_or_failure(MemberPid, Stream, #stream_connection{virtual_host = VirtualHost, stream_subscriptions @@ -3180,16 +3341,30 @@ clean_state_after_stream_deletion_or_failure(Stream, #{Stream := SubscriptionIds} = StreamSubscriptions, Requests1 = lists:foldl( fun(SubId, Rqsts0) -> - rabbit_stream_metrics:consumer_cancelled(self(), - stream_r(Stream, - C0), - SubId), #{SubId := Consumer} = Consumers, - Rqsts1 = maybe_unregister_consumer( - VirtualHost, Consumer, - single_active_consumer(Consumer), - Rqsts0), - Rqsts1 + case {MemberPid, Consumer} of + {undefined, _C} -> + rabbit_stream_metrics:consumer_cancelled(self(), + stream_r(Stream, + C0), + SubId), + maybe_unregister_consumer( + VirtualHost, Consumer, + single_active_consumer(Consumer), + Rqsts0); + {MemberPid, #consumer{configuration = + #consumer_configuration{member_pid = MemberPid}}} -> + rabbit_stream_metrics:consumer_cancelled(self(), + stream_r(Stream, + C0), + SubId), + maybe_unregister_consumer( + VirtualHost, Consumer, + single_active_consumer(Consumer), + Rqsts0); + _ -> + Rqsts0 + end end, Requests0, SubscriptionIds), {true, C0#stream_connection{stream_subscriptions = @@ -3208,17 +3383,25 @@ clean_state_after_stream_deletion_or_failure(Stream, {PurgedPubs, PurgedPubToIds} = maps:fold(fun(PubId, #publisher{stream = S, reference = Ref}, - {Pubs, PubToIds}) -> - case S of - Stream -> + {Pubs, PubToIds}) when S =:= Stream andalso MemberPid =:= undefined -> + rabbit_stream_metrics:publisher_deleted(self(), + stream_r(Stream, + C1), + PubId), + {maps:remove(PubId, Pubs), + maps:remove({Stream, Ref}, PubToIds)}; + (PubId, + #publisher{stream = S, reference = Ref, leader = MPid}, + {Pubs, PubToIds}) when S =:= Stream andalso MPid =:= MemberPid -> rabbit_stream_metrics:publisher_deleted(self(), - stream_r(S, + stream_r(Stream, C1), PubId), {maps:remove(PubId, Pubs), maps:remove({Stream, Ref}, PubToIds)}; - _ -> {Pubs, PubToIds} - end + + (_PubId, _Publisher, {Pubs, PubToIds}) -> + {Pubs, PubToIds} end, {Publishers, PublisherToIds}, Publishers), {true, @@ -3240,12 +3423,24 @@ clean_state_after_stream_deletion_or_failure(Stream, orelse LeadersCleaned of true -> - C3 = demonitor_stream(Stream, C2), + C3 = demonitor_stream(MemberPid, Stream, C2), {cleaned, C3#stream_connection{stream_leaders = Leaders1}, S2}; false -> {not_cleaned, C2#stream_connection{stream_leaders = Leaders1}, S2} end. +store_offset(Reference, Stream, Offset, Connection0) -> + case lookup_leader(Stream, Connection0) of + {error, Error} -> + rabbit_log:warning("Could not find leader to store offset on ~tp: " + "~tp", + [Stream, Error]), + Connection0; + {ClusterLeader, Connection1} -> + osiris:write_tracking(ClusterLeader, Reference, Offset), + Connection1 + end. 
+ lookup_leader(Stream, #stream_connection{stream_leaders = StreamLeaders, virtual_host = VirtualHost} = @@ -3279,7 +3474,7 @@ remove_subscription(SubscriptionId, #stream_connection_state{consumers = Consumers} = State) -> #{SubscriptionId := Consumer} = Consumers, #consumer{log = Log, - configuration = #consumer_configuration{stream = Stream}} = + configuration = #consumer_configuration{stream = Stream, member_pid = MemberPid}} = Consumer, rabbit_log:debug("Deleting subscription ~tp (stream ~tp)", [SubscriptionId, Stream]), @@ -3299,7 +3494,7 @@ remove_subscription(SubscriptionId, Connection#stream_connection{stream_subscriptions = StreamSubscriptions1}, Consumers1 = maps:remove(SubscriptionId, Consumers), - Connection2 = maybe_clean_connection_from_stream(Stream, Connection1), + Connection2 = maybe_clean_connection_from_stream(MemberPid, Stream, Connection1), rabbit_stream_metrics:consumer_cancelled(self(), stream_r(Stream, Connection2), SubscriptionId), @@ -3312,7 +3507,7 @@ remove_subscription(SubscriptionId, {Connection2#stream_connection{outstanding_requests = Requests1}, State#stream_connection_state{consumers = Consumers1}}. -maybe_clean_connection_from_stream(Stream, +maybe_clean_connection_from_stream(MemberPid, Stream, #stream_connection{stream_leaders = Leaders} = Connection0) -> @@ -3321,7 +3516,7 @@ maybe_clean_connection_from_stream(Stream, stream_has_subscriptions(Stream, Connection0)} of {false, false} -> - demonitor_stream(Stream, Connection0); + demonitor_stream(MemberPid, Stream, Connection0); _ -> Connection0 end, @@ -3330,26 +3525,27 @@ maybe_clean_connection_from_stream(Stream, maybe_monitor_stream(Pid, Stream, #stream_connection{monitors = Monitors} = Connection) -> - case lists:member(Stream, maps:values(Monitors)) of + case lists:member({Pid, Stream}, maps:values(Monitors)) of true -> Connection; false -> MonitorRef = monitor(process, Pid), Connection#stream_connection{monitors = - maps:put(MonitorRef, Stream, + maps:put(MonitorRef, {Pid, Stream}, Monitors)} end. -demonitor_stream(Stream, +demonitor_stream(MemberPid, Stream, #stream_connection{monitors = Monitors0} = Connection) -> Monitors = - maps:fold(fun(MonitorRef, Strm, Acc) -> - case Strm of - Stream -> - demonitor(MonitorRef, [flush]), + maps:fold(fun(MonitorRef, {MPid, Strm}, Acc) when MPid =:= MemberPid andalso Strm =:= Stream -> + demonitor(MonitorRef, [flush]), Acc; - _ -> maps:put(MonitorRef, Strm, Acc) - end + (MonitorRef, {_MPid, Strm}, Acc) when MemberPid =:= undefined andalso Strm =:= Stream -> + demonitor(MonitorRef, [flush]), + Acc; + (MonitorRef, {MPid, Strm}, Acc) -> + maps:put(MonitorRef, {MPid, Strm}, Acc) end, #{}, Monitors0), Connection#stream_connection{monitors = Monitors}. @@ -3635,7 +3831,7 @@ in_vhost(Pid, VHost) -> end. consumers_info(Pid, InfoItems) -> - gen_server2:call(Pid, {consumers_info, InfoItems}). + gen_statem:call(Pid, {consumers_info, InfoItems}). 
consumers_infos(Items, #stream_connection_state{consumers = Consumers}) -> @@ -3653,18 +3849,25 @@ consumer_i(messages_consumed, #consumer_configuration{counters = Counters}}) -> messages_consumed(Counters); consumer_i(offset, - #consumer{configuration = - #consumer_configuration{counters = Counters}}) -> - consumer_offset(Counters); + #consumer{configuration = #consumer_configuration{counters = Counters}, + last_listener_offset = LLO}) -> + rabbit_stream_utils:consumer_offset(consumer_offset(Counters), + messages_consumed(Counters), + LLO); consumer_i(offset_lag, #consumer{log = undefined}) -> 0; consumer_i(offset_lag, - #consumer{configuration = - #consumer_configuration{counters = Counters}, + #consumer{configuration = #consumer_configuration{counters = Counters}, + last_listener_offset = LLO, log = Log}) -> - stream_stored_offset(Log) - consumer_offset(Counters); + rabbit_stream_utils:offset_lag(stream_stored_offset(Log), + consumer_offset(Counters), + messages_consumed(Counters), + LLO); consumer_i(connection_pid, _) -> self(); +consumer_i(node, _) -> + node(); consumer_i(properties, #consumer{configuration = #consumer_configuration{properties = Properties}}) -> @@ -3686,7 +3889,7 @@ consumer_i(_Unknown, _) -> ?UNKNOWN_FIELD. publishers_info(Pid, InfoItems) -> - gen_server2:call(Pid, {publishers_info, InfoItems}). + gen_statem:call(Pid, {publishers_info, InfoItems}). publishers_infos(Items, #stream_connection{publishers = Publishers}) -> @@ -3697,6 +3900,8 @@ publisher_i(stream, #publisher{stream = S}) -> S; publisher_i(connection_pid, _) -> self(); +publisher_i(node, _) -> + node(); publisher_i(publisher_id, #publisher{publisher_id = Id}) -> Id; publisher_i(reference, #publisher{reference = undefined}) -> @@ -3716,7 +3921,7 @@ publisher_i(_Unknow, _) -> ?UNKNOWN_FIELD. info(Pid, InfoItems) -> - gen_server2:call(Pid, {info, InfoItems}, infinity). + gen_statem:call(Pid, {info, InfoItems}, infinity). infos(Items, Connection, State) -> [{Item, i(Item, Connection, State)} || Item <- Items]. @@ -3840,3 +4045,7 @@ stream_from_consumers(SubId, Consumers) -> undefined end. +%% We don't trust the client at this point - force them to wait +%% for a bit so they can't DOS us with repeated failed logins etc. +silent_close_delay() -> + timer:sleep(?SILENT_CLOSE_DELAY). diff --git a/deps/rabbitmq_stream/src/rabbit_stream_reader.hrl b/deps/rabbitmq_stream/src/rabbit_stream_reader.hrl new file mode 100644 index 000000000000..0c1bc2dcc683 --- /dev/null +++ b/deps/rabbitmq_stream/src/rabbit_stream_reader.hrl @@ -0,0 +1,104 @@ +%% The contents of this file are subject to the Mozilla Public License +%% at https://www.mozilla.org/en-US/MPL/2.0/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is Pivotal Software, Inc. +%% Copyright (c) 2020-2024 Broadcom. All Rights Reserved. +%% The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-include_lib("rabbit_common/include/rabbit.hrl"). + +-type stream() :: binary(). +-type publisher_id() :: byte(). +-type publisher_reference() :: binary(). +-type subscription_id() :: byte(). +-type internal_id() :: integer(). 
+ +-record(publisher, + {publisher_id :: publisher_id(), + stream :: stream(), + reference :: undefined | publisher_reference(), + leader :: pid(), + %% We do not use atomics here for concurrent access. Instead, we use atomics + %% to reduce memory copy overhead for record fields that change often. + message_counters :: atomics:atomics_ref(), + %% use to distinguish a stale publisher from a live publisher with the same ID + %% used only for publishers without a reference (dedup off) + internal_id :: internal_id()}). +-record(consumer_configuration, + {socket :: rabbit_net:socket(), %% ranch_transport:socket(), + member_pid :: pid(), + subscription_id :: subscription_id(), + stream :: stream(), + offset :: osiris:offset(), + counters :: atomics:atomics_ref(), + properties :: map(), + active :: boolean()}). +-record(consumer, + {configuration :: #consumer_configuration{}, + credit :: non_neg_integer(), + send_limit :: non_neg_integer(), + log = undefined :: undefined | osiris_log:state(), + last_listener_offset = undefined :: undefined | osiris:offset()}). +-record(request, + {start :: integer(), + content :: term()}). +-record(stream_connection_state, + {data :: rabbit_stream_core:state(), blocked :: boolean(), + consumers :: #{subscription_id() => #consumer{}}}). +-record(stream_connection, + {name :: binary(), + %% server host + host, + %% client host + peer_host, + %% server port + port, + %% client port + peer_port, + auth_mechanism, + authentication_state :: any(), + connected_at :: integer(), + helper_sup :: pid(), + socket :: rabbit_net:socket(), + publishers = #{} :: #{publisher_id() => #publisher{}}, + publisher_to_ids = #{} :: #{{stream(), publisher_reference()} => publisher_id()}, + stream_leaders = #{} :: #{stream() => pid()}, + stream_subscriptions = #{} :: #{stream() => [subscription_id()]}, + credits :: atomics:atomics_ref(), + user :: undefined | #user{}, + virtual_host :: undefined | binary(), + connection_step :: + atom(), % tcp_connected, peer_properties_exchanged, authenticating, authenticated, tuning, tuned, opened, failure, closing, closing_done + frame_max :: integer(), + heartbeat :: undefined | integer(), + heartbeater :: any(), + client_properties = #{} :: #{binary() => binary()}, + monitors = #{} :: #{reference() => {pid(), stream()}}, + stats_timer :: undefined | rabbit_event:state(), + resource_alarm :: boolean(), + send_file_oct :: + atomics:atomics_ref(), % number of bytes sent with send_file (for metrics) + transport :: tcp | ssl, + proxy_socket :: undefined | ranch_transport:socket(), + correlation_id_sequence :: integer(), + outstanding_requests :: #{integer() => #request{}}, + deliver_version :: rabbit_stream_core:command_version(), + request_timeout :: pos_integer(), + outstanding_requests_timer :: undefined | erlang:reference(), + %% internal sequence used for publishers + internal_sequence = 0 :: integer(), + token_expiry_timer = undefined :: undefined | erlang:reference()}). +-record(configuration, + {initial_credits :: integer(), + credits_required_for_unblocking :: integer(), + frame_max :: integer(), + heartbeat :: integer(), + connection_negotiation_step_timeout :: integer()}). diff --git a/deps/rabbitmq_stream/src/rabbit_stream_sup.erl b/deps/rabbitmq_stream/src/rabbit_stream_sup.erl index ff917a6fa8c9..f94f7165be7f 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream_sup.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream_sup.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. 
%% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_sup). @@ -44,12 +44,7 @@ init([]) -> _ -> {rabbit_networking:ensure_ssl(), application:get_env(rabbitmq_stream, num_ssl_acceptors, 10), - case rabbit_networking:poodle_check('STREAM') of - ok -> - SslListeners0; - danger -> - [] - end} + SslListeners0} end, Nodes = rabbit_nodes:list_members(), diff --git a/deps/rabbitmq_stream/src/rabbit_stream_utils.erl b/deps/rabbitmq_stream/src/rabbit_stream_utils.erl index 8cb3731781c0..6f9c2edef5d2 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream_utils.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream_utils.erl @@ -11,19 +11,21 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_utils). +-feature(maybe_expr, enable). + %% API -export([enforce_correct_name/1, - write_messages/5, + write_messages/6, parse_map/2, auth_mechanisms/1, auth_mechanism_to_module/2, - check_configure_permitted/3, - check_write_permitted/3, + check_configure_permitted/2, + check_write_permitted/2, check_read_permitted/3, extract_stream_list/2, sort_partitions/1, @@ -32,9 +34,9 @@ filter_defined/1, filter_spec/1, command_versions/0, - filtering_supported/0]). - --define(MAX_PERMISSION_CACHE_SIZE, 12). + check_super_stream_management_permitted/4, + offset_lag/4, + consumer_offset/3]). -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbitmq_stream_common/include/rabbit_stream.hrl"). @@ -57,57 +59,23 @@ check_name(<<"">>) -> check_name(_Name) -> ok. 
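The -feature(maybe_expr, enable) directive added above turns on the OTP 25+ maybe expression, which check_super_stream_management_permitted/4 further down relies on to chain permission checks and stop at the first failure. A small illustrative shape (check_all/1 is hypothetical, not part of the diff):

check_all(Check) ->
    maybe
        %% each ?= match short-circuits the block on anything other than ok,
        %% and that value (here: error) becomes the result of the maybe block
        ok ?= Check(configure),
        ok ?= Check(write),
        Check(read)
    end.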
-write_messages(_Version, _ClusterLeader, _PublisherRef, _PublisherId, <<>>) -> +write_messages(_Version, _ClusterLeader, _PublisherRef, _PublisherId, _InternalId, <<>>) -> ok; -write_messages(?VERSION_1 = V, ClusterLeader, - undefined, - PublisherId, - <>) -> - ok = - osiris:write(ClusterLeader, - undefined, - {PublisherId, PublishingId}, - Message), - write_messages(V, ClusterLeader, undefined, PublisherId, Rest); -write_messages(?VERSION_1 = V, ClusterLeader, - undefined, - PublisherId, - <>) -> - ok = - osiris:write(ClusterLeader, - undefined, - {PublisherId, PublishingId}, - {batch, - MessageCount, - CompressionType, - UncompressedSize, - Batch}), - write_messages(V, ClusterLeader, undefined, PublisherId, Rest); write_messages(?VERSION_1 = V, ClusterLeader, PublisherRef, PublisherId, + InternalId, <>) -> - ok = osiris:write(ClusterLeader, PublisherRef, PublishingId, Message), - write_messages(V, ClusterLeader, PublisherRef, PublisherId, Rest); + write_messages0(V, ClusterLeader, PublisherRef, PublisherId, InternalId, + PublishingId, Message, Rest); write_messages(?VERSION_1 = V, ClusterLeader, PublisherRef, PublisherId, + InternalId, <>) -> - ok = - osiris:write(ClusterLeader, - PublisherRef, - PublishingId, - {batch, - MessageCount, - CompressionType, - UncompressedSize, - Batch}), - write_messages(V, ClusterLeader, PublisherRef, PublisherId, Rest); -write_messages(?VERSION_2 = V, ClusterLeader, - undefined, - PublisherId, - <>) -> - ok = - osiris:write(ClusterLeader, - undefined, - {PublisherId, PublishingId}, - Message), - write_messages(V, ClusterLeader, undefined, PublisherId, Rest); -write_messages(?VERSION_2 = V, ClusterLeader, - undefined, - PublisherId, - <>) -> - ok = - osiris:write(ClusterLeader, - undefined, - {PublisherId, PublishingId}, - {FilterValue, Message}), - write_messages(V, ClusterLeader, undefined, PublisherId, Rest); + Data = {batch, MessageCount, CompressionType, UncompressedSize, Batch}, + write_messages0(V, ClusterLeader, PublisherRef, PublisherId, InternalId, + PublishingId, Data, Rest); write_messages(?VERSION_2 = V, ClusterLeader, PublisherRef, PublisherId, + InternalId, <>) -> - ok = osiris:write(ClusterLeader, PublisherRef, PublishingId, Message), - write_messages(V, ClusterLeader, PublisherRef, PublisherId, Rest); + write_messages0(V, ClusterLeader, PublisherRef, PublisherId, InternalId, + PublishingId, Message, Rest); write_messages(?VERSION_2 = V, ClusterLeader, PublisherRef, PublisherId, + InternalId, <>) -> - ok = osiris:write(ClusterLeader, PublisherRef, PublishingId, {FilterValue, Message}), - write_messages(V, ClusterLeader, PublisherRef, PublisherId, Rest). + write_messages0(V, ClusterLeader, PublisherRef, PublisherId, InternalId, + PublishingId, {FilterValue, Message}, Rest). + +write_messages0(Vsn, ClusterLeader, PublisherRef, PublisherId, InternalId, PublishingId, Data, Rest) -> + Corr = case PublisherRef of + undefined -> + %% we add the internal ID to detect late confirms from a stale publisher + {PublisherId, InternalId, PublishingId}; + _ -> + %% we cannot add the internal ID because the correlation ID must be an integer + %% when deduplication is activated. + PublishingId + end, + ok = osiris:write(ClusterLeader, PublisherRef, Corr, Data), + write_messages(Vsn, ClusterLeader, PublisherRef, PublisherId, InternalId, Rest). parse_map(<<>>, _Count) -> {#{}, <<>>}; @@ -227,43 +173,60 @@ auth_mechanism_to_module(TypeBin, Sock) -> end. 
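write_messages0/8 above chooses the correlation term that osiris echoes back in confirms: the plain publishing ID when deduplication is on (a publisher reference is set), and a {PublisherId, InternalId, PublishingId} triple otherwise. A hypothetical confirm handler (not part of the diff) illustrates why the internal ID is carried along: it lets the reader discard late confirms produced by a stale publisher that reused the same publisher ID.

%% Publishers is the #{publisher_id() => #publisher{}} map kept in #stream_connection{}.
handle_confirm({PublisherId, InternalId, PublishingId}, Publishers) ->
    case Publishers of
        #{PublisherId := #publisher{internal_id = InternalId}} ->
            %% the confirm belongs to the current incarnation of this publisher
            {confirm, PublisherId, PublishingId};
        _ ->
            %% stale: a previous publisher with the same ID issued this write
            ignore
    end;
handle_confirm(PublishingId, _Publishers) when is_integer(PublishingId) ->
    %% deduplication on: osiris requires an integer correlation ID,
    %% so only the publishing ID is available
    {confirm, PublishingId}.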
check_resource_access(User, Resource, Perm, Context) -> - V = {Resource, Context, Perm}, - - Cache = - case get(permission_cache) of - undefined -> - []; - Other -> - Other - end, - case lists:member(V, Cache) of - true -> - ok; - false -> - try - rabbit_access_control:check_resource_access(User, - Resource, - Perm, - Context), - CacheTail = - lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE - 1), - put(permission_cache, [V | CacheTail]), - ok - catch - exit:_ -> - error - end + try + rabbit_access_control:check_resource_access(User, + Resource, + Perm, + Context), + ok + catch + exit:_ -> + error end. -check_configure_permitted(Resource, User, Context) -> - check_resource_access(User, Resource, configure, Context). +check_configure_permitted(Resource, User) -> + check_resource_access(User, Resource, configure, #{}). -check_write_permitted(Resource, User, Context) -> - check_resource_access(User, Resource, write, Context). +check_write_permitted(Resource, User) -> + check_resource_access(User, Resource, write, #{}). check_read_permitted(Resource, User, Context) -> check_resource_access(User, Resource, read, Context). +-spec check_super_stream_management_permitted(rabbit_types:vhost(), binary(), [binary()], rabbit_types:user()) -> + ok | error. +check_super_stream_management_permitted(VirtualHost, SuperStream, Partitions, User) -> + Exchange = e(VirtualHost, SuperStream), + maybe + %% exchange creation + ok ?= check_configure_permitted(Exchange, User), + %% stream creations + ok ?= check_streams_permissions(fun check_configure_permitted/2, + VirtualHost, Partitions, + User), + %% binding from exchange + ok ?= check_read_permitted(Exchange, User, #{}), + %% binding to streams + check_streams_permissions(fun check_write_permitted/2, + VirtualHost, Partitions, + User) + end. + +check_streams_permissions(Fun, VirtualHost, List, User) -> + case lists:all(fun(S) -> + case Fun(q(VirtualHost, S), User) of + ok -> + true; + _ -> + false + end + end, List) of + true -> + ok; + _ -> + error + end. + extract_stream_list(<<>>, Streams) -> Streams; extract_stream_list(<>, @@ -334,14 +297,8 @@ filter_spec(Properties) -> end. command_versions() -> - PublishMaxVersion = case filtering_supported() of - false -> - ?VERSION_1; - true -> - ?VERSION_2 - end, [{declare_publisher, ?VERSION_1, ?VERSION_1}, - {publish, ?VERSION_1, PublishMaxVersion}, + {publish, ?VERSION_1, ?VERSION_2}, {query_publisher_sequence, ?VERSION_1, ?VERSION_1}, {delete_publisher, ?VERSION_1, ?VERSION_1}, {subscribe, ?VERSION_1, ?VERSION_1}, @@ -356,7 +313,36 @@ command_versions() -> {heartbeat, ?VERSION_1, ?VERSION_1}, {route, ?VERSION_1, ?VERSION_1}, {partitions, ?VERSION_1, ?VERSION_1}, - {stream_stats, ?VERSION_1, ?VERSION_1}]. + {stream_stats, ?VERSION_1, ?VERSION_1}, + {create_super_stream, ?VERSION_1, ?VERSION_1}, + {delete_super_stream, ?VERSION_1, ?VERSION_1}]. + +q(VirtualHost, Name) -> + rabbit_misc:r(VirtualHost, queue, Name). + +e(VirtualHost, Name) -> + rabbit_misc:r(VirtualHost, exchange, Name). + +-spec consumer_offset(ConsumerOffsetFromCounter :: integer(), + MessageConsumed :: non_neg_integer(), + LastListenerOffset :: integer() | undefined) -> integer(). +consumer_offset(0, 0, undefined) -> + 0; +consumer_offset(0, 0, LastListenerOffset) when LastListenerOffset > 0 -> + %% consumer at "next" waiting for messages most likely + LastListenerOffset; +consumer_offset(ConsumerOffsetFromCounter, _, _) -> + ConsumerOffsetFromCounter. -filtering_supported() -> - rabbit_feature_flags:is_enabled(stream_filtering). 
+-spec offset_lag(CommittedOffset :: integer(), + ConsumerOffsetFromCounter :: integer(), + MessageConsumed :: non_neg_integer(), + LastListenerOffset :: integer() | undefined) -> integer(). +offset_lag(-1, _, _, _) -> + %% -1 is for an empty stream, so no lag + 0; +offset_lag(_, 0, 0, LastListenerOffset) when LastListenerOffset > 0 -> + %% consumer waiting for messages at the end of the stream, most likely + 0; +offset_lag(CommittedOffset, ConsumerOffset, _, _) -> + CommittedOffset - ConsumerOffset. diff --git a/deps/rabbitmq_stream/test/commands_SUITE.erl b/deps/rabbitmq_stream/test/commands_SUITE.erl index b8d22cc1e2a5..2a651506d10a 100644 --- a/deps/rabbitmq_stream/test/commands_SUITE.erl +++ b/deps/rabbitmq_stream/test/commands_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(commands_SUITE). @@ -25,12 +25,14 @@ 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand'). -define(COMMAND_ADD_SUPER_STREAM, 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddSuperStreamCommand'). --define(COMMAND_DELETE_SUPER_STREAM, +-define(COMMAND_DELETE_SUPER_STREAM_CLI, 'Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteSuperStreamCommand'). -define(COMMAND_LIST_CONSUMER_GROUPS, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumerGroupsCommand'). -define(COMMAND_LIST_GROUP_CONSUMERS, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand'). +-define(COMMAND_LIST_STREAM_TRACKING, + 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand'). all() -> [{group, list_connections}, @@ -38,6 +40,7 @@ all() -> {group, list_publishers}, {group, list_consumer_groups}, {group, list_group_consumers}, + {group, list_stream_tracking}, {group, super_streams}]. groups() -> @@ -49,10 +52,14 @@ groups() -> {list_publishers, [], [list_publishers_merge_defaults, list_publishers_run]}, {list_consumer_groups, [], - [list_consumer_groups_merge_defaults, list_consumer_groups_run]}, + [list_consumer_groups_validate, list_consumer_groups_merge_defaults, + list_consumer_groups_run]}, {list_group_consumers, [], [list_group_consumers_validate, list_group_consumers_merge_defaults, list_group_consumers_run]}, + {list_stream_tracking, [], + [list_stream_tracking_validate, list_stream_tracking_merge_defaults, + list_stream_tracking_run]}, {super_streams, [], [add_super_stream_merge_defaults, add_super_stream_validate, @@ -96,7 +103,7 @@ end_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_finished(Config, Testcase). 
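The consumer_offset/3 and offset_lag/4 helpers added to rabbit_stream_utils above can be summarised with a few hypothetical eunit-style checks, derived directly from the function clauses (assuming eunit.hrl is included for ?assertEqual; these asserts are illustrative and not part of any suite):

offset_helpers_test() ->
    %% consumer attached at "next" and still waiting: report the listener offset
    ?assertEqual(10, rabbit_stream_utils:consumer_offset(0, 0, 10)),
    %% empty stream (committed offset -1): no lag
    ?assertEqual(0, rabbit_stream_utils:offset_lag(-1, 0, 0, undefined)),
    %% consumer parked at the end of the stream: no lag either
    ?assertEqual(0, rabbit_stream_utils:offset_lag(5, 0, 0, 6)),
    %% regular case: committed offset minus consumer offset
    ?assertEqual(3, rabbit_stream_utils:offset_lag(10, 7, 100, undefined)).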
list_connections_merge_defaults(_Config) -> - {[<<"conn_name">>], #{verbose := false}} = + {[<<"node">>, <<"conn_name">>], #{verbose := false}} = ?COMMAND_LIST_CONNECTIONS:merge_defaults([], #{}), {[<<"other_key">>], #{verbose := true}} = @@ -197,7 +204,7 @@ list_tls_connections_run(Config) -> list_consumers_merge_defaults(_Config) -> DefaultItems = [rabbit_data_coercion:to_binary(Item) - || Item <- ?CONSUMER_INFO_ITEMS], + || Item <- ?CONSUMER_INFO_ITEMS -- [connection_pid, node]], {DefaultItems, #{verbose := false}} = ?COMMAND_LIST_CONSUMERS:merge_defaults([], #{}), @@ -266,7 +273,7 @@ list_consumers_run(Config) -> list_publishers_merge_defaults(_Config) -> DefaultItems = [rabbit_data_coercion:to_binary(Item) - || Item <- ?PUBLISHER_INFO_ITEMS], + || Item <- ?PUBLISHER_INFO_ITEMS -- [connection_pid, node]], {DefaultItems, #{verbose := false}} = ?COMMAND_LIST_PUBLISHERS:merge_defaults([], #{}), @@ -332,6 +339,18 @@ list_publishers_run(Config) -> ?awaitMatch(0, publisher_count(Config), ?WAIT), ok. +list_consumer_groups_validate(_) -> + ValidOpts = #{vhost => <<"/">>}, + ?assertMatch({validation_failure, {bad_info_key, [foo]}}, + ?COMMAND_LIST_CONSUMER_GROUPS:validate([<<"foo">>], + ValidOpts)), + ?assertMatch(ok, + ?COMMAND_LIST_CONSUMER_GROUPS:validate([<<"reference">>], + ValidOpts)), + ?assertMatch(ok, + ?COMMAND_LIST_CONSUMER_GROUPS:validate([], ValidOpts)). + + list_consumer_groups_merge_defaults(_Config) -> DefaultItems = [rabbit_data_coercion:to_binary(Item) @@ -521,6 +540,106 @@ assertConsumerGroup(S, R, PI, Cs, Record) -> ?assertEqual(Cs, proplists:get_value(consumers, Record)), ok. +list_stream_tracking_validate(_) -> + ValidOpts = #{vhost => <<"/">>, <<"writer">> => true}, + ?assertMatch({validation_failure, not_enough_args}, + ?COMMAND_LIST_STREAM_TRACKING:validate([], #{})), + ?assertMatch({validation_failure, not_enough_args}, + ?COMMAND_LIST_STREAM_TRACKING:validate([], + #{vhost => + <<"test">>})), + ?assertMatch({validation_failure, "Specify only one of --all, --offset, --writer."}, + ?COMMAND_LIST_STREAM_TRACKING:validate([<<"stream">>], + #{all => true, writer => true})), + ?assertMatch({validation_failure, too_many_args}, + ?COMMAND_LIST_STREAM_TRACKING:validate([<<"stream">>, <<"bad">>], + ValidOpts)), + + ?assertMatch(ok, + ?COMMAND_LIST_STREAM_TRACKING:validate([<<"stream">>], + ValidOpts)). +list_stream_tracking_merge_defaults(_Config) -> + ?assertMatch({[<<"s">>], #{all := true, vhost := <<"/">>}}, + ?COMMAND_LIST_STREAM_TRACKING:merge_defaults([<<"s">>], #{})), + + ?assertMatch({[<<"s">>], #{writer := true, vhost := <<"/">>}}, + ?COMMAND_LIST_STREAM_TRACKING:merge_defaults([<<"s">>], #{writer => true})), + + ?assertMatch({[<<"s">>], #{all := true, vhost := <<"dev">>}}, + ?COMMAND_LIST_STREAM_TRACKING:merge_defaults([<<"s">>], #{vhost => <<"dev">>})), + + ?assertMatch({[<<"s">>], #{writer := true, vhost := <<"dev">>}}, + ?COMMAND_LIST_STREAM_TRACKING:merge_defaults([<<"s">>], #{writer => true, vhost => <<"dev">>})). 
+ +list_stream_tracking_run(Config) -> + Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Stream = <<"list_stream_tracking_run">>, + ConsumerReference = <<"foo">>, + PublisherReference = <<"bar">>, + Opts = + #{node => Node, + timeout => 10000, + vhost => <<"/">>}, + Args = [Stream], + + %% the stream does not exist yet + ?assertMatch({error, "The stream does not exist."}, + ?COMMAND_LIST_STREAM_TRACKING:run(Args, Opts#{all => true})), + + StreamPort = rabbit_stream_SUITE:get_stream_port(Config), + {S, C} = start_stream_connection(StreamPort), + ?awaitMatch(1, connection_count(Config), ?WAIT), + + create_stream(S, Stream, C), + + ?assertMatch([], + ?COMMAND_LIST_STREAM_TRACKING:run(Args, Opts#{all => true})), + + store_offset(S, Stream, ConsumerReference, 42, C), + + ?assertMatch([[{type,offset}, {name, ConsumerReference}, {tracking_value, 42}]], + ?COMMAND_LIST_STREAM_TRACKING:run(Args, Opts#{all => true})), + + ?assertMatch([[{type,offset}, {name, ConsumerReference}, {tracking_value, 42}]], + ?COMMAND_LIST_STREAM_TRACKING:run(Args, Opts#{offset => true})), + + ok = store_offset(S, Stream, ConsumerReference, 55, C), + ?assertMatch([[{type,offset}, {name, ConsumerReference}, {tracking_value, 55}]], + ?COMMAND_LIST_STREAM_TRACKING:run(Args, Opts#{offset => true})), + + + PublisherId = 1, + rabbit_stream_SUITE:test_declare_publisher(gen_tcp, S, PublisherId, + PublisherReference, Stream, C), + rabbit_stream_SUITE:test_publish_confirm(gen_tcp, S, PublisherId, 42, <<"">>, C), + + ok = check_publisher_sequence(S, Stream, PublisherReference, 42, C), + + ?assertMatch([ + [{type,writer},{name,<<"bar">>},{tracking_value, 42}], + [{type,offset},{name,<<"foo">>},{tracking_value, 55}] + ], + ?COMMAND_LIST_STREAM_TRACKING:run(Args, Opts#{all => true})), + + ?assertMatch([ + [{type,writer},{name,<<"bar">>},{tracking_value, 42}] + ], + ?COMMAND_LIST_STREAM_TRACKING:run(Args, Opts#{writer => true})), + + rabbit_stream_SUITE:test_publish_confirm(gen_tcp, S, PublisherId, 66, <<"">>, C), + + ok = check_publisher_sequence(S, Stream, PublisherReference, 66, C), + + ?assertMatch([ + [{type,writer},{name,<<"bar">>},{tracking_value, 66}] + ], + ?COMMAND_LIST_STREAM_TRACKING:run(Args, Opts#{writer => true})), + + delete_stream(S, Stream, C), + + close(S, C), + ok. + add_super_stream_merge_defaults(_Config) -> ?assertMatch({[<<"super-stream">>], #{partitions := 3, vhost := <<"/">>}}, @@ -531,17 +650,27 @@ add_super_stream_merge_defaults(_Config) -> #{partitions := 5, vhost := <<"/">>}}, ?COMMAND_ADD_SUPER_STREAM:merge_defaults([<<"super-stream">>], #{partitions => 5})), + DefaultWithBindingKeys = + ?COMMAND_ADD_SUPER_STREAM:merge_defaults([<<"super-stream">>], + #{binding_keys => + <<"amer,emea,apac">>}), + ?assertMatch({[<<"super-stream">>], + #{binding_keys := <<"amer,emea,apac">>, vhost := <<"/">>}}, + DefaultWithBindingKeys), + + {_, OptsBks} = DefaultWithBindingKeys, + ?assertEqual(false, maps:is_key(partitions, OptsBks)), DefaultWithRoutingKeys = ?COMMAND_ADD_SUPER_STREAM:merge_defaults([<<"super-stream">>], #{routing_keys => <<"amer,emea,apac">>}), ?assertMatch({[<<"super-stream">>], - #{routing_keys := <<"amer,emea,apac">>, vhost := <<"/">>}}, + #{binding_keys := <<"amer,emea,apac">>, vhost := <<"/">>}}, DefaultWithRoutingKeys), - {_, Opts} = DefaultWithRoutingKeys, - ?assertEqual(false, maps:is_key(partitions, Opts)). + {_, OptsRks} = DefaultWithRoutingKeys, + ?assertEqual(false, maps:is_key(partitions, OptsRks)). 
add_super_stream_validate(_Config) -> ?assertMatch({validation_failure, not_enough_args}, @@ -553,6 +682,17 @@ add_super_stream_validate(_Config) -> #{partitions => 1, routing_keys => <<"a,b,c">>})), + ?assertMatch({validation_failure, _}, + ?COMMAND_ADD_SUPER_STREAM:validate([<<"a">>], + #{partitions => 1, + binding_keys => <<"a,b,c">>})), + + ?assertMatch({validation_failure, _}, + ?COMMAND_ADD_SUPER_STREAM:validate([<<"a">>], + #{routing_keys => 1, + binding_keys => <<"a,b,c">>} + )), + ?assertMatch({validation_failure, _}, ?COMMAND_ADD_SUPER_STREAM:validate([<<"a">>], #{partitions => 0})), @@ -563,6 +703,10 @@ add_super_stream_validate(_Config) -> ?COMMAND_ADD_SUPER_STREAM:validate([<<"a">>], #{routing_keys => <<"a,b,c">>})), + ?assertEqual(ok, + ?COMMAND_ADD_SUPER_STREAM:validate([<<"a">>], + #{binding_keys => + <<"a,b,c">>})), [case Expected of ok -> @@ -600,15 +744,15 @@ add_super_stream_validate(_Config) -> delete_super_stream_merge_defaults(_Config) -> ?assertMatch({[<<"super-stream">>], #{vhost := <<"/">>}}, - ?COMMAND_DELETE_SUPER_STREAM:merge_defaults([<<"super-stream">>], + ?COMMAND_DELETE_SUPER_STREAM_CLI:merge_defaults([<<"super-stream">>], #{})), ok. delete_super_stream_validate(_Config) -> ?assertMatch({validation_failure, not_enough_args}, - ?COMMAND_DELETE_SUPER_STREAM:validate([], #{})), + ?COMMAND_DELETE_SUPER_STREAM_CLI:validate([], #{})), ?assertMatch({validation_failure, too_many_args}, - ?COMMAND_DELETE_SUPER_STREAM:validate([<<"a">>, <<"b">>], + ?COMMAND_DELETE_SUPER_STREAM_CLI:validate([<<"a">>, <<"b">>], #{})), ?assertEqual(ok, ?COMMAND_ADD_SUPER_STREAM:validate([<<"a">>], #{})), ok. @@ -629,22 +773,21 @@ add_delete_super_stream_run(Config) -> [<<"invoices-0">>, <<"invoices-1">>, <<"invoices-2">>]}, partitions(Config, <<"invoices">>)), ?assertMatch({ok, _}, - ?COMMAND_DELETE_SUPER_STREAM:run([<<"invoices">>], Opts)), + ?COMMAND_DELETE_SUPER_STREAM_CLI:run([<<"invoices">>], Opts)), ?assertEqual({error, stream_not_found}, partitions(Config, <<"invoices">>)), - % with routing keys + % with binding keys ?assertMatch({ok, _}, ?COMMAND_ADD_SUPER_STREAM:run([<<"invoices">>], - maps:merge(#{routing_keys => - <<" amer,emea , apac">>}, + maps:merge(#{binding_keys => <<" amer,emea , apac">>}, Opts))), ?assertEqual({ok, [<<"invoices-amer">>, <<"invoices-emea">>, <<"invoices-apac">>]}, partitions(Config, <<"invoices">>)), ?assertMatch({ok, _}, - ?COMMAND_DELETE_SUPER_STREAM:run([<<"invoices">>], Opts)), + ?COMMAND_DELETE_SUPER_STREAM_CLI:run([<<"invoices">>], Opts)), ?assertEqual({error, stream_not_found}, partitions(Config, <<"invoices">>)), @@ -678,7 +821,7 @@ add_delete_super_stream_run(Config) -> rabbit_misc:table_lookup(Args, <<"x-queue-type">>)), ?assertMatch({ok, _}, - ?COMMAND_DELETE_SUPER_STREAM:run([<<"invoices">>], Opts)), + ?COMMAND_DELETE_SUPER_STREAM_CLI:run([<<"invoices">>], Opts)), ok. @@ -714,6 +857,9 @@ declare_publisher(S, PubId, Stream, C) -> delete_stream(S, Stream, C) -> rabbit_stream_SUITE:test_delete_stream(gen_tcp, S, Stream, C). +delete_stream_no_metadata_update(S, Stream, C) -> + rabbit_stream_SUITE:test_delete_stream(gen_tcp, S, Stream, C, false). + metadata_update_stream_deleted(S, Stream, C) -> rabbit_stream_SUITE:test_metadata_update_stream_deleted(gen_tcp, S, @@ -798,3 +944,52 @@ queue_lookup(Config, Q) -> rabbit_amqqueue, lookup, [QueueName]). 
+ +store_offset(S, Stream, Reference, Value, C) -> + StoreOffsetFrame = + rabbit_stream_core:frame({store_offset, Reference, Stream, Value}), + ok = gen_tcp:send(S, StoreOffsetFrame), + case check_stored_offset(S, Stream, Reference, Value, C, 20) of + ok -> + ok; + _ -> + {error, offset_not_stored} + end. + +check_stored_offset(_, _, _, _, _, 0) -> + error; +check_stored_offset(S, Stream, Reference, Expected, C, Attempt) -> + QueryOffsetFrame = + rabbit_stream_core:frame({request, 1, {query_offset, Reference, Stream}}), + ok = gen_tcp:send(S, QueryOffsetFrame), + {Cmd, _} = rabbit_stream_SUITE:receive_commands(gen_tcp, S, C), + ?assertMatch({response, 1, {query_offset, ?RESPONSE_CODE_OK, _}}, Cmd), + {response, 1, {query_offset, ?RESPONSE_CODE_OK, StoredValue}} = Cmd, + case StoredValue of + Expected -> + ok; + _ -> + timer:sleep(50), + check_stored_offset(S, Stream, Reference, Expected, C, Attempt - 1) + end. + +check_publisher_sequence(S, Stream, Reference, Expected, C) -> + check_publisher_sequence(S, Stream, Reference, Expected, C, 20). + +check_publisher_sequence(_, _, _, _, _, 0) -> + error; +check_publisher_sequence(S, Stream, Reference, Expected, C, Attempt) -> + QueryFrame = + rabbit_stream_core:frame({request, 1, {query_publisher_sequence, Reference, Stream}}), + ok = gen_tcp:send(S, QueryFrame), + {Cmd, _} = rabbit_stream_SUITE:receive_commands(gen_tcp, S, C), + ?assertMatch({response, 1, {query_publisher_sequence, _, _}}, Cmd), + {response, 1, {query_publisher_sequence, _, StoredValue}} = Cmd, + case StoredValue of + Expected -> + ok; + _ -> + timer:sleep(50), + check_publisher_sequence(S, Stream, Reference, Expected, C, Attempt - 1) + end. + diff --git a/deps/rabbitmq_stream/test/config_schema_SUITE.erl b/deps/rabbitmq_stream/test/config_schema_SUITE.erl index f1b0c3057c9e..1a278a5e1063 100644 --- a/deps/rabbitmq_stream/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_stream/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_stream/test/protocol_interop_SUITE.erl b/deps/rabbitmq_stream/test/protocol_interop_SUITE.erl new file mode 100644 index 000000000000..872424f53224 --- /dev/null +++ b/deps/rabbitmq_stream/test/protocol_interop_SUITE.erl @@ -0,0 +1,410 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% + +-module(protocol_interop_SUITE). + +-compile([export_all, + nowarn_export_all]). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("amqp10_common/include/amqp10_framing.hrl"). + +all() -> + [{group, tests}]. + +groups() -> + [{tests, [shuffle], + [ + amqpl, + amqp_credit_multiple_grants, + amqp_credit_single_grant, + amqp_attach_sub_batch + ] + }]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. 
+%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_Group, Config) -> + rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_Group, Config) -> + rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + %% Wait for exclusive or auto-delete queues being deleted. + timer:sleep(800), + rabbit_ct_broker_helpers:rpc(Config, ?MODULE, delete_queues, []), + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Testsuite cases +%% ------------------------------------------------------------------- + +amqpl(Config) -> + [Server] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + Ctag = Stream = atom_to_binary(?FUNCTION_NAME), + publish_via_stream_protocol(Stream, Config), + + #'basic.qos_ok'{} = amqp_channel:call(Ch, #'basic.qos'{prefetch_count = 2}), + amqp_channel:subscribe(Ch, + #'basic.consume'{queue = Stream, + consumer_tag = Ctag, + arguments = [{<<"x-stream-offset">>, long, 0}]}, + self()), + receive #'basic.consume_ok'{consumer_tag = Ctag} -> ok + after 5000 -> ct:fail(consume_timeout) + end, + + %% Since prefetch is 2, we expect to receive exactly 2 messages. + %% Whenever we ack both messages, we should receive exactly 2 more messages. + ExpectedPayloads = [{<<"m1">>, <<"m2">>}, + {<<"m3">>, <<"m4">>}, + {<<"m5">>, <<"m6">>}, + %% The broker skips delivery of compressed sub batches to non Stream protocol + %% consumers, i.e. skips delivery of m7, m8, m9. + {<<"m10">>, <<"m11">>}], + lists:foreach( + fun({P1, P2}) -> + ok = process_2_amqpl_messages(Ch, P1, P2) + end, ExpectedPayloads), + + ok = amqp_channel:close(Ch). + +process_2_amqpl_messages(Ch, P1, P2) -> + %% We expect to receive exactly 2 messages. + receive {#'basic.deliver'{}, + #amqp_msg{payload = P1}} -> ok + after 5000 -> ct:fail({missing_delivery, P1}) + end, + DTag = receive {#'basic.deliver'{delivery_tag = Tag}, + #amqp_msg{payload = P2}} -> Tag + after 5000 -> ct:fail({missing_delivery, P2}) + end, + receive Msg -> ct:fail({unexpected_message, Msg}) + after 10 -> ok + end, + ok = amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag, + multiple = true}). + +amqp_credit_single_grant(Config) -> + Stream = atom_to_binary(?FUNCTION_NAME), + publish_via_stream_protocol(Stream, Config), + + %% Consume from the stream via AMQP 1.0. + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = <<"/queue/", Stream/binary>>, + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, settled, + configuration, #{<<"rabbitmq:stream-offset-spec">> => <<"first">>}), + + %% There are 8 uncompressed messages in the stream. 
+ ok = amqp10_client:flow_link_credit(Receiver, 8, never), + + Msgs = receive_amqp_messages(Receiver, 8), + ?assertEqual([<<"m1">>], amqp10_msg:body(hd(Msgs))), + ?assertEqual([<<"m11">>], amqp10_msg:body(lists:last(Msgs))), + ok = amqp10_client:close_connection(Connection). + +amqp_credit_multiple_grants(Config) -> + Stream = atom_to_binary(?FUNCTION_NAME), + publish_via_stream_protocol(Stream, Config), + + %% Consume from the stream via AMQP 1.0. + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = <<"/queue/", Stream/binary>>, + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, unsettled, + configuration, #{<<"rabbitmq:stream-offset-spec">> => <<"first">>}), + + %% Granting 1 credit should deliver us exactly 1 message. + {ok, M1} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m1">>], amqp10_msg:body(M1)), + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive {amqp10_msg, _, _} = Unexp1 -> ct:fail({unexpected_message, Unexp1}) + after 10 -> ok + end, + + ok = amqp10_client:flow_link_credit(Receiver, 3, never), + %% We expect to receive exactly 3 more messages + receive {amqp10_msg, Receiver, Msg2} -> + ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2)) + after 5000 -> ct:fail("missing m2") + end, + receive {amqp10_msg, Receiver, Msg3} -> + ?assertEqual([<<"m3">>], amqp10_msg:body(Msg3)) + after 5000 -> ct:fail("missing m3") + end, + %% Messages in an uncompressed subbatch should be delivered individually. + M4 = receive {amqp10_msg, Receiver, Msg4} -> + ?assertEqual([<<"m4">>], amqp10_msg:body(Msg4)), + Msg4 + after 5000 -> ct:fail("missing m4") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + + %% Let's ack all of them. + ok = amqp10_client_session:disposition( + Receiver, + amqp10_msg:delivery_id(M1), + amqp10_msg:delivery_id(M4), + true, + accepted), + %% Acking shouldn't grant more credits. + receive {amqp10_msg, _, _} = Unexp2 -> ct:fail({unexpected_message, Unexp2}) + after 10 -> ok + end, + + ok = amqp10_client:flow_link_credit(Receiver, 3, never), + M5 = receive {amqp10_msg, Receiver, Msg5} -> + ?assertEqual([<<"m5">>], amqp10_msg:body(Msg5)), + Msg5 + after 5000 -> ct:fail("missing m5") + end, + receive {amqp10_msg, Receiver, Msg6} -> + ?assertEqual([<<"m6">>], amqp10_msg:body(Msg6)) + after 5000 -> ct:fail("missing m6") + end, + %% The broker skips delivery of compressed sub batches to non Stream protocol + %% consumers, i.e. skips delivery of m7, m8, m9. + receive {amqp10_msg, Receiver, Msg10} -> + ?assertEqual([<<"m10">>], amqp10_msg:body(Msg10)) + after 5000 -> ct:fail("missing m10") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + receive {amqp10_msg, _, _} = Unexp3 -> ct:fail({unexpected_message, Unexp3}) + after 10 -> ok + end, + + %% 1 message should be left in the stream. + %% Let's drain the stream. 
+ ok = amqp10_client:flow_link_credit(Receiver, 1000, never, true), + M11 = receive {amqp10_msg, Receiver, Msg11} -> + ?assertEqual([<<"m11">>], amqp10_msg:body(Msg11)), + Msg11 + after 5000 -> ct:fail("missing m11") + end, + receive {amqp10_event, {link, Receiver, credit_exhausted}} -> ok + after 5000 -> ct:fail("expected credit_exhausted") + end, + + %% Let's ack them all. + ok = amqp10_client_session:disposition( + Receiver, + amqp10_msg:delivery_id(M5), + amqp10_msg:delivery_id(M11), + true, + accepted), + + receive {amqp10_msg, _, _} = Unexp4 -> ct:fail({unexpected_message, Unexp4}) + after 10 -> ok + end, + + ok = amqp10_client:detach_link(Receiver), + ok = amqp10_client:close_connection(Connection). + +amqp_attach_sub_batch(Config) -> + Stream = atom_to_binary(?FUNCTION_NAME), + publish_via_stream_protocol(Stream, Config), + + %% Consume from the stream via AMQP 1.0. + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = <<"/queue/", Stream/binary>>, + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, settled, configuration, + %% Attach in the middle of an uncompresssed sub batch. + #{<<"rabbitmq:stream-offset-spec">> => 4}), + + {ok, M5} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m5">>], amqp10_msg:body(M5)), + + {ok, M6} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m6">>], amqp10_msg:body(M6)), + + %% The broker skips delivery of compressed sub batches to non Stream protocol + %% consumers, i.e. skips delivery of m7, m8, m9. + + {ok, M10} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m10">>], amqp10_msg:body(M10)), + + {ok, M11} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m11">>], amqp10_msg:body(M11)), + + ok = amqp10_client:detach_link(Receiver), + ok = amqp10_client:close_connection(Connection). + +%% ------------------------------------------------------------------- +%% Helpers +%% ------------------------------------------------------------------- + +publish_via_stream_protocol(Stream, Config) -> + %% There is no open source Erlang RabbitMQ Stream client. + %% Therefore, we have to build the Stream protocol commands manually. 
+ + StreamPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stream), + {ok, S} = gen_tcp:connect("localhost", StreamPort, [{active, false}, {mode, binary}]), + + C0 = rabbit_stream_core:init(0), + PeerPropertiesFrame = rabbit_stream_core:frame({request, 1, {peer_properties, #{}}}), + ok = gen_tcp:send(S, PeerPropertiesFrame), + {{response, 1, {peer_properties, _, _}}, C1} = receive_stream_commands(S, C0), + + ok = gen_tcp:send(S, rabbit_stream_core:frame({request, 1, sasl_handshake})), + {{response, _, {sasl_handshake, _, _}}, C2} = receive_stream_commands(S, C1), + Username = <<"guest">>, + Password = <<"guest">>, + Null = 0, + PlainSasl = <>, + ok = gen_tcp:send(S, rabbit_stream_core:frame({request, 2, {sasl_authenticate, <<"PLAIN">>, PlainSasl}})), + {{response, 2, {sasl_authenticate, _}}, C3} = receive_stream_commands(S, C2), + {{tune, DefaultFrameMax, _}, C4} = receive_stream_commands(S, C3), + + ok = gen_tcp:send(S, rabbit_stream_core:frame({response, 0, {tune, DefaultFrameMax, 0}})), + ok = gen_tcp:send(S, rabbit_stream_core:frame({request, 3, {open, <<"/">>}})), + {{response, 3, {open, _, _ConnectionProperties}}, C5} = receive_stream_commands(S, C4), + + CreateStreamFrame = rabbit_stream_core:frame({request, 1, {create_stream, Stream, #{}}}), + ok = gen_tcp:send(S, CreateStreamFrame), + {{response, 1, {create_stream, _}}, C6} = receive_stream_commands(S, C5), + + PublisherId = 99, + DeclarePublisherFrame = rabbit_stream_core:frame({request, 1, {declare_publisher, PublisherId, <<>>, Stream}}), + ok = gen_tcp:send(S, DeclarePublisherFrame), + {{response, 1, {declare_publisher, _}}, C7} = receive_stream_commands(S, C6), + + M1 = simple_entry(1, <<"m1">>), + M2 = simple_entry(2, <<"m2">>), + M3 = simple_entry(3, <<"m3">>), + Messages1 = [M1, M2, M3], + PublishFrame1 = rabbit_stream_core:frame({publish, PublisherId, length(Messages1), Messages1}), + ok = gen_tcp:send(S, PublishFrame1), + {{publish_confirm, PublisherId, _}, C8} = receive_stream_commands(S, C7), + + UncompressedSubbatch = sub_batch_entry_uncompressed(4, [<<"m4">>, <<"m5">>, <<"m6">>]), + PublishFrame2 = rabbit_stream_core:frame({publish, PublisherId, 3, UncompressedSubbatch}), + ok = gen_tcp:send(S, PublishFrame2), + {{publish_confirm, PublisherId, _}, C9} = receive_stream_commands(S, C8), + + CompressedSubbatch = sub_batch_entry_compressed(5, [<<"m7">>, <<"m8">>, <<"m9">>]), + PublishFrame3 = rabbit_stream_core:frame({publish, PublisherId, 3, CompressedSubbatch}), + ok = gen_tcp:send(S, PublishFrame3), + {{publish_confirm, PublisherId, _}, C10} = receive_stream_commands(S, C9), + + M10 = simple_entry(6, <<"m10">>), + M11 = simple_entry(7, <<"m11">>), + Messages2 = [M10, M11], + PublishFrame4 = rabbit_stream_core:frame({publish, PublisherId, length(Messages2), Messages2}), + ok = gen_tcp:send(S, PublishFrame4), + {{publish_confirm, PublisherId, _}, _C11} = receive_stream_commands(S, C10). + +%% Streams contain AMQP 1.0 encoded messages. +%% In this case, the AMQP 1.0 encoded message contains a single data section. +simple_entry(Sequence, Body) + when is_binary(Body) -> + DataSect = iolist_to_binary(amqp10_framing:encode_bin(#'v1_0.data'{content = Body})), + DataSectSize = byte_size(DataSect), + <>. + +%% Here, each AMQP 1.0 encoded message contains a single data section. +%% All data sections are delivered uncompressed in 1 batch. 
+sub_batch_entry_uncompressed(Sequence, Bodies) -> + Batch = lists:foldl(fun(Body, Acc) -> + Sect = iolist_to_binary(amqp10_framing:encode_bin(#'v1_0.data'{content = Body})), + <> + end, <<>>, Bodies), + Size = byte_size(Batch), + <>. + +%% Here, each AMQP 1.0 encoded message contains a single data section. +%% All data sections are delivered in 1 gzip compressed batch. +sub_batch_entry_compressed(Sequence, Bodies) -> + Uncompressed = lists:foldl(fun(Body, Acc) -> + Bin = iolist_to_binary(amqp10_framing:encode_bin(#'v1_0.data'{content = Body})), + <> + end, <<>>, Bodies), + Compressed = zlib:gzip(Uncompressed), + CompressedLen = byte_size(Compressed), + <>. + +connection_config(Config) -> + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, <<"guest">>, <<"guest">>}}. + +receive_stream_commands(Sock, C0) -> + case rabbit_stream_core:next_command(C0) of + empty -> + case gen_tcp:recv(Sock, 0, 5000) of + {ok, Data} -> + C1 = rabbit_stream_core:incoming_data(Data, C0), + case rabbit_stream_core:next_command(C1) of + empty -> + {ok, Data2} = gen_tcp:recv(Sock, 0, 5000), + rabbit_stream_core:next_command( + rabbit_stream_core:incoming_data(Data2, C1)); + Res -> + Res + end; + {error, Err} -> + ct:fail("error receiving stream data ~w", [Err]) + end; + Res -> + Res + end. + +receive_amqp_messages(Receiver, N) -> + receive_amqp_messages0(Receiver, N, []). + +receive_amqp_messages0(_Receiver, 0, Acc) -> + lists:reverse(Acc); +receive_amqp_messages0(Receiver, N, Acc) -> + receive + {amqp10_msg, Receiver, Msg} -> + receive_amqp_messages0(Receiver, N - 1, [Msg | Acc]) + after 5000 -> + exit({timeout, {num_received, length(Acc)}, {num_missing, N}}) + end. + +delete_queues() -> + [{ok, 0} = rabbit_amqqueue:delete(Q, false, false, <<"dummy">>) || Q <- rabbit_amqqueue:list()]. diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl b/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl index d004456c231b..06792b4e739d 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl @@ -11,7 +11,8 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is Pivotal Software, Inc. -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2020-2024 Broadcom. All Rights Reserved. +%% The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_SUITE). @@ -26,6 +27,9 @@ -compile(nowarn_export_all). -compile(export_all). +-import(rabbit_stream_core, [frame/1]). +-import(rabbit_ct_broker_helpers, [rpc/5]). + -define(WAIT, 5000). 
all() -> @@ -33,12 +37,15 @@ all() -> groups() -> [{single_node, [], - [filtering_ff, %% must stay at the top, feature flag disabled for this one - test_stream, + [test_stream, test_stream_tls, test_publish_v2, + test_super_stream_creation_deletion, test_gc_consumers, test_gc_publishers, + test_update_secret, + cannot_update_username_after_authenticated, + cannot_use_another_authmechanism_when_updating_secret, unauthenticated_client_rejected_tcp_connected, timeout_tcp_connected, unauthenticated_client_rejected_peer_properties_exchanged, @@ -48,7 +55,17 @@ groups() -> timeout_close_sent, max_segment_size_bytes_validation, close_connection_on_consumer_update_timeout, - set_filter_size]}, + set_filter_size, + vhost_queue_limit, + connection_should_be_closed_on_token_expiry, + should_receive_metadata_update_after_update_secret, + store_offset_requires_read_access, + offset_lag_calculation, + test_super_stream_duplicate_partitions, + authentication_error_should_close_with_delay, + unauthorized_vhost_access_should_close_with_delay, + sasl_anonymous + ]}, %% Run `test_global_counters` on its own so the global metrics are %% initialised to 0 for each testcase {single_node_1, [], [test_global_counters]}, @@ -111,7 +128,8 @@ init_per_group(cluster = Group, Config) -> {rmq_nodes_count, 3}, {rmq_nodename_suffix, Group}, {tcp_ports_base}, - {rabbitmq_ct_tls_verify, verify_none} + {rabbitmq_ct_tls_verify, verify_none}, + {find_crashes, false} %% we kill stream members in some tests ]), rabbit_ct_helpers:run_setup_steps( Config1, @@ -131,6 +149,13 @@ end_per_group(_, Config) -> rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:teardown_steps()). +init_per_testcase(test_update_secret = TestCase, Config) -> + rabbit_ct_helpers:testcase_started(Config, TestCase); + +init_per_testcase(cannot_update_username_after_authenticated = TestCase, Config) -> + ok = rabbit_ct_broker_helpers:add_user(Config, <<"other">>), + rabbit_ct_helpers:testcase_started(Config, TestCase); + init_per_testcase(close_connection_on_consumer_update_timeout = TestCase, Config) -> ok = rabbit_ct_broker_helpers:rpc(Config, 0, @@ -138,16 +163,34 @@ init_per_testcase(close_connection_on_consumer_update_timeout = TestCase, Config set_env, [rabbitmq_stream, request_timeout, 2000]), rabbit_ct_helpers:testcase_started(Config, TestCase); +init_per_testcase(vhost_queue_limit = TestCase, Config) -> + QueueCount = rabbit_ct_broker_helpers:rpc(Config, + 0, + rabbit_amqqueue, + count, + [<<"/">>]), + ok = rabbit_ct_broker_helpers:set_vhost_limit(Config, 0, <<"/">>, max_queues, QueueCount + 5), + rabbit_ct_helpers:testcase_started(Config, TestCase); + +init_per_testcase(store_offset_requires_read_access = TestCase, Config) -> + ok = rabbit_ct_broker_helpers:add_user(Config, <<"test">>), + rabbit_ct_helpers:testcase_started(Config, TestCase); + +init_per_testcase(unauthorized_vhost_access_should_close_with_delay = TestCase, Config) -> + ok = rabbit_ct_broker_helpers:add_user(Config, <<"other">>), + rabbit_ct_helpers:testcase_started(Config, TestCase); + init_per_testcase(TestCase, Config) -> rabbit_ct_helpers:testcase_started(Config, TestCase). 
-end_per_testcase(filtering_ff = TestCase, Config) -> - _ = rabbit_ct_broker_helpers:rpc(Config, - 0, - rabbit_feature_flags, - enable, - [stream_filtering]), +end_per_testcase(test_update_secret = TestCase, Config) -> + ok = rabbit_ct_broker_helpers:change_password(Config, <<"guest">>, <<"guest">>), + rabbit_ct_helpers:testcase_finished(Config, TestCase); + +end_per_testcase(cannot_update_username_after_authenticated = TestCase, Config) -> + ok = rabbit_ct_broker_helpers:delete_user(Config, <<"other">>), rabbit_ct_helpers:testcase_finished(Config, TestCase); + end_per_testcase(close_connection_on_consumer_update_timeout = TestCase, Config) -> ok = rabbit_ct_broker_helpers:rpc(Config, 0, @@ -155,35 +198,22 @@ end_per_testcase(close_connection_on_consumer_update_timeout = TestCase, Config) set_env, [rabbitmq_stream, request_timeout, 60000]), rabbit_ct_helpers:testcase_finished(Config, TestCase); +end_per_testcase(vhost_queue_limit = TestCase, Config) -> + _ = rabbit_ct_broker_helpers:rpc(Config, + 0, + rabbit_vhost_limit, + clear, + [<<"/">>, <<"guest">>]), + rabbit_ct_helpers:testcase_finished(Config, TestCase); +end_per_testcase(store_offset_requires_read_access = TestCase, Config) -> + ok = rabbit_ct_broker_helpers:delete_user(Config, <<"test">>), + rabbit_ct_helpers:testcase_finished(Config, TestCase); +end_per_testcase(unauthorized_vhost_access_should_close_with_delay = TestCase, Config) -> + ok = rabbit_ct_broker_helpers:delete_user(Config, <<"other">>), + rabbit_ct_helpers:testcase_finished(Config, TestCase); end_per_testcase(TestCase, Config) -> rabbit_ct_helpers:testcase_finished(Config, TestCase). -filtering_ff(Config) -> - Stream = atom_to_binary(?FUNCTION_NAME, utf8), - Transport = gen_tcp, - Port = get_stream_port(Config), - Opts = [{active, false}, {mode, binary}], - {ok, S} = Transport:connect("localhost", Port, Opts), - C0 = rabbit_stream_core:init(0), - C1 = test_peer_properties(Transport, S, C0), - C2 = test_authenticate(Transport, S, C1), - C3 = test_create_stream(Transport, S, Stream, C2), - PublisherId = 42, - C4 = test_declare_publisher(Transport, S, PublisherId, Stream, C3), - Body = <<"hello">>, - C5 = test_publish_confirm(Transport, S, publish_v2, PublisherId, Body, - publish_error, C4), - SubscriptionId = 42, - C6 = test_subscribe(Transport, S, SubscriptionId, Stream, - #{<<"filter.0">> => <<"foo">>}, - ?RESPONSE_CODE_PRECONDITION_FAILED, - C5), - - C7 = test_delete_stream(Transport, S, Stream, C6), - _C8 = test_close(Transport, S, C7), - closed = wait_for_socket_close(Transport, S, 10), - ok. - test_global_counters(Config) -> Stream = atom_to_binary(?FUNCTION_NAME, utf8), test_server(gen_tcp, Stream, Config), @@ -220,6 +250,44 @@ test_stream(Config) -> test_server(gen_tcp, Stream, Config), ok. +sasl_anonymous(Config) -> + Port = get_port(gen_tcp, Config), + Opts = get_opts(gen_tcp), + {ok, S} = gen_tcp:connect("localhost", Port, Opts), + C0 = rabbit_stream_core:init(0), + C1 = test_peer_properties(gen_tcp, S, C0), + C2 = sasl_handshake(gen_tcp, S, C1), + C3 = test_anonymous_sasl_authenticate(gen_tcp, S, C2), + _C = tune(gen_tcp, S, C3). + +test_update_secret(Config) -> + Transport = gen_tcp, + {S, C0} = connect_and_authenticate(Transport, Config), + rabbit_ct_broker_helpers:change_password(Config, <<"guest">>, <<"password">>), + C1 = expect_successful_authentication( + try_authenticate(Transport, S, C0, <<"PLAIN">>, <<"guest">>, <<"password">>)), + _C2 = test_close(Transport, S, C1), + closed = wait_for_socket_close(Transport, S, 10), + ok. 
+ +cannot_update_username_after_authenticated(Config) -> + {S, C0} = connect_and_authenticate(gen_tcp, Config), + C1 = expect_unsuccessful_authentication( + try_authenticate(gen_tcp, S, C0, <<"PLAIN">>, <<"other">>, <<"other">>), + ?RESPONSE_SASL_CANNOT_CHANGE_USERNAME), + _C2 = test_close(gen_tcp, S, C1), + closed = wait_for_socket_close(gen_tcp, S, 10), + ok. + +cannot_use_another_authmechanism_when_updating_secret(Config) -> + {S, C0} = connect_and_authenticate(gen_tcp, Config), + C1 = expect_unsuccessful_authentication( + try_authenticate(gen_tcp, S, C0, <<"EXTERNAL">>, <<"guest">>, <<"new_password">>), + ?RESPONSE_SASL_CANNOT_CHANGE_MECHANISM), + _C2 = test_close(gen_tcp, S, C1), + closed = wait_for_socket_close(gen_tcp, S, 10), + ok. + test_stream_tls(Config) -> Stream = atom_to_binary(?FUNCTION_NAME, utf8), test_server(ssl, Stream, Config), @@ -258,6 +326,83 @@ test_publish_v2(Config) -> closed = wait_for_socket_close(Transport, S, 10), ok. + +test_super_stream_creation_deletion(Config) -> + T = gen_tcp, + Port = get_port(T, Config), + Opts = get_opts(T), + {ok, S} = T:connect("localhost", Port, Opts), + C = rabbit_stream_core:init(0), + test_peer_properties(T, S, C), + test_authenticate(T, S, C), + + Ss = atom_to_binary(?FUNCTION_NAME, utf8), + Partitions = [unicode:characters_to_binary([Ss, <<"-">>, integer_to_binary(N)]) || N <- lists:seq(0, 2)], + Bks = [integer_to_binary(N) || N <- lists:seq(0, 2)], + SsCreationFrame = request({create_super_stream, Ss, Partitions, Bks, #{}}), + ok = T:send(S, SsCreationFrame), + {Cmd1, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {create_super_stream, ?RESPONSE_CODE_OK}}, + Cmd1), + + PartitionsFrame = request({partitions, Ss}), + ok = T:send(S, PartitionsFrame), + {Cmd2, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {partitions, ?RESPONSE_CODE_OK, Partitions}}, + Cmd2), + [begin + RouteFrame = request({route, Rk, Ss}), + ok = T:send(S, RouteFrame), + {Command, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {route, ?RESPONSE_CODE_OK, _}}, Command), + {response, 1, {route, ?RESPONSE_CODE_OK, [P]}} = Command, + ?assertEqual(unicode:characters_to_binary([Ss, <<"-">>, Rk]), P) + end || Rk <- Bks], + + SsDeletionFrame = request({delete_super_stream, Ss}), + ok = T:send(S, SsDeletionFrame), + {Cmd3, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {delete_super_stream, ?RESPONSE_CODE_OK}}, + Cmd3), + + ok = T:send(S, PartitionsFrame), + {Cmd4, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {partitions, ?RESPONSE_CODE_STREAM_DOES_NOT_EXIST, []}}, + Cmd4), + + %% not the same number of partitions and binding keys + SsCreationBadFrame = request({create_super_stream, Ss, + [<<"s1">>, <<"s2">>], [<<"bk1">>], #{}}), + ok = T:send(S, SsCreationBadFrame), + {Cmd5, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {create_super_stream, ?RESPONSE_CODE_PRECONDITION_FAILED}}, + Cmd5), + + test_close(T, S, C), + closed = wait_for_socket_close(T, S, 10), + ok. 
+ +test_super_stream_duplicate_partitions(Config) -> + T = gen_tcp, + Port = get_port(T, Config), + Opts = get_opts(T), + {ok, S} = T:connect("localhost", Port, Opts), + C = rabbit_stream_core:init(0), + test_peer_properties(T, S, C), + test_authenticate(T, S, C), + + Ss = atom_to_binary(?FUNCTION_NAME, utf8), + Partitions = [<<"same-name">>, <<"same-name">>], + SsCreationFrame = request({create_super_stream, Ss, Partitions, [<<"1">>, <<"2">>], #{}}), + ok = T:send(S, SsCreationFrame), + {Cmd1, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {create_super_stream, ?RESPONSE_CODE_PRECONDITION_FAILED}}, + Cmd1), + + test_close(T, S, C), + closed = wait_for_socket_close(T, S, 10), + ok. + test_metadata(Config) -> Stream = atom_to_binary(?FUNCTION_NAME, utf8), Transport = gen_tcp, @@ -273,8 +418,7 @@ test_metadata(Config) -> C3 = test_create_stream(Transport, S, Stream, C2), GetStreamNodes = fun() -> - MetadataFrame = - rabbit_stream_core:frame({request, 1, {metadata, [Stream]}}), + MetadataFrame = request({metadata, [Stream]}), ok = Transport:send(S, MetadataFrame), {CmdMetadata, _} = receive_commands(Transport, S, C3), {response, 1, @@ -318,8 +462,7 @@ test_metadata(Config) -> length(GetStreamNodes()) == 3 end), - DeleteStreamFrame = - rabbit_stream_core:frame({request, 1, {delete_stream, Stream}}), + DeleteStreamFrame = request({delete_stream, Stream}), ok = Transport:send(S, DeleteStreamFrame), {CmdDelete, C4} = receive_commands(Transport, S, C3), ?assertMatch({response, 1, {delete_stream, ?RESPONSE_CODE_OK}}, @@ -365,21 +508,18 @@ test_gc_publishers(Config) -> unauthenticated_client_rejected_tcp_connected(Config) -> Port = get_stream_port(Config), - {ok, S} = - gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), + {ok, S} = gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), ?assertEqual(ok, gen_tcp:send(S, <<"invalid data">>)), ?assertEqual(closed, wait_for_socket_close(gen_tcp, S, 1)). timeout_tcp_connected(Config) -> Port = get_stream_port(Config), - {ok, S} = - gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), + {ok, S} = gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), ?assertEqual(closed, wait_for_socket_close(gen_tcp, S, 1)). unauthenticated_client_rejected_peer_properties_exchanged(Config) -> Port = get_stream_port(Config), - {ok, S} = - gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), + {ok, S} = gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), C0 = rabbit_stream_core:init(0), test_peer_properties(gen_tcp, S, C0), ?assertEqual(ok, gen_tcp:send(S, <<"invalid data">>)), @@ -387,37 +527,32 @@ unauthenticated_client_rejected_peer_properties_exchanged(Config) -> timeout_peer_properties_exchanged(Config) -> Port = get_stream_port(Config), - {ok, S} = - gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), + {ok, S} = gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), C0 = rabbit_stream_core:init(0), test_peer_properties(gen_tcp, S, C0), ?assertEqual(closed, wait_for_socket_close(gen_tcp, S, 1)). 
unauthenticated_client_rejected_authenticating(Config) -> Port = get_stream_port(Config), - {ok, S} = - gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), + {ok, S} = gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), C0 = rabbit_stream_core:init(0), test_peer_properties(gen_tcp, S, C0), - SaslHandshakeFrame = - rabbit_stream_core:frame({request, 1, sasl_handshake}), + SaslHandshakeFrame = request(sasl_handshake), ?assertEqual(ok, gen_tcp:send(S, SaslHandshakeFrame)), ?awaitMatch({error, closed}, gen_tcp:send(S, <<"invalid data">>), ?WAIT). timeout_authenticating(Config) -> Port = get_stream_port(Config), - {ok, S} = - gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), + {ok, S} = gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), C0 = rabbit_stream_core:init(0), test_peer_properties(gen_tcp, S, C0), - _Frame = rabbit_stream_core:frame({request, 1, sasl_handshake}), + _Frame = request(sasl_handshake), ?assertEqual(closed, wait_for_socket_close(gen_tcp, S, 1)). timeout_close_sent(Config) -> Port = get_stream_port(Config), - {ok, S} = - gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), + {ok, S} = gen_tcp:connect("localhost", Port, [{active, false}, {mode, binary}]), C0 = rabbit_stream_core:init(0), C1 = test_peer_properties(gen_tcp, S, C0), C2 = test_authenticate(gen_tcp, S, C1), @@ -437,18 +572,15 @@ timeout_close_sent(Config) -> max_segment_size_bytes_validation(Config) -> Transport = gen_tcp, Port = get_stream_port(Config), - {ok, S} = - Transport:connect("localhost", Port, - [{active, false}, {mode, binary}]), + {ok, S} = Transport:connect("localhost", Port, + [{active, false}, {mode, binary}]), C0 = rabbit_stream_core:init(0), C1 = test_peer_properties(Transport, S, C0), C2 = test_authenticate(Transport, S, C1), Stream = <<"stream-max-segment-size">>, - CreateStreamFrame = - rabbit_stream_core:frame({request, 1, - {create_stream, Stream, - #{<<"stream-max-segment-size-bytes">> => - <<"3000000001">>}}}), + CreateStreamFrame = request({create_stream, Stream, + #{<<"stream-max-segment-size-bytes">> => + <<"3000000001">>}}), ok = Transport:send(S, CreateStreamFrame), {Cmd, C3} = receive_commands(Transport, S, C2), ?assertMatch({response, 1, @@ -506,10 +638,8 @@ set_filter_size(Config) -> ], C3 = lists:foldl(fun({Size, ExpectedResponseCode}, Conn0) -> - Frame = rabbit_stream_core:frame( - {request, 1, - {create_stream, Stream, - #{<<"stream-filter-size-bytes">> => integer_to_binary(Size)}}}), + Frame = request({create_stream, Stream, + #{<<"stream-filter-size-bytes">> => integer_to_binary(Size)}}), ok = Transport:send(S, Frame), {Cmd, Conn1} = receive_commands(Transport, S, Conn0), ?assertMatch({response, 1, {create_stream, ExpectedResponseCode}}, Cmd), @@ -520,6 +650,351 @@ set_filter_size(Config) -> closed = wait_for_socket_close(Transport, S, 10), ok. 
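Several hunks above replace explicit rabbit_stream_core:frame({request, 1, ...}) calls with a request/1 helper whose definition falls outside this excerpt. Judging from the call sites it replaces, it is presumably a thin wrapper along these lines (a sketch, assuming a fixed correlation ID of 1):

request(Cmd) ->
    %% wrap a command in a request frame with correlation ID 1,
    %% matching the frames the replaced code built by hand
    rabbit_stream_core:frame({request, 1, Cmd}).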
+vhost_queue_limit(Config) -> + T = gen_tcp, + Port = get_port(T, Config), + Opts = get_opts(T), + {ok, S} = T:connect("localhost", Port, Opts), + C = rabbit_stream_core:init(0), + test_peer_properties(T, S, C), + test_authenticate(T, S, C), + QueueCount = rabbit_ct_broker_helpers:rpc(Config, + 0, + rabbit_amqqueue, + count, + [<<"/">>]), + {ok, QueueLimit} = rabbit_ct_broker_helpers:rpc(Config, + 0, + rabbit_vhost_limit, + queue_limit, + [<<"/">>]), + + PartitionCount = QueueLimit - 1 - QueueCount, + Name = atom_to_binary(?FUNCTION_NAME, utf8), + Partitions = [unicode:characters_to_binary([Name, <<"-">>, integer_to_binary(N)]) || N <- lists:seq(0, PartitionCount)], + Bks = [integer_to_binary(N) || N <- lists:seq(0, PartitionCount)], + SsCreationFrame = request({create_super_stream, Name, Partitions, Bks, #{}}), + ok = T:send(S, SsCreationFrame), + {Cmd1, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {create_super_stream, ?RESPONSE_CODE_OK}}, + Cmd1), + + SsCreationFrameKo = request({create_super_stream, + <<"exceed-queue-limit">>, + [<<"s1">>, <<"s2">>, <<"s3">>], + [<<"1">>, <<"2">>, <<"3">>], #{}}), + + ok = T:send(S, SsCreationFrameKo), + {Cmd2, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {create_super_stream, ?RESPONSE_CODE_PRECONDITION_FAILED}}, + Cmd2), + + CreateStreamFrame = request({create_stream, <<"exceed-queue-limit">>, #{}}), + ok = T:send(S, CreateStreamFrame), + {Cmd3, C} = receive_commands(T, S, C), + ?assertMatch({response, 1, {create_stream, ?RESPONSE_CODE_PRECONDITION_FAILED}}, Cmd3), + + SsDeletionFrame = request({delete_super_stream, Name}), + ok = T:send(S, SsDeletionFrame), + {Cmd4, _} = receive_commands(T, S, C), + ?assertMatch({response, 1, {delete_super_stream, ?RESPONSE_CODE_OK}}, + Cmd4), + + ok = T:send(S, request({create_stream, Name, #{}})), + {Cmd5, C} = receive_commands(T, S, C), + ?assertMatch({response, 1, {create_stream, ?RESPONSE_CODE_OK}}, Cmd5), + + ok = T:send(S, request({delete_stream, Name})), + {Cmd6, C} = receive_commands(T, S, C), + ?assertMatch({response, 1, {delete_stream, ?RESPONSE_CODE_OK}}, Cmd6), + + ok. + +connection_should_be_closed_on_token_expiry(Config) -> + rabbit_ct_broker_helpers:setup_meck(Config), + Mod = rabbit_access_control, + ok = rpc(Config, 0, meck, new, [Mod, [no_link, passthrough]]), + ok = rpc(Config, 0, meck, expect, [Mod, check_user_loopback, 2, ok]), + ok = rpc(Config, 0, meck, expect, [Mod, check_vhost_access, 4, ok]), + ok = rpc(Config, 0, meck, expect, [Mod, permission_cache_can_expire, 1, true]), + Expiry = os:system_time(seconds) + 2, + ok = rpc(Config, 0, meck, expect, [Mod, expiry_timestamp, 1, Expiry]), + + T = gen_tcp, + Port = get_port(T, Config), + Opts = get_opts(T), + {ok, S} = T:connect("localhost", Port, Opts), + C = rabbit_stream_core:init(0), + test_peer_properties(T, S, C), + test_authenticate(T, S, C), + closed = wait_for_socket_close(T, S, 10), + ok = rpc(Config, 0, meck, unload, [Mod]). 
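connection_should_be_closed_on_token_expiry above mocks rabbit_access_control:expiry_timestamp/1 so that credentials expire two seconds after login, then expects the socket to close on its own. A hypothetical sketch of the reader-side behaviour this exercises (function and message names are assumptions; only the token_expiry_timer field comes from the record added earlier in this diff):

maybe_schedule_token_expiry(User, #stream_connection{} = Connection) ->
    case rabbit_access_control:expiry_timestamp(User) of
        never ->
            Connection;
        Ts ->
            %% expiry_timestamp/1 yields seconds; convert the remaining
            %% lifetime to milliseconds for the timer
            DelayMs = max(0, Ts - os:system_time(second)) * 1000,
            Ref = erlang:send_after(DelayMs, self(), token_expired),
            Connection#stream_connection{token_expiry_timer = Ref}
    end.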
+
+should_receive_metadata_update_after_update_secret(Config) ->
+    T = gen_tcp,
+    Port = get_port(T, Config),
+    Opts = get_opts(T),
+    {ok, S} = T:connect("localhost", Port, Opts),
+    C = rabbit_stream_core:init(0),
+    test_peer_properties(T, S, C),
+    test_authenticate(T, S, C),
+
+    Prefix = atom_to_binary(?FUNCTION_NAME, utf8),
+    PublishStream = <<Prefix/binary, <<"-publish">>/binary>>,
+    test_create_stream(T, S, PublishStream, C),
+    ConsumeStream = <<Prefix/binary, <<"-consume">>/binary>>,
+    test_create_stream(T, S, ConsumeStream, C),
+
+    test_declare_publisher(T, S, 1, PublishStream, C),
+    test_subscribe(T, S, 1, ConsumeStream, C),
+
+    rabbit_ct_broker_helpers:setup_meck(Config),
+    Mod = rabbit_stream_utils,
+    ok = rpc(Config, 0, meck, new, [Mod, [no_link, passthrough]]),
+    ok = rpc(Config, 0, meck, expect, [Mod, check_write_permitted, 2, error]),
+    ok = rpc(Config, 0, meck, expect, [Mod, check_read_permitted, 3, error]),
+
+    C01 = expect_successful_authentication(try_authenticate(T, S, C, <<"PLAIN">>, <<"guest">>, <<"guest">>)),
+
+    {Meta1, C02} = receive_commands(T, S, C01),
+    {metadata_update, Stream1, ?RESPONSE_CODE_STREAM_NOT_AVAILABLE} = Meta1,
+    {Meta2, C03} = receive_commands(T, S, C02),
+    {metadata_update, Stream2, ?RESPONSE_CODE_STREAM_NOT_AVAILABLE} = Meta2,
+    ImpactedStreams = #{Stream1 => ok, Stream2 => ok},
+    ?assert(maps:is_key(PublishStream, ImpactedStreams)),
+    ?assert(maps:is_key(ConsumeStream, ImpactedStreams)),
+
+    test_close(T, S, C03),
+    closed = wait_for_socket_close(T, S, 10),
+
+    ok = rpc(Config, 0, meck, unload, [Mod]),
+
+    {ok, S2} = T:connect("localhost", Port, Opts),
+    C2 = rabbit_stream_core:init(0),
+    test_peer_properties(T, S2, C2),
+    test_authenticate(T, S2, C2),
+    test_delete_stream(T, S2, PublishStream, C2, false),
+    test_delete_stream(T, S2, ConsumeStream, C2, false),
+    test_close(T, S2, C2),
+    closed = wait_for_socket_close(T, S2, 10),
+    ok.
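Aside (not part of the patch): the new tests all follow the same three-step pattern, build a frame with the suite's request/1 helper, send it, then receive and assert on the response with receive_commands/3 (both helpers appear later in this patch). Purely as an illustration, the round trip could be folded into one helper; the function name is hypothetical:

%% Sketch only: one correlated request/response round trip on an open connection.
request_and_expect(Transport, S, C0, Cmd, ExpectedResponse) ->
    ok = Transport:send(S, request(Cmd)),
    {Response, C1} = receive_commands(Transport, S, C0),
    ?assertMatch({response, 1, ExpectedResponse}, Response),
    C1.

For example, request_and_expect(T, S, C, {delete_stream, Stream}, {delete_stream, ?RESPONSE_CODE_OK}) would replace the send/receive/assert triple used for stream deletion.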
+ +store_offset_requires_read_access(Config) -> + Username = <<"test">>, + rabbit_ct_broker_helpers:set_full_permissions(Config, Username, <<"/">>), + + T = gen_tcp, + Port = get_port(T, Config), + Opts = get_opts(T), + {ok, S} = T:connect("localhost", Port, Opts), + C0 = rabbit_stream_core:init(0), + C1 = test_peer_properties(T, S, C0), + C2 = test_authenticate(T, S, C1, Username), + Stream = atom_to_binary(?FUNCTION_NAME, utf8), + C3 = test_create_stream(T, S, Stream, C2), + + C4 = test_subscribe(T, S, 1, Stream, C3), + %% store_offset should work because the subscription is still active + Reference = <<"foo">>, + ok = store_offset(T, S, Reference, Stream, 42), + {O42, C5} = query_expected_offset(T, S, C4, Reference, Stream, 42), + ?assertEqual(42, O42), + + C6 = test_unsubscribe(T, S, 1, C5), + %% store_offset should still work because the user has read access to the stream + ok = store_offset(T, S, Reference, Stream, 43), + {O43, C7} = query_expected_offset(T, S, C6, Reference, Stream, 43), + ?assertEqual(43, O43), + + %% no read access anymore + rabbit_ct_broker_helpers:set_permissions(Config, Username, <<"/">>, + <<".*">>, <<".*">>, <<"foobar">>), + %% this store_offset request will not work because no read access + ok = store_offset(T, S, Reference, Stream, 44), + + %% providing read access back to be able to query_offset + rabbit_ct_broker_helpers:set_full_permissions(Config, Username, <<"/">>), + %% we never get the offset from the last query_offset attempt + {Timeout, C8} = query_expected_offset(T, S, C7, Reference, Stream, 44), + ?assertMatch(timeout, Timeout), + + C9 = test_delete_stream(T, S, Stream, C8, true), + test_close(T, S, C9), + closed = wait_for_socket_close(T, S, 10), + ok. + +offset_lag_calculation(Config) -> + FunctionName = atom_to_binary(?FUNCTION_NAME, utf8), + T = gen_tcp, + Port = get_port(T, Config), + Opts = get_opts(T), + {ok, S} = T:connect("localhost", Port, Opts), + C = rabbit_stream_core:init(0), + ConnectionName = FunctionName, + test_peer_properties(T, S, #{<<"connection_name">> => ConnectionName}, C), + test_authenticate(T, S, C), + + Stream = FunctionName, + test_create_stream(T, S, Stream, C), + + SubId = 1, + TheFuture = os:system_time(millisecond) + 60 * 60 * 1_000, + lists:foreach(fun(OffsetSpec) -> + test_subscribe(T, S, SubId, Stream, + OffsetSpec, 10, #{}, + ?RESPONSE_CODE_OK, C), + ConsumerInfo = consumer_offset_info(Config, ConnectionName), + ?assertEqual({0, 0}, ConsumerInfo), + test_unsubscribe(T, S, SubId, C) + end, [first, last, next, 0, 1_000, {timestamp, TheFuture}]), + + + PublisherId = 1, + test_declare_publisher(T, S, PublisherId, Stream, C), + MessageCount = 10, + Body = <<"hello">>, + lists:foreach(fun(_) -> + test_publish_confirm(T, S, PublisherId, Body, C) + end, lists:seq(1, MessageCount - 1)), + %% to make sure to have 2 chunks + timer:sleep(200), + test_publish_confirm(T, S, PublisherId, Body, C), + test_delete_publisher(T, S, PublisherId, C), + + NextOffset = MessageCount, + lists:foreach(fun({OffsetSpec, ReceiveDeliver, CheckFun}) -> + test_subscribe(T, S, SubId, Stream, + OffsetSpec, 1, #{}, + ?RESPONSE_CODE_OK, C), + case ReceiveDeliver of + true -> + {{deliver, SubId, _}, _} = receive_commands(T, S, C); + _ -> + ok + end, + {Offset, Lag} = consumer_offset_info(Config, ConnectionName), + CheckFun(Offset, Lag), + test_unsubscribe(T, S, SubId, C) + end, [{first, true, + fun(Offset, Lag) -> + ?assert(Offset >= 0, "first, at least one chunk consumed"), + ?assert(Lag > 0, "first, not all messages consumed") + end}, + {last, 
true, + fun(Offset, _Lag) -> + ?assert(Offset > 0, "offset expected for last") + end}, + {next, false, + fun(Offset, Lag) -> + ?assertEqual(NextOffset, Offset, "next, offset should be at the end of the stream"), + ?assert(Lag =:= 0, "next, offset lag should be 0") + end}, + {0, true, + fun(Offset, Lag) -> + ?assert(Offset >= 0, "offset spec = 0, at least one chunk consumed"), + ?assert(Lag > 0, "offset spec = 0, not all messages consumed") + end}, + {1_000, false, + fun(Offset, Lag) -> + ?assertEqual(NextOffset, Offset, "offset spec = 1000, offset should be at the end of the stream"), + ?assert(Lag =:= 0, "offset spec = 1000, offset lag should be 0") + end}, + {{timestamp, TheFuture}, false, + fun(Offset, Lag) -> + ?assertEqual(NextOffset, Offset, "offset spec in future, offset should be at the end of the stream"), + ?assert(Lag =:= 0, "offset spec in future , offset lag should be 0") + end}]), + + test_delete_stream(T, S, Stream, C, false), + test_close(T, S, C), + + ok. + +authentication_error_should_close_with_delay(Config) -> + T = gen_tcp, + Port = get_port(T, Config), + Opts = get_opts(T), + {ok, S} = T:connect("localhost", Port, Opts), + C0 = rabbit_stream_core:init(0), + C1 = test_peer_properties(T, S, C0), + Start = erlang:monotonic_time(millisecond), + _ = expect_unsuccessful_authentication( + try_authenticate(T, S, C1, <<"PLAIN">>, <<"guest">>, <<"wrong password">>), + ?RESPONSE_AUTHENTICATION_FAILURE), + End = erlang:monotonic_time(millisecond), + %% the stream reader module defines the delay (3 seconds) + ?assert(End - Start > 2_000), + closed = wait_for_socket_close(T, S, 10), + ok. + +unauthorized_vhost_access_should_close_with_delay(Config) -> + T = gen_tcp, + Port = get_port(T, Config), + Opts = get_opts(T), + {ok, S} = T:connect("localhost", Port, Opts), + C0 = rabbit_stream_core:init(0), + C1 = test_peer_properties(T, S, C0), + User = <<"other">>, + C2 = test_plain_sasl_authenticate(T, S, sasl_handshake(T, S, C1), User), + Start = erlang:monotonic_time(millisecond), + R = do_tune(T, S, C2), + ?assertMatch({{response,_,{open,12}}, _}, R), + End = erlang:monotonic_time(millisecond), + %% the stream reader module defines the delay (3 seconds) + ?assert(End - Start > 2_000), + closed = wait_for_socket_close(T, S, 10), + ok. + +consumer_offset_info(Config, ConnectionName) -> + [[{offset, Offset}, + {offset_lag, Lag}]] = rpc(Config, 0, ?MODULE, + list_consumer_info, [ConnectionName, [offset, offset_lag]]), + {Offset, Lag}. + +list_consumer_info(ConnectionName, Infos) -> + Pids = rabbit_stream:list(<<"/">>), + [ConnPid] = lists:filter(fun(ConnectionPid) -> + ConnectionPid ! {infos, self()}, + receive + {ConnectionPid, + #{<<"connection_name">> := ConnectionName}} -> + true; + {ConnectionPid, _ClientProperties} -> + false + after 1000 -> + false + end + end, + Pids), + rabbit_stream_reader:consumers_info(ConnPid, Infos). + +store_offset(Transport, S, Reference, Stream, Offset) -> + StoreFrame = rabbit_stream_core:frame({store_offset, Reference, Stream, Offset}), + ok = Transport:send(S, StoreFrame). + +query_expected_offset(T, S, C, Reference, Stream, Expected) -> + query_expected_offset(T, S, C, Reference, Stream, Expected, 10). + +query_expected_offset(_, _, C, _, _, _, 0) -> + {timeout, C}; +query_expected_offset(T, S, C0, Reference, Stream, Expected, Count) -> + case query_offset(T, S, C0, Reference, Stream) of + {Expected, _} = R -> + R; + {_, C1} -> + timer:sleep(100), + query_expected_offset(T, S, C1, Reference, Stream, Expected, Count - 1) + end. 
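Aside (not part of the patch): store_offset/5 above is fire-and-forget, the store_offset command has no response frame, which is why the permission tests poll with query_expected_offset/6 rather than asserting immediately. A minimal usage sketch built only from the suite's own helpers; the wrapper name is hypothetical:

%% Sketch only: store an offset under a reference, then poll until the broker
%% reports it (query_expected_offset/6 retries up to 10 times, 100 ms apart).
store_and_verify_offset(T, S, C0, Stream) ->
    C1 = test_subscribe(T, S, 1, Stream, C0),
    ok = store_offset(T, S, <<"some-ref">>, Stream, 42),
    {42, C2} = query_expected_offset(T, S, C1, <<"some-ref">>, Stream, 42),
    test_unsubscribe(T, S, 1, C2).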
+
+query_offset(T, S, C0, Reference, Stream) ->
+    QueryFrame = request({query_offset, Reference, Stream}),
+    ok = T:send(S, QueryFrame),
+
+    {Cmd, C1} = receive_commands(T, S, C0),
+    {response, 1, {query_offset, _, Offset}} = Cmd,
+
+    {Offset, C1}.
+
 consumer_count(Config) ->
     ets_count(Config, ?TABLE_CONSUMER).
 
@@ -576,23 +1051,43 @@ get_node_name(Config) ->
 get_node_name(Config, Node) ->
     rabbit_ct_broker_helpers:get_node_config(Config, Node, nodename).
 
+get_port(Transport, Config) ->
+    case Transport of
+        gen_tcp ->
+            get_stream_port(Config);
+        ssl ->
+            application:ensure_all_started(ssl),
+            get_stream_port_tls(Config)
+    end.
+get_opts(Transport) ->
+    case Transport of
+        gen_tcp ->
+            [{active, false}, {mode, binary}];
+        ssl ->
+            [{active, false}, {mode, binary}, {verify, verify_none}]
+    end.
+
+connect_and_authenticate(Transport, Config) ->
+    Port = get_port(Transport, Config),
+    Opts = get_opts(Transport),
+    {ok, S} = Transport:connect("localhost", Port, Opts),
+    C0 = rabbit_stream_core:init(0),
+    C1 = test_peer_properties(Transport, S, C0),
+    {S, test_authenticate(Transport, S, C1)}.
+
+try_authenticate(Transport, S, C, AuthMethod, Username, Password) ->
+    case AuthMethod of
+        <<"PLAIN">> ->
+            plain_sasl_authenticate(Transport, S, C, Username, Password);
+        _ ->
+            Null = 0,
+            sasl_authenticate(Transport, S, C, AuthMethod, <<Null:8, Username/binary, Null:8, Password/binary>>)
+    end.
+
 test_server(Transport, Stream, Config) ->
     QName = rabbit_misc:r(<<"/">>, queue, Stream),
-    Port =
-        case Transport of
-            gen_tcp ->
-                get_stream_port(Config);
-            ssl ->
-                application:ensure_all_started(ssl),
-                get_stream_port_tls(Config)
-        end,
-    Opts =
-        case Transport of
-            gen_tcp ->
-                [{active, false}, {mode, binary}];
-            ssl ->
-                [{active, false}, {mode, binary}, {verify, verify_none}]
-        end,
+    Port = get_port(Transport, Config),
+    Opts = get_opts(Transport),
 
     {ok, S} = Transport:connect("localhost", Port, Opts),
     C0 = rabbit_stream_core:init(0),
@@ -642,8 +1137,10 @@ test_server(Transport, Stream, Config) ->
     ok.
 
 test_peer_properties(Transport, S, C0) ->
-    PeerPropertiesFrame =
-        rabbit_stream_core:frame({request, 1, {peer_properties, #{}}}),
+    test_peer_properties(Transport, S, #{}, C0).
+
+test_peer_properties(Transport, S, Properties, C0) ->
+    PeerPropertiesFrame = request({peer_properties, Properties}),
     ok = Transport:send(S, PeerPropertiesFrame),
     {Cmd, C} = receive_commands(Transport, S, C0),
     ?assertMatch({response, 1, {peer_properties, ?RESPONSE_CODE_OK, _}},
@@ -651,30 +1148,63 @@
     C.
 
 test_authenticate(Transport, S, C0) ->
-    SaslHandshakeFrame =
-        rabbit_stream_core:frame({request, 1, sasl_handshake}),
+    tune(Transport, S,
+         test_plain_sasl_authenticate(Transport, S, sasl_handshake(Transport, S, C0), <<"guest">>)).
+
+test_authenticate(Transport, S, C0, Username) ->
+    test_authenticate(Transport, S, C0, Username, Username).
+
+test_authenticate(Transport, S, C0, Username, Password) ->
+    tune(Transport, S,
+         test_plain_sasl_authenticate(Transport, S, sasl_handshake(Transport, S, C0), Username, Password)).
+
+sasl_handshake(Transport, S, C0) ->
+    SaslHandshakeFrame = request(sasl_handshake),
     ok = Transport:send(S, SaslHandshakeFrame),
-    Plain = <<"PLAIN">>,
-    AmqPlain = <<"AMQPLAIN">>,
     {Cmd, C1} = receive_commands(Transport, S, C0),
     case Cmd of
         {response, _, {sasl_handshake, ?RESPONSE_CODE_OK, Mechanisms}} ->
-            ?assertEqual([AmqPlain, Plain], lists:sort(Mechanisms));
+            ?assertEqual([<<"AMQPLAIN">>, <<"ANONYMOUS">>, <<"PLAIN">>],
+                         lists:sort(Mechanisms));
         _ ->
            ct:fail("invalid cmd ~tp", [Cmd])
    end,
+    C1.
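Aside (not part of the patch): the handshake assertion above now expects ANONYMOUS alongside AMQPLAIN and PLAIN. An anonymous login round trip with the suite's helpers looks like the sketch below; it is essentially what test_anonymous_sasl_authenticate/3, defined next, does, preceded by the handshake. The function name is hypothetical:

%% Sketch only: SASL ANONYMOUS carries an empty response payload.
authenticate_anonymously(Transport, S, C0) ->
    C1 = sasl_handshake(Transport, S, C0),
    expect_successful_authentication(
        sasl_authenticate(Transport, S, C1, <<"ANONYMOUS">>, <<>>)).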
+
+test_anonymous_sasl_authenticate(Transport, S, C) ->
+    Res = sasl_authenticate(Transport, S, C, <<"ANONYMOUS">>, <<>>),
+    expect_successful_authentication(Res).
+
+test_plain_sasl_authenticate(Transport, S, C1, Username) ->
+    test_plain_sasl_authenticate(Transport, S, C1, Username, Username).
+
+test_plain_sasl_authenticate(Transport, S, C1, Username, Password) ->
+    expect_successful_authentication(plain_sasl_authenticate(Transport, S, C1, Username, Password)).
 
-    Username = <<"guest">>,
-    Password = <<"guest">>,
+plain_sasl_authenticate(Transport, S, C1, Username, Password) ->
     Null = 0,
-    PlainSasl = <<Null:8, Username/binary, Null:8, Password/binary>>,
+    sasl_authenticate(Transport, S, C1, <<"PLAIN">>, <<Null:8, Username/binary, Null:8, Password/binary>>).
 
-    SaslAuthenticateFrame =
-        rabbit_stream_core:frame({request, 2,
-                                  {sasl_authenticate, Plain, PlainSasl}}),
+expect_successful_authentication({SaslAuth, C2} = _SaslResponse) ->
+    ?assertEqual({response, 2, {sasl_authenticate, ?RESPONSE_CODE_OK}},
+                 SaslAuth),
+    C2.
+
+expect_unsuccessful_authentication({SaslAuth, C2} = _SaslResponse, ExpectedError) ->
+    ?assertEqual({response, 2, {sasl_authenticate, ExpectedError}},
+                 SaslAuth),
+    C2.
+
+sasl_authenticate(Transport, S, C1, AuthMethod, AuthBody) ->
+    SaslAuthenticateFrame = request(2, {sasl_authenticate, AuthMethod, AuthBody}),
    ok = Transport:send(S, SaslAuthenticateFrame),
-    {SaslAuth, C2} = receive_commands(Transport, S, C1),
-    {response, 2, {sasl_authenticate, ?RESPONSE_CODE_OK}} = SaslAuth,
+    receive_commands(Transport, S, C1).
+
+tune(Transport, S, C2) ->
+    {{response, _, {open, ?RESPONSE_CODE_OK, _}}, C3} = do_tune(Transport, S, C2),
+    C3.
+
+do_tune(Transport, S, C2) ->
     {Tune, C3} = receive_commands(Transport, S, C2),
     {tune, ?DEFAULT_FRAME_MAX, ?DEFAULT_HEARTBEAT} = Tune,
@@ -684,17 +1214,12 @@ test_authenticate(Transport, S, C0) ->
     ok = Transport:send(S, TuneFrame),
 
     VirtualHost = <<"/">>,
-    OpenFrame =
-        rabbit_stream_core:frame({request, 3, {open, VirtualHost}}),
+    OpenFrame = request(3, {open, VirtualHost}),
     ok = Transport:send(S, OpenFrame),
-    {{response, 3, {open, ?RESPONSE_CODE_OK, _ConnectionProperties}},
-     C4} =
-        receive_commands(Transport, S, C3),
-    C4.
+    receive_commands(Transport, S, C3).
 
 test_create_stream(Transport, S, Stream, C0) ->
-    CreateStreamFrame =
-        rabbit_stream_core:frame({request, 1, {create_stream, Stream, #{}}}),
+    CreateStreamFrame = request({create_stream, Stream, #{}}),
     ok = Transport:send(S, CreateStreamFrame),
     {Cmd, C} = receive_commands(Transport, S, C0),
     ?assertMatch({response, 1, {create_stream, ?RESPONSE_CODE_OK}}, Cmd),
@@ -710,8 +1235,7 @@ test_delete_stream(Transport, S, Stream, C0, true) ->
     test_metadata_update_stream_deleted(Transport, S, Stream, C1).
 
 do_test_delete_stream(Transport, S, Stream, C0) ->
-    DeleteStreamFrame =
-        rabbit_stream_core:frame({request, 1, {delete_stream, Stream}}),
+    DeleteStreamFrame = request({delete_stream, Stream}),
     ok = Transport:send(S, DeleteStreamFrame),
     {Cmd, C1} = receive_commands(Transport, S, C0),
     ?assertMatch({response, 1, {delete_stream, ?RESPONSE_CODE_OK}}, Cmd),
@@ -723,12 +1247,13 @@ test_metadata_update_stream_deleted(Transport, S, Stream, C0) ->
     C1.
 
 test_declare_publisher(Transport, S, PublisherId, Stream, C0) ->
-    DeclarePublisherFrame =
-        rabbit_stream_core:frame({request, 1,
-                                  {declare_publisher,
-                                   PublisherId,
-                                   <<>>,
-                                   Stream}}),
+    test_declare_publisher(Transport, S, PublisherId, <<>>, Stream, C0).
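Aside (not part of the patch): the PLAIN payload built by plain_sasl_authenticate/5 above follows RFC 4616, an empty authorization identity, then the username and the password, each preceded by a NUL byte. A standalone sketch of that encoding; the function name is illustrative only:

%% Sketch only: <<0, Username, 0, Password>>, the same payload the helper
%% above sends for the PLAIN mechanism.
plain_sasl_payload(Username, Password)
  when is_binary(Username), is_binary(Password) ->
    Null = 0,
    <<Null:8, Username/binary, Null:8, Password/binary>>.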
+
+test_declare_publisher(Transport, S, PublisherId, Reference, Stream, C0) ->
+    DeclarePublisherFrame = request({declare_publisher,
+                                     PublisherId,
+                                     Reference,
+                                     Stream}),
     ok = Transport:send(S, DeclarePublisherFrame),
     {Cmd, C} = receive_commands(Transport, S, C0),
     ?assertMatch({response, 1, {declare_publisher, ?RESPONSE_CODE_OK}},
@@ -736,38 +1261,53 @@
     C.
 
 test_publish_confirm(Transport, S, PublisherId, Body, C0) ->
-    test_publish_confirm(Transport, S, publish, PublisherId, Body,
+    test_publish_confirm(Transport, S, PublisherId, 1, Body, C0).
+
+test_publish_confirm(Transport, S, PublisherId, Sequence, Body, C0) ->
+    test_publish_confirm(Transport, S, publish, PublisherId, Sequence, Body,
                          publish_confirm, C0).
 
-test_publish_confirm(Transport, S, publish = PublishCmd, PublisherId, Body,
-                     ExpectedConfirmCommand,C0) ->
+test_publish_confirm(Transport, S, PublishCmd, PublisherId, Body,
+                     ExpectedConfirmCommand, C0) ->
+    test_publish_confirm(Transport, S, PublishCmd, PublisherId, 1, Body,
+                         ExpectedConfirmCommand, C0).
+
+test_publish_confirm(Transport, S, publish = PublishCmd, PublisherId,
+                     Sequence, Body,
+                     ExpectedConfirmCommand, C0) ->
     BodySize = byte_size(Body),
-    Messages = [<<1:64, 0:1, BodySize:31, Body:BodySize/binary>>],
-    PublishFrame =
-        rabbit_stream_core:frame({PublishCmd, PublisherId, 1, Messages}),
+    Messages = [<<Sequence:64, 0:1, BodySize:31, Body:BodySize/binary>>],
+    PublishFrame = frame({PublishCmd, PublisherId, 1, Messages}),
     ok = Transport:send(S, PublishFrame),
     {Cmd, C} = receive_commands(Transport, S, C0),
-    ?assertMatch({ExpectedConfirmCommand, PublisherId, [1]}, Cmd),
+    ?assertMatch({ExpectedConfirmCommand, PublisherId, [Sequence]}, Cmd),
     C;
-test_publish_confirm(Transport, S, publish_v2 = PublishCmd, PublisherId, Body,
+test_publish_confirm(Transport, S, publish_v2 = PublishCmd, PublisherId,
+                     Sequence, Body,
                      ExpectedConfirmCommand, C0) ->
     BodySize = byte_size(Body),
     FilterValue = <<"foo">>,
     FilterValueSize = byte_size(FilterValue),
-    Messages = [<<1:64, FilterValueSize:16, FilterValue:FilterValueSize/binary,
+    Messages = [<<Sequence:64, FilterValueSize:16, FilterValue:FilterValueSize/binary,
                   0:1, BodySize:31, Body:BodySize/binary>>],
-    PublishFrame =
-        rabbit_stream_core:frame({PublishCmd, PublisherId, 1, Messages}),
+    PublishFrame = frame({PublishCmd, PublisherId, 1, Messages}),
     ok = Transport:send(S, PublishFrame),
     {Cmd, C} = receive_commands(Transport, S, C0),
     case ExpectedConfirmCommand of
         publish_confirm ->
-            ?assertMatch({ExpectedConfirmCommand, PublisherId, [1]}, Cmd);
+            ?assertMatch({ExpectedConfirmCommand, PublisherId, [Sequence]}, Cmd);
         publish_error ->
-            ?assertMatch({ExpectedConfirmCommand, PublisherId, _, [1]}, Cmd)
+            ?assertMatch({ExpectedConfirmCommand, PublisherId, _, [Sequence]}, Cmd)
     end,
     C.
 
+test_delete_publisher(Transport, Socket, PublisherId, C0) ->
+    Frame = request({delete_publisher, PublisherId}),
+    ok = Transport:send(Socket, Frame),
+    {Cmd, C} = receive_commands(Transport, Socket, C0),
+    ?assertMatch({response, 1, {delete_publisher, ?RESPONSE_CODE_OK}}, Cmd),
+    C.
+
 test_subscribe(Transport, S, SubscriptionId, Stream, C0) ->
     test_subscribe(Transport,
                    S,
@@ -784,27 +1324,37 @@ test_subscribe(Transport,
                    SubscriptionProperties,
                    ExpectedResponseCode,
                    C0) ->
-    SubCmd =
-        {request, 1,
-         {subscribe, SubscriptionId, Stream, 0, 10, SubscriptionProperties}},
-    SubscribeFrame = rabbit_stream_core:frame(SubCmd),
+    test_subscribe(Transport, S, SubscriptionId, Stream, 0, 10,
+                   SubscriptionProperties,
+                   ExpectedResponseCode, C0).
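Aside (not part of the patch): test_publish_confirm/8 above encodes each message entry differently for publish and publish_v2. Both carry a 64-bit publishing sequence and a 31-bit body size; publish_v2 additionally prefixes the body with a 16-bit-length filter value. A sketch of the two entry layouts, mirroring the binaries in the function above; the encoder names are illustrative only:

%% Sketch only: per-message entry layout used by the publish frames above.
encode_publish_entry(Sequence, Body) ->
    BodySize = byte_size(Body),
    <<Sequence:64, 0:1, BodySize:31, Body:BodySize/binary>>.

encode_publish_v2_entry(Sequence, FilterValue, Body) ->
    BodySize = byte_size(Body),
    FilterValueSize = byte_size(FilterValue),
    <<Sequence:64, FilterValueSize:16, FilterValue:FilterValueSize/binary,
      0:1, BodySize:31, Body:BodySize/binary>>.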
+ +test_subscribe(Transport, + S, + SubscriptionId, + Stream, + OffsetSpec, + Credit, + SubscriptionProperties, + ExpectedResponseCode, + C0) -> + SubscribeFrame = request({subscribe, SubscriptionId, Stream, + OffsetSpec, Credit, SubscriptionProperties}), ok = Transport:send(S, SubscribeFrame), {Cmd, C} = receive_commands(Transport, S, C0), ?assertMatch({response, 1, {subscribe, ExpectedResponseCode}}, Cmd), C. test_unsubscribe(Transport, Socket, SubscriptionId, C0) -> - UnsubCmd = {request, 1, {unsubscribe, SubscriptionId}}, - UnsubscribeFrame = rabbit_stream_core:frame(UnsubCmd), + UnsubscribeFrame = request({unsubscribe, SubscriptionId}), ok = Transport:send(Socket, UnsubscribeFrame), {Cmd, C} = receive_commands(Transport, Socket, C0), ?assertMatch({response, 1, {unsubscribe, ?RESPONSE_CODE_OK}}, Cmd), C. test_deliver(Transport, S, SubscriptionId, COffset, Body, C0) -> - ct:pal("test_deliver ", []), {{deliver, SubscriptionId, Chunk}, C} = receive_commands(Transport, S, C0), + ct:pal("test_deliver ~p", [Chunk]), <<5:4/unsigned, 0:4/unsigned, 0:8, @@ -824,9 +1374,9 @@ test_deliver(Transport, S, SubscriptionId, COffset, Body, C0) -> C. test_deliver_v2(Transport, S, SubscriptionId, COffset, Body, C0) -> - ct:pal("test_deliver ", []), {{deliver_v2, SubscriptionId, _CommittedOffset, Chunk}, C} = receive_commands(Transport, S, C0), + ct:pal("test_deliver_v2 ~p", [Chunk]), <<5:4/unsigned, 0:4/unsigned, 0:8, @@ -846,10 +1396,7 @@ test_deliver_v2(Transport, S, SubscriptionId, COffset, Body, C0) -> C. test_exchange_command_versions(Transport, S, C0) -> - ExCmd = - {request, 1, - {exchange_command_versions, [{deliver, ?VERSION_1, ?VERSION_2}]}}, - ExFrame = rabbit_stream_core:frame(ExCmd), + ExFrame = request({exchange_command_versions, [{deliver, ?VERSION_1, ?VERSION_2}]}), ok = Transport:send(S, ExFrame), {Cmd, C} = receive_commands(Transport, S, C0), ?assertMatch({response, 1, @@ -859,8 +1406,7 @@ test_exchange_command_versions(Transport, S, C0) -> C. test_stream_stats(Transport, S, Stream, C0) -> - SICmd = {request, 1, {stream_stats, Stream}}, - SIFrame = rabbit_stream_core:frame(SICmd), + SIFrame = request({stream_stats, Stream}), ok = Transport:send(S, SIFrame), {Cmd, C} = receive_commands(Transport, S, C0), ?assertMatch({response, 1, @@ -872,9 +1418,7 @@ test_stream_stats(Transport, S, Stream, C0) -> test_close(Transport, S, C0) -> CloseReason = <<"OK">>, - CloseFrame = - rabbit_stream_core:frame({request, 1, - {close, ?RESPONSE_CODE_OK, CloseReason}}), + CloseFrame = request({close, ?RESPONSE_CODE_OK, CloseReason}), ok = Transport:send(S, CloseFrame), {{response, 1, {close, ?RESPONSE_CODE_OK}}, C} = receive_commands(Transport, S, C0), @@ -925,3 +1469,9 @@ get_global_counters(Config) -> rabbit_global_counters, overview, [])). + +request(Cmd) -> + request(1, Cmd). + +request(CorrId, Cmd) -> + rabbit_stream_core:frame({request, CorrId, Cmd}). diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java deleted file mode 100644 index b901097f2db6..000000000000 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2007-present the original author or authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -import java.net.*; -import java.io.*; -import java.nio.channels.*; -import java.util.Properties; - -public class MavenWrapperDownloader { - - private static final String WRAPPER_VERSION = "0.5.6"; - /** - * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. - */ - private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" - + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; - - /** - * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to - * use instead of the default one. - */ - private static final String MAVEN_WRAPPER_PROPERTIES_PATH = - ".mvn/wrapper/maven-wrapper.properties"; - - /** - * Path where the maven-wrapper.jar will be saved to. - */ - private static final String MAVEN_WRAPPER_JAR_PATH = - ".mvn/wrapper/maven-wrapper.jar"; - - /** - * Name of the property which should be used to override the default download url for the wrapper. - */ - private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; - - public static void main(String args[]) { - System.out.println("- Downloader started"); - File baseDirectory = new File(args[0]); - System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); - - // If the maven-wrapper.properties exists, read it and check if it contains a custom - // wrapperUrl parameter. - File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); - String url = DEFAULT_DOWNLOAD_URL; - if(mavenWrapperPropertyFile.exists()) { - FileInputStream mavenWrapperPropertyFileInputStream = null; - try { - mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); - Properties mavenWrapperProperties = new Properties(); - mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); - url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); - } catch (IOException e) { - System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); - } finally { - try { - if(mavenWrapperPropertyFileInputStream != null) { - mavenWrapperPropertyFileInputStream.close(); - } - } catch (IOException e) { - // Ignore ... 
- } - } - } - System.out.println("- Downloading from: " + url); - - File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); - if(!outputFile.getParentFile().exists()) { - if(!outputFile.getParentFile().mkdirs()) { - System.out.println( - "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); - } - } - System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); - try { - downloadFileFromURL(url, outputFile); - System.out.println("Done"); - System.exit(0); - } catch (Throwable e) { - System.out.println("- Error downloading"); - e.printStackTrace(); - System.exit(1); - } - } - - private static void downloadFileFromURL(String urlString, File destination) throws Exception { - if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { - String username = System.getenv("MVNW_USERNAME"); - char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); - Authenticator.setDefault(new Authenticator() { - @Override - protected PasswordAuthentication getPasswordAuthentication() { - return new PasswordAuthentication(username, password); - } - }); - } - URL website = new URL(urlString); - ReadableByteChannel rbc; - rbc = Channels.newChannel(website.openStream()); - FileOutputStream fos = new FileOutputStream(destination); - fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); - fos.close(); - rbc.close(); - } - -} diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.jar deleted file mode 100644 index 2cc7d4a55c0c..000000000000 Binary files a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.jar and /dev/null differ diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.properties index 642d572ce90e..f95f1ee80715 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.properties +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.properties @@ -1,2 +1,19 @@ -distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip -wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+wrapperVersion=3.3.2 +distributionType=only-script +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.8/apache-maven-3.9.8-bin.zip diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw index 41c0f0c23db5..19529ddf8c6e 100755 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw @@ -19,292 +19,241 @@ # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- -# Maven Start Up Batch script -# -# Required ENV vars: -# ------------------ -# JAVA_HOME - location of a JDK home dir +# Apache Maven Wrapper startup batch script, version 3.3.2 # # Optional ENV vars # ----------------- -# M2_HOME - location of maven2's installed home dir -# MAVEN_OPTS - parameters passed to the Java VM when running Maven -# e.g. to debug Maven itself, use -# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# JAVA_HOME - location of a JDK home dir, required when download maven via java source +# MVNW_REPOURL - repo url base for downloading maven distribution +# MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +# MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output # ---------------------------------------------------------------------------- -if [ -z "$MAVEN_SKIP_RC" ] ; then - - if [ -f /etc/mavenrc ] ; then - . /etc/mavenrc - fi +set -euf +[ "${MVNW_VERBOSE-}" != debug ] || set -x - if [ -f "$HOME/.mavenrc" ] ; then - . "$HOME/.mavenrc" - fi +# OS specific support. +native_path() { printf %s\\n "$1"; } +case "$(uname)" in +CYGWIN* | MINGW*) + [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")" + native_path() { cygpath --path --windows "$1"; } + ;; +esac -fi +# set JAVACMD and JAVACCMD +set_java_home() { + # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched + if [ -n "${JAVA_HOME-}" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACCMD="$JAVA_HOME/jre/sh/javac" + else + JAVACMD="$JAVA_HOME/bin/java" + JAVACCMD="$JAVA_HOME/bin/javac" -# OS specific support. $var _must_ be set to either true or false. -cygwin=false; -darwin=false; -mingw=false -case "`uname`" in - CYGWIN*) cygwin=true ;; - MINGW*) mingw=true;; - Darwin*) darwin=true - # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home - # See https://developer.apple.com/library/mac/qa/qa1170/_index.html - if [ -z "$JAVA_HOME" ]; then - if [ -x "/usr/libexec/java_home" ]; then - export JAVA_HOME="`/usr/libexec/java_home`" - else - export JAVA_HOME="/Library/Java/Home" + if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then + echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." >&2 + echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." 
>&2 + return 1 fi fi - ;; -esac - -if [ -z "$JAVA_HOME" ] ; then - if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=`java-config --jre-home` + else + JAVACMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v java + )" || : + JAVACCMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v javac + )" || : + + if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then + echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2 + return 1 + fi fi -fi - -if [ -z "$M2_HOME" ] ; then - ## resolve links - $0 may be a link to maven's home - PRG="$0" +} - # need this for relative symlinks - while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG="`dirname "$PRG"`/$link" - fi +# hash string like Java String::hashCode +hash_string() { + str="${1:-}" h=0 + while [ -n "$str" ]; do + char="${str%"${str#?}"}" + h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296)) + str="${str#?}" done + printf %x\\n $h +} - saveddir=`pwd` +verbose() { :; } +[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; } - M2_HOME=`dirname "$PRG"`/.. +die() { + printf %s\\n "$1" >&2 + exit 1 +} - # make it fully qualified - M2_HOME=`cd "$M2_HOME" && pwd` +trim() { + # MWRAPPER-139: + # Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds. + # Needed for removing poorly interpreted newline sequences when running in more + # exotic environments such as mingw bash on Windows. + printf "%s" "${1}" | tr -d '[:space:]' +} - cd "$saveddir" - # echo Using m2 at $M2_HOME -fi +# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties +while IFS="=" read -r key value; do + case "${key-}" in + distributionUrl) distributionUrl=$(trim "${value-}") ;; + distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;; + esac +done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties" +[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties" + +case "${distributionUrl##*/}" in +maven-mvnd-*bin.*) + MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ + case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in + *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;; + :Darwin*x86_64) distributionPlatform=darwin-amd64 ;; + :Darwin*arm64) distributionPlatform=darwin-aarch64 ;; + :Linux*x86_64*) distributionPlatform=linux-amd64 ;; + *) + echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2 + distributionPlatform=linux-amd64 + ;; + esac + distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip" + ;; +maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;; +*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;; +esac -# For Cygwin, ensure paths are in UNIX format before anything is touched -if $cygwin ; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --unix "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --unix "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --unix "$CLASSPATH"` -fi +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}" +distributionUrlName="${distributionUrl##*/}" 
+distributionUrlNameMain="${distributionUrlName%.*}" +distributionUrlNameMain="${distributionUrlNameMain%-bin}" +MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}" +MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")" + +exec_maven() { + unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || : + exec "$MAVEN_HOME/bin/$MVN_CMD" "$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD" +} -# For Mingw, ensure paths are in UNIX format before anything is touched -if $mingw ; then - [ -n "$M2_HOME" ] && - M2_HOME="`(cd "$M2_HOME"; pwd)`" - [ -n "$JAVA_HOME" ] && - JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +if [ -d "$MAVEN_HOME" ]; then + verbose "found existing MAVEN_HOME at $MAVEN_HOME" + exec_maven "$@" fi -if [ -z "$JAVA_HOME" ]; then - javaExecutable="`which javac`" - if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then - # readlink(1) is not available as standard on Solaris 10. - readLink=`which readlink` - if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then - if $darwin ; then - javaHome="`dirname \"$javaExecutable\"`" - javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" - else - javaExecutable="`readlink -f \"$javaExecutable\"`" - fi - javaHome="`dirname \"$javaExecutable\"`" - javaHome=`expr "$javaHome" : '\(.*\)/bin'` - JAVA_HOME="$javaHome" - export JAVA_HOME - fi - fi -fi +case "${distributionUrl-}" in +*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;; +*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;; +esac -if [ -z "$JAVACMD" ] ; then - if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - else - JAVACMD="`which java`" - fi +# prepare tmp dir +if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then + clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; } + trap clean HUP INT TERM EXIT +else + die "cannot create temp dir" fi -if [ ! -x "$JAVACMD" ] ; then - echo "Error: JAVA_HOME is not defined correctly." >&2 - echo " We cannot execute $JAVACMD" >&2 - exit 1 -fi +mkdir -p -- "${MAVEN_HOME%/*}" + +# Download and Install Apache Maven +verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +verbose "Downloading from: $distributionUrl" +verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" -if [ -z "$JAVA_HOME" ] ; then - echo "Warning: JAVA_HOME environment variable is not set." +# select .zip or .tar.gz +if ! 
command -v unzip >/dev/null; then + distributionUrl="${distributionUrl%.zip}.tar.gz" + distributionUrlName="${distributionUrl##*/}" fi -CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher +# verbose opt +__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR='' +[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v -# traverses directory structure from process work directory to filesystem root -# first directory with .mvn subdirectory is considered project base directory -find_maven_basedir() { +# normalize http auth +case "${MVNW_PASSWORD:+has-password}" in +'') MVNW_USERNAME='' MVNW_PASSWORD='' ;; +has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;; +esac - if [ -z "$1" ] - then - echo "Path not specified to find_maven_basedir" - return 1 - fi +if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then + verbose "Found wget ... using wget" + wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl" +elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then + verbose "Found curl ... using curl" + curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl" +elif set_java_home; then + verbose "Falling back to use Java to download" + javaSource="$TMP_DOWNLOAD_DIR/Downloader.java" + targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName" + cat >"$javaSource" <<-END + public class Downloader extends java.net.Authenticator + { + protected java.net.PasswordAuthentication getPasswordAuthentication() + { + return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() ); + } + public static void main( String[] args ) throws Exception + { + setDefault( new Downloader() ); + java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() ); + } + } + END + # For Cygwin/MinGW, switch paths to Windows format before running javac and java + verbose " - Compiling Downloader.java ..." + "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java" + verbose " - Running Downloader.java ..." + "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")" +fi - basedir="$1" - wdir="$1" - while [ "$wdir" != '/' ] ; do - if [ -d "$wdir"/.mvn ] ; then - basedir=$wdir - break +# If specified, validate the SHA-256 sum of the Maven distribution zip file +if [ -n "${distributionSha256Sum-}" ]; then + distributionSha256Result=false + if [ "$MVN_CMD" = mvnd.sh ]; then + echo "Checksum validation is not supported for maven-mvnd." >&2 + echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." 
>&2 + exit 1 + elif command -v sha256sum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then + distributionSha256Result=true fi - # workaround for JBEAP-8937 (on Solaris 10/Sparc) - if [ -d "${wdir}" ]; then - wdir=`cd "$wdir/.."; pwd` + elif command -v shasum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then + distributionSha256Result=true fi - # end of workaround - done - echo "${basedir}" -} - -# concatenates all lines of a file -concat_lines() { - if [ -f "$1" ]; then - echo "$(tr -s '\n' ' ' < "$1")" + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2 + echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $distributionSha256Result = false ]; then + echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2 + echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2 + exit 1 fi -} - -BASE_DIR=`find_maven_basedir "$(pwd)"` -if [ -z "$BASE_DIR" ]; then - exit 1; fi -########################################################################################## -# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -# This allows using the maven wrapper in projects that prohibit checking in binary data. -########################################################################################## -if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found .mvn/wrapper/maven-wrapper.jar" - fi +# unzip and move +if command -v unzip >/dev/null; then + unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip" else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." - fi - if [ -n "$MVNW_REPOURL" ]; then - jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" - else - jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" - fi - while IFS="=" read key value; do - case "$key" in (wrapperUrl) jarUrl="$value"; break ;; - esac - done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" - if [ "$MVNW_VERBOSE" = true ]; then - echo "Downloading from: $jarUrl" - fi - wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" - if $cygwin; then - wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` - fi - - if command -v wget > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found wget ... using wget" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - wget "$jarUrl" -O "$wrapperJarPath" - else - wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" - fi - elif command -v curl > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found curl ... 
using curl" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - curl -o "$wrapperJarPath" "$jarUrl" -f - else - curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f - fi - - else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Falling back to using Java to download" - fi - javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" - # For Cygwin, switch paths to Windows format before running javac - if $cygwin; then - javaClass=`cygpath --path --windows "$javaClass"` - fi - if [ -e "$javaClass" ]; then - if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Compiling MavenWrapperDownloader.java ..." - fi - # Compiling the Java class - ("$JAVA_HOME/bin/javac" "$javaClass") - fi - if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - # Running the downloader - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Running MavenWrapperDownloader.java ..." - fi - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") - fi - fi - fi -fi -########################################################################################## -# End of extension -########################################################################################## - -export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} -if [ "$MVNW_VERBOSE" = true ]; then - echo $MAVEN_PROJECTBASEDIR + tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar" fi -MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" - -# For Cygwin, switch paths to Windows format before running java -if $cygwin; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --path --windows "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --windows "$CLASSPATH"` - [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` -fi - -# Provide a "standardized" way to retrieve the CLI args that will -# work with both Windows and non-Windows executions. -MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" -export MAVEN_CMD_LINE_ARGS - -WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain +printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url" +mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME" -exec "$JAVACMD" \ - $MAVEN_OPTS \ - -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ - ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" +clean || : +exec_maven "$@" diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw.cmd b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw.cmd index 86115719e538..b150b91ed500 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw.cmd +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw.cmd @@ -1,182 +1,149 @@ -@REM ---------------------------------------------------------------------------- -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. 
The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM ---------------------------------------------------------------------------- - -@REM ---------------------------------------------------------------------------- -@REM Maven Start Up Batch script -@REM -@REM Required ENV vars: -@REM JAVA_HOME - location of a JDK home dir -@REM -@REM Optional ENV vars -@REM M2_HOME - location of maven2's installed home dir -@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending -@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven -@REM e.g. to debug Maven itself, use -@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files -@REM ---------------------------------------------------------------------------- - -@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' -@echo off -@REM set title of command window -title %0 -@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' -@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% - -@REM set %HOME% to equivalent of $HOME -if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") - -@REM Execute a user defined script before this one -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre -@REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" -if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" -:skipRcPre - -@setlocal - -set ERROR_CODE=0 - -@REM To isolate internal variables from possible post scripts, we use another setlocal -@setlocal - -@REM ==== START VALIDATION ==== -if not "%JAVA_HOME%" == "" goto OkJHome - -echo. -echo Error: JAVA_HOME not found in your environment. >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -:OkJHome -if exist "%JAVA_HOME%\bin\java.exe" goto init - -echo. -echo Error: JAVA_HOME is set to an invalid directory. >&2 -echo JAVA_HOME = "%JAVA_HOME%" >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -@REM ==== END VALIDATION ==== - -:init - -@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". -@REM Fallback to current working directory if not found. - -set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% -IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir - -set EXEC_DIR=%CD% -set WDIR=%EXEC_DIR% -:findBaseDir -IF EXIST "%WDIR%"\.mvn goto baseDirFound -cd .. 
-IF "%WDIR%"=="%CD%" goto baseDirNotFound -set WDIR=%CD% -goto findBaseDir - -:baseDirFound -set MAVEN_PROJECTBASEDIR=%WDIR% -cd "%EXEC_DIR%" -goto endDetectBaseDir - -:baseDirNotFound -set MAVEN_PROJECTBASEDIR=%EXEC_DIR% -cd "%EXEC_DIR%" - -:endDetectBaseDir - -IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig - -@setlocal EnableExtensions EnableDelayedExpansion -for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a -@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% - -:endReadAdditionalConfig - -SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" -set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" -set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" - -FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( - IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B -) - -@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -@REM This allows using the maven wrapper in projects that prohibit checking in binary data. -if exist %WRAPPER_JAR% ( - if "%MVNW_VERBOSE%" == "true" ( - echo Found %WRAPPER_JAR% - ) -) else ( - if not "%MVNW_REPOURL%" == "" ( - SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" - ) - if "%MVNW_VERBOSE%" == "true" ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... - echo Downloading from: %DOWNLOAD_URL% - ) - - powershell -Command "&{"^ - "$webclient = new-object System.Net.WebClient;"^ - "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ - "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ - "}"^ - "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ - "}" - if "%MVNW_VERBOSE%" == "true" ( - echo Finished downloading %WRAPPER_JAR% - ) -) -@REM End of extension - -@REM Provide a "standardized" way to retrieve the CLI args that will -@REM work with both Windows and non-Windows executions. -set MAVEN_CMD_LINE_ARGS=%* - -%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* -if ERRORLEVEL 1 goto error -goto end - -:error -set ERROR_CODE=1 - -:end -@endlocal & set ERROR_CODE=%ERROR_CODE% - -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost -@REM check for post script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" -if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" -:skipRcPost - -@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' -if "%MAVEN_BATCH_PAUSE%" == "on" pause - -if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% - -exit /B %ERROR_CODE% +<# : batch portion +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. 
The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Optional ENV vars +@REM MVNW_REPOURL - repo url base for downloading maven distribution +@REM MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +@REM MVNW_VERBOSE - true: enable verbose log; others: silence the output +@REM ---------------------------------------------------------------------------- + +@IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0) +@SET __MVNW_CMD__= +@SET __MVNW_ERROR__= +@SET __MVNW_PSMODULEP_SAVE=%PSModulePath% +@SET PSModulePath= +@FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @( + IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B) +) +@SET PSModulePath=%__MVNW_PSMODULEP_SAVE% +@SET __MVNW_PSMODULEP_SAVE= +@SET __MVNW_ARG0_NAME__= +@SET MVNW_USERNAME= +@SET MVNW_PASSWORD= +@IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*) +@echo Cannot start maven from wrapper >&2 && exit /b 1 +@GOTO :EOF +: end batch / begin powershell #> + +$ErrorActionPreference = "Stop" +if ($env:MVNW_VERBOSE -eq "true") { + $VerbosePreference = "Continue" +} + +# calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties +$distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl +if (!$distributionUrl) { + Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties" +} + +switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) { + "maven-mvnd-*" { + $USE_MVND = $true + $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip" + $MVN_CMD = "mvnd.cmd" + break + } + default { + $USE_MVND = $false + $MVN_CMD = $script -replace '^mvnw','mvn' + break + } +} + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +if ($env:MVNW_REPOURL) { + $MVNW_REPO_PATTERN = if ($USE_MVND) { "/org/apache/maven/" } else { "/maven/mvnd/" } + $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')" +} +$distributionUrlName = $distributionUrl -replace '^.*/','' +$distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$','' +$MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain" +if ($env:MAVEN_USER_HOME) { + $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain" +} +$MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | 
ForEach-Object {$_.ToString("x2")}) -join '' +$MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME" + +if (Test-Path -Path "$MAVEN_HOME" -PathType Container) { + Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME" + Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" + exit $? +} + +if (! $distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) { + Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl" +} + +# prepare tmp dir +$TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile +$TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir" +$TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null +trap { + if ($TMP_DOWNLOAD_DIR.Exists) { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } + } +} + +New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null + +# Download and Install Apache Maven +Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +Write-Verbose "Downloading from: $distributionUrl" +Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +$webclient = New-Object System.Net.WebClient +if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) { + $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD) +} +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 +$webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +$distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum +if ($distributionSha256Sum) { + if ($USE_MVND) { + Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." + } + Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash + if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) { + Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property." + } +} + +# unzip and move +Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null +Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null +try { + Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null +} catch { + if (! (Test-Path -Path "$MAVEN_HOME" -PathType Container)) { + Write-Error "fail to move MAVEN_HOME" + } +} finally { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } +} + +Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml index 5622ec9ac356..8f322c7d474b 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml @@ -20,19 +20,19 @@ rabbitmq-core@groups.vmware.com Team RabbitMQ - VMware, Inc. or its affiliates. + Broadcom, Inc. or its affiliates. 
https://rabbitmq.com [0.12.0-SNAPSHOT,) - 5.9.3 - 3.24.2 - 1.2.12 - 3.11.0 - 3.1.2 - 2.37.0 + 5.11.0 + 3.26.3 + 1.2.13 + 3.12.1 + 3.4.0 + 2.43.0 1.17.0 UTF-8 diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/ClusterSizeTest.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/ClusterSizeTest.java index 7817fdccec15..def62c74abd6 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/ClusterSizeTest.java +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/ClusterSizeTest.java @@ -11,7 +11,7 @@ // The Original Code is RabbitMQ. // // The Initial Developer of the Original Code is Pivotal Software, Inc. -// Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.stream; diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/FailureTest.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/FailureTest.java index d4315a131180..889bbcad45bf 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/FailureTest.java +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/FailureTest.java @@ -11,7 +11,7 @@ // The Original Code is RabbitMQ. // // The Initial Developer of the Original Code is Pivotal Software, Inc. -// Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
// package com.rabbitmq.stream; @@ -30,8 +30,10 @@ import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; + import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -65,7 +67,7 @@ void tearDown() { @Test void leaderFailureWhenPublisherConnectedToReplica() throws Exception { Set messages = new HashSet<>(); - Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1())); + Client client = cf.get(new Client.ClientParameters().port(streamPortNode1())); Map metadata = client.metadata(stream); Client.StreamMetadata streamMetadata = metadata.get(stream); assertThat(streamMetadata).isNotNull(); @@ -73,11 +75,11 @@ void leaderFailureWhenPublisherConnectedToReplica() throws Exception { waitUntil(() -> client.metadata(stream).get(stream).getReplicas().size() == 2); streamMetadata = client.metadata(stream).get(stream); - assertThat(streamMetadata.getLeader().getPort()).isEqualTo(TestUtils.streamPortNode1()); + assertThat(streamMetadata.getLeader().getPort()).isEqualTo(streamPortNode1()); assertThat(streamMetadata.getReplicas()).isNotEmpty(); Client.Broker replica = streamMetadata.getReplicas().get(0); - assertThat(replica.getPort()).isNotEqualTo(TestUtils.streamPortNode1()); + assertThat(replica.getPort()).isNotEqualTo(streamPortNode1()); AtomicReference confirmLatch = new AtomicReference<>(new CountDownLatch(1)); @@ -102,7 +104,7 @@ void leaderFailureWhenPublisherConnectedToReplica() throws Exception { try { Host.rabbitmqctl("stop_app"); try { - cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1())); + cf.get(new Client.ClientParameters().port(streamPortNode1())); fail("Node app stopped, connecting should not be possible"); } catch (Exception e) { // OK @@ -115,7 +117,7 @@ void leaderFailureWhenPublisherConnectedToReplica() throws Exception { Duration.ofSeconds(10), () -> { Client.StreamMetadata m = publisher.metadata(stream).get(stream); - return m.getLeader() != null && m.getLeader().getPort() != TestUtils.streamPortNode1(); + return m.getLeader() != null && m.getLeader().getPort() != streamPortNode1(); }); confirmLatch.set(new CountDownLatch(1)); @@ -161,7 +163,7 @@ void leaderFailureWhenPublisherConnectedToReplica() throws Exception { Client consumer = cf.get( new Client.ClientParameters() - .port(TestUtils.streamPortNode1()) + .port(streamPortNode1()) .messageListener( (subscriptionId, offset, chunkTimestamp, committedChunkId, context, msg) -> { bodies.add(new String(msg.getBodyAsBinary(), StandardCharsets.UTF_8)); @@ -182,13 +184,13 @@ void leaderFailureWhenPublisherConnectedToReplica() throws Exception { @Test void noLostConfirmedMessagesWhenLeaderGoesAway() throws Exception { executorService = Executors.newCachedThreadPool(); - Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1())); + Client client = cf.get(new Client.ClientParameters().port(streamPortNode1())); Map metadata = client.metadata(stream); Client.StreamMetadata streamMetadata = metadata.get(stream); assertThat(streamMetadata).isNotNull(); assertThat(streamMetadata.getLeader()).isNotNull(); - assertThat(streamMetadata.getLeader().getPort()).isEqualTo(TestUtils.streamPortNode1()); + assertThat(streamMetadata.getLeader().getPort()).isEqualTo(streamPortNode1()); Map published = new 
ConcurrentHashMap<>(); Set confirmed = ConcurrentHashMap.newKeySet(); @@ -220,7 +222,7 @@ void noLostConfirmedMessagesWhenLeaderGoesAway() throws Exception { connected.set(false); Client locator = - cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode2())); + cf.get(new Client.ClientParameters().port(streamPortNode2())); // wait until there's a new leader try { waitAtMost( @@ -228,7 +230,7 @@ void noLostConfirmedMessagesWhenLeaderGoesAway() throws Exception { () -> { Client.StreamMetadata m = locator.metadata(stream).get(stream); return m.getLeader() != null - && m.getLeader().getPort() != TestUtils.streamPortNode1(); + && m.getLeader().getPort() != streamPortNode1(); }); } catch (Throwable e) { reconnectionLatch.countDown(); @@ -316,7 +318,7 @@ void noLostConfirmedMessagesWhenLeaderGoesAway() throws Exception { assertThat(confirmed).hasSizeGreaterThan(confirmedCount); confirmedCount = confirmed.size(); - Client metadataClient = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode2())); + Client metadataClient = cf.get(new Client.ClientParameters().port(streamPortNode2())); // wait until all the replicas are there waitAtMost( Duration.ofSeconds(5), @@ -374,7 +376,7 @@ void noLostConfirmedMessagesWhenLeaderGoesAway() throws Exception { @Test void consumerReattachesToOtherReplicaWhenReplicaGoesAway() throws Exception { executorService = Executors.newCachedThreadPool(); - Client metadataClient = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1())); + Client metadataClient = cf.get(new Client.ClientParameters().port(streamPortNode1())); Map metadata = metadataClient.metadata(stream); Client.StreamMetadata streamMetadata = metadata.get(stream); assertThat(streamMetadata).isNotNull(); @@ -384,7 +386,7 @@ void consumerReattachesToOtherReplicaWhenReplicaGoesAway() throws Exception { metadata = metadataClient.metadata(stream); streamMetadata = metadata.get(stream); assertThat(streamMetadata.getLeader()).isNotNull(); - assertThat(streamMetadata.getLeader().getPort()).isEqualTo(TestUtils.streamPortNode1()); + assertThat(streamMetadata.getLeader().getPort()).isEqualTo(streamPortNode1()); Map published = new ConcurrentHashMap<>(); Set confirmed = ConcurrentHashMap.newKeySet(); @@ -441,7 +443,7 @@ void consumerReattachesToOtherReplicaWhenReplicaGoesAway() throws Exception { Client.Broker replica = streamMetadata.getReplicas().stream() - .filter(broker -> broker.getPort() == TestUtils.streamPortNode2()) + .filter(broker -> broker.getPort() == streamPortNode2()) .findFirst() .orElseThrow(() -> new NoSuchElementException()); @@ -565,7 +567,7 @@ void declarePublisherShouldNotReturnStreamDoesNotExistOnRestart() throws Excepti waitUntil( () -> { try { - client.set(cf.get(new ClientParameters().port(TestUtils.streamPortNode1()))); + client.set(cf.get(new ClientParameters().port(streamPortNode1()))); } catch (Exception e) { } @@ -582,4 +584,42 @@ void declarePublisherShouldNotReturnStreamDoesNotExistOnRestart() throws Excepti assertThat(responseCodes).doesNotContain(Constants.RESPONSE_CODE_STREAM_DOES_NOT_EXIST); } + + @Test + void shouldReceiveMetadataUpdateWhenReplicaIsKilledWithPublisherAndConsumerOnSameConnection() throws Exception { + Client metadataClient = cf.get(new Client.ClientParameters().port(streamPortNode1())); + Map metadata = metadataClient.metadata(stream); + Client.StreamMetadata streamMetadata = metadata.get(stream); + assertThat(streamMetadata).isNotNull(); + + waitUntil(() -> metadataClient.metadata(stream).get(stream).getReplicas().size() == 
2); + + metadata = metadataClient.metadata(stream); + streamMetadata = metadata.get(stream); + assertThat(streamMetadata.getLeader()).isNotNull(); + assertThat(streamMetadata.getLeader().getPort()).isEqualTo(streamPortNode1()); + Client.Broker broker = + streamMetadata.getReplicas().stream() + .filter( + r -> r.getPort() == streamPortNode1() || r.getPort() == streamPortNode2()) + .findFirst() + .get(); + + AtomicInteger metadataNotifications = new AtomicInteger(); + Client client = + cf.get( + new ClientParameters() + .port(broker.getPort()) + .metadataListener( + (stream, code) -> metadataNotifications.incrementAndGet())); + client.declarePublisher((byte) 42, null, stream); + client.subscribe((byte) 66, stream, OffsetSpecification.first(), 1); + + String node = broker.getPort() == streamPortNode1() ? Host.node1name() : Host.node2name(); + Host.killStreamLocalMemberProcess(stream, node); + waitUntil(() -> metadataNotifications.get() == 1); + + Host.killStreamLeaderProcess(stream); + waitUntil(() -> metadataNotifications.get() == 2); + } } diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/Host.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/Host.java index 9fdf41617ecf..cd873228f807 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/Host.java +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/Host.java @@ -11,7 +11,7 @@ // The Original Code is RabbitMQ. // // The Initial Developer of the Original Code is Pivotal Software, Inc. -// Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.stream; @@ -101,6 +101,21 @@ public static String node2name() { return System.getProperty("node2.name", "rabbit-2@" + hostname()); } + public static Process killStreamLocalMemberProcess(String stream, String nodename) throws IOException { + return rabbitmqctl( + "eval 'case rabbit_stream_manager:lookup_local_member(<<\"/\">>, <<\"" + + stream + + "\">>) of {ok, Pid} -> exit(Pid, kill); Pid -> exit(Pid, kill) end.'", + nodename); + } + + public static Process killStreamLeaderProcess(String stream) throws IOException { + return rabbitmqctl( + "eval 'case rabbit_stream_manager:lookup_leader(<<\"/\">>, <<\"" + + stream + + "\">>) of {ok, Pid} -> exit(Pid, kill); Pid -> exit(Pid, kill) end.'"); + } + public static String hostname() { try { return InetAddress.getLocalHost().getHostName(); diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/LeaderLocatorTest.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/LeaderLocatorTest.java index 43bba765d1af..ae573987f34f 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/LeaderLocatorTest.java +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/LeaderLocatorTest.java @@ -11,7 +11,7 @@ // The Original Code is RabbitMQ. // // The Initial Developer of the Original Code is Pivotal Software, Inc. -// Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
// package com.rabbitmq.stream; diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/StreamTest.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/StreamTest.java index 7a4d214c3933..4107d143ca60 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/StreamTest.java +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/StreamTest.java @@ -11,7 +11,7 @@ // The Original Code is RabbitMQ. // // The Initial Developer of the Original Code is Pivotal Software, Inc. -// Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.stream; diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java index 3b54147a0bdc..08517388ee70 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java @@ -11,7 +11,7 @@ // The Original Code is RabbitMQ. // // The Initial Developer of the Original Code is Pivotal Software, Inc. -// Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.stream; @@ -44,7 +44,7 @@ static int streamPortNode1() { } static int streamPortNode2() { - String port = System.getProperty("node2.stream.port", "5552"); + String port = System.getProperty("node2.stream.port", "5553"); return Integer.valueOf(port); } diff --git a/deps/rabbitmq_stream/test/rabbit_stream_manager_SUITE.erl b/deps/rabbitmq_stream/test/rabbit_stream_manager_SUITE.erl index 7ccc2deb4685..4f0e95d7d7d0 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_manager_SUITE.erl +++ b/deps/rabbitmq_stream/test/rabbit_stream_manager_SUITE.erl @@ -2,13 +2,12 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_manager_SUITE). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -compile(export_all). @@ -99,14 +98,14 @@ lookup_member(Config) -> ?assertEqual({ok, deleted}, delete_stream(Config, Stream)). 
manage_super_stream(Config) -> - % create super stream + %% create super stream ?assertEqual(ok, create_super_stream(Config, <<"invoices">>, [<<"invoices-0">>, <<"invoices-1">>, <<"invoices-2">>], [<<"0">>, <<"1">>, <<"2">>])), - % get the correct partitions + %% get the correct partitions ?assertEqual({ok, [<<"invoices-0">>, <<"invoices-1">>, <<"invoices-2">>]}, partitions(Config, <<"invoices">>)), @@ -117,7 +116,7 @@ manage_super_stream(Config) -> <- [{<<"invoices-0">>, <<"0">>}, {<<"invoices-1">>, <<"1">>}, {<<"invoices-2">>, <<"2">>}]], - % get an error if trying to re-create it + %% get an error if trying to re-create it ?assertMatch({error, _}, create_super_stream(Config, <<"invoices">>, @@ -125,13 +124,13 @@ manage_super_stream(Config) -> <<"invoices-2">>], [<<"0">>, <<"1">>, <<"2">>])), - % can delete it + %% can delete it ?assertEqual(ok, delete_super_stream(Config, <<"invoices">>)), - % create a stream with the same name as a potential partition + %% create a stream with the same name as a potential partition ?assertMatch({ok, _}, create_stream(Config, <<"invoices-1">>)), - % cannot create the super stream because a partition already exists + %% cannot create the super stream because a partition already exists ?assertMatch({error, _}, create_super_stream(Config, <<"invoices">>, @@ -140,6 +139,14 @@ manage_super_stream(Config) -> [<<"0">>, <<"1">>, <<"2">>])), ?assertMatch({ok, _}, delete_stream(Config, <<"invoices-1">>)), + + %% not the same number of partitions and binding keys + ?assertMatch({error, {validation_failed, _}}, + create_super_stream(Config, + <<"invoices">>, + [<<"invoices-0">>, <<"invoices-1">>], + [<<"0">>])), + ok. partition_index(Config) -> diff --git a/deps/rabbitmq_stream/test/rabbit_stream_reader_SUITE.erl b/deps/rabbitmq_stream/test/rabbit_stream_reader_SUITE.erl new file mode 100644 index 000000000000..b4916a04de13 --- /dev/null +++ b/deps/rabbitmq_stream/test/rabbit_stream_reader_SUITE.erl @@ -0,0 +1,188 @@ +%% The contents of this file are subject to the Mozilla Public License +%% at https://www.mozilla.org/en-US/MPL/2.0/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is Pivotal Software, Inc. +%% Copyright (c) 2024 Broadcom. All Rights Reserved. +%% The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_stream_reader_SUITE). + +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("rabbitmq_stream/src/rabbit_stream_reader.hrl"). +-include_lib("rabbitmq_stream_common/include/rabbit_stream.hrl"). + +-import(rabbit_stream_reader, [ensure_token_expiry_timer/2]). + +%%%=================================================================== +%%% Common Test callbacks +%%%=================================================================== + +all() -> + [{group, tests}]. + +%% replicate eunit like test resolution +all_tests() -> + [F + || {F, _} <- ?MODULE:module_info(functions), + re:run(atom_to_list(F), "_test$") /= nomatch]. + +groups() -> + [{tests, [], all_tests()}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. 
+ +end_per_testcase(_TestCase, _Config) -> + meck:unload(), + ok. + +ensure_token_expiry_timer_test(_) -> + ok = meck:new(rabbit_access_control), + + meck:expect(rabbit_access_control, permission_cache_can_expire, fun (_) -> false end), + {_, #stream_connection{token_expiry_timer = TR1}} = ensure_token_expiry_timer(#user{}, #stream_connection{}), + ?assertEqual(undefined, TR1), + + meck:expect(rabbit_access_control, permission_cache_can_expire, fun (_) -> true end), + meck:expect(rabbit_access_control, expiry_timestamp, fun (_) -> never end), + {_, #stream_connection{token_expiry_timer = TR2}} = ensure_token_expiry_timer(#user{}, #stream_connection{}), + ?assertEqual(undefined, TR2), + + Now = os:system_time(second), + meck:expect(rabbit_access_control, expiry_timestamp, fun (_) -> Now + 60 end), + {_, #stream_connection{token_expiry_timer = TR3}} = ensure_token_expiry_timer(#user{}, #stream_connection{}), + Cancel3 = erlang:cancel_timer(TR3, [{async, false}, {info, true}]), + ?assert(is_integer(Cancel3)), + + meck:expect(rabbit_access_control, expiry_timestamp, fun (_) -> Now - 60 end), + {_, #stream_connection{token_expiry_timer = TR4}} = ensure_token_expiry_timer(#user{}, #stream_connection{}), + ?assertEqual(undefined, TR4), + + DummyTRef = erlang:send_after(1_000 * 1_000, self(), dummy), + meck:expect(rabbit_access_control, permission_cache_can_expire, fun (_) -> false end), + {Cancel5, #stream_connection{token_expiry_timer = TR5}} = ensure_token_expiry_timer(#user{}, + #stream_connection{token_expiry_timer = DummyTRef}), + ?assertEqual(undefined, TR5), + ?assert(is_integer(Cancel5)), + + ok. + +evaluate_state_after_secret_update_test(_) -> + Mod = rabbit_stream_reader, + meck:new(Mod, [passthrough]), + + ModUtils = rabbit_stream_utils, + meck:new(ModUtils, [passthrough]), + CheckFun = fun(N) -> + case binary:match(N, <<"ok_">>) of + nomatch -> + error; + _ -> + ok + end + end, + meck:expect(ModUtils, check_write_permitted, fun(#resource{name = N}, _) -> CheckFun(N) end), + meck:expect(ModUtils, check_read_permitted, fun(#resource{name = N}, _, _) -> CheckFun(N) end), + + ModAccess = rabbit_access_control, + meck:new(ModAccess), + meck:expect(ModAccess, permission_cache_can_expire, 1, false), + + meck:new(rabbit_stream_metrics, [stub_all]), + meck:new(rabbit_global_counters, [stub_all]), + + ModTransport = dummy_transport, + meck:new(ModTransport, [non_strict]), + meck:expect(ModTransport, send, 2, ok), + + ModLog = osiris_log, + meck:new(ModLog), + meck:expect(ModLog, init, 1, ok), + put(close_log_count, 0), + meck:expect(ModLog, close, fun(_) -> put(close_log_count, get(close_log_count) + 1) end), + + ModCore = rabbit_stream_core, + meck:new(ModCore), + put(metadata_update, []), + meck:expect(ModCore, frame, fun(Cmd) -> put(metadata_update, [Cmd | get(metadata_update)]) end), + + Publishers = #{0 => #publisher{stream = <<"ok_publish">>}, + 1 => #publisher{stream = <<"ko_publish">>}, + 2 => #publisher{stream = <<"ok_publish_consume">>}, + 3 => #publisher{stream = <<"ko_publish_consume">>}}, + Subscriptions = #{<<"ok_consume">> => [0], + <<"ko_consume">> => [1], + <<"ok_publish_consume">> => [2], + <<"ko_publish_consume">> => [3]}, + Consumers = #{0 => consumer(<<"ok_consume">>), + 1 => consumer(<<"ko_consume">>), + 2 => consumer(<<"ok_publish_consume">>), + 3 => consumer(<<"ko_publish_consume">>)}, + + {C1, S1} = Mod:evaluate_state_after_secret_update(ModTransport, #user{}, + #stream_connection{publishers = Publishers, + stream_subscriptions = Subscriptions}, + 
#stream_connection_state{consumers = Consumers}), + + meck:validate(ModLog), + ?assertEqual(2, get(close_log_count)), + erase(close_log_count), + + Cmds = get(metadata_update), + ?assertEqual(3, length(Cmds)), + ?assert(lists:member({metadata_update, <<"ko_publish">>, ?RESPONSE_CODE_STREAM_NOT_AVAILABLE}, Cmds)), + ?assert(lists:member({metadata_update, <<"ko_consume">>, ?RESPONSE_CODE_STREAM_NOT_AVAILABLE}, Cmds)), + ?assert(lists:member({metadata_update, <<"ko_publish_consume">>, ?RESPONSE_CODE_STREAM_NOT_AVAILABLE}, Cmds)), + erase(metadata_update), + + #stream_connection{token_expiry_timer = TRef1, + publishers = Pubs1, + stream_subscriptions = Subs1} = C1, + ?assertEqual(undefined, TRef1), %% no expiry set in the mock + ?assertEqual(2, maps:size(Pubs1)), + ?assertEqual(#publisher{stream = <<"ok_publish">>}, maps:get(0, Pubs1)), + ?assertEqual(#publisher{stream = <<"ok_publish_consume">>}, maps:get(2, Pubs1)), + + #stream_connection_state{consumers = Cons1} = S1, + ?assertEqual([0], maps:get(<<"ok_consume">>, Subs1)), + ?assertEqual([2], maps:get(<<"ok_publish_consume">>, Subs1)), + ?assertEqual(consumer(<<"ok_consume">>), maps:get(0, Cons1)), + ?assertEqual(consumer(<<"ok_publish_consume">>), maps:get(2, Cons1)), + + %% making sure the token expiry timer is set if the token expires + meck:expect(ModAccess, permission_cache_can_expire, 1, true), + Now = os:system_time(second), + meck:expect(rabbit_access_control, expiry_timestamp, fun (_) -> Now + 60 end), + {C2, _} = Mod:evaluate_state_after_secret_update(ModTransport, #user{}, + #stream_connection{}, + #stream_connection_state{}), + #stream_connection{token_expiry_timer = TRef2} = C2, + Cancel2 = erlang:cancel_timer(TRef2, [{async, false}, {info, true}]), + ?assert(is_integer(Cancel2)), + ok. + +consumer(S) -> + #consumer{configuration = #consumer_configuration{stream = S}, + log = osiris_log:init(#{})}. diff --git a/deps/rabbitmq_stream_common/.gitignore b/deps/rabbitmq_stream_common/.gitignore index 30a1e0bed550..eee53b11a899 100644 --- a/deps/rabbitmq_stream_common/.gitignore +++ b/deps/rabbitmq_stream_common/.gitignore @@ -1,56 +1,11 @@ .eunit -*.o -*.beam -*.plt -erl_crash.dump .concrete/DEV_MODE -# rebar 2.x -.rebar -rel/example_project -ebin/*.beam -deps - -# rebar 3 -.rebar3 -_build/ -_checkouts/ - -erl_crash.dump -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/ebin/ -/logs/ -/plugins/ -/xrefr -elvis -callgrind* -ct.coverdata -test/ct.cover.spec -_build - -rabbitmq_stream_common.d -*.plt -*.d - *.jar - -*~ -.sw? -.*.sw? -*.beam *.class *.dat *.dump *.iml *.ipr *.iws -.DS_Store -\#~ -/.idea/ -/deps/ diff --git a/deps/rabbitmq_stream_common/Makefile b/deps/rabbitmq_stream_common/Makefile index 080ec248f65d..914a868f1c7c 100644 --- a/deps/rabbitmq_stream_common/Makefile +++ b/deps/rabbitmq_stream_common/Makefile @@ -10,6 +10,8 @@ endef DEPS = TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers +PLT_APPS = osiris + DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_stream_common/README.adoc b/deps/rabbitmq_stream_common/README.adoc index 843a23f870d6..99581511cd59 100644 --- a/deps/rabbitmq_stream_common/README.adoc +++ b/deps/rabbitmq_stream_common/README.adoc @@ -1,18 +1,8 @@ = RabbitMQ Stream Common Plugin -== Project Maturity +== What is this plugin? -The project is in early stages of development and is considered experimental. -It is not ready for production use. 
- -== Support - -* For questions: https://groups.google.com/forum/#!forum/rabbitmq-users[RabbitMQ Users] -* For bugs and feature requests: https://github.com/rabbitmq/rabbitmq-server/issues[GitHub Issues] - -The project is currently under development, there is no guarantee yet that it will be maintained and supported -in the future (read: you are welcome to experiment with it and give feedback, but please do not base -your whole business on it). +The plugin is a part of the https://www.rabbitmq.com/docs/streams[streaming subsystem in RabbitMQ]. == Licensing @@ -20,4 +10,4 @@ Released under the link:LICENSE-MPL-RabbitMQ[MPL 2.0]. == Copyright -(c) 2020-2023 VMware, Inc. or its affiliates. \ No newline at end of file +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. diff --git a/deps/rabbitmq_stream_common/include/rabbit_stream.hrl b/deps/rabbitmq_stream_common/include/rabbit_stream.hrl index 5f46cd8bffb8..5e7bf6f36ea0 100644 --- a/deps/rabbitmq_stream_common/include/rabbit_stream.hrl +++ b/deps/rabbitmq_stream_common/include/rabbit_stream.hrl @@ -1,3 +1,19 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 2.0 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at https://www.mozilla.org/en-US/MPL/2.0/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is Pivotal Software, Inc. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + -define(COMMAND_DECLARE_PUBLISHER, 1). -define(COMMAND_PUBLISH, 2). -define(COMMAND_PUBLISH_CONFIRM, 3). @@ -26,6 +42,8 @@ -define(COMMAND_CONSUMER_UPDATE, 26). -define(COMMAND_EXCHANGE_COMMAND_VERSIONS, 27). -define(COMMAND_STREAM_STATS, 28). +-define(COMMAND_CREATE_SUPER_STREAM, 29). +-define(COMMAND_DELETE_SUPER_STREAM, 30). -define(REQUEST, 0). -define(RESPONSE, 1). @@ -52,7 +70,8 @@ -define(RESPONSE_CODE_PRECONDITION_FAILED, 17). -define(RESPONSE_CODE_PUBLISHER_DOES_NOT_EXIST, 18). -define(RESPONSE_CODE_NO_OFFSET, 19). - +-define(RESPONSE_SASL_CANNOT_CHANGE_MECHANISM, 20). +-define(RESPONSE_SASL_CANNOT_CHANGE_USERNAME, 21). -define(OFFSET_TYPE_NONE, 0). -define(OFFSET_TYPE_FIRST, 1). @@ -70,6 +89,8 @@ -define(INFO_ITEMS, [conn_name, + pid, + node, port, peer_port, host, @@ -95,6 +116,7 @@ -define(CONSUMER_INFO_ITEMS, [ connection_pid, + node, subscription_id, stream, messages_consumed, @@ -108,6 +130,7 @@ -define(PUBLISHER_INFO_ITEMS, [ connection_pid, + node, publisher_id, stream, reference, @@ -129,4 +152,4 @@ state ]). --define(STREAM_GUIDE_URL, <<"https://rabbitmq.com/stream.html">>). +-define(STREAMS_GUIDE_URL, <<"https://rabbitmq.com/docs/streams">>). 
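A minimal sketch of how the two new SASL response codes defined above might be used. Only the ?RESPONSE_SASL_CANNOT_CHANGE_MECHANISM and ?RESPONSE_SASL_CANNOT_CHANGE_USERNAME macros come from this patch; the check_sasl_change function, its name and its arguments are hypothetical and shown only to make the intent of the codes concrete:

%% Hypothetical helper, for illustration only; assumes rabbit_stream.hrl is included.
check_sasl_change(Mechanism, Username, Mechanism, Username) ->
    ok;
check_sasl_change(PrevMechanism, _PrevUsername, Mechanism, _Username)
  when PrevMechanism =/= Mechanism ->
    {error, ?RESPONSE_SASL_CANNOT_CHANGE_MECHANISM};
check_sasl_change(_PrevMechanism, _PrevUsername, _Mechanism, _Username) ->
    {error, ?RESPONSE_SASL_CANNOT_CHANGE_USERNAME}.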
diff --git a/deps/rabbitmq_stream_common/src/rabbit_stream_core.erl b/deps/rabbitmq_stream_common/src/rabbit_stream_core.erl index 2704cea4d773..19009b51d654 100644 --- a/deps/rabbitmq_stream_common/src/rabbit_stream_core.erl +++ b/deps/rabbitmq_stream_common/src/rabbit_stream_core.erl @@ -1,3 +1,19 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 2.0 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at https://www.mozilla.org/en-US/MPL/2.0/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is Pivotal Software, Inc. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + -module(rabbit_stream_core). -include("rabbit_stream.hrl"). @@ -58,7 +74,9 @@ ?RESPONSE_CODE_ACCESS_REFUSED | ?RESPONSE_CODE_PRECONDITION_FAILED | ?RESPONSE_CODE_PUBLISHER_DOES_NOT_EXIST | - ?RESPONSE_CODE_NO_OFFSET. + ?RESPONSE_CODE_NO_OFFSET | + ?RESPONSE_SASL_CANNOT_CHANGE_MECHANISM | + ?RESPONSE_SASL_CANNOT_CHANGE_USERNAME . -type error_code() :: response_code(). -type sequence() :: non_neg_integer(). -type credit() :: non_neg_integer(). @@ -113,7 +131,9 @@ {exchange_command_versions, [{Command :: atom(), MinVersion :: command_version(), MaxVersion :: command_version()}]} | - {stream_stats, Stream :: binary()}} | + {stream_stats, Stream :: binary()} | + {create_super_stream, stream_name(), Partitions :: [binary()], BindingKeys :: [binary()], Args :: #{binary() => binary()}} | + {delete_super_stream, stream_name()}} | {response, correlation_id(), {declare_publisher | delete_publisher | @@ -122,7 +142,9 @@ create_stream | delete_stream | close | - sasl_authenticate, + sasl_authenticate | + create_super_stream | + delete_super_stream, response_code()} | {query_publisher_sequence, response_code(), sequence()} | {open, response_code(), #{binary() => binary()}} | @@ -559,9 +581,7 @@ request_body({create_stream = Tag, Stream, Args}) -> request_body({delete_stream = Tag, Stream}) -> {Tag, <>}; request_body({metadata = Tag, Streams}) -> - StreamsBin = - lists:foldr(fun(Stream, Acc) -> [<> | Acc] end, [], - Streams), + StreamsBin = generate_list(Streams), {Tag, [<<(length(Streams)):32>>, StreamsBin]}; request_body({peer_properties = Tag, Props}) -> PropsBin = generate_map(Props), @@ -603,7 +623,16 @@ request_body({exchange_command_versions = Tag, CommandVersions}) -> CommandVersionsLength = length(CommandVersions), {Tag, [<>, CommandVersionsBin]}; request_body({stream_stats = Tag, Stream}) -> - {Tag, <>}. + {Tag, <>}; +request_body({create_super_stream = Tag, SuperStream, Partitions, BindingKeys, Args}) -> + PartitionsBin = generate_list(Partitions), + BindingKeysBin = generate_list(BindingKeys), + ArgsBin = generate_map(Args), + {Tag, [<>, PartitionsBin, + <<(length(BindingKeys)):32>>, BindingKeysBin, + <<(map_size(Args)):32>>, ArgsBin]}; +request_body({delete_super_stream = Tag, SuperStream}) -> + {Tag, <>}. 
append_data(Prev, Data) when is_binary(Prev) -> [Prev, Data]; @@ -883,6 +912,23 @@ parse_request(<>) -> request(CorrelationId, {stream_stats, Stream}); +parse_request(<>) -> + {Partitions, <>} = list_of_strings(PartitionsCount, Rest0), + {BindingKeys, <<_ArgumentsCount:32, Rest2/binary>>} = list_of_strings(BindingKeysCount, Rest1), + Args = parse_map(Rest2, #{}), + request(CorrelationId, {create_super_stream, Stream, Partitions, BindingKeys, Args}); +parse_request(<>) -> + request(CorrelationId, {delete_super_stream, SuperStream}); parse_request(Bin) -> {unknown, Bin}. @@ -1050,10 +1096,24 @@ parse_int_map(<<>>, Acc) -> parse_int_map(<>, Acc) -> parse_int_map(Rem, Acc#{Key => Value}). +generate_list(List) -> + lists:foldr(fun(E, Acc) -> [<> | Acc] end, [], + List). + generate_map(Map) -> maps:fold(fun(K, V, Acc) -> [<> | Acc] end, [], Map). +list_of_strings(Count, Bin) -> + list_of_strings(Count, [], Bin). + +list_of_strings(_, Acc, <<>>) -> + {lists:reverse(Acc), <<>>}; +list_of_strings(0, Acc, Rest) -> + {lists:reverse(Acc), Rest}; +list_of_strings(Count, Acc, <>) -> + list_of_strings(Count - 1, [String | Acc], Rem). + list_of_strings(<<>>) -> []; list_of_strings(<>) -> @@ -1133,7 +1193,11 @@ command_id(consumer_update) -> command_id(exchange_command_versions) -> ?COMMAND_EXCHANGE_COMMAND_VERSIONS; command_id(stream_stats) -> - ?COMMAND_STREAM_STATS. + ?COMMAND_STREAM_STATS; +command_id(create_super_stream) -> + ?COMMAND_CREATE_SUPER_STREAM; +command_id(delete_super_stream) -> + ?COMMAND_DELETE_SUPER_STREAM. parse_command_id(?COMMAND_DECLARE_PUBLISHER) -> declare_publisher; @@ -1190,7 +1254,11 @@ parse_command_id(?COMMAND_CONSUMER_UPDATE) -> parse_command_id(?COMMAND_EXCHANGE_COMMAND_VERSIONS) -> exchange_command_versions; parse_command_id(?COMMAND_STREAM_STATS) -> - stream_stats. + stream_stats; +parse_command_id(?COMMAND_CREATE_SUPER_STREAM) -> + create_super_stream; +parse_command_id(?COMMAND_DELETE_SUPER_STREAM) -> + delete_super_stream. element_index(Element, List) -> element_index(Element, List, 0). 
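A minimal client-side sketch of framing the two new commands, assuming rabbit_stream_core:frame/1 accepts the same {request, CorrelationId, Command} tuples exercised by the roundtrip test in the next file (rabbit_stream_core_SUITE). The correlation ids are arbitrary, and the stream, partition and binding-key names simply mirror the manage_super_stream test shown earlier; none of this snippet is part of the patch itself:

%% Sketch only, e.g. from a test or shell-like context:
CreateFrame = rabbit_stream_core:frame(
                {request, 1,
                 {create_super_stream, <<"invoices">>,
                  [<<"invoices-0">>, <<"invoices-1">>, <<"invoices-2">>],
                  [<<"0">>, <<"1">>, <<"2">>],
                  #{}}}),
DeleteFrame = rabbit_stream_core:frame(
                {request, 2, {delete_super_stream, <<"invoices">>}}).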
diff --git a/deps/rabbitmq_stream_common/test/rabbit_stream_core_SUITE.erl b/deps/rabbitmq_stream_common/test/rabbit_stream_core_SUITE.erl index 4934b02affa8..2e859b99d1fa 100644 --- a/deps/rabbitmq_stream_common/test/rabbit_stream_core_SUITE.erl +++ b/deps/rabbitmq_stream_common/test/rabbit_stream_core_SUITE.erl @@ -110,6 +110,15 @@ roundtrip(_Config) -> {exchange_command_versions, [{deliver, ?VERSION_1, ?VERSION_1}]}}), test_roundtrip({request, 99, {stream_stats, <<"stream_name">>}}), + test_roundtrip({request, 99, + {create_super_stream, <<"hello">>, + [<<"stream1">>, <<"stream2">>, <<"stream3">>], [<<"bk1">>, <<"bk2">>, <<"bk3">>], + Args}}), + test_roundtrip({request, 99, + {create_super_stream, <<"super_stream_name">>, + [<<"stream1">>, <<"stream2">>, <<"stream3">>], [<<"bk1">>, <<"bk2">>, <<"bk3">>], + #{}}}), + test_roundtrip({request, 99, {delete_super_stream, <<"super_stream_name">>}}), %% RESPONSES [test_roundtrip({response, 99, {Tag, 53}}) @@ -120,6 +129,8 @@ roundtrip(_Config) -> unsubscribe, create_stream, delete_stream, + create_super_stream, + delete_super_stream, open, close]], diff --git a/deps/rabbitmq_stream_management/.gitignore b/deps/rabbitmq_stream_management/.gitignore index 2fa1c77e8736..eee53b11a899 100644 --- a/deps/rabbitmq_stream_management/.gitignore +++ b/deps/rabbitmq_stream_management/.gitignore @@ -1,60 +1,11 @@ .eunit -*.o -*.beam -*.plt -erl_crash.dump .concrete/DEV_MODE -# rebar 2.x -.rebar -rel/example_project -ebin/*.beam -deps - -# rebar 3 -.rebar3 -_build/ -_checkouts/ - -erl_crash.dump -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/xrefr -elvis -callgrind* -ct.coverdata -test/ct.cover.spec -_build - -rabbitmq_stream.d -*.plt -*.d - *.jar - -*~ -.sw? -.*.sw? -*.beam *.class *.dat *.dump *.iml *.ipr *.iws -.DS_Store -\#~ -/.idea/ diff --git a/deps/rabbitmq_stream_management/BUILD.bazel b/deps/rabbitmq_stream_management/BUILD.bazel index 61f7ac3df5aa..539fdce66fc5 100644 --- a/deps/rabbitmq_stream_management/BUILD.bazel +++ b/deps/rabbitmq_stream_management/BUILD.bazel @@ -51,6 +51,7 @@ rabbitmq_app( "//deps/rabbit:erlang_app", "//deps/rabbitmq_management:erlang_app", "//deps/rabbitmq_stream:erlang_app", + "@osiris//:erlang_app", ], ) @@ -63,7 +64,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", ) @@ -83,7 +84,6 @@ rabbitmq_home( name = "broker-for-tests-home", plugins = [ "//deps/rabbit:erlang_app", - "//deps/rabbitmq_amqp1_0:erlang_app", ":erlang_app", ], ) diff --git a/deps/rabbitmq_stream_management/Makefile b/deps/rabbitmq_stream_management/Makefile index abd95655144e..cb2b4b0ff9cc 100644 --- a/deps/rabbitmq_stream_management/Makefile +++ b/deps/rabbitmq_stream_management/Makefile @@ -8,7 +8,7 @@ define PROJECT_ENV endef -DEPS = rabbit rabbitmq_management rabbitmq_stream +DEPS = rabbit rabbitmq_management rabbitmq_stream osiris TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk diff --git a/deps/rabbitmq_stream_management/README.adoc b/deps/rabbitmq_stream_management/README.adoc index 6f2738c26cbf..bfdbdfb86850 100644 --- a/deps/rabbitmq_stream_management/README.adoc +++ b/deps/rabbitmq_stream_management/README.adoc @@ -1,18 +1,8 @@ = RabbitMQ Stream Management Plugin -== Project Maturity +== What is this plugin? 
-The project is in early stages of development and is considered experimental. -It is not ready for production use. - -== Support - -* For questions: https://groups.google.com/forum/#!forum/rabbitmq-users[RabbitMQ Users] -* For bugs and feature requests: https://github.com/rabbitmq/rabbitmq-server/issues[GitHub Issues] - -The project is currently under development, there is no guarantee yet that it will be maintained and supported -in the future (read: you are welcome to experiment with it and give feedback, but please do not base -your whole business on it). +The plugin is a management UI extension for https://www.rabbitmq.com/docs/streams[RabbitMQ streams]. == Licensing @@ -20,4 +10,4 @@ Released under the link:LICENSE-MPL-RabbitMQ[MPL 2.0]. == Copyright -(c) 2020-2023 VMware, Inc. or its affiliates. \ No newline at end of file +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. diff --git a/deps/rabbitmq_stream_management/app.bzl b/deps/rabbitmq_stream_management/app.bzl index cabaf00f4eea..561ce83df507 100644 --- a/deps/rabbitmq_stream_management/app.bzl +++ b/deps/rabbitmq_stream_management/app.bzl @@ -18,13 +18,13 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_stream_management_utils.erl", "src/rabbit_stream_mgmt_db.erl", "src/rabbit_stream_publishers_mgmt.erl", + "src/rabbit_stream_tracking_mgmt.erl", ], hdrs = [":public_and_private_hdrs"], app_name = "rabbitmq_stream_management", dest = "ebin", erlc_opts = "//:erlc_opts", deps = [ - "//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management:erlang_app", "//deps/rabbitmq_management_agent:erlang_app", @@ -51,13 +51,13 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_stream_management_utils.erl", "src/rabbit_stream_mgmt_db.erl", "src/rabbit_stream_publishers_mgmt.erl", + "src/rabbit_stream_tracking_mgmt.erl", ], hdrs = [":public_and_private_hdrs"], app_name = "rabbitmq_stream_management", dest = "test", erlc_opts = "//:test_erlc_opts", deps = [ - "//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management:erlang_app", "//deps/rabbitmq_management_agent:erlang_app", @@ -101,6 +101,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_stream_management_utils.erl", "src/rabbit_stream_mgmt_db.erl", "src/rabbit_stream_publishers_mgmt.erl", + "src/rabbit_stream_tracking_mgmt.erl", ], ) filegroup( diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_connection_consumers_mgmt.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_connection_consumers_mgmt.erl index 5b98e3763727..352a45598134 100644 --- a/deps/rabbitmq_stream_management/src/rabbit_stream_connection_consumers_mgmt.erl +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_connection_consumers_mgmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_connection_consumers_mgmt). @@ -19,8 +19,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - dispatcher() -> [{"/stream/connections/:vhost/:connection/consumers", ?MODULE, []}]. 
diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_connection_mgmt.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_connection_mgmt.erl index a52457840e9a..1db9a0fabf24 100644 --- a/deps/rabbitmq_stream_management/src/rabbit_stream_connection_mgmt.erl +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_connection_mgmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_connection_mgmt). diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_connection_publishers_mgmt.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_connection_publishers_mgmt.erl index 7ffc9f6a6fd0..7c223d82cf85 100644 --- a/deps/rabbitmq_stream_management/src/rabbit_stream_connection_publishers_mgmt.erl +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_connection_publishers_mgmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_connection_publishers_mgmt). @@ -19,8 +19,6 @@ -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). - dispatcher() -> [{"/stream/connections/:vhost/:connection/publishers", ?MODULE, []}]. diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_connections_mgmt.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_connections_mgmt.erl index d047deaf3e8f..2bb6a0ce0840 100644 --- a/deps/rabbitmq_stream_management/src/rabbit_stream_connections_mgmt.erl +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_connections_mgmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_connections_mgmt). diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_connections_vhost_mgmt.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_connections_vhost_mgmt.erl index 8c79f1eb011f..7097c66e347c 100644 --- a/deps/rabbitmq_stream_management/src/rabbit_stream_connections_vhost_mgmt.erl +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_connections_vhost_mgmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_connections_vhost_mgmt). @@ -18,8 +18,6 @@ is_authorized/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). 
--include_lib("amqp_client/include/amqp_client.hrl"). - dispatcher() -> [{"/stream/connections/:vhost", ?MODULE, []}]. diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_consumers_mgmt.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_consumers_mgmt.erl index 3101afbff486..aa0a08fa23a4 100644 --- a/deps/rabbitmq_stream_management/src/rabbit_stream_consumers_mgmt.erl +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_consumers_mgmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_consumers_mgmt). diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_management_utils.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_management_utils.erl index 7d59638894a0..f94091f212d0 100644 --- a/deps/rabbitmq_stream_management/src/rabbit_stream_management_utils.erl +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_management_utils.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_management_utils). diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_mgmt_db.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_mgmt_db.erl index ea748a46816a..5199f598859e 100644 --- a/deps/rabbitmq_stream_management/src/rabbit_stream_mgmt_db.erl +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_mgmt_db.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_mgmt_db). @@ -112,13 +112,13 @@ augment_entity(?ENTITY_PUBLISHER, {{Q, ConnPid, PubId}, Props}) -> | Props]. consumers_by_vhost(VHost) -> - ets:select(?TABLE_CONSUMER, + ets_select(?TABLE_CONSUMER, [{{{#resource{virtual_host = '$1', _ = '_'}, '_', '_'}, '_'}, [{'orelse', {'==', all, VHost}, {'==', VHost, '$1'}}], ['$_']}]). publishers_by_vhost(VHost) -> - ets:select(?TABLE_PUBLISHER, + ets_select(?TABLE_PUBLISHER, [{{{#resource{virtual_host = '$1', _ = '_'}, '_', '_'}, '_'}, [{'orelse', {'==', all, VHost}, {'==', VHost, '$1'}}], ['$_']}]). @@ -133,13 +133,13 @@ publishers_by_stream(QueueResource) -> get_entity_stats_by_resource(?TABLE_PUBLISHER, QueueResource). get_entity_stats(Table, Id) -> - ets:select(Table, match_entity_spec(Id)). + ets_select(Table, match_entity_spec(Id)). match_entity_spec(ConnectionId) -> [{{{'_', '$1', '_'}, '_'}, [{'==', ConnectionId, '$1'}], ['$_']}]. get_entity_stats_by_resource(Table, Resource) -> - ets:select(Table, match_entity_spec_by_resource(Resource)). + ets_select(Table, match_entity_spec_by_resource(Resource)). 
match_entity_spec_by_resource(#resource{virtual_host = VHost, name = Name}) -> @@ -177,3 +177,13 @@ format_resource(_, unknown) -> format_resource(NameAs, #resource{name = Name, virtual_host = VHost}) -> [{NameAs, Name}, {vhost, VHost}]. + +ets_select(T, Spec) -> + try + ets:select(T, Spec) + catch error:Reason -> + %% badarg can occur if the table has not been created yet + rabbit_log:warning("Error while querying ETS table '~tp': ~tp", + [T, Reason]), + [] + end. diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_publishers_mgmt.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_publishers_mgmt.erl index 02d4c1da13e2..e56c2f3e4e0b 100644 --- a/deps/rabbitmq_stream_management/src/rabbit_stream_publishers_mgmt.erl +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_publishers_mgmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_stream_publishers_mgmt). @@ -86,8 +86,9 @@ to_json(ReqData, Context = #context{user = User}) -> ReqData, Context); true -> - rabbit_mgmt_util:bad_request(<<"Stats in management UI are disabled on this node">>, - ReqData, Context) + %% if we don't return a 200, it is not possible to view the queue page + %% for a queue when stream management is enabled + rabbit_mgmt_util:reply_list([], [], ReqData, Context) end. is_authorized(ReqData, Context) -> diff --git a/deps/rabbitmq_stream_management/src/rabbit_stream_tracking_mgmt.erl b/deps/rabbitmq_stream_management/src/rabbit_stream_tracking_mgmt.erl new file mode 100644 index 000000000000..b72fab4bb173 --- /dev/null +++ b/deps/rabbitmq_stream_management/src/rabbit_stream_tracking_mgmt.erl @@ -0,0 +1,100 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_stream_tracking_mgmt). + +-behaviour(rabbit_mgmt_extension). + +-export([dispatcher/0, + web_ui/0]). +-export([init/2, + resource_exists/2, + to_json/2, + content_types_provided/2, + is_authorized/2]). + +-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). +dispatcher() -> + [{"/stream/:vhost/:queue/tracking", ?MODULE, []}]. + +web_ui() -> + []. + +%%-------------------------------------------------------------------- + +init(Req, _Opts) -> + {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}. + +content_types_provided(ReqData, Context) -> + {[{<<"application/json">>, to_json}], ReqData, Context}. + +resource_exists(ReqData, Context) -> + {case rabbit_mgmt_util:vhost(ReqData) of + not_found -> + false; + none -> + false; % none means `all` + _ -> + case rabbit_mgmt_util:id(queue, ReqData) of + none -> + false; + _ -> + case rabbit_mgmt_wm_queue:queue(ReqData) of + not_found -> + false; + _ -> + true + end + end + end, + ReqData, Context}.
+ +to_json(ReqData, Context) -> + case rabbit_mgmt_util:disable_stats(ReqData) of + false -> + VHost = rabbit_mgmt_util:vhost(ReqData), + Stream = rabbit_mgmt_util:id(queue, ReqData), + case rabbit_stream_manager:lookup_leader(VHost, Stream) of + {ok, Leader} -> + Type = tracking_type(rabbit_mgmt_util:get_value_param(<<"type">>, ReqData)), + TrackingInfo = maps:remove(timestamps, osiris:read_tracking(Leader)), + rabbit_mgmt_util:reply(transform_tracking(Type, TrackingInfo), + ReqData, + Context); + {error, _} -> + rabbit_mgmt_util:service_unavailable(<<"The stream leader is not available">>, + ReqData, Context) + end; + true -> + rabbit_mgmt_util:bad_request(<<"Stats in management UI are disabled on this node">>, + ReqData, Context) + end. + +tracking_type(undefined) -> + all; +tracking_type("offset") -> + offset; +tracking_type("writer") -> + writer; +tracking_type(_) -> + all. + +transform_tracking(offset, Tracking) -> + maps:remove(sequences, Tracking); +transform_tracking(writer, Tracking) -> + #{writers => convert_writer_tracking(maps:get(sequences, Tracking))}; +transform_tracking(all, Tracking) -> + #{offsets => maps:get(offsets, Tracking), + writers => convert_writer_tracking(maps:get(sequences, Tracking))}. + +convert_writer_tracking(Writers) -> + maps:fold(fun(Ref, {_, Seq}, Acc) -> + Acc#{Ref => Seq} + end, #{}, Writers). + +is_authorized(ReqData, Context) -> + rabbit_mgmt_util:is_authorized(ReqData, Context). diff --git a/deps/rabbitmq_stream_management/test/http_SUITE.erl b/deps/rabbitmq_stream_management/test/http_SUITE.erl index ba1cbccb28b5..0cac87e16c0c 100644 --- a/deps/rabbitmq_stream_management/test/http_SUITE.erl +++ b/deps/rabbitmq_stream_management/test/http_SUITE.erl @@ -2,12 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(http_SUITE). --include_lib("common_test/include/ct.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java b/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java deleted file mode 100644 index b901097f2db6..000000000000 --- a/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2007-present the original author or authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -import java.net.*; -import java.io.*; -import java.nio.channels.*; -import java.util.Properties; - -public class MavenWrapperDownloader { - - private static final String WRAPPER_VERSION = "0.5.6"; - /** - * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. - */ - private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" - + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; - - /** - * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to - * use instead of the default one. - */ - private static final String MAVEN_WRAPPER_PROPERTIES_PATH = - ".mvn/wrapper/maven-wrapper.properties"; - - /** - * Path where the maven-wrapper.jar will be saved to. - */ - private static final String MAVEN_WRAPPER_JAR_PATH = - ".mvn/wrapper/maven-wrapper.jar"; - - /** - * Name of the property which should be used to override the default download url for the wrapper. - */ - private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; - - public static void main(String args[]) { - System.out.println("- Downloader started"); - File baseDirectory = new File(args[0]); - System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); - - // If the maven-wrapper.properties exists, read it and check if it contains a custom - // wrapperUrl parameter. - File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); - String url = DEFAULT_DOWNLOAD_URL; - if(mavenWrapperPropertyFile.exists()) { - FileInputStream mavenWrapperPropertyFileInputStream = null; - try { - mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); - Properties mavenWrapperProperties = new Properties(); - mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); - url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); - } catch (IOException e) { - System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); - } finally { - try { - if(mavenWrapperPropertyFileInputStream != null) { - mavenWrapperPropertyFileInputStream.close(); - } - } catch (IOException e) { - // Ignore ... 
- } - } - } - System.out.println("- Downloading from: " + url); - - File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); - if(!outputFile.getParentFile().exists()) { - if(!outputFile.getParentFile().mkdirs()) { - System.out.println( - "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); - } - } - System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); - try { - downloadFileFromURL(url, outputFile); - System.out.println("Done"); - System.exit(0); - } catch (Throwable e) { - System.out.println("- Error downloading"); - e.printStackTrace(); - System.exit(1); - } - } - - private static void downloadFileFromURL(String urlString, File destination) throws Exception { - if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { - String username = System.getenv("MVNW_USERNAME"); - char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); - Authenticator.setDefault(new Authenticator() { - @Override - protected PasswordAuthentication getPasswordAuthentication() { - return new PasswordAuthentication(username, password); - } - }); - } - URL website = new URL(urlString); - ReadableByteChannel rbc; - rbc = Channels.newChannel(website.openStream()); - FileOutputStream fos = new FileOutputStream(destination); - fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); - fos.close(); - rbc.close(); - } - -} diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/maven-wrapper.jar deleted file mode 100644 index 2cc7d4a55c0c..000000000000 Binary files a/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/maven-wrapper.jar and /dev/null differ diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/maven-wrapper.properties index 642d572ce90e..f95f1ee80715 100644 --- a/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/maven-wrapper.properties +++ b/deps/rabbitmq_stream_management/test/http_SUITE_data/.mvn/wrapper/maven-wrapper.properties @@ -1,2 +1,19 @@ -distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip -wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+wrapperVersion=3.3.2 +distributionType=only-script +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.8/apache-maven-3.9.8-bin.zip diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/mvnw b/deps/rabbitmq_stream_management/test/http_SUITE_data/mvnw index 41c0f0c23db5..19529ddf8c6e 100755 --- a/deps/rabbitmq_stream_management/test/http_SUITE_data/mvnw +++ b/deps/rabbitmq_stream_management/test/http_SUITE_data/mvnw @@ -19,292 +19,241 @@ # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- -# Maven Start Up Batch script -# -# Required ENV vars: -# ------------------ -# JAVA_HOME - location of a JDK home dir +# Apache Maven Wrapper startup batch script, version 3.3.2 # # Optional ENV vars # ----------------- -# M2_HOME - location of maven2's installed home dir -# MAVEN_OPTS - parameters passed to the Java VM when running Maven -# e.g. to debug Maven itself, use -# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# JAVA_HOME - location of a JDK home dir, required when download maven via java source +# MVNW_REPOURL - repo url base for downloading maven distribution +# MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +# MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output # ---------------------------------------------------------------------------- -if [ -z "$MAVEN_SKIP_RC" ] ; then - - if [ -f /etc/mavenrc ] ; then - . /etc/mavenrc - fi +set -euf +[ "${MVNW_VERBOSE-}" != debug ] || set -x - if [ -f "$HOME/.mavenrc" ] ; then - . "$HOME/.mavenrc" - fi +# OS specific support. +native_path() { printf %s\\n "$1"; } +case "$(uname)" in +CYGWIN* | MINGW*) + [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")" + native_path() { cygpath --path --windows "$1"; } + ;; +esac -fi +# set JAVACMD and JAVACCMD +set_java_home() { + # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched + if [ -n "${JAVA_HOME-}" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACCMD="$JAVA_HOME/jre/sh/javac" + else + JAVACMD="$JAVA_HOME/bin/java" + JAVACCMD="$JAVA_HOME/bin/javac" -# OS specific support. $var _must_ be set to either true or false. -cygwin=false; -darwin=false; -mingw=false -case "`uname`" in - CYGWIN*) cygwin=true ;; - MINGW*) mingw=true;; - Darwin*) darwin=true - # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home - # See https://developer.apple.com/library/mac/qa/qa1170/_index.html - if [ -z "$JAVA_HOME" ]; then - if [ -x "/usr/libexec/java_home" ]; then - export JAVA_HOME="`/usr/libexec/java_home`" - else - export JAVA_HOME="/Library/Java/Home" + if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then + echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." >&2 + echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." 
>&2 + return 1 fi fi - ;; -esac - -if [ -z "$JAVA_HOME" ] ; then - if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=`java-config --jre-home` + else + JAVACMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v java + )" || : + JAVACCMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v javac + )" || : + + if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then + echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2 + return 1 + fi fi -fi - -if [ -z "$M2_HOME" ] ; then - ## resolve links - $0 may be a link to maven's home - PRG="$0" +} - # need this for relative symlinks - while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG="`dirname "$PRG"`/$link" - fi +# hash string like Java String::hashCode +hash_string() { + str="${1:-}" h=0 + while [ -n "$str" ]; do + char="${str%"${str#?}"}" + h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296)) + str="${str#?}" done + printf %x\\n $h +} - saveddir=`pwd` +verbose() { :; } +[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; } - M2_HOME=`dirname "$PRG"`/.. +die() { + printf %s\\n "$1" >&2 + exit 1 +} - # make it fully qualified - M2_HOME=`cd "$M2_HOME" && pwd` +trim() { + # MWRAPPER-139: + # Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds. + # Needed for removing poorly interpreted newline sequences when running in more + # exotic environments such as mingw bash on Windows. + printf "%s" "${1}" | tr -d '[:space:]' +} - cd "$saveddir" - # echo Using m2 at $M2_HOME -fi +# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties +while IFS="=" read -r key value; do + case "${key-}" in + distributionUrl) distributionUrl=$(trim "${value-}") ;; + distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;; + esac +done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties" +[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties" + +case "${distributionUrl##*/}" in +maven-mvnd-*bin.*) + MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ + case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in + *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;; + :Darwin*x86_64) distributionPlatform=darwin-amd64 ;; + :Darwin*arm64) distributionPlatform=darwin-aarch64 ;; + :Linux*x86_64*) distributionPlatform=linux-amd64 ;; + *) + echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2 + distributionPlatform=linux-amd64 + ;; + esac + distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip" + ;; +maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;; +*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;; +esac -# For Cygwin, ensure paths are in UNIX format before anything is touched -if $cygwin ; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --unix "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --unix "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --unix "$CLASSPATH"` -fi +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}" +distributionUrlName="${distributionUrl##*/}" 
+distributionUrlNameMain="${distributionUrlName%.*}" +distributionUrlNameMain="${distributionUrlNameMain%-bin}" +MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}" +MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")" + +exec_maven() { + unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || : + exec "$MAVEN_HOME/bin/$MVN_CMD" "$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD" +} -# For Mingw, ensure paths are in UNIX format before anything is touched -if $mingw ; then - [ -n "$M2_HOME" ] && - M2_HOME="`(cd "$M2_HOME"; pwd)`" - [ -n "$JAVA_HOME" ] && - JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +if [ -d "$MAVEN_HOME" ]; then + verbose "found existing MAVEN_HOME at $MAVEN_HOME" + exec_maven "$@" fi -if [ -z "$JAVA_HOME" ]; then - javaExecutable="`which javac`" - if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then - # readlink(1) is not available as standard on Solaris 10. - readLink=`which readlink` - if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then - if $darwin ; then - javaHome="`dirname \"$javaExecutable\"`" - javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" - else - javaExecutable="`readlink -f \"$javaExecutable\"`" - fi - javaHome="`dirname \"$javaExecutable\"`" - javaHome=`expr "$javaHome" : '\(.*\)/bin'` - JAVA_HOME="$javaHome" - export JAVA_HOME - fi - fi -fi +case "${distributionUrl-}" in +*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;; +*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;; +esac -if [ -z "$JAVACMD" ] ; then - if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - else - JAVACMD="`which java`" - fi +# prepare tmp dir +if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then + clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; } + trap clean HUP INT TERM EXIT +else + die "cannot create temp dir" fi -if [ ! -x "$JAVACMD" ] ; then - echo "Error: JAVA_HOME is not defined correctly." >&2 - echo " We cannot execute $JAVACMD" >&2 - exit 1 -fi +mkdir -p -- "${MAVEN_HOME%/*}" + +# Download and Install Apache Maven +verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +verbose "Downloading from: $distributionUrl" +verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" -if [ -z "$JAVA_HOME" ] ; then - echo "Warning: JAVA_HOME environment variable is not set." +# select .zip or .tar.gz +if ! 
command -v unzip >/dev/null; then + distributionUrl="${distributionUrl%.zip}.tar.gz" + distributionUrlName="${distributionUrl##*/}" fi -CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher +# verbose opt +__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR='' +[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v -# traverses directory structure from process work directory to filesystem root -# first directory with .mvn subdirectory is considered project base directory -find_maven_basedir() { +# normalize http auth +case "${MVNW_PASSWORD:+has-password}" in +'') MVNW_USERNAME='' MVNW_PASSWORD='' ;; +has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;; +esac - if [ -z "$1" ] - then - echo "Path not specified to find_maven_basedir" - return 1 - fi +if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then + verbose "Found wget ... using wget" + wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl" +elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then + verbose "Found curl ... using curl" + curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl" +elif set_java_home; then + verbose "Falling back to use Java to download" + javaSource="$TMP_DOWNLOAD_DIR/Downloader.java" + targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName" + cat >"$javaSource" <<-END + public class Downloader extends java.net.Authenticator + { + protected java.net.PasswordAuthentication getPasswordAuthentication() + { + return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() ); + } + public static void main( String[] args ) throws Exception + { + setDefault( new Downloader() ); + java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() ); + } + } + END + # For Cygwin/MinGW, switch paths to Windows format before running javac and java + verbose " - Compiling Downloader.java ..." + "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java" + verbose " - Running Downloader.java ..." + "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")" +fi - basedir="$1" - wdir="$1" - while [ "$wdir" != '/' ] ; do - if [ -d "$wdir"/.mvn ] ; then - basedir=$wdir - break +# If specified, validate the SHA-256 sum of the Maven distribution zip file +if [ -n "${distributionSha256Sum-}" ]; then + distributionSha256Result=false + if [ "$MVN_CMD" = mvnd.sh ]; then + echo "Checksum validation is not supported for maven-mvnd." >&2 + echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." 
>&2 + exit 1 + elif command -v sha256sum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then + distributionSha256Result=true fi - # workaround for JBEAP-8937 (on Solaris 10/Sparc) - if [ -d "${wdir}" ]; then - wdir=`cd "$wdir/.."; pwd` + elif command -v shasum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then + distributionSha256Result=true fi - # end of workaround - done - echo "${basedir}" -} - -# concatenates all lines of a file -concat_lines() { - if [ -f "$1" ]; then - echo "$(tr -s '\n' ' ' < "$1")" + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2 + echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $distributionSha256Result = false ]; then + echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2 + echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2 + exit 1 fi -} - -BASE_DIR=`find_maven_basedir "$(pwd)"` -if [ -z "$BASE_DIR" ]; then - exit 1; fi -########################################################################################## -# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -# This allows using the maven wrapper in projects that prohibit checking in binary data. -########################################################################################## -if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found .mvn/wrapper/maven-wrapper.jar" - fi +# unzip and move +if command -v unzip >/dev/null; then + unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip" else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." - fi - if [ -n "$MVNW_REPOURL" ]; then - jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" - else - jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" - fi - while IFS="=" read key value; do - case "$key" in (wrapperUrl) jarUrl="$value"; break ;; - esac - done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" - if [ "$MVNW_VERBOSE" = true ]; then - echo "Downloading from: $jarUrl" - fi - wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" - if $cygwin; then - wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` - fi - - if command -v wget > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found wget ... using wget" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - wget "$jarUrl" -O "$wrapperJarPath" - else - wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" - fi - elif command -v curl > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found curl ... 
using curl" - fi - if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then - curl -o "$wrapperJarPath" "$jarUrl" -f - else - curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f - fi - - else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Falling back to using Java to download" - fi - javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" - # For Cygwin, switch paths to Windows format before running javac - if $cygwin; then - javaClass=`cygpath --path --windows "$javaClass"` - fi - if [ -e "$javaClass" ]; then - if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Compiling MavenWrapperDownloader.java ..." - fi - # Compiling the Java class - ("$JAVA_HOME/bin/javac" "$javaClass") - fi - if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - # Running the downloader - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Running MavenWrapperDownloader.java ..." - fi - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") - fi - fi - fi -fi -########################################################################################## -# End of extension -########################################################################################## - -export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} -if [ "$MVNW_VERBOSE" = true ]; then - echo $MAVEN_PROJECTBASEDIR + tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar" fi -MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" - -# For Cygwin, switch paths to Windows format before running java -if $cygwin; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --path --windows "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --windows "$CLASSPATH"` - [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` -fi - -# Provide a "standardized" way to retrieve the CLI args that will -# work with both Windows and non-Windows executions. -MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" -export MAVEN_CMD_LINE_ARGS - -WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain +printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url" +mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME" -exec "$JAVACMD" \ - $MAVEN_OPTS \ - -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ - ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" +clean || : +exec_maven "$@" diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/mvnw.cmd b/deps/rabbitmq_stream_management/test/http_SUITE_data/mvnw.cmd index 86115719e538..b150b91ed500 100644 --- a/deps/rabbitmq_stream_management/test/http_SUITE_data/mvnw.cmd +++ b/deps/rabbitmq_stream_management/test/http_SUITE_data/mvnw.cmd @@ -1,182 +1,149 @@ -@REM ---------------------------------------------------------------------------- -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. 
The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM ---------------------------------------------------------------------------- - -@REM ---------------------------------------------------------------------------- -@REM Maven Start Up Batch script -@REM -@REM Required ENV vars: -@REM JAVA_HOME - location of a JDK home dir -@REM -@REM Optional ENV vars -@REM M2_HOME - location of maven2's installed home dir -@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending -@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven -@REM e.g. to debug Maven itself, use -@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files -@REM ---------------------------------------------------------------------------- - -@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' -@echo off -@REM set title of command window -title %0 -@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' -@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% - -@REM set %HOME% to equivalent of $HOME -if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") - -@REM Execute a user defined script before this one -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre -@REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" -if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" -:skipRcPre - -@setlocal - -set ERROR_CODE=0 - -@REM To isolate internal variables from possible post scripts, we use another setlocal -@setlocal - -@REM ==== START VALIDATION ==== -if not "%JAVA_HOME%" == "" goto OkJHome - -echo. -echo Error: JAVA_HOME not found in your environment. >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -:OkJHome -if exist "%JAVA_HOME%\bin\java.exe" goto init - -echo. -echo Error: JAVA_HOME is set to an invalid directory. >&2 -echo JAVA_HOME = "%JAVA_HOME%" >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -@REM ==== END VALIDATION ==== - -:init - -@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". -@REM Fallback to current working directory if not found. - -set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% -IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir - -set EXEC_DIR=%CD% -set WDIR=%EXEC_DIR% -:findBaseDir -IF EXIST "%WDIR%"\.mvn goto baseDirFound -cd .. 
-IF "%WDIR%"=="%CD%" goto baseDirNotFound -set WDIR=%CD% -goto findBaseDir - -:baseDirFound -set MAVEN_PROJECTBASEDIR=%WDIR% -cd "%EXEC_DIR%" -goto endDetectBaseDir - -:baseDirNotFound -set MAVEN_PROJECTBASEDIR=%EXEC_DIR% -cd "%EXEC_DIR%" - -:endDetectBaseDir - -IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig - -@setlocal EnableExtensions EnableDelayedExpansion -for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a -@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% - -:endReadAdditionalConfig - -SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" -set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" -set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" - -FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( - IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B -) - -@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -@REM This allows using the maven wrapper in projects that prohibit checking in binary data. -if exist %WRAPPER_JAR% ( - if "%MVNW_VERBOSE%" == "true" ( - echo Found %WRAPPER_JAR% - ) -) else ( - if not "%MVNW_REPOURL%" == "" ( - SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" - ) - if "%MVNW_VERBOSE%" == "true" ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... - echo Downloading from: %DOWNLOAD_URL% - ) - - powershell -Command "&{"^ - "$webclient = new-object System.Net.WebClient;"^ - "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ - "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ - "}"^ - "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ - "}" - if "%MVNW_VERBOSE%" == "true" ( - echo Finished downloading %WRAPPER_JAR% - ) -) -@REM End of extension - -@REM Provide a "standardized" way to retrieve the CLI args that will -@REM work with both Windows and non-Windows executions. -set MAVEN_CMD_LINE_ARGS=%* - -%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* -if ERRORLEVEL 1 goto error -goto end - -:error -set ERROR_CODE=1 - -:end -@endlocal & set ERROR_CODE=%ERROR_CODE% - -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost -@REM check for post script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" -if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" -:skipRcPost - -@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' -if "%MAVEN_BATCH_PAUSE%" == "on" pause - -if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% - -exit /B %ERROR_CODE% +<# : batch portion +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. 
The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Optional ENV vars +@REM MVNW_REPOURL - repo url base for downloading maven distribution +@REM MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +@REM MVNW_VERBOSE - true: enable verbose log; others: silence the output +@REM ---------------------------------------------------------------------------- + +@IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0) +@SET __MVNW_CMD__= +@SET __MVNW_ERROR__= +@SET __MVNW_PSMODULEP_SAVE=%PSModulePath% +@SET PSModulePath= +@FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @( + IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B) +) +@SET PSModulePath=%__MVNW_PSMODULEP_SAVE% +@SET __MVNW_PSMODULEP_SAVE= +@SET __MVNW_ARG0_NAME__= +@SET MVNW_USERNAME= +@SET MVNW_PASSWORD= +@IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*) +@echo Cannot start maven from wrapper >&2 && exit /b 1 +@GOTO :EOF +: end batch / begin powershell #> + +$ErrorActionPreference = "Stop" +if ($env:MVNW_VERBOSE -eq "true") { + $VerbosePreference = "Continue" +} + +# calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties +$distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl +if (!$distributionUrl) { + Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties" +} + +switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) { + "maven-mvnd-*" { + $USE_MVND = $true + $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip" + $MVN_CMD = "mvnd.cmd" + break + } + default { + $USE_MVND = $false + $MVN_CMD = $script -replace '^mvnw','mvn' + break + } +} + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +if ($env:MVNW_REPOURL) { + $MVNW_REPO_PATTERN = if ($USE_MVND) { "/org/apache/maven/" } else { "/maven/mvnd/" } + $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')" +} +$distributionUrlName = $distributionUrl -replace '^.*/','' +$distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$','' +$MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain" +if ($env:MAVEN_USER_HOME) { + $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain" +} +$MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | 
ForEach-Object {$_.ToString("x2")}) -join '' +$MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME" + +if (Test-Path -Path "$MAVEN_HOME" -PathType Container) { + Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME" + Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" + exit $? +} + +if (! $distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) { + Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl" +} + +# prepare tmp dir +$TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile +$TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir" +$TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null +trap { + if ($TMP_DOWNLOAD_DIR.Exists) { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } + } +} + +New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null + +# Download and Install Apache Maven +Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +Write-Verbose "Downloading from: $distributionUrl" +Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +$webclient = New-Object System.Net.WebClient +if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) { + $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD) +} +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 +$webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +$distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum +if ($distributionSha256Sum) { + if ($USE_MVND) { + Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." + } + Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash + if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) { + Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property." + } +} + +# unzip and move +Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null +Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null +try { + Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null +} catch { + if (! (Test-Path -Path "$MAVEN_HOME" -PathType Container)) { + Write-Error "fail to move MAVEN_HOME" + } +} finally { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } +} + +Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml b/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml index 4af736f9a542..58f77c216290 100644 --- a/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml +++ b/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml @@ -20,22 +20,22 @@ rabbitmq-core@groups.vmware.com Team RabbitMQ - VMware, Inc. or its affiliates. + Broadcom, Inc. or its affiliates. 
https://rabbitmq.com [0.12.0-SNAPSHOT,) - 5.9.3 - 3.24.2 - 1.2.12 - 3.11.0 - 3.1.2 - 2.37.0 - 1.17.0 - 4.11.0 - 2.10.1 + 5.11.0 + 3.26.3 + 1.2.13 + 3.12.1 + 3.4.0 + 2.43.0 + 1.18.1 + 4.12.0 + 2.11.0 UTF-8 diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/src/test/java/com/rabbitmq/stream/HttpTest.java b/deps/rabbitmq_stream_management/test/http_SUITE_data/src/test/java/com/rabbitmq/stream/HttpTest.java index 442c223263c5..90787333889d 100644 --- a/deps/rabbitmq_stream_management/test/http_SUITE_data/src/test/java/com/rabbitmq/stream/HttpTest.java +++ b/deps/rabbitmq_stream_management/test/http_SUITE_data/src/test/java/com/rabbitmq/stream/HttpTest.java @@ -11,7 +11,7 @@ // The Original Code is RabbitMQ. // // The Initial Developer of the Original Code is Pivotal Software, Inc. -// Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.stream; @@ -784,7 +784,9 @@ void permissions() throws Exception { "/stream/consumers/foo-virtual-host", "/stream/publishers/foo-virtual-host", "/stream/publishers/foo-virtual-host", - "/stream/publishers/%2F/foo-stream" + "/stream/publishers/%2F/foo-stream", + "/stream/%2F/foo-stream/tracking", + "/stream/foo-virtual-host/foo-stream/tracking", }) void shouldReturnNotFound(String endpoint) { assertThatThrownBy(() -> get(endpoint)).hasMessageContaining("404"); @@ -867,6 +869,112 @@ && connectionName(client) } } + @Test + void trackingInfo() throws Exception { + String endpoint = "/stream/%2F/" + stream + "/tracking"; + Callable> getTracking = () -> toMap(get(endpoint)); + Map tracking = getTracking.call(); + assertThat(tracking).hasSize(2).containsKeys("offsets", "writers"); + assertThat(trackingOffsets(tracking)).isEmpty(); + assertThat(trackingWriters(tracking)).isEmpty(); + + String consumerReference1 = "foo"; + String consumerReference2 = "bar"; + + Client client = cf.get(); + client.storeOffset(consumerReference1, stream, 42); + waitUntil(() -> client.queryOffset(consumerReference1, stream).getOffset() == 42); + tracking = getTracking.call(); + assertThat(trackingOffsets(tracking)).hasSize(1).containsEntry(consumerReference1, d(42)); + assertThat(trackingWriters(tracking)).isEmpty(); + + client.storeOffset(consumerReference1, stream, 55); + waitUntil(() -> client.queryOffset(consumerReference1, stream).getOffset() == 55); + tracking = getTracking.call(); + assertThat(trackingOffsets(tracking)).hasSize(1).containsEntry(consumerReference1, d(55)); + assertThat(trackingWriters(tracking)).isEmpty(); + + client.storeOffset(consumerReference2, stream, 12); + waitUntil(() -> client.queryOffset(consumerReference2, stream).getOffset() == 12); + tracking = getTracking.call(); + assertThat(trackingOffsets(tracking)) + .hasSize(2) + .containsEntry(consumerReference1, d(55)) + .containsEntry(consumerReference2, d(12)); + assertThat(trackingWriters(tracking)).isEmpty(); + + tracking = toMap(get(endpoint + "?type=offset")); + assertThat(tracking).hasSize(1).containsKey("offsets"); + + String publisherReference1 = "foobar1"; + String publisherReference2 = "foobar2"; + byte pub1 = 0; + byte pub2 = 1; + assertThat(client.declarePublisher(pub1, publisherReference1, stream).isOk()).isTrue(); + assertThat(client.declarePublisher(pub2, publisherReference2, stream).isOk()).isTrue(); + + client.publish(pub1, message(client), o -> 25); + waitUntil(() -> 
client.queryPublisherSequence(publisherReference1, stream) == 25); + + tracking = getTracking.call(); + assertThat(trackingOffsets(tracking)) + .hasSize(2) + .containsEntry(consumerReference1, d(55)) + .containsEntry(consumerReference2, d(12)); + assertThat(trackingWriters(tracking)).hasSize(1).containsEntry(publisherReference1, d(25)); + + client.publish(pub1, message(client), o -> 36); + waitUntil(() -> client.queryPublisherSequence(publisherReference1, stream) == 36); + + tracking = getTracking.call(); + assertThat(trackingOffsets(tracking)) + .hasSize(2) + .containsEntry(consumerReference1, d(55)) + .containsEntry(consumerReference2, d(12)); + assertThat(trackingWriters(tracking)).hasSize(1).containsEntry(publisherReference1, d(36)); + + client.publish(pub2, message(client), o -> 45); + waitUntil(() -> client.queryPublisherSequence(publisherReference2, stream) == 45); + + tracking = getTracking.call(); + assertThat(trackingOffsets(tracking)) + .hasSize(2) + .containsEntry(consumerReference1, d(55)) + .containsEntry(consumerReference2, d(12)); + assertThat(trackingWriters(tracking)) + .hasSize(2) + .containsEntry(publisherReference1, d(36)) + .containsEntry(publisherReference2, d(45)); + + tracking = toMap(get(endpoint + "?type=writer")); + assertThat(tracking).hasSize(1).containsKey("writers"); + + tracking = toMap(get(endpoint + "?type=all")); + assertThat(tracking).hasSize(2).containsKeys("offsets", "writers"); + + tracking = toMap(get(endpoint + "?type=unknown-means-all")); + assertThat(tracking).hasSize(2).containsKeys("offsets", "writers"); + } + + @SuppressWarnings("unchecked") + private static Map trackingOffsets(Map tracking) { + return (Map) tracking.get("offsets"); + } + + @SuppressWarnings("unchecked") + private static Map trackingWriters(Map tracking) { + return (Map) tracking.get("writers"); + } + + private static Double d(int value) { + return (double) value; + } + + private static List message(Client client) { + return Collections.singletonList( + client.messageBuilder().addData("hello".getBytes(StandardCharsets.UTF_8)).build()); + } + static class PermissionsTestConfiguration { final String user; final String endpoint; diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java b/deps/rabbitmq_stream_management/test/http_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java index 1f70845383e2..94b53dc211d8 100644 --- a/deps/rabbitmq_stream_management/test/http_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java +++ b/deps/rabbitmq_stream_management/test/http_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java @@ -11,7 +11,7 @@ // The Original Code is RabbitMQ. // // The Initial Developer of the Original Code is Pivotal Software, Inc. -// Copyright (c) 2020-2023 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. // package com.rabbitmq.stream; diff --git a/deps/rabbitmq_top/.gitignore b/deps/rabbitmq_top/.gitignore deleted file mode 100644 index 1413a066a4a1..000000000000 --- a/deps/rabbitmq_top/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? 
-*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -rabbitmq_top.d diff --git a/deps/rabbitmq_top/BUILD.bazel b/deps/rabbitmq_top/BUILD.bazel index 27af026d8da4..c4ffad8dae3d 100644 --- a/deps/rabbitmq_top/BUILD.bazel +++ b/deps/rabbitmq_top/BUILD.bazel @@ -46,7 +46,6 @@ rabbitmq_app( license_files = [":license_files"], priv = [":priv"], deps = [ - "//deps/amqp_client:erlang_app", "//deps/rabbit:erlang_app", # keep "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management:erlang_app", @@ -62,7 +61,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", ) diff --git a/deps/rabbitmq_top/README.md b/deps/rabbitmq_top/README.md index 018f5490c1bb..8a9a9226df73 100644 --- a/deps/rabbitmq_top/README.md +++ b/deps/rabbitmq_top/README.md @@ -62,6 +62,6 @@ You can build and install it like any other plugin (see ## License and Copyright -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Released under the same license as RabbitMQ. diff --git a/deps/rabbitmq_top/app.bzl b/deps/rabbitmq_top/app.bzl index 31559679ffeb..75f5a2b91fad 100644 --- a/deps/rabbitmq_top/app.bzl +++ b/deps/rabbitmq_top/app.bzl @@ -23,7 +23,6 @@ def all_beam_files(name = "all_beam_files"): dest = "ebin", erlc_opts = "//:erlc_opts", deps = [ - "//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management:erlang_app", "//deps/rabbitmq_management_agent:erlang_app", @@ -100,7 +99,7 @@ def all_test_beam_files(name = "all_test_beam_files"): app_name = "rabbitmq_top", dest = "test", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management:erlang_app", "//deps/rabbitmq_management_agent:erlang_app"], + deps = ["//deps/rabbit_common:erlang_app", "//deps/rabbitmq_management:erlang_app", "//deps/rabbitmq_management_agent:erlang_app"], ) def test_suite_beam_files(name = "test_suite_beam_files"): diff --git a/deps/rabbitmq_top/priv/www/js/top.js b/deps/rabbitmq_top/priv/www/js/top.js index a1758996287f..3f70447f23e0 100644 --- a/deps/rabbitmq_top/priv/www/js/top.js +++ b/deps/rabbitmq_top/priv/www/js/top.js @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. // -// Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +// Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. dispatcher_add(function(sammy) { sammy.get('#/top', function() { diff --git a/deps/rabbitmq_top/src/rabbit_top_app.erl b/deps/rabbitmq_top/src/rabbit_top_app.erl index b94e43b3f0f7..0d13243e6b70 100644 --- a/deps/rabbitmq_top/src/rabbit_top_app.erl +++ b/deps/rabbitmq_top/src/rabbit_top_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_top_app). 
diff --git a/deps/rabbitmq_top/src/rabbit_top_extension.erl b/deps/rabbitmq_top/src/rabbit_top_extension.erl index c4c8d4eca982..4c4d1a1ab591 100644 --- a/deps/rabbitmq_top/src/rabbit_top_extension.erl +++ b/deps/rabbitmq_top/src/rabbit_top_extension.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_top_extension). diff --git a/deps/rabbitmq_top/src/rabbit_top_sup.erl b/deps/rabbitmq_top/src/rabbit_top_sup.erl index e220cac259d6..80a18e05e2d1 100644 --- a/deps/rabbitmq_top/src/rabbit_top_sup.erl +++ b/deps/rabbitmq_top/src/rabbit_top_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_top_sup). diff --git a/deps/rabbitmq_top/src/rabbit_top_util.erl b/deps/rabbitmq_top/src/rabbit_top_util.erl index 4d272735e288..9c833eb561fe 100644 --- a/deps/rabbitmq_top/src/rabbit_top_util.erl +++ b/deps/rabbitmq_top/src/rabbit_top_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_top_util). diff --git a/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl b/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl index 852467409663..f3d4601e9e77 100644 --- a/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl +++ b/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_top_wm_ets_tables). @@ -10,8 +10,6 @@ -export([init/2, to_json/2, content_types_provided/2, is_authorized/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_top/src/rabbit_top_wm_process.erl b/deps/rabbitmq_top/src/rabbit_top_wm_process.erl index a7e37cb87013..507ca3f5ff8b 100644 --- a/deps/rabbitmq_top/src/rabbit_top_wm_process.erl +++ b/deps/rabbitmq_top/src/rabbit_top_wm_process.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. All rights reserved. %% -module(rabbit_top_wm_process). @@ -14,8 +14,6 @@ [current_stacktrace, trap_exit, links, monitors, monitored_by]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl b/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl index c75ce4a4b5e2..7dcf75fe55e3 100644 --- a/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl +++ b/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_top_wm_processes). @@ -10,8 +10,6 @@ -export([init/2, to_json/2, content_types_provided/2, is_authorized/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). --include_lib("amqp_client/include/amqp_client.hrl"). - %%-------------------------------------------------------------------- init(Req, _State) -> diff --git a/deps/rabbitmq_top/src/rabbit_top_worker.erl b/deps/rabbitmq_top/src/rabbit_top_worker.erl index c4e21e7d99de..b359560d6b86 100644 --- a/deps/rabbitmq_top/src/rabbit_top_worker.erl +++ b/deps/rabbitmq_top/src/rabbit_top_worker.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_top_worker). diff --git a/deps/rabbitmq_tracing/.gitignore b/deps/rabbitmq_tracing/.gitignore deleted file mode 100644 index 8ed19236abff..000000000000 --- a/deps/rabbitmq_tracing/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? 
-*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -rabbitmq_tracing.d diff --git a/deps/rabbitmq_tracing/BUILD.bazel b/deps/rabbitmq_tracing/BUILD.bazel index b3d9c8241c60..1a5113bbc349 100644 --- a/deps/rabbitmq_tracing/BUILD.bazel +++ b/deps/rabbitmq_tracing/BUILD.bazel @@ -1,6 +1,8 @@ load("@rules_erlang//:eunit2.bzl", "eunit") load("@rules_erlang//:xref2.bzl", "xref") load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") +load("//:rabbitmq_home.bzl", "rabbitmq_home") +load("//:rabbitmq_run.bzl", "rabbitmq_run") load( "//:rabbitmq.bzl", "BROKER_VERSION_REQUIREMENTS_ANY", @@ -69,7 +71,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", ) @@ -87,6 +89,10 @@ eunit( broker_for_integration_suites() +rabbitmq_integration_suite( + name = "config_schema_SUITE", +) + rabbitmq_integration_suite( name = "rabbit_tracing_SUITE", ) diff --git a/deps/rabbitmq_tracing/app.bzl b/deps/rabbitmq_tracing/app.bzl index 0ea42f7f6168..3b52a3e4b6da 100644 --- a/deps/rabbitmq_tracing/app.bzl +++ b/deps/rabbitmq_tracing/app.bzl @@ -82,6 +82,7 @@ def all_srcs(name = "all_srcs"): filegroup( name = "priv", srcs = [ + "priv/schema/rabbitmq_tracing.schema", "priv/www/js/tmpl/traces.ejs", "priv/www/js/tracing.js", ], @@ -128,3 +129,11 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) + erlang_bytecode( + name = "config_schema_SUITE_beam_files", + testonly = True, + srcs = ["test/config_schema_SUITE.erl"], + outs = ["test/config_schema_SUITE.beam"], + app_name = "rabbitmq_tracing", + erlc_opts = "//:test_erlc_opts", + ) diff --git a/deps/rabbitmq_tracing/priv/schema/rabbitmq_tracing.schema b/deps/rabbitmq_tracing/priv/schema/rabbitmq_tracing.schema new file mode 100644 index 000000000000..48e4981d8629 --- /dev/null +++ b/deps/rabbitmq_tracing/priv/schema/rabbitmq_tracing.schema @@ -0,0 +1,18 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +{mapping, "tracing.dir", "rabbitmq_tracing.directory", [ + {datatype, string}, + {validators, ["dir_writable"]}]}. + +{mapping, "tracing.username", "rabbitmq_tracing.username", [ + {datatype, string} +]}. + +{mapping, "tracing.password", "rabbitmq_tracing.password", [ + {datatype, string} +]}. diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_app.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_app.erl index cdbbf0174019..99120f512f9e 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_app.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_app). 
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_consumer.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_consumer.erl index 3ba3831bdf82..4ac695db488f 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_consumer.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_consumer.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_consumer). diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_consumer_sup.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_consumer_sup.erl index e4c3fea96a12..457986a7d4e8 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_consumer_sup.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_consumer_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_consumer_sup). diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_files.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_files.erl index 9170735090c6..62544e1ab94f 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_files.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_files.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_files). diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_mgmt.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_mgmt.erl index b24e65fc5a98..10388234c1cf 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_mgmt.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_mgmt.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_mgmt). diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_sup.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_sup.erl index 1253934d562a..8afddb33b3d1 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_sup.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_sup). 
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_traces.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_traces.erl index 0debf0d0db50..aa47c4d07d2e 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_traces.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_traces.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_traces). diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_util.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_util.erl index 4c1057a2b7c6..32330bb1dba1 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_util.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_util). diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_file.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_file.erl index 6dfeea841de3..f4c1221bea90 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_file.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_file.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_wm_file). diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_files.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_files.erl index 580a47c5c234..38970a216167 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_files.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_files.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_wm_files). diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_trace.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_trace.erl index 0b7e36d4a063..e7e9f86c1457 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_trace.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_trace.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_wm_trace). 
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_traces.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_traces.erl index a67b386da8cf..86e9e665d9aa 100644 --- a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_traces.erl +++ b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_traces.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_wm_traces). diff --git a/deps/rabbitmq_amqp1_0/test/config_schema_SUITE.erl b/deps/rabbitmq_tracing/test/config_schema_SUITE.erl similarity index 70% rename from deps/rabbitmq_amqp1_0/test/config_schema_SUITE.erl rename to deps/rabbitmq_tracing/test/config_schema_SUITE.erl index b172f35d75d8..71eb9e7cd44d 100644 --- a/deps/rabbitmq_amqp1_0/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_tracing/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). @@ -11,17 +11,17 @@ all() -> [ - run_snippets + run_snippets ]. %% ------------------------------------------------------------------- -%% Testsuite setup/teardown. +%% Test suite setup/teardown. %% ------------------------------------------------------------------- init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), Config1 = rabbit_ct_helpers:run_setup_steps(Config), - rabbit_ct_config_schema:init_schemas(rabbitmq_amqp1_0, Config1). + rabbit_ct_config_schema:init_schemas(rabbitmq_tracing, Config1). end_per_suite(Config) -> @@ -31,24 +31,25 @@ init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase), Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, Testcase} - ]), + ]), rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). end_per_testcase(Testcase, Config) -> Config1 = rabbit_ct_helpers:run_steps(Config, - rabbit_ct_client_helpers:teardown_steps() ++ - rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), rabbit_ct_helpers:testcase_finished(Config1, Testcase). %% ------------------------------------------------------------------- -%% Testcases. +%% Test cases %% ------------------------------------------------------------------- run_snippets(Config) -> ok = rabbit_ct_broker_helpers:rpc(Config, 0, - ?MODULE, run_snippets1, [Config]). + ?MODULE, run_snippets1, [Config]). run_snippets1(Config) -> rabbit_ct_config_schema:run_snippets(Config). 
+ diff --git a/deps/rabbitmq_tracing/test/config_schema_SUITE_data/rabbitmq_tracing.snippets b/deps/rabbitmq_tracing/test/config_schema_SUITE_data/rabbitmq_tracing.snippets new file mode 100644 index 000000000000..543d367c283f --- /dev/null +++ b/deps/rabbitmq_tracing/test/config_schema_SUITE_data/rabbitmq_tracing.snippets @@ -0,0 +1,15 @@ +[ + {tracing_username, + "tracing.username = generated-93da9f621", + [{rabbitmq_tracing, [ + {username, "generated-93da9f621"} + ]}], + [rabbitmq_tracing]}, + + {tracing_password, + "tracing.password = 6bc258e9eac005659a84afcc41be61d93da9f621", + [{rabbitmq_tracing, [ + {password, "6bc258e9eac005659a84afcc41be61d93da9f621"} + ]}], + [rabbitmq_tracing]} +]. diff --git a/deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl b/deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl index 172848d312a6..089cdc132a74 100644 --- a/deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl +++ b/deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_tracing_SUITE). @@ -11,7 +11,6 @@ -define(LOG_DIR, "/var/tmp/rabbitmq-tracing/"). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). diff --git a/deps/rabbitmq_trust_store/.gitignore b/deps/rabbitmq_trust_store/.gitignore index 0a8b81e945be..e0812518307d 100644 --- a/deps/rabbitmq_trust_store/.gitignore +++ b/deps/rabbitmq_trust_store/.gitignore @@ -1,21 +1,3 @@ -.sw? -*.orig -.*.sw? -*.beam -/.erlang.mk/ -/cover/ /debug/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -rabbitmq_trust_store.d test/config_schema_SUITE_data/schema/ diff --git a/deps/rabbitmq_trust_store/README.md b/deps/rabbitmq_trust_store/README.md index 3a1d608bf1a6..6c36bcc9bac3 100644 --- a/deps/rabbitmq_trust_store/README.md +++ b/deps/rabbitmq_trust_store/README.md @@ -205,6 +205,6 @@ will build the plugin and put build artifacts under the `./plugins` directory. ## Copyright and License -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Released under the MPL, the same license as RabbitMQ. 
diff --git a/deps/rabbitmq_trust_store/app.bzl b/deps/rabbitmq_trust_store/app.bzl index 4c3569a309f0..9f9c6bb21488 100644 --- a/deps/rabbitmq_trust_store/app.bzl +++ b/deps/rabbitmq_trust_store/app.bzl @@ -28,7 +28,6 @@ def all_beam_files(name = "all_beam_files"): beam = [":behaviours"], dest = "ebin", erlc_opts = "//:erlc_opts", - deps = ["//deps/rabbit_common:erlang_app"], ) def all_test_beam_files(name = "all_test_beam_files"): @@ -61,7 +60,6 @@ def all_test_beam_files(name = "all_test_beam_files"): beam = [":test_behaviours"], dest = "test", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/rabbit_common:erlang_app"], ) def all_srcs(name = "all_srcs"): diff --git a/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema b/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema index 85fffbe7067c..d9cc4a2afa51 100644 --- a/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema +++ b/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% {mapping, "trust_store.providers.$name", "rabbitmq_trust_store.providers", [ @@ -124,7 +124,7 @@ end}. [{datatype, {enum, [true, false]}}]}. {mapping, "trust_store.ssl_options.password", "rabbitmq_trust_store.ssl_options.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {mapping, "trust_store.ssl_options.psk_identity", "rabbitmq_trust_store.ssl_options.psk_identity", [{datatype, string}]}. diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl index 5106161a1d13..123d652bce6e 100644 --- a/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl +++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_trust_store). @@ -18,7 +18,6 @@ code_change/3]). -include_lib("stdlib/include/ms_transform.hrl"). --include_lib("kernel/include/file.hrl"). -include_lib("public_key/include/public_key.hrl"). -type certificate() :: #'OTPCertificate'{}. diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl index 0f596a305ec6..605a1e03c7c5 100644 --- a/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl +++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_trust_store_app). 
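The rabbitmq_trust_store.schema change above switches trust_store.ssl_options.password from the string datatype to [tagged_binary, binary]. A hedged sketch of the expected effect, using the same illustrative password the updated snippets file uses further down in this diff:

%% Sketch: with the new datatype, a rabbitmq.conf line such as
%%
%%     trust_store.ssl_options.password = i_am_password
%%
%% is expected to yield a binary (rather than a charlist) in the generated config, roughly:
[{rabbitmq_trust_store, [{ssl_options, [{password, <<"i_am_password">>}]}]}].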
diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_certificate_provider.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_certificate_provider.erl index 4fb8c29490d0..5d79a6aac2ad 100644 --- a/deps/rabbitmq_trust_store/src/rabbit_trust_store_certificate_provider.erl +++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_certificate_provider.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_trust_store_certificate_provider). --include_lib("public_key/include/public_key.hrl"). - -callback list_certs(Config) -> no_change | {ok, [{CertId, Attributes}], ProviderState} when Config :: list(), diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_file_provider.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_file_provider.erl index 08a9c9e0667b..e0d8bc545d2c 100644 --- a/deps/rabbitmq_trust_store/src/rabbit_trust_store_file_provider.erl +++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_file_provider.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_trust_store_file_provider). diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl index 2fc20e31f806..5e0aee535451 100644 --- a/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl +++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_trust_store_http_provider). --include_lib("public_key/include/public_key.hrl"). - -behaviour(rabbit_trust_store_certificate_provider). -define(PROFILE, ?MODULE). diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl index 9c00cdaa8d05..eb52427cf3e4 100644 --- a/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl +++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_trust_store_sup). @@ -10,9 +10,6 @@ -export([start_link/0]). -export([init/1]). --include_lib("rabbit_common/include/rabbit.hrl"). - - %% ... 
start_link() -> diff --git a/deps/rabbitmq_trust_store/test/config_schema_SUITE.erl b/deps/rabbitmq_trust_store/test/config_schema_SUITE.erl index 6c49ea252903..0e42aeb71d27 100644 --- a/deps/rabbitmq_trust_store/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_trust_store/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets index d45f48ecef45..b8d7f0457e3d 100644 --- a/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets +++ b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets @@ -24,5 +24,5 @@ {url,"https://example.com"}, {ssl_options, [{certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, - {password,"i_am_password"}]}]}], + {password,<<"i_am_password">>}]}]}], [rabbitmq_trust_store]}]. diff --git a/deps/rabbitmq_trust_store/test/system_SUITE.erl b/deps/rabbitmq_trust_store/test/system_SUITE.erl index 531e0724eec2..742c438d01f2 100644 --- a/deps/rabbitmq_trust_store/test/system_SUITE.erl +++ b/deps/rabbitmq_trust_store/test/system_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(system_SUITE). diff --git a/deps/rabbitmq_web_dispatch/.gitignore b/deps/rabbitmq_web_dispatch/.gitignore deleted file mode 100644 index ff4f97e1a487..000000000000 --- a/deps/rabbitmq_web_dispatch/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -rabbitmq_web_dispatch.d diff --git a/deps/rabbitmq_web_dispatch/include/rabbitmq_web_dispatch_records.hrl b/deps/rabbitmq_web_dispatch/include/rabbitmq_web_dispatch_records.hrl index 977dd0145d25..f9c2d10d311e 100644 --- a/deps/rabbitmq_web_dispatch/include/rabbitmq_web_dispatch_records.hrl +++ b/deps/rabbitmq_web_dispatch/include/rabbitmq_web_dispatch_records.hrl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -record(context, {user, diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_middleware.erl b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_middleware.erl index 6fd19e4cb9ba..069e8087e12d 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_middleware.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_middleware.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. 
If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_cowboy_middleware). diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_redirect.erl b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_redirect.erl index 95db3c8dcb08..bdcd4df009f4 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_redirect.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_redirect.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_cowboy_redirect). diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_stream_h.erl b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_stream_h.erl index 4a2c6c31c9f5..a0ce02757e40 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_stream_h.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_stream_h.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_cowboy_stream_h). diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch.erl index 5c0348d770cb..2545910a72ca 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_dispatch). diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_access_control.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_access_control.erl index 2a610ee5e31e..f405547d82ac 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_access_control.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_access_control.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_dispatch_access_control). @@ -277,15 +277,8 @@ halt_response(Code, Type, Reason, ReqData, Context) -> rabbit_json:encode(Json), ReqData), {stop, ReqData1, Context}. 
-not_authenticated(Reason, ReqData, Context, - #auth_settings{auth_realm = AuthRealm} = AuthConfig) -> - case is_oauth2_enabled(AuthConfig) of - false -> - ReqData1 = cowboy_req:set_resp_header(<<"www-authenticate">>, AuthRealm, ReqData), - halt_response(401, not_authorized, Reason, ReqData1, Context); - true -> - halt_response(401, not_authorized, Reason, ReqData, Context) - end. +not_authenticated(Reason, ReqData, Context, _AuthConfig) -> + halt_response(401, not_authorized, Reason, ReqData, Context). format_reason(Tuple) when is_tuple(Tuple) -> tuple(Tuple); @@ -366,6 +359,3 @@ log_access_control_result(NotOK) -> is_basic_auth_disabled(#auth_settings{basic_auth_enabled = Enabled}) -> not Enabled. - -is_oauth2_enabled(#auth_settings{oauth2_enabled = Enabled}) -> - Enabled. diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_app.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_app.erl index c66ff7e0cd73..bd8516624feb 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_app.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_dispatch_app). diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_listing_handler.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_listing_handler.erl index fda1d08a276b..3d7af4ef36d9 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_listing_handler.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_listing_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_dispatch_listing_handler). diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_registry.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_registry.erl index 76f597e71c06..e4777160e056 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_registry.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_registry.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_dispatch_registry). diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_sup.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_sup.erl index 940a06b1642b..701ad1a69249 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_sup.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. 
All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_dispatch_sup). @@ -30,8 +30,7 @@ ensure_listener(Listener) -> {Transport, TransportOpts, ProtoOpts} = preprocess_config(Listener), ProtoOptsMap = maps:from_list(ProtoOpts), StreamHandlers = stream_handlers_config(ProtoOpts), - rabbit_log:debug("Starting HTTP[S] listener with transport ~ts, options ~tp and protocol options ~tp, stream handlers ~tp", - [Transport, TransportOpts, ProtoOptsMap, StreamHandlers]), + rabbit_log:debug("Starting HTTP[S] listener with transport ~ts", [Transport]), CowboyOptsMap = maps:merge(#{env => #{rabbit_listener => Listener}, @@ -72,12 +71,9 @@ init([]) -> preprocess_config(Options) -> case proplists:get_value(ssl, Options) of true -> _ = rabbit_networking:ensure_ssl(), - case rabbit_networking:poodle_check('HTTP') of - ok -> case proplists:get_value(ssl_opts, Options) of - undefined -> auto_ssl(Options); - _ -> fix_ssl(Options) - end; - danger -> {ranch_tcp, transport_config(Options), protocol_config(Options)} + case proplists:get_value(ssl_opts, Options) of + undefined -> auto_ssl(Options); + _ -> fix_ssl(Options) end; _ -> {ranch_tcp, transport_config(Options), protocol_config(Options)} end. diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_util.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_util.erl index 9295fad8d2a0..7a551bb613d6 100644 --- a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_util.erl +++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_dispatch_util). diff --git a/deps/rabbitmq_web_dispatch/src/webmachine_log_handler.erl b/deps/rabbitmq_web_dispatch/src/webmachine_log_handler.erl index 4d539c7f0ed0..0f4ca6d5c9ab 100644 --- a/deps/rabbitmq_web_dispatch/src/webmachine_log_handler.erl +++ b/deps/rabbitmq_web_dispatch/src/webmachine_log_handler.erl @@ -28,8 +28,6 @@ terminate/2, code_change/3]). --include("webmachine_logger.hrl"). - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -endif. diff --git a/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl b/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl index f31e9953c35a..50a2b171a405 100644 --- a/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl +++ b/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_dispatch_SUITE). @@ -10,7 +10,6 @@ -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). 
all() -> diff --git a/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl b/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl index c24a55857049..7c2fb9c69e01 100644 --- a/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl +++ b/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl @@ -2,14 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_dispatch_unit_SUITE). -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). all() -> diff --git a/deps/rabbitmq_web_mqtt/.gitignore b/deps/rabbitmq_web_mqtt/.gitignore index 05dc56dccf48..0595211a7ee4 100644 --- a/deps/rabbitmq_web_mqtt/.gitignore +++ b/deps/rabbitmq_web_mqtt/.gitignore @@ -1,17 +1 @@ -*.swp -/.erlang.mk/ -/cover/ -/deps/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock -/test/*.beam - -/rabbitmq_web_mqtt.d - test/config_schema_SUITE_data/schema/ diff --git a/deps/rabbitmq_web_mqtt/BUILD.bazel b/deps/rabbitmq_web_mqtt/BUILD.bazel index 9206e1812792..f9561e14ffaf 100644 --- a/deps/rabbitmq_web_mqtt/BUILD.bazel +++ b/deps/rabbitmq_web_mqtt/BUILD.bazel @@ -68,6 +68,9 @@ rabbitmq_app( xref( name = "xref", + additional_libs = [ + "//deps/rabbitmq_cli:erlang_app", # keep + ], target = ":erlang_app", ) @@ -75,8 +78,9 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", + deps = ["//deps/rabbitmq_cli:erlang_app"], # keep ) dialyze( @@ -91,6 +95,7 @@ eunit( compiled_suites = [ ":test_src_rabbit_ws_test_util_beam", ":test_src_rfc6455_client_beam", + ":test_rabbit_web_mqtt_test_util_beam", ], target = ":test_erlang_app", ) @@ -101,6 +106,16 @@ rabbitmq_integration_suite( name = "config_schema_SUITE", ) +rabbitmq_integration_suite( + name = "command_SUITE", + additional_beam = [ + "test/rabbit_web_mqtt_test_util.beam", + ], + runtime_deps = [ + "@emqtt//:erlang_app", + ], +) + rabbitmq_integration_suite( name = "proxy_protocol_SUITE", additional_beam = [ diff --git a/deps/rabbitmq_web_mqtt/Makefile b/deps/rabbitmq_web_mqtt/Makefile index 899aadb9309e..9919e7cb82cd 100644 --- a/deps/rabbitmq_web_mqtt/Makefile +++ b/deps/rabbitmq_web_mqtt/Makefile @@ -21,11 +21,13 @@ LOCAL_DEPS = ssl DEPS = rabbit_common rabbit cowboy rabbitmq_mqtt TEST_DEPS = emqtt rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management +PLT_APPS += rabbitmqctl elixir cowlib + # FIXME: Add Ranch as a BUILD_DEPS to be sure the correct version is picked. # See rabbitmq-components.mk. BUILD_DEPS += ranch -dep_emqtt = git https://github.com/rabbitmq/emqtt.git master +dep_emqtt = git https://github.com/emqx/emqtt.git 1.11.0 DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_web_mqtt/README.md b/deps/rabbitmq_web_mqtt/README.md index accf14206abd..f5eb6e9ca43c 100644 --- a/deps/rabbitmq_web_mqtt/README.md +++ b/deps/rabbitmq_web_mqtt/README.md @@ -28,6 +28,6 @@ when building plugins from source. ## Copyright and License -(c) 2007-2020 VMware, Inc. or its affiliates. 
+(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Released under the same license as RabbitMQ. See LICENSE for details. diff --git a/deps/rabbitmq_web_mqtt/app.bzl b/deps/rabbitmq_web_mqtt/app.bzl index 6f54f7543425..17ab4ecacb84 100644 --- a/deps/rabbitmq_web_mqtt/app.bzl +++ b/deps/rabbitmq_web_mqtt/app.bzl @@ -9,6 +9,7 @@ def all_beam_files(name = "all_beam_files"): erlang_bytecode( name = "other_beam", srcs = [ + "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand.erl", "src/rabbit_web_mqtt_app.erl", "src/rabbit_web_mqtt_handler.erl", "src/rabbit_web_mqtt_stream_handler.erl", @@ -19,6 +20,7 @@ def all_beam_files(name = "all_beam_files"): erlc_opts = "//:erlc_opts", deps = [ "//deps/rabbit_common:erlang_app", + "//deps/rabbitmq_cli:erlang_app", "//deps/rabbitmq_mqtt:erlang_app", "@cowboy//:erlang_app", ], @@ -34,6 +36,7 @@ def all_test_beam_files(name = "all_test_beam_files"): name = "test_other_beam", testonly = True, srcs = [ + "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand.erl", "src/rabbit_web_mqtt_app.erl", "src/rabbit_web_mqtt_handler.erl", "src/rabbit_web_mqtt_stream_handler.erl", @@ -44,6 +47,7 @@ def all_test_beam_files(name = "all_test_beam_files"): erlc_opts = "//:test_erlc_opts", deps = [ "//deps/rabbit_common:erlang_app", + "//deps/rabbitmq_cli:erlang_app", "//deps/rabbitmq_mqtt:erlang_app", "@cowboy//:erlang_app", ], @@ -70,6 +74,7 @@ def all_srcs(name = "all_srcs"): filegroup( name = "srcs", srcs = [ + "src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand.erl", "src/rabbit_web_mqtt_app.erl", "src/rabbit_web_mqtt_handler.erl", "src/rabbit_web_mqtt_stream_handler.erl", @@ -128,3 +133,20 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbitmq_web_mqtt", erlc_opts = "//:test_erlc_opts", ) + erlang_bytecode( + name = "command_SUITE_beam_files", + testonly = True, + srcs = ["test/command_SUITE.erl"], + outs = ["test/command_SUITE.beam"], + app_name = "rabbitmq_web_mqtt", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_mqtt:erlang_app"], + ) + erlang_bytecode( + name = "test_rabbit_web_mqtt_test_util_beam", + testonly = True, + srcs = ["test/rabbit_web_mqtt_test_util.erl"], + outs = ["test/rabbit_web_mqtt_test_util.beam"], + app_name = "rabbitmq_web_mqtt", + erlc_opts = "//:test_erlc_opts", + ) diff --git a/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema b/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema index a69c1a208903..e4afd579d4b7 100644 --- a/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema +++ b/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% {mapping, "web_mqtt.num_acceptors.tcp", "rabbitmq_web_mqtt.num_tcp_acceptors", @@ -56,7 +56,7 @@ {mapping, "web_mqtt.ssl.cacertfile", "rabbitmq_web_mqtt.ssl_config.cacertfile", [{datatype, string}, {validators, ["file_accessible"]}]}. {mapping, "web_mqtt.ssl.password", "rabbitmq_web_mqtt.ssl_config.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. 
{translation, "rabbitmq_web_mqtt.ssl_config", diff --git a/deps/rabbitmq_amqp1_0/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl b/deps/rabbitmq_web_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand.erl similarity index 67% rename from deps/rabbitmq_amqp1_0/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl rename to deps/rabbitmq_web_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand.erl index 08657366e2ae..cde3b937dd0f 100644 --- a/deps/rabbitmq_amqp1_0/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl +++ b/deps/rabbitmq_web_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand.erl @@ -2,12 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. --module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand'). +-module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand'). + +-include_lib("rabbitmq_mqtt/include/rabbit_mqtt.hrl"). -behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour'). --include("rabbit_amqp1_0.hrl"). -export([formatter/0, scopes/0, @@ -15,34 +16,40 @@ aliases/0, usage/0, usage_additional/0, + usage_doc_guides/0, banner/2, validate/2, merge_defaults/2, run/2, output/2, - help_section/0, - description/0]). + description/0, + help_section/0]). formatter() -> 'Elixir.RabbitMQ.CLI.Formatters.Table'. scopes() -> [ctl, diagnostics]. switches() -> [{verbose, boolean}]. aliases() -> [{'V', verbose}]. +description() -> <<"Lists all Web MQTT connections">>. + +help_section() -> + {plugin, web_mqtt}. + validate(Args, _) -> - ValidKeys = lists:map(fun atom_to_list/1, ?INFO_ITEMS), + InfoItems = lists:map(fun atom_to_list/1, ?INFO_ITEMS), case 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':validate_info_keys(Args, - ValidKeys) of + InfoItems) of {ok, _} -> ok; Error -> Error end. merge_defaults([], Opts) -> - merge_defaults([<<"pid">>], Opts); + merge_defaults([<<"client_id">>, <<"conn_name">>], Opts); merge_defaults(Args, Opts) -> {Args, maps:merge(#{verbose => false}, Opts)}. usage() -> - <<"list_amqp10_connections [<column> ...]">>. + <<"list_web_mqtt_connections [<column> ...]">>. usage_additional() -> Prefix = <<" must be one of ">>, @@ -51,10 +58,8 @@ usage_additional() -> {<<"<column>">>, <<Prefix/binary, InfoItems/binary>>} ]. -description() -> <<"Lists AMQP 1.0 connections on the target node">>. - -help_section() -> - {plugin, 'amqp1.0'}. +usage_doc_guides() -> + [<<"https://rabbitmq.com/docs/web-mqtt">>]. run(Args, #{node := NodeName, timeout := Timeout, @@ -63,18 +68,19 @@ run(Args, #{node := NodeName, true -> ?INFO_ITEMS; false -> 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':prepare_info_keys(Args) end, + Nodes = 'Elixir.RabbitMQ.CLI.Core.Helpers':nodes_in_cluster(NodeName), 'Elixir.RabbitMQ.CLI.Ctl.RpcStream':receive_list_items( NodeName, - rabbit_amqp1_0, + rabbit_web_mqtt_app, emit_connection_info_all, [Nodes, InfoKeys], Timeout, InfoKeys, length(Nodes)). -banner(_, _) -> <<"Listing AMQP 1.0 connections ...">>. +banner(_, _) -> <<"Listing Web MQTT connections ...">>. output(Result, _Opts) -> 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Result).
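The renamed module above becomes the list_web_mqtt_connections CLI command. A minimal sketch of driving it directly from an Erlang shell on a node where the CLI modules are loaded, mirroring how the new command_SUITE further down in this diff invokes it (the node name and timeout are placeholder values):

%% Sketch, mirroring command_SUITE below; node name and timeout are placeholders.
Opts = #{node => 'rabbit@localhost', timeout => 10_000, verbose => false},
Rows = 'Elixir.Enum':to_list(
         'Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand':run(
           [<<"client_id">>, <<"conn_name">>], Opts)).
%% With no column arguments, merge_defaults/2 falls back to these same two columns.

run/2 fans the request out to every node in the cluster via rabbit_web_mqtt_app:emit_connection_info_all/4, which the next file in this diff introduces.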
diff --git a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_app.erl b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_app.erl index 67fb31b05cb4..fc6424ffae4f 100644 --- a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_app.erl +++ b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_mqtt_app). @@ -12,7 +12,9 @@ start/2, prep_stop/1, stop/1, - list_connections/0 + list_connections/0, + emit_connection_info_all/4, + emit_connection_info_local/3 ]). %% Dummy supervisor - see Ulf Wiger's comment at @@ -48,27 +50,33 @@ init([]) -> {ok, {{one_for_one, 1, 5}, []}}. -spec list_connections() -> [pid()]. list_connections() -> - PlainPids = connection_pids_of_protocol(?TCP_PROTOCOL), - TLSPids = connection_pids_of_protocol(?TLS_PROTOCOL), + PlainPids = rabbit_networking:list_local_connections_of_protocol(?TCP_PROTOCOL), + TLSPids = rabbit_networking:list_local_connections_of_protocol(?TLS_PROTOCOL), PlainPids ++ TLSPids. +-spec emit_connection_info_all([node()], rabbit_types:info_keys(), reference(), pid()) -> term(). +emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) -> + Pids = [spawn_link(Node, ?MODULE, emit_connection_info_local, + [Items, Ref, AggregatorPid]) + || Node <- Nodes], + + rabbit_control_misc:await_emitters_termination(Pids). + +-spec emit_connection_info_local(rabbit_types:info_keys(), reference(), pid()) -> ok. +emit_connection_info_local(Items, Ref, AggregatorPid) -> + LocalPids = list_connections(), + emit_connection_info(Items, Ref, AggregatorPid, LocalPids). + +emit_connection_info(Items, Ref, AggregatorPid, Pids) -> + rabbit_control_misc:emitting_map_with_exit_handler( + AggregatorPid, Ref, + fun(Pid) -> + rabbit_web_mqtt_handler:info(Pid, Items) + end, Pids). %% %% Implementation %% -connection_pids_of_protocol(Protocol) -> - case rabbit_networking:ranch_ref_of_protocol(Protocol) of - undefined -> []; - AcceptorRef -> - lists:map(fun cowboy_ws_connection_pid/1, ranch:procs(AcceptorRef, connections)) - end. - --spec cowboy_ws_connection_pid(pid()) -> pid(). -cowboy_ws_connection_pid(RanchConnPid) -> - Children = supervisor:which_children(RanchConnPid), - {cowboy_clear, Pid, _, _} = lists:keyfind(cowboy_clear, 1, Children), - Pid. - mqtt_init() -> CowboyOpts0 = maps:from_list(get_env(cowboy_opts, [])), CowboyWsOpts = maps:from_list(get_env(cowboy_ws_opts, [])), diff --git a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl index ac430021bcd0..67e99400b500 100644 --- a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl +++ b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_mqtt_handler). @@ -23,6 +23,7 @@ ]). -export([conserve_resources/3]). +-export([info/2]). 
%% cowboy_sub_protocol -export([upgrade/4, @@ -40,8 +41,7 @@ conserve = false :: boolean(), stats_timer :: option(rabbit_event:state()), keepalive = rabbit_mqtt_keepalive:init() :: rabbit_mqtt_keepalive:state(), - conn_name :: option(binary()), - should_use_fhc :: rabbit_types:option(boolean()) + conn_name :: option(binary()) }). -type state() :: #state{}. @@ -75,37 +75,38 @@ init(Req, Opts) -> undefined -> no_supported_sub_protocol(undefined, Req); Protocol -> - WsOpts0 = proplists:get_value(ws_opts, Opts, #{}), - WsOpts = maps:merge(#{compress => true}, WsOpts0), case lists:member(<<"mqtt">>, Protocol) of false -> no_supported_sub_protocol(Protocol, Req); true -> - ShouldUseFHC = application:get_env(?APP, use_file_handle_cache, true), - case ShouldUseFHC of - true -> ?LOG_INFO("Web MQTT: file handle cache use is enabled"); - false -> ?LOG_INFO("Web MQTT: file handle cache use is disabled") - end, + WsOpts0 = proplists:get_value(ws_opts, Opts, #{}), + WsOpts = maps:merge(#{compress => true}, WsOpts0), {?MODULE, cowboy_req:set_resp_header(<<"sec-websocket-protocol">>, <<"mqtt">>, Req), - #state{socket = maps:get(proxy_header, Req, undefined), should_use_fhc = ShouldUseFHC}, + #state{socket = maps:get(proxy_header, Req, undefined)}, WsOpts} end end. +%% We cannot use a gen_server call, because the handler process is a +%% special cowboy_websocket process (not a gen_server) which assumes +%% all gen_server calls are supervisor calls, and does not pass on the +%% request to this callback module. (see cowboy_websocket:loop/3 and +%% cowboy_children:handle_supervisor_call/4) However using a generic +%% gen:call with a special label ?MODULE works fine. +-spec info(pid(), rabbit_types:info_keys()) -> + rabbit_types:infos(). +info(Pid, all) -> + info(Pid, ?INFO_ITEMS); +info(Pid, Items) -> + {ok, Res} = gen:call(Pid, ?MODULE, {info, Items}), + Res. -spec websocket_init(state()) -> {cowboy_websocket:commands(), state()} | {cowboy_websocket:commands(), state(), hibernate}. -websocket_init(State0 = #state{socket = Sock, should_use_fhc = ShouldUseFHC}) -> +websocket_init(State0 = #state{socket = Sock}) -> logger:set_process_metadata(#{domain => ?RMQLOG_DOMAIN_CONN ++ [web_mqtt]}), - case ShouldUseFHC of - true -> - ok = file_handle_cache:obtain(); - false -> ok; - undefined -> - ok = file_handle_cache:obtain() - end, case rabbit_net:connection_string(Sock, inbound) of {ok, ConnStr} -> ConnName = rabbit_data_coercion:to_binary(ConnStr), @@ -167,10 +168,6 @@ websocket_info({'$gen_cast', QueueEvent = {queue_event, _, _}}, [State#state.conn_name, Reason]), stop(State#state{proc_state = PState}) end; -websocket_info({'$gen_cast', duplicate_id}, State) -> - %% Delete this backward compatibility clause when feature flag - %% delete_ra_cluster_mqtt_node becomes required. 
- websocket_info({'$gen_cast', {duplicate_id, true}}, State); websocket_info({'$gen_cast', {duplicate_id, SendWill}}, State = #state{proc_state = ProcState, conn_name = ConnName}) -> @@ -179,8 +176,9 @@ websocket_info({'$gen_cast', {duplicate_id, SendWill}}, rabbit_mqtt_processor:send_disconnect(?RC_SESSION_TAKEN_OVER, ProcState), defer_close(?CLOSE_NORMAL, SendWill), {[], State}; -websocket_info({'$gen_cast', {close_connection, Reason}}, State = #state{proc_state = ProcState, - conn_name = ConnName}) -> +websocket_info({'$gen_cast', {close_connection, Reason}}, + State = #state{proc_state = ProcState, + conn_name = ConnName}) -> ?LOG_WARNING("Web MQTT disconnecting client with ID '~s' (~p), reason: ~s", [rabbit_mqtt_processor:info(client_id, ProcState), ConnName, Reason]), case Reason of @@ -218,12 +216,16 @@ websocket_info({keepalive, Req}, State = #state{proc_state = ProcState, [ConnName, Reason]), stop(State) end; +websocket_info(credential_expired, + State = #state{proc_state = ProcState, + conn_name = ConnName}) -> + ?LOG_WARNING("Web MQTT disconnecting client with ID '~s' (~p) because credential expired", + [rabbit_mqtt_processor:info(client_id, ProcState), ConnName]), + rabbit_mqtt_processor:send_disconnect(?RC_MAXIMUM_CONNECT_TIME, ProcState), + defer_close(?CLOSE_NORMAL), + {[], State}; websocket_info(emit_stats, State) -> {[], emit_stats(State), hibernate}; -websocket_info({ra_event, _From, Evt}, - #state{proc_state = PState0} = State) -> - PState = rabbit_mqtt_processor:handle_ra_event(Evt, PState0), - {[], State#state{proc_state = PState}, hibernate}; websocket_info({{'DOWN', _QName}, _MRef, process, _Pid, _Reason} = Evt, State = #state{proc_state = PState0}) -> case rabbit_mqtt_processor:handle_down(Evt, PState0) of @@ -244,6 +246,10 @@ websocket_info(connection_created, State) -> rabbit_core_metrics:connection_created(self(), Infos), rabbit_event:notify(connection_created, Infos), {[], State, hibernate}; +websocket_info({?MODULE, From, {info, Items}}, State) -> + Infos = infos(Items, State), + gen:reply(From, Infos), + {[], State, hibernate}; websocket_info(Msg, State) -> ?LOG_WARNING("Web MQTT: unexpected message ~tp", [Msg]), {[], State, hibernate}. @@ -253,18 +259,10 @@ terminate(Reason, Request, #state{} = State) -> terminate(_Reason, _Request, {SendWill, #state{conn_name = ConnName, proc_state = PState, - keepalive = KState, - should_use_fhc = ShouldUseFHC} = State}) -> + keepalive = KState} = State}) -> ?LOG_INFO("Web MQTT closing connection ~ts", [ConnName]), maybe_emit_stats(State), _ = rabbit_mqtt_keepalive:cancel_timer(KState), - case ShouldUseFHC of - true -> - ok = file_handle_cache:release(); - false -> ok; - undefined -> - ok = file_handle_cache:release() - end, case PState of connect_packet_unprocessed -> ok; @@ -278,7 +276,9 @@ terminate(_Reason, _Request, no_supported_sub_protocol(Protocol, Req) -> %% The client MUST include “mqtt” in the list of WebSocket Sub Protocols it offers [MQTT-6.0.0-3]. ?LOG_ERROR("Web MQTT: 'mqtt' not included in client offered subprotocols: ~tp", [Protocol]), - {ok, cowboy_req:reply(400, #{<<"connection">> => <<"close">>}, Req), #state{}}. + {ok, + cowboy_req:reply(400, #{<<"connection">> => <<"close">>}, Req), + #state{}}. 
handle_data(Data, State0 = #state{}) -> case handle_data1(Data, State0) of diff --git a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_stream_handler.erl b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_stream_handler.erl index 68d6ad2f2987..ed2f83486781 100644 --- a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_stream_handler.erl +++ b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_stream_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_mqtt_stream_handler). diff --git a/deps/rabbitmq_web_mqtt/test/command_SUITE.erl b/deps/rabbitmq_web_mqtt/test/command_SUITE.erl new file mode 100644 index 000000000000..c526d8c4f217 --- /dev/null +++ b/deps/rabbitmq_web_mqtt/test/command_SUITE.erl @@ -0,0 +1,176 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + + +-module(command_SUITE). +-compile([export_all, nowarn_export_all]). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("rabbitmq_mqtt/include/rabbit_mqtt.hrl"). + +-import(rabbit_web_mqtt_test_util, + [connect/3, connect/4]). + +-define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand'). + +all() -> + [ + {group, unit}, + {group, v5} + ]. + +groups() -> + [ + {unit, [], [merge_defaults]}, + {v5, [], [run, + user_property]} + ]. + +suite() -> + [ + {timetrap, {minutes, 10}} + ]. + +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, ?MODULE}, + {rmq_extra_tcp_ports, [tcp_port_mqtt_extra, + tcp_port_mqtt_tls_extra]}, + {rmq_nodes_clustered, true}, + {rmq_nodes_count, 3} + ]), + rabbit_ct_helpers:run_setup_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_group(unit, Config) -> + Config; +init_per_group(v5 = V5, Config) -> + rabbit_ct_helpers:set_config(Config, {mqtt_version, V5}). + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +merge_defaults(_Config) -> + {[<<"client_id">>, <<"conn_name">>], #{verbose := false}} = + ?COMMAND:merge_defaults([], #{}), + + {[<<"other_key">>], #{verbose := true}} = + ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => true}), + + {[<<"other_key">>], #{verbose := false}} = + ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => false}). 
+ + +run(BaseConfig) -> + Node = rabbit_ct_broker_helpers:get_node_config(BaseConfig, 0, nodename), + Config = [{websocket, true} | BaseConfig], + Opts = #{node => Node, timeout => 10_000, verbose => false}, + %% No connections + [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)), + + %% Open an MQTT connection + C1 = connect(<<"simpleMqttClient">>, BaseConfig, [{ack_timeout, 1}]), + timer:sleep(200), + + %% No connections for MQTT-over-WebSockets, C1 is an MQTT connection + [] = 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts)), + + %% Open a WebMQTT connection + + C2 = connect(<<"simpleWebMqttClient">>, Config, [{ack_timeout, 1}]), + timer:sleep(200), + + [[{client_id, <<"simpleWebMqttClient">>}]] = + 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts)), + + C3 = connect(<<"simpleWebMqttClient1">>, Config, [{ack_timeout, 1}]), + timer:sleep(200), + + [[{client_id, <<"simpleWebMqttClient">>}, {user, <<"guest">>}], + [{client_id, <<"simpleWebMqttClient1">>}, {user, <<"guest">>}]] = + lists:sort( + 'Elixir.Enum':to_list( + ?COMMAND:run([<<"client_id">>, <<"user">>], Opts))), + + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + start_amqp_connection(network, Node, Port), + + %% There are still just two Web MQTT connections + [[{client_id, <<"simpleWebMqttClient">>}], + [{client_id, <<"simpleWebMqttClient1">>}]] = + lists:sort('Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts))), + + start_amqp_connection(direct, Node, Port), + timer:sleep(200), + + %% Still two Web MQTT connections + [[{client_id, <<"simpleWebMqttClient">>}], + [{client_id, <<"simpleWebMqttClient1">>}]] = + lists:sort('Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts))), + + %% Verbose returns all keys + AllKeys = lists:map(fun(I) -> atom_to_binary(I) end, ?INFO_ITEMS), + [AllInfos1Con1, _AllInfos1Con2] = + 'Elixir.Enum':to_list(?COMMAND:run(AllKeys, Opts)), + [AllInfos2Con1, _AllInfos2Con2] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts#{verbose => true})), + + %% Keys are INFO_ITEMS + InfoItemsSorted = lists:sort(?INFO_ITEMS), + ?assertEqual(InfoItemsSorted, lists:sort(proplists:get_keys(AllInfos1Con1))), + ?assertEqual(InfoItemsSorted, lists:sort(proplists:get_keys(AllInfos2Con1))), + + %% List Web MQTT connections from all nodes + C4 = connect(<<"simpleWebMqttClient2">>, Config, 1, [{ack_timeout, 1}]), + rabbit_ct_helpers:eventually( + ?_assertEqual( + [[{client_id, <<"simpleWebMqttClient">>}], + [{client_id, <<"simpleWebMqttClient1">>}], + [{client_id, <<"simpleWebMqttClient2">>}]], + lists:sort('Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts))))), + + ok = emqtt:disconnect(C1), + ok = emqtt:disconnect(C2), + ok = emqtt:disconnect(C3), + ok = emqtt:disconnect(C4). + +user_property(BaseConfig) -> + Node = rabbit_ct_broker_helpers:get_node_config(BaseConfig, 0, nodename), + Config = [{websocket, true} | BaseConfig], + Opts = #{node => Node, timeout => 10_000, verbose => false}, + ClientId = <<"my-client">>, + UserProp = [{<<"name 1">>, <<"value 1">>}, + {<<"name 2">>, <<"value 2">>}, + %% "The same name is allowed to appear more than once." [v5 3.1.2.11.8] + {<<"name 2">>, <<"value 3">>}], + C = connect(ClientId, Config, 1, [{properties, #{'User-Property' => UserProp}}]), + rabbit_ct_helpers:eventually( + ?_assertEqual( + [[{client_id, ClientId}, + {user_property, UserProp}]], + 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>, <<"user_property">>], Opts)))), + ok = emqtt:disconnect(C). 
+ +start_amqp_connection(Type, Node, Port) -> + amqp_connection:start(amqp_params(Type, Node, Port)). + +amqp_params(network, _, Port) -> + #amqp_params_network{port = Port}; +amqp_params(direct, Node, _) -> + #amqp_params_direct{node = Node}. diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl index ec256f442188..694d7ea5a25a 100644 --- a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets index f8ef2916f6ef..ab6735cbc830 100644 --- a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets +++ b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets @@ -85,7 +85,7 @@ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}]}]}], + {password,<<"changeme">>}]}]}], [rabbitmq_web_mqtt]}, {ssl, @@ -108,7 +108,7 @@ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}, + {password,<<"changeme">>}, {versions,['tlsv1.2','tlsv1.1']} ]}]}], @@ -145,7 +145,7 @@ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}, + {password,<<"changeme">>}, {honor_cipher_order, true}, {honor_ecc_order, true}, diff --git a/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl index 8100cb36dff4..d13426342d30 100644 --- a/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(proxy_protocol_SUITE). diff --git a/deps/rabbitmq_web_mqtt/test/rabbit_web_mqtt_test_util.erl b/deps/rabbitmq_web_mqtt/test/rabbit_web_mqtt_test_util.erl new file mode 100644 index 000000000000..ee89668cf7e1 --- /dev/null +++ b/deps/rabbitmq_web_mqtt/test/rabbit_web_mqtt_test_util.erl @@ -0,0 +1,39 @@ +-module(rabbit_web_mqtt_test_util). + +-include_lib("eunit/include/eunit.hrl"). + +-export([connect/3, + connect/4 + ]). + +connect(ClientId, Config, AdditionalOpts) -> + connect(ClientId, Config, 0, AdditionalOpts). 
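The suite above takes its connect/3,4 helpers from the new rabbit_web_mqtt_test_util module, whose definition continues below: the websocket flag in the CT config decides whether emqtt dials the plain MQTT listener (tcp_port_mqtt via emqtt:connect/1) or the Web MQTT listener (tcp_port_web_mqtt with a "/ws" path via emqtt:ws_connect/1). A rough sketch of a test case exercising both transports through the helper — the client ids are illustrative, the config keys are the ones the helper reads:

    %% Sketch only: the same helper covers raw MQTT and MQTT-over-WebSockets,
    %% switched by a {websocket, true} entry in the CT config.
    both_transports(Config) ->
        C1 = rabbit_web_mqtt_test_util:connect(<<"tcp-client">>, Config, []),
        WsConfig = [{websocket, true} | Config],
        C2 = rabbit_web_mqtt_test_util:connect(<<"ws-client">>, WsConfig, []),
        ok = emqtt:disconnect(C1),
        ok = emqtt:disconnect(C2).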
+ +connect(ClientId, Config, Node, AdditionalOpts) -> + {C, Connect} = start_client(ClientId, Config, Node, AdditionalOpts), + {ok, _Properties} = Connect(C), + C. + +start_client(ClientId, Config, Node, AdditionalOpts) -> + {Port, WsOpts, Connect} = + case rabbit_ct_helpers:get_config(Config, websocket, false) of + false -> + {rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_mqtt), + [], + fun emqtt:connect/1}; + true -> + {rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_web_mqtt), + [{ws_path, "/ws"}], + fun emqtt:ws_connect/1} + end, + ProtoVer = proplists:get_value( + proto_ver, + AdditionalOpts, + rabbit_ct_helpers:get_config(Config, mqtt_version, v4)), + Options = [{host, "localhost"}, + {port, Port}, + {proto_ver, ProtoVer}, + {clientid, rabbit_data_coercion:to_binary(ClientId)} + ] ++ WsOpts ++ AdditionalOpts, + {ok, C} = emqtt:start_link(Options), + {C, Connect}. diff --git a/deps/rabbitmq_web_mqtt/test/src/rabbit_ws_test_util.erl b/deps/rabbitmq_web_mqtt/test/src/rabbit_ws_test_util.erl index 9b1f73bf19ab..c85fa5d3a458 100644 --- a/deps/rabbitmq_web_mqtt/test/src/rabbit_ws_test_util.erl +++ b/deps/rabbitmq_web_mqtt/test/src/rabbit_ws_test_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ws_test_util). diff --git a/deps/rabbitmq_web_mqtt/test/src/rfc6455_client.erl b/deps/rabbitmq_web_mqtt/test/src/rfc6455_client.erl index fc8fd0f98fb9..f0079b858148 100644 --- a/deps/rabbitmq_web_mqtt/test/src/rfc6455_client.erl +++ b/deps/rabbitmq_web_mqtt/test/src/rfc6455_client.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rfc6455_client). diff --git a/deps/rabbitmq_web_mqtt/test/system_SUITE.erl b/deps/rabbitmq_web_mqtt/test/system_SUITE.erl index 19b0f873c46a..35af6e923d28 100644 --- a/deps/rabbitmq_web_mqtt/test/system_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/system_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(system_SUITE). diff --git a/deps/rabbitmq_web_mqtt_examples/.gitignore b/deps/rabbitmq_web_mqtt_examples/.gitignore deleted file mode 100644 index 6804c4c69c9c..000000000000 --- a/deps/rabbitmq_web_mqtt_examples/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? 
-*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -/rabbitmq_web_mqtt_examples.d diff --git a/deps/rabbitmq_web_mqtt_examples/src/rabbit_web_mqtt_examples_app.erl b/deps/rabbitmq_web_mqtt_examples/src/rabbit_web_mqtt_examples_app.erl index 8c4f4f7f91fb..3eca741d98a4 100644 --- a/deps/rabbitmq_web_mqtt_examples/src/rabbit_web_mqtt_examples_app.erl +++ b/deps/rabbitmq_web_mqtt_examples/src/rabbit_web_mqtt_examples_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_mqtt_examples_app). diff --git a/deps/rabbitmq_web_stomp/.gitignore b/deps/rabbitmq_web_stomp/.gitignore index 00665f204471..0595211a7ee4 100644 --- a/deps/rabbitmq_web_stomp/.gitignore +++ b/deps/rabbitmq_web_stomp/.gitignore @@ -1,19 +1 @@ -.sw? -.*.sw? -*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -rabbitmq_web_stomp.d - test/config_schema_SUITE_data/schema/ diff --git a/deps/rabbitmq_web_stomp/BUILD.bazel b/deps/rabbitmq_web_stomp/BUILD.bazel index ed01db749c57..1015703a58e4 100644 --- a/deps/rabbitmq_web_stomp/BUILD.bazel +++ b/deps/rabbitmq_web_stomp/BUILD.bazel @@ -79,7 +79,7 @@ plt( name = "deps_plt", for_target = ":erlang_app", ignore_warnings = True, - libs = ["//deps/rabbitmq_cli:elixir"], # keep + libs = ["@rules_elixir//elixir"], # keep plt = "//:base_plt", ) diff --git a/deps/rabbitmq_web_stomp/Makefile b/deps/rabbitmq_web_stomp/Makefile index 727cbfaf184f..505d5d6f3926 100644 --- a/deps/rabbitmq_web_stomp/Makefile +++ b/deps/rabbitmq_web_stomp/Makefile @@ -22,6 +22,8 @@ endef DEPS = cowboy rabbit_common rabbit rabbitmq_stomp TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers +PLT_APPS += cowlib + # FIXME: Add Ranch as a BUILD_DEPS to be sure the correct version is picked. # See rabbitmq-components.mk. BUILD_DEPS += ranch diff --git a/deps/rabbitmq_web_stomp/README.md b/deps/rabbitmq_web_stomp/README.md index e0370abc0e95..b7ddef92fe81 100644 --- a/deps/rabbitmq_web_stomp/README.md +++ b/deps/rabbitmq_web_stomp/README.md @@ -29,6 +29,6 @@ will build the plugin and put build artifacts under the `./plugins` directory. ## Copyright and License -(c) 2007-2020 VMware, Inc. or its affiliates. +(c) 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. Released under the MPL, the same license as RabbitMQ. diff --git a/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema b/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema index b672088b9fb4..c16e74837563 100644 --- a/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema +++ b/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% {mapping, "web_stomp.port", "rabbitmq_web_stomp.port", @@ -65,7 +65,7 @@ {mapping, "web_stomp.ssl.cacertfile", "rabbitmq_web_stomp.ssl_config.cacertfile", [{datatype, string}, {validators, ["file_accessible"]}]}. {mapping, "web_stomp.ssl.password", "rabbitmq_web_stomp.ssl_config.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {translation, "rabbitmq_web_stomp.ssl_config", diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_app.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_app.erl index f18ed3648066..fb6dcf661533 100644 --- a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_app.erl +++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_stomp_app). diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_connection_sup.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_connection_sup.erl index 4d9cc8dcd6e1..fdec94ba2aad 100644 --- a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_connection_sup.erl +++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_connection_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_stomp_connection_sup). diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_handler.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_handler.erl index e6724b20a55d..c727ec3de505 100644 --- a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_handler.erl +++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_stomp_handler). @@ -10,7 +10,6 @@ -behaviour(cowboy_sub_protocol). -include_lib("kernel/include/logger.hrl"). --include_lib("rabbit_common/include/logging.hrl"). -include_lib("rabbitmq_stomp/include/rabbit_stomp.hrl"). -include_lib("rabbitmq_stomp/include/rabbit_stomp_frame.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). @@ -43,8 +42,7 @@ peername, auth_hd, stats_timer, - connection, - should_use_fhc :: rabbit_types:option(boolean()) + connection }). -define(APP, rabbitmq_web_stomp). 
@@ -85,11 +83,6 @@ init(Req0, Opts) -> end, WsOpts0 = proplists:get_value(ws_opts, Opts, #{}), WsOpts = maps:merge(#{compress => true}, WsOpts0), - ShouldUseFHC = application:get_env(?APP, use_file_handle_cache, true), - case ShouldUseFHC of - true -> ?LOG_INFO("Web STOMP: file handle cache use is enabled"); - false -> ?LOG_INFO("Web STOMP: file handle cache use is disabled") - end, {?MODULE, Req, #state{ frame_type = proplists:get_value(type, Opts, text), heartbeat_sup = KeepaliveSup, @@ -99,18 +92,10 @@ init(Req0, Opts) -> conserve_resources = false, socket = SockInfo, peername = PeerAddr, - auth_hd = cowboy_req:header(<<"authorization">>, Req), - should_use_fhc = ShouldUseFHC + auth_hd = cowboy_req:header(<<"authorization">>, Req) }, WsOpts}. -websocket_init(State = #state{should_use_fhc = ShouldUseFHC}) -> - case ShouldUseFHC of - true -> - ok = file_handle_cache:obtain(); - false -> ok; - undefined -> - ok = file_handle_cache:obtain() - end, +websocket_init(State) -> process_flag(trap_exit, true), {ok, ProcessorState} = init_processor_state(State), {ok, rabbit_event:init_stats_timer( @@ -127,7 +112,7 @@ close_connection(Pid, Reason) -> init_processor_state(#state{socket=Sock, peername=PeerAddr, auth_hd=AuthHd}) -> Self = self(), - SendFun = fun (_Sync, Data) -> + SendFun = fun(Data) -> Self ! {send, Data}, ok end, @@ -331,15 +316,8 @@ maybe_block(State, _) -> stop(State) -> stop(State, 1000, "STOMP died"). -stop(State = #state{proc_state = ProcState, should_use_fhc = ShouldUseFHC}, CloseCode, Error0) -> +stop(State = #state{proc_state = ProcState}, CloseCode, Error0) -> maybe_emit_stats(State), - case ShouldUseFHC of - true -> - ok = file_handle_cache:release(); - false -> ok; - undefined -> - ok = file_handle_cache:release() - end, _ = rabbit_stomp_processor:flush_and_die(ProcState), Error1 = rabbit_data_coercion:to_binary(Error0), {[{close, CloseCode, Error1}], State}. diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_internal_event_handler.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_internal_event_handler.erl index 16c97cb53f00..e2734c060774 100644 --- a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_internal_event_handler.erl +++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_internal_event_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_stomp_internal_event_handler). diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_listener.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_listener.erl index 4d0ac868724e..31293278ff99 100644 --- a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_listener.erl +++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_listener.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_stomp_listener). @@ -17,8 +17,6 @@ %% for testing purposes -export([get_binding_address/1, get_tcp_port/1, get_tcp_conf/2]). 
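Beyond the copyright updates, the rabbit_web_stomp_handler hunks above make two behavioural changes: the WebSocket handler no longer obtains and releases a file_handle_cache slot (the use_file_handle_cache setting and the should_use_fhc record field are removed), and the send callback handed to the STOMP processor loses its unused sync argument. A minimal sketch of the new arity-1 callback, mirroring the diff; the {send, Data} message is presumably consumed by the handler's existing websocket_info clause, which is not part of these hunks:

    %% Sketch: building the arity-1 send callback now expected by
    %% rabbit_stomp_processor; outbound STOMP data is forwarded to the
    %% handler process as a {send, Data} message.
    make_send_fun() ->
        Self = self(),
        fun(Data) ->
            Self ! {send, Data},
            ok
        end.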
--include_lib("rabbitmq_stomp/include/rabbit_stomp.hrl"). - -import(rabbit_misc, [pget/2]). -define(TCP_PROTOCOL, 'http/web-stomp'). diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_middleware.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_middleware.erl index affa3c944176..9797c0ca81c6 100644 --- a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_middleware.erl +++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_middleware.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_stomp_middleware). diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_stream_handler.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_stream_handler.erl index 99b3b34dbb17..ba7655705fd4 100644 --- a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_stream_handler.erl +++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_stream_handler.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_stomp_stream_handler). diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_sup.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_sup.erl index 4546024a6dbf..4b3b7a5ac860 100644 --- a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_sup.erl +++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_sup.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_stomp_sup). diff --git a/deps/rabbitmq_web_stomp/test/amqp_stomp_SUITE.erl b/deps/rabbitmq_web_stomp/test/amqp_stomp_SUITE.erl index 95bf438960b6..48f929d02a48 100644 --- a/deps/rabbitmq_web_stomp/test/amqp_stomp_SUITE.erl +++ b/deps/rabbitmq_web_stomp/test/amqp_stomp_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(amqp_stomp_SUITE). diff --git a/deps/rabbitmq_web_stomp/test/config_schema_SUITE.erl b/deps/rabbitmq_web_stomp/test/config_schema_SUITE.erl index 81f44292c1dc..e71aaa80627d 100644 --- a/deps/rabbitmq_web_stomp/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_web_stomp/test/config_schema_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. 
The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(config_schema_SUITE). diff --git a/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets index 8a41ce031b90..fc901e2d05a4 100644 --- a/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets +++ b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets @@ -79,7 +79,7 @@ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}]}]}], + {password,<<"changeme">>}]}]}], [rabbitmq_web_stomp]}, {ssl, @@ -99,7 +99,7 @@ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}, + {password,<<"changeme">>}, {versions,['tlsv1.2','tlsv1.1']} ]}]}], @@ -136,7 +136,7 @@ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}, + {password,<<"changeme">>}, {honor_cipher_order, true}, {honor_ecc_order, true}, diff --git a/deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl b/deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl index 10b525bf7724..e49add069046 100644 --- a/deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl +++ b/deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(cowboy_websocket_SUITE). diff --git a/deps/rabbitmq_web_stomp/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_web_stomp/test/proxy_protocol_SUITE.erl index 2d7868a2cc73..4becaaf2d3ef 100644 --- a/deps/rabbitmq_web_stomp/test/proxy_protocol_SUITE.erl +++ b/deps/rabbitmq_web_stomp/test/proxy_protocol_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(proxy_protocol_SUITE). diff --git a/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl b/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl index 2e685bae0795..e0d67fd5e242 100644 --- a/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl +++ b/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(raw_websocket_SUITE). 
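The schema and snippet hunks above change web_mqtt.ssl.password and web_stomp.ssl.password from the plain string datatype to [tagged_binary, binary], so the translated listener configuration now carries the TLS key password as an Erlang binary rather than a charlist. A sketch of the resulting advanced-config shape, using only the placeholder certificate paths and password shown in the snippets; the remaining listener options are elided here:

    %% Sketch of the translated ssl_config with the password as a binary.
    %% Paths and the password are the snippet placeholders; other listener
    %% options are omitted.
    {rabbitmq_web_stomp,
     [{ssl_config,
       [{certfile,   "test/config_schema_SUITE_data/certs/cert.pem"},
        {keyfile,    "test/config_schema_SUITE_data/certs/key.pem"},
        {cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"},
        {password,   <<"changeme">>}]}]}.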
diff --git a/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl b/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl index c2df34dcd316..b2eecc83707a 100644 --- a/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl +++ b/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ws_test_util). diff --git a/deps/rabbitmq_web_stomp/test/src/rfc6455_client.erl b/deps/rabbitmq_web_stomp/test/src/rfc6455_client.erl index d790eda08237..38927d2a6a39 100644 --- a/deps/rabbitmq_web_stomp/test/src/rfc6455_client.erl +++ b/deps/rabbitmq_web_stomp/test/src/rfc6455_client.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rfc6455_client). diff --git a/deps/rabbitmq_web_stomp/test/src/stomp.erl b/deps/rabbitmq_web_stomp/test/src/stomp.erl index bba9dff54f42..6b1849d869f8 100644 --- a/deps/rabbitmq_web_stomp/test/src/stomp.erl +++ b/deps/rabbitmq_web_stomp/test/src/stomp.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(stomp). diff --git a/deps/rabbitmq_web_stomp/test/unit_SUITE.erl b/deps/rabbitmq_web_stomp/test/unit_SUITE.erl index 6cd449e347fb..c59efad31a70 100644 --- a/deps/rabbitmq_web_stomp/test/unit_SUITE.erl +++ b/deps/rabbitmq_web_stomp/test/unit_SUITE.erl @@ -2,13 +2,11 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(unit_SUITE). --include_lib("common_test/include/ct.hrl"). - -compile(export_all). all() -> diff --git a/deps/rabbitmq_web_stomp_examples/.gitignore b/deps/rabbitmq_web_stomp_examples/.gitignore deleted file mode 100644 index 8f28f69b4f1a..000000000000 --- a/deps/rabbitmq_web_stomp_examples/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -.sw? -.*.sw? 
-*.beam -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/escript.lock -/logs/ -/plugins/ -/plugins.lock -/sbin/ -/sbin.lock - -/rabbitmq_web_stomp_examples.d diff --git a/deps/rabbitmq_web_stomp_examples/src/rabbit_web_stomp_examples_app.erl b/deps/rabbitmq_web_stomp_examples/src/rabbit_web_stomp_examples_app.erl index 27110b75418c..bc6745a4835f 100644 --- a/deps/rabbitmq_web_stomp_examples/src/rabbit_web_stomp_examples_app.erl +++ b/deps/rabbitmq_web_stomp_examples/src/rabbit_web_stomp_examples_app.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_web_stomp_examples_app). diff --git a/deps/trust_store_http/.gitignore b/deps/trust_store_http/.gitignore deleted file mode 100644 index 7483b2429183..000000000000 --- a/deps/trust_store_http/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -*~ -.sw? -.*.sw? -*.beam -*.coverdata -/.erlang.mk/ -/cover/ -/deps/ -/doc/ -/ebin/ -/escript/ -/git-revisions.txt -/logs/ -/plugins/ -/rebar.config -/rebar.lock -/_rel/ -/sbin/ -/test/ct.cover.spec -/xrefr - -/trust_store_http.d diff --git a/deps/trust_store_http/BUILD.bazel b/deps/trust_store_http/BUILD.bazel index 25b784b68995..735f709cede4 100644 --- a/deps/trust_store_http/BUILD.bazel +++ b/deps/trust_store_http/BUILD.bazel @@ -33,7 +33,7 @@ rabbitmq_app( app_description = "Trust store HTTP server", app_module = "trust_store_http_app", app_name = "trust_store_http", - app_version = "1.0.0", + app_version = "4.0.0", beam_files = [":beam_files"], extra_apps = ["ssl"], license_files = [":license_files"], diff --git a/erlang.mk b/erlang.mk index 84e0d33fc8bf..1d2e3be2a9c4 100644 --- a/erlang.mk +++ b/erlang.mk @@ -17,7 +17,7 @@ ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST))) export ERLANG_MK_FILENAME -ERLANG_MK_VERSION = 04c473a +ERLANG_MK_VERSION = 2022.05.31-72-gb8a27ab-dirty ERLANG_MK_WITHOUT = # Make 3.81 and 3.82 are deprecated. @@ -184,8 +184,9 @@ core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$ core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1))) -# We skip files that contain spaces or '#' because they end up causing issues. -core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) -not -name "*[ \#]*")) +# We skip files that contain spaces because they end up causing issues. +# Files that begin with a dot are already ignored by the wildcard function. 
+core_find = $(foreach f,$(wildcard $(1:%/=%)/*),$(if $(wildcard $f/.),$(call core_find,$f,$2),$(if $(filter $(subst *,%,$2),$f),$(if $(wildcard $f),$f)))) core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1))))))))))))))))))))))))))) @@ -800,7 +801,7 @@ pkg_cuttlefish_description = cuttlefish configuration abstraction pkg_cuttlefish_homepage = https://github.com/Kyorai/cuttlefish pkg_cuttlefish_fetch = git pkg_cuttlefish_repo = https://github.com/Kyorai/cuttlefish -pkg_cuttlefish_commit = master +pkg_cuttlefish_commit = main PACKAGES += damocles pkg_damocles_name = damocles @@ -3564,8 +3565,10 @@ export DEPS_DIR REBAR_DEPS_DIR = $(DEPS_DIR) export REBAR_DEPS_DIR +# When testing Erlang.mk and updating these, make sure +# to delete test/test_rebar_git before running tests again. REBAR3_GIT ?= https://github.com/erlang/rebar3 -REBAR3_COMMIT ?= 3f563feaf1091a1980241adefa83a32dd2eebf7c # 3.20.0 +REBAR3_COMMIT ?= bde4b54248d16280b2c70a244aca3bb7566e2033 # 3.23.0 CACHE_DEPS ?= 0 @@ -4018,7 +4021,7 @@ define dep_autopatch_rebar.erl false -> ok; {_, Files0} -> Files = [begin - hd(filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/**/" ++ filename:rootname(F) ++ ".*rl"))) + hd(filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)**/" ++ filename:rootname(F) ++ ".*rl")) end || "src/" ++ F <- Files0], Names = [[" ", case lists:reverse(F) of "lre." ++ Elif -> lists:reverse(Elif); @@ -4147,8 +4150,8 @@ define dep_autopatch_rebar.erl "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(LDLIBS) $$\(EXE_LDFLAGS)", case {filename:extension(Output), $(PLATFORM)} of {[], _} -> "\n"; - {".so", darwin} -> "-shared\n"; - {".dylib", darwin} -> "-shared\n"; + {".so", darwin} -> " -shared\n"; + {".dylib", darwin} -> " -shared\n"; {_, darwin} -> "\n"; _ -> " -shared\n" end]) @@ -4439,6 +4442,49 @@ ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log +# Copyright (c) 2024, Loïc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. 
+ +.PHONY: beam-cache-restore-app beam-cache-restore-test clean-beam-cache distclean-beam-cache + +BEAM_CACHE_DIR ?= $(ERLANG_MK_TMP)/beam-cache +PROJECT_BEAM_CACHE_DIR = $(BEAM_CACHE_DIR)/$(PROJECT) + +clean:: clean-beam-cache + +clean-beam-cache: + $(verbose) rm -rf $(PROJECT_BEAM_CACHE_DIR) + +distclean:: distclean-beam-cache + +$(PROJECT_BEAM_CACHE_DIR): + $(verbose) mkdir -p $(PROJECT_BEAM_CACHE_DIR) + +distclean-beam-cache: + $(gen_verbose) rm -rf $(BEAM_CACHE_DIR) + +beam-cache-restore-app: | $(PROJECT_BEAM_CACHE_DIR) + $(verbose) rm -rf $(PROJECT_BEAM_CACHE_DIR)/ebin-test +ifneq ($(wildcard ebin/),) + $(verbose) mv ebin/ $(PROJECT_BEAM_CACHE_DIR)/ebin-test +endif +ifneq ($(wildcard $(PROJECT_BEAM_CACHE_DIR)/ebin-app),) + $(gen_verbose) mv $(PROJECT_BEAM_CACHE_DIR)/ebin-app ebin/ +else + $(verbose) $(MAKE) --no-print-directory clean-app +endif + +beam-cache-restore-test: | $(PROJECT_BEAM_CACHE_DIR) + $(verbose) rm -rf $(PROJECT_BEAM_CACHE_DIR)/ebin-app +ifneq ($(wildcard ebin/),) + $(verbose) mv ebin/ $(PROJECT_BEAM_CACHE_DIR)/ebin-app +endif +ifneq ($(wildcard $(PROJECT_BEAM_CACHE_DIR)/ebin-test),) + $(gen_verbose) mv $(PROJECT_BEAM_CACHE_DIR)/ebin-test ebin/ +else + $(verbose) $(MAKE) --no-print-directory clean-app +endif + # Copyright (c) 2013-2016, Loïc Hoguin # This file is part of erlang.mk and subject to the terms of the ISC License. @@ -4494,7 +4540,7 @@ ifneq ($(wildcard src/),) # Targets. -app:: $(if $(wildcard ebin/test),clean) deps +app:: $(if $(wildcard ebin/test),beam-cache-restore-app) deps $(verbose) $(MAKE) --no-print-directory $(PROJECT).d $(verbose) $(MAKE) --no-print-directory app-build @@ -4665,7 +4711,6 @@ define makedep.erl end, MakeDepend = fun (F, Fd, Mod, StartLocation) -> - {ok, Filename} = file:pid2name(Fd), case io:parse_erl_form(Fd, undefined, StartLocation) of {ok, AbsData, EndLocation} -> case AbsData of @@ -4882,14 +4927,17 @@ define compile_test_erl endef ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl) + $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST) - $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?)) +# When we have to recompile files in src/ the .d file always gets rebuilt. +# Therefore we want to ignore it when rebuilding test files. + $(eval FILES_TO_COMPILE := $(if $(filter $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)),$?),$(filter $(ERL_TEST_FILES),$^),$(filter $(ERL_TEST_FILES),$?))) $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@) endif test-build:: IS_TEST=1 test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS) -test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps) +test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,beam-cache-restore-test)) $(if $(IS_APP),,deps test-deps) # We already compiled everything when IS_APP=1. 
ifndef IS_APP ifneq ($(wildcard src),) @@ -5493,7 +5541,7 @@ endif $(verbose) mkdir config/ $(verbose) $(call core_render,bs_sys_config,config/sys.config) $(verbose) $(call core_render,bs_vm_args,config/vm.args) - $(verbose) awk '/^include erlang.mk/ && !ins {print "BUILD_DEPS += relx";ins=1};{print}' Makefile > Makefile.bak + $(verbose) awk '/^include erlang.mk/ && !ins {print "REL_DEPS += relx";ins=1};{print}' Makefile > Makefile.bak $(verbose) mv Makefile.bak Makefile new-app: @@ -5838,7 +5886,7 @@ else ci:: $(addprefix ci-,$(CI_OTP)) -ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP)) +ci-prepare: $(addprefix ci-prepare-,$(CI_OTP)) ci-setup:: $(verbose) : @@ -5850,7 +5898,10 @@ ci_verbose_0 = @echo " CI " $(1); ci_verbose = $(ci_verbose_$(V)) define ci_target -ci-$1: $(KERL_INSTALL_DIR)/$2 +ci-prepare-$1: $(KERL_INSTALL_DIR)/$2 + $(verbose) : + +ci-$1: ci-prepare-$1 $(verbose) $(MAKE) --no-print-directory clean $(ci_verbose) \ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \ diff --git a/erlang_ls.config b/erlang_ls.config index 31f041feaf6d..377bef1e6f67 100644 --- a/erlang_ls.config +++ b/erlang_ls.config @@ -17,6 +17,7 @@ diagnostics: include_dirs: - "deps" - "deps/*/include" + - "deps/*/" - "extra_deps" - "extra_deps/*/include" lenses: diff --git a/mk/topic-branches.mk b/mk/topic-branches.mk deleted file mode 100644 index fb279eb3c252..000000000000 --- a/mk/topic-branches.mk +++ /dev/null @@ -1,48 +0,0 @@ -SCRATCH := $(CURDIR)/topic-branch-scratch - -GIT_FILTER_REPO := $(CURDIR)/bin/git-filter-repo - -$(GIT_FILTER_REPO): - mkdir -p $(TMPDIR) \ - && cd $(TMPDIR) \ - && curl -LO https://github.com/newren/git-filter-repo/releases/download/v2.28.0/git-filter-repo-2.28.0.tar.xz \ - && tar -xJf git-filter-repo-*.tar.xz \ - && mkdir -p $(CURDIR)/bin \ - && cp git-filter-repo-*/git-filter-repo $(GIT_FILTER_REPO) \ - && chmod +x $(GIT_FILTER_REPO) - -.PHONY: clean-state -clean-state: - @git diff-index --quiet HEAD -- \ - || (echo "Cannot proceed with uncommitted changes"; exit 1) - -PARENT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) - -define fetch_topic_branch -echo "Collecting commits from $(1)/$(2)..." 
\ -&& git clone --quiet git@github.com:rabbitmq/$(call rmq_cmp_repo_name,$(1)).git $(SCRATCH)/$(2)/repo-$(1) \ -&& cd $(SCRATCH)/$(2)/repo-$(1) \ -&& $(GIT_FILTER_REPO) --quiet --to-subdirectory-filter deps/$(1) \ -&& git checkout $(2) \ -&& git format-patch $(PARENT_BRANCH) \ -&& mkdir -p $(SCRATCH)/$(2)/$(1) \ -&& cp *.patch $(SCRATCH)/$(2)/$(1) \ -|| printf "Topic branch $(2) does not appear to exist in $(1).\n\n"; -endef - -define rebase_topic_branch -git am --reject $(sort $(wildcard $(1)/*.patch)); -endef - -fetch-topic-branch-%: $(GIT_FILTER_REPO) - $(eval TOPIC_BRANCH := $(subst fetch-topic-branch-,,$@)) - mkdir -p $(SCRATCH)/$(TOPIC_BRANCH) - @$(foreach dep,$(VENDORED_COMPONENTS),$(call fetch_topic_branch,$(dep),$(TOPIC_BRANCH))) - rm -rf $(SCRATCH)/$(TOPIC_BRANCH)/repo-* - -topic-branch-%: $(GIT_FILTER_REPO) clean-state - $(eval TOPIC_BRANCH := $(subst topic-branch-,,$@)) - ls $(SCRATCH)/$(TOPIC_BRANCH) \ - || (echo "Fetch the branch first with 'make fetch-$@')"; exit 1) - git checkout -b $(TOPIC_BRANCH) - @$(foreach dir,$(wildcard $(SCRATCH)/$(TOPIC_BRANCH)/*),$(call rebase_topic_branch,$(dir))) diff --git a/moduleindex.yaml b/moduleindex.yaml index 52c85ffcfd89..f6e7ba55babd 100755 --- a/moduleindex.yaml +++ b/moduleindex.yaml @@ -33,7 +33,6 @@ amqp10_client: - amqp10_client_app - amqp10_client_connection - amqp10_client_connection_sup -- amqp10_client_connections_sup - amqp10_client_frame_reader - amqp10_client_session - amqp10_client_sessions_sup @@ -45,8 +44,8 @@ amqp10_common: - amqp10_binary_parser - amqp10_framing - amqp10_framing0 -app: -- appup_src +- amqp10_util +- serial_number aten: - aten - aten_app @@ -57,15 +56,6 @@ aten: - aten_sup base64url: - base64url -bazdep: -- bazdep -certifi: -- certifi -- certifi_pt -codepath: -- codepath -cover: -- foo cowboy: - cowboy - cowboy_app @@ -75,6 +65,7 @@ cowboy: - cowboy_clock - cowboy_compress_h - cowboy_constraints +- cowboy_decompress_h - cowboy_handler - cowboy_http - cowboy_http2 @@ -122,8 +113,6 @@ credentials_obfuscation: ct_helper: - ct_helper - ct_helper_error_h -cth_styledout: -- cth_styledout cuttlefish: - conf_parse - cuttlefish @@ -148,40 +137,36 @@ cuttlefish: - cuttlefish_validator - cuttlefish_variable - cuttlefish_vmargs -dummy: -- dummy_app -- dummy_server -- dummy_sup eetcd: -- eetcd_auth_gen -- eetcd_cluster_gen -- eetcd_election_gen -- eetcd_health_gen -- eetcd_kv_gen -- eetcd_lease_gen -- eetcd_lock_gen -- eetcd_maintenance_gen -- eetcd_watch_gen +- auth_pb - eetcd - eetcd_app - eetcd_auth +- eetcd_auth_gen - eetcd_cluster +- eetcd_cluster_gen - eetcd_compare - eetcd_conn - eetcd_conn_sup - eetcd_data_coercion - eetcd_election +- eetcd_election_gen - eetcd_grpc +- eetcd_health_gen - eetcd_kv +- eetcd_kv_gen - eetcd_lease +- eetcd_lease_gen - eetcd_lease_sup - eetcd_lock +- eetcd_lock_gen - eetcd_maintenance +- eetcd_maintenance_gen - eetcd_op - eetcd_stream - eetcd_sup - eetcd_watch -- auth_pb +- eetcd_watch_gen - gogo_pb - health_pb - kv_pb @@ -200,30 +185,13 @@ emqtt: - emqtt_ws enough: - enough -erlc: -- first_erl -- foo -- foo_app -- foo_test_worker -- foo_worker -eunit: -- foo -eunit_surefire: -- foo -foo: -- java -- lisp -- pascal -- perl -foodep: -- foodep +eunit_formatters: +- binomial_heap +- eunit_progress gen_batch_server: - gen_batch_server getopt: - getopt -gpb: -- gpb -- gpb_compile gun: - gun - gun_app @@ -237,45 +205,12 @@ gun: - gun_tls - gun_ws - gun_ws_h -hackney: -- hackney -- hackney_app -- hackney_bstr -- hackney_connect -- hackney_connection -- hackney_connections -- hackney_cookie 
-- hackney_date -- hackney_headers -- hackney_headers_new -- hackney_http -- hackney_http_connect -- hackney_local_tcp -- hackney_manager -- hackney_metrics -- hackney_multipart -- hackney_pool -- hackney_pool_handler -- hackney_request -- hackney_response -- hackney_socks5 -- hackney_ssl -- hackney_ssl_certificate -- hackney_stream -- hackney_sup -- hackney_tcp -- hackney_trace -- hackney_url -- hackney_util -idna: -- idna -- idna_bidi -- idna_context -- idna_data -- idna_mapping -- idna_table -- idna_ucs -- punycode +horus: +- horus +- horus_cover +- horus_utils +host_triple: +- host_triple inet_tcp_proxy_dist: - inet_tcp_proxy_dist - inet_tcp_proxy_dist_app @@ -283,11 +218,10 @@ inet_tcp_proxy_dist: - inet_tcp_proxy_dist_controller - inet_tcp_proxy_dist_sup jose: -- jose_base -- jose_base64 -- jose_base64url - jose - jose_app +- jose_base64 +- jose_base64url - jose_block_encryptor - jose_chacha20_poly1305 - jose_chacha20_poly1305_crypto @@ -295,23 +229,16 @@ jose: - jose_chacha20_poly1305_unsupported - jose_crypto_compat - jose_curve25519 +- jose_curve25519_crypto +- jose_curve25519_fallback - jose_curve25519_libdecaf - jose_curve25519_libsodium - jose_curve25519_unsupported - jose_curve448 +- jose_curve448_crypto +- jose_curve448_fallback - jose_curve448_libdecaf - jose_curve448_unsupported -- jose_public_key -- jose_server -- jose_sha3 -- jose_sha3_keccakf1600_driver -- jose_sha3_keccakf1600_nif -- jose_sha3_libdecaf -- jose_sha3_unsupported -- jose_sup -- jose_xchacha20_poly1305 -- jose_xchacha20_poly1305_crypto -- jose_xchacha20_poly1305_unsupported - jose_json - jose_json_jason - jose_json_jiffy @@ -354,6 +281,7 @@ jose: - jose_jwe_alg_dir - jose_jwe_alg_ecdh_1pu - jose_jwe_alg_ecdh_es +- jose_jwe_alg_ecdh_ss - jose_jwe_alg_pbes2 - jose_jwe_alg_rsa - jose_jwe_alg_xc20p_kw @@ -390,6 +318,58 @@ jose: - jose_jws_alg_rsa_pkcs1_v1_5 - jose_jws_alg_rsa_pss - jose_jwt +- jose_public_key +- jose_server +- jose_sha3 +- jose_sha3_keccakf1600_driver +- jose_sha3_keccakf1600_nif +- jose_sha3_libdecaf +- jose_sha3_unsupported +- jose_sup +- jose_xchacha20_poly1305 +- jose_xchacha20_poly1305_crypto +- jose_xchacha20_poly1305_libsodium +- jose_xchacha20_poly1305_unsupported +katana_code: +- ktn_code +- ktn_dodger +- ktn_io_string +khepri: +- khepri +- khepri_adv +- khepri_app +- khepri_cluster +- khepri_condition +- khepri_event_handler +- khepri_evf +- khepri_export_erlang +- khepri_import_export +- khepri_machine +- khepri_machine_v0 +- khepri_path +- khepri_pattern_tree +- khepri_payload +- khepri_projection +- khepri_sproc +- khepri_sup +- khepri_tree +- khepri_tx +- khepri_tx_adv +- khepri_utils +khepri_mnesia_migration: +- khepri_mnesia_migration_app +- khepri_mnesia_migration_sup +- kmm_utils +- m2k_cluster_sync +- m2k_cluster_sync_sup +- m2k_export +- m2k_subscriber +- m2k_table_copy +- m2k_table_copy_sup +- m2k_table_copy_sup_sup +- mnesia_to_khepri +- mnesia_to_khepri_converter +- mnesia_to_khepri_example_converter meck: - meck - meck_args_matcher @@ -402,18 +382,11 @@ meck: - meck_proc - meck_ret_spec - meck_util -metrics: -- metrics -- metrics_dummy -- metrics_exometer -- metrics_folsom -mimerl: -- mimerl my_plugin: - my_plugin -neotoma: -- neotoma -- neotoma_parse +oauth2_client: +- jwt_helper +- oauth2_client observer_cli: - observer_cli - observer_cli_application @@ -432,6 +405,7 @@ osiris: - osiris - osiris_app - osiris_bench +- osiris_bloom - osiris_counters - osiris_ets - osiris_log @@ -445,44 +419,37 @@ osiris: - osiris_tracking - osiris_util - osiris_writer -parse_trans: -- 
ct_expand -- exprecs -- parse_trans -- parse_trans_codegen -- parse_trans_mod -- parse_trans_pp prometheus: -- prometheus_mnesia_collector -- prometheus_vm_dist_collector -- prometheus_vm_memory_collector -- prometheus_vm_msacc_collector -- prometheus_vm_statistics_collector -- prometheus_vm_system_info_collector -- prometheus_http -- prometheus_mnesia -- prometheus_test_instrumenter -- prometheus_protobuf_format -- prometheus_text_format -- prometheus_boolean -- prometheus_counter -- prometheus_gauge -- prometheus_histogram -- prometheus_quantile_summary -- prometheus_summary -- prometheus_model -- prometheus_model_helpers - prometheus +- prometheus_boolean - prometheus_buckets - prometheus_collector +- prometheus_counter - prometheus_format +- prometheus_gauge +- prometheus_histogram +- prometheus_http - prometheus_instrumenter - prometheus_metric - prometheus_metric_spec - prometheus_misc +- prometheus_mnesia +- prometheus_mnesia_collector +- prometheus_model +- prometheus_model_helpers +- prometheus_protobuf_format +- prometheus_quantile_summary - prometheus_registry +- prometheus_summary - prometheus_sup +- prometheus_test_instrumenter +- prometheus_text_format - prometheus_time +- prometheus_vm_dist_collector +- prometheus_vm_memory_collector +- prometheus_vm_msacc_collector +- prometheus_vm_statistics_collector +- prometheus_vm_system_info_collector proper: - proper - proper_arith @@ -510,29 +477,20 @@ proper: - proper_unicode - proper_unused_imports_remover - vararg -proto_gpb: -- foo -- foo_app -- foo_sup -proto_protobuffs: -- foo -- foo_app -- foo_sup -protobuffs: -- protobuffs -- protobuffs_compile quantile_estimator: - quantile - quantile_estimator ra: - ra - ra_app +- ra_aux - ra_bench - ra_counters - ra_dbg - ra_directory - ra_env - ra_ets_queue +- ra_file - ra_file_handle - ra_flru - ra_leaderboard @@ -561,6 +519,7 @@ ra: - ra_snapshot - ra_sup - ra_system +- ra_system_recover - ra_system_sup - ra_systems_sup rabbit: @@ -583,6 +542,13 @@ rabbit: - rabbit - rabbit_access_control - rabbit_alarm +- rabbit_amqp1_0 +- rabbit_amqp_management +- rabbit_amqp_reader +- rabbit_amqp_session +- rabbit_amqp_session_sup +- rabbit_amqp_util +- rabbit_amqp_writer - rabbit_amqqueue - rabbit_amqqueue_control - rabbit_amqqueue_process @@ -590,6 +556,7 @@ rabbit: - rabbit_amqqueue_sup_sup - rabbit_auth_backend_internal - rabbit_auth_mechanism_amqplain +- rabbit_auth_mechanism_anonymous - rabbit_auth_mechanism_cr_demo - rabbit_auth_mechanism_plain - rabbit_autoheal @@ -624,22 +591,32 @@ rabbit: - rabbit_cuttlefish - rabbit_db - rabbit_db_binding +- rabbit_db_binding_m2k_converter - rabbit_db_cluster - rabbit_db_exchange +- rabbit_db_exchange_m2k_converter +- rabbit_db_m2k_converter - rabbit_db_maintenance +- rabbit_db_maintenance_m2k_converter - rabbit_db_msup +- rabbit_db_msup_m2k_converter - rabbit_db_policy - rabbit_db_queue +- rabbit_db_queue_m2k_converter - rabbit_db_rtparams +- rabbit_db_rtparams_m2k_converter - rabbit_db_topic_exchange - rabbit_db_user +- rabbit_db_user_m2k_converter - rabbit_db_vhost - rabbit_db_vhost_defaults +- rabbit_db_vhost_m2k_converter - rabbit_dead_letter - rabbit_definitions - rabbit_definitions_hashing - rabbit_definitions_import_https - rabbit_definitions_import_local_filesystem +- rabbit_depr_ff_extra - rabbit_deprecated_features - rabbit_diagnostics - rabbit_direct @@ -655,6 +632,7 @@ rabbit: - rabbit_exchange_type_fanout - rabbit_exchange_type_headers - rabbit_exchange_type_invalid +- rabbit_exchange_type_local_random - rabbit_exchange_type_topic - 
rabbit_feature_flags - rabbit_ff_controller @@ -670,12 +648,15 @@ rabbit: - rabbit_fifo_dlx_sup - rabbit_fifo_dlx_worker - rabbit_fifo_index +- rabbit_fifo_q - rabbit_fifo_v0 - rabbit_fifo_v1 +- rabbit_fifo_v3 - rabbit_file - rabbit_global_counters - rabbit_guid - rabbit_health_check +- rabbit_khepri - rabbit_limiter - rabbit_log_channel - rabbit_log_connection @@ -684,26 +665,12 @@ rabbit: - rabbit_log_queue - rabbit_log_tail - rabbit_logger_exchange_h -- rabbit_looking_glass - rabbit_maintenance -- rabbit_memory_monitor - rabbit_message_interceptor - rabbit_metrics -- rabbit_mirror_queue_coordinator -- rabbit_mirror_queue_master - rabbit_mirror_queue_misc -- rabbit_mirror_queue_mode -- rabbit_mirror_queue_mode_all -- rabbit_mirror_queue_mode_exactly -- rabbit_mirror_queue_mode_nodes -- rabbit_mirror_queue_slave -- rabbit_mirror_queue_sync - rabbit_mnesia -- rabbit_mnesia_rename -- rabbit_msg_file -- rabbit_msg_record - rabbit_msg_store -- rabbit_msg_store_ets_index - rabbit_msg_store_gc - rabbit_networking - rabbit_networking_store @@ -725,19 +692,12 @@ rabbit: - rabbit_prelaunch_enabled_plugins_file - rabbit_prelaunch_feature_flags - rabbit_prelaunch_logging -- rabbit_prequeue - rabbit_priority_queue - rabbit_process - rabbit_queue_consumers - rabbit_queue_decorator - rabbit_queue_index - rabbit_queue_location -- rabbit_queue_location_client_local -- rabbit_queue_location_min_masters -- rabbit_queue_location_random -- rabbit_queue_location_validator -- rabbit_queue_master_location_misc -- rabbit_queue_master_locator - rabbit_queue_type - rabbit_queue_type_util - rabbit_quorum_memory_manager @@ -764,6 +724,7 @@ rabbit: - rabbit_tracking - rabbit_tracking_store - rabbit_upgrade_preparation +- rabbit_uri - rabbit_variable_queue - rabbit_version - rabbit_vhost @@ -773,6 +734,7 @@ rabbit: - rabbit_vhost_sup - rabbit_vhost_sup_sup - rabbit_vhost_sup_wrapper +- rabbit_vhosts - rabbit_vm - supervised_lifecycle - tcp_listener @@ -786,7 +748,6 @@ rabbit_common: - delegate - delegate_sup - file_handle_cache -- file_handle_cache_stats - gen_server2 - mirrored_supervisor_locks - mnesia_sync @@ -819,7 +780,6 @@ rabbit_common: - rabbit_json - rabbit_log - rabbit_misc -- rabbit_msg_store_index - rabbit_net - rabbit_nodes_common - rabbit_numerical @@ -835,6 +795,7 @@ rabbit_common: - rabbit_registry - rabbit_registry_class - rabbit_resource_monitor_misc +- rabbit_routing_parser - rabbit_runtime - rabbit_runtime_parameter - rabbit_semver @@ -847,21 +808,11 @@ rabbit_common: - worker_pool - worker_pool_sup - worker_pool_worker +rabbitmq_amqp_client: +- rabbitmq_amqp_address +- rabbitmq_amqp_client rabbitmq_amqp1_0: -- Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand -- rabbit_amqp1_0 -- rabbit_amqp1_0_channel -- rabbit_amqp1_0_incoming_link -- rabbit_amqp1_0_link_util -- rabbit_amqp1_0_message -- rabbit_amqp1_0_outgoing_link -- rabbit_amqp1_0_reader -- rabbit_amqp1_0_session -- rabbit_amqp1_0_session_process -- rabbit_amqp1_0_session_sup -- rabbit_amqp1_0_session_sup_sup -- rabbit_amqp1_0_util -- rabbit_amqp1_0_writer +- rabbitmq_amqp1_0_noop rabbitmq_auth_backend_cache: - rabbit_auth_backend_cache - rabbit_auth_backend_cache_app @@ -879,9 +830,11 @@ rabbitmq_auth_backend_ldap: - rabbit_auth_backend_ldap_util - rabbit_log_ldap rabbitmq_auth_backend_oauth2: +- Elixir.RabbitMQ.CLI.Ctl.Commands.AddSigningKeyCommand - Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand - rabbit_auth_backend_oauth2 - rabbit_auth_backend_oauth2_app +- rabbit_oauth2_config - rabbit_oauth2_scope - uaa_jwks - uaa_jwt @@ 
-903,6 +856,7 @@ rabbitmq_aws: rabbitmq_consistent_hash_exchange: - Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand - rabbit_db_ch_exchange +- rabbit_db_ch_exchange_m2k_converter - rabbit_exchange_type_consistent_hash rabbitmq_ct_client_helpers: - rabbit_ct_client_helpers @@ -942,8 +896,13 @@ rabbitmq_federation: - rabbit_log_federation rabbitmq_federation_management: - rabbit_federation_mgmt +rabbitmq_federation_prometheus: +- rabbit_federation_prometheus_app +- rabbit_federation_prometheus_collector +- rabbit_federation_prometheus_sup rabbitmq_jms_topic_exchange: - rabbit_db_jms_exchange +- rabbit_db_jms_exchange_m2k_converter - rabbit_jms_topic_exchange - sjx_evaluator rabbitmq_management: @@ -960,6 +919,7 @@ rabbitmq_management: - rabbit_mgmt_hsts - rabbit_mgmt_load_definitions - rabbit_mgmt_login +- rabbit_mgmt_nodes - rabbit_mgmt_oauth_bootstrap - rabbit_mgmt_reset_handler - rabbit_mgmt_stats @@ -982,6 +942,7 @@ rabbitmq_management: - rabbit_mgmt_wm_connections_vhost - rabbit_mgmt_wm_consumers - rabbit_mgmt_wm_definitions +- rabbit_mgmt_wm_deprecated_features - rabbit_mgmt_wm_environment - rabbit_mgmt_wm_exchange - rabbit_mgmt_wm_exchange_publish @@ -995,7 +956,6 @@ rabbitmq_management: - rabbit_mgmt_wm_health_check_alarms - rabbit_mgmt_wm_health_check_certificate_expiration - rabbit_mgmt_wm_health_check_local_alarms -- rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical - rabbit_mgmt_wm_health_check_node_is_quorum_critical - rabbit_mgmt_wm_health_check_port_listener - rabbit_mgmt_wm_health_check_protocol_listener @@ -1063,14 +1023,9 @@ rabbitmq_management_agent: - rabbit_mgmt_metrics_gc - rabbit_mgmt_storage rabbitmq_mqtt: -- Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand - Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand - mc_mqtt -- mqtt_machine -- mqtt_machine_v0 -- mqtt_node - rabbit_mqtt -- rabbit_mqtt_collector - rabbit_mqtt_confirms - rabbit_mqtt_ff - rabbit_mqtt_internal_event_handler @@ -1138,6 +1093,7 @@ rabbitmq_prometheus: - prometheus_process_collector - prometheus_rabbitmq_alarm_metrics_collector - prometheus_rabbitmq_core_metrics_collector +- prometheus_rabbitmq_dynamic_collector - prometheus_rabbitmq_global_metrics_collector - rabbit_prometheus_app - rabbit_prometheus_dispatcher @@ -1146,6 +1102,7 @@ rabbitmq_random_exchange: - rabbit_exchange_type_random rabbitmq_recent_history_exchange: - rabbit_db_rh_exchange +- rabbit_db_rh_exchange_m2k_converter - rabbit_exchange_type_recent_history rabbitmq_sharding: - rabbit_sharding_exchange_decorator @@ -1174,8 +1131,13 @@ rabbitmq_shovel: - rabbit_shovel_worker - rabbit_shovel_worker_sup rabbitmq_shovel_management: -- rabbit_shovel_mgmt +- rabbit_shovel_mgmt_shovel +- rabbit_shovel_mgmt_shovels - rabbit_shovel_mgmt_util +rabbitmq_shovel_prometheus: +- rabbit_shovel_prometheus_app +- rabbit_shovel_prometheus_collector +- rabbit_shovel_prometheus_sup rabbitmq_stomp: - Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand - rabbit_stomp @@ -1195,6 +1157,7 @@ rabbitmq_stream: - Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConsumersCommand - Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamGroupConsumersCommand - Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamPublishersCommand +- Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamTrackingCommand - rabbit_stream - rabbit_stream_connection_sup - rabbit_stream_manager @@ -1215,6 +1178,7 @@ rabbitmq_stream_management: - rabbit_stream_management_utils - rabbit_stream_mgmt_db - rabbit_stream_publishers_mgmt +- 
rabbit_stream_tracking_mgmt rabbitmq_top: - rabbit_top_app - rabbit_top_extension @@ -1258,6 +1222,7 @@ rabbitmq_web_dispatch: - webmachine_log - webmachine_log_handler rabbitmq_web_mqtt: +- Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand - rabbit_web_mqtt_app - rabbit_web_mqtt_handler - rabbit_web_mqtt_stream_handler @@ -1292,51 +1257,15 @@ ranch: - ranch_sup - ranch_tcp - ranch_transport -rebar: -- rebar -- rebar_abnfc_compiler -- rebar_app_utils -- rebar_appups -- rebar_asn1_compiler -- rebar_base_compiler -- rebar_cleaner -- rebar_config -- rebar_core -- rebar_cover_utils -- rebar_ct -- rebar_deps -- rebar_dia_compiler -- rebar_dialyzer -- rebar_edoc -- rebar_erlc_compiler -- rebar_erlydtl_compiler -- rebar_escripter -- rebar_eunit -- rebar_file_utils -- rebar_getopt -- rebar_lfe_compiler -- rebar_log -- rebar_metacmds -- rebar_mustache -- rebar_neotoma_compiler -- rebar_otp_app -- rebar_otp_appup -- rebar_port_compiler -- rebar_proto_compiler -- rebar_proto_gpb_compiler -- rebar_protobuffs_compiler -- rebar_qc -- rebar_rand_compat -- rebar_rel_utils -- rebar_reltool -- rebar_require_vsn -- rebar_shell -- rebar_subdirs -- rebar_templater -- rebar_upgrade -- rebar_utils -- rebar_xref -- rmemo +rebar3_format: +- default_formatter +- erlfmt_formatter +- otp_formatter +- rebar3_ast_formatter +- rebar3_format +- rebar3_format_prv +- rebar3_formatter +- sr_formatter recon: - recon - recon_alloc @@ -1356,14 +1285,6 @@ seshat: - seshat_app - seshat_counters_server - seshat_sup -ssl_verify_fun: -- ssl_verify_fingerprint -- ssl_verify_fun_cert_helpers -- ssl_verify_fun_encodings -- ssl_verify_hostname -- ssl_verify_pk -- ssl_verify_string -- ssl_verify_util stdout_formatter: - stdout_formatter - stdout_formatter_paragraph @@ -1404,8 +1325,3 @@ trust_store_http: - trust_store_http_sup - trust_store_invalid_handler - trust_store_list_handler -unicode_util_compat: -- string_compat -- unicode_util_compat -unuseddep: -- unuseddep diff --git a/packaging/base-image/Dockerfile b/packaging/base-image/Dockerfile deleted file mode 100644 index 13aad639ada2..000000000000 --- a/packaging/base-image/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -# The official Canonical Ubuntu Bionic image is ideal from a security perspective, -# especially for the enterprises that we, the RabbitMQ team, have to deal with -FROM ubuntu:20.04 - -RUN set -eux; \ - apt-get update; \ - apt-get install -y lsb-release ubuntu-dbgsym-keyring; \ - echo "deb http://ddebs.ubuntu.com $(lsb_release -cs) main restricted universe multiverse" > /etc/apt/sources.list.d/ddebs.list; \ - echo "deb http://ddebs.ubuntu.com $(lsb_release -cs)-updates main restricted universe multiverse" >> /etc/apt/sources.list.d/ddebs.list; \ - echo "deb http://ddebs.ubuntu.com $(lsb_release -cs)-proposed main restricted universe multiverse" >> /etc/apt/sources.list.d/ddebs.list; \ - apt-get update; \ - apt-get install -y --no-install-recommends \ - libc6-dbg \ - libtinfo6-dbgsym diff --git a/packaging/common/LICENSE.tail b/packaging/common/LICENSE.tail index 2c888d2ed714..c9a466cc16e1 100644 --- a/packaging/common/LICENSE.tail +++ b/packaging/common/LICENSE.tail @@ -421,4 +421,4 @@ file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. -Copyright (c) 2007-2020 VMware, Inc. or its affiliates. +Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
diff --git a/packaging/docker-image/.dockerignore b/packaging/docker-image/.dockerignore new file mode 100644 index 000000000000..ab874d7224d8 --- /dev/null +++ b/packaging/docker-image/.dockerignore @@ -0,0 +1,3 @@ +test_configs +BUILD.bazel +Makefile diff --git a/packaging/docker-image/.gitignore b/packaging/docker-image/.gitignore new file mode 100644 index 000000000000..65b88cadab90 --- /dev/null +++ b/packaging/docker-image/.gitignore @@ -0,0 +1 @@ +package-generic-unix.tar.xz diff --git a/packaging/docker-image/10-default-guest-user.conf b/packaging/docker-image/10-defaults.conf similarity index 78% rename from packaging/docker-image/10-default-guest-user.conf rename to packaging/docker-image/10-defaults.conf index 3d905739f34b..e221e4b78b36 100644 --- a/packaging/docker-image/10-default-guest-user.conf +++ b/packaging/docker-image/10-defaults.conf @@ -6,3 +6,7 @@ ## https://www.rabbitmq.com/access-control.html#loopback-users ## https://www.rabbitmq.com/production-checklist.html#users loopback_users.guest = false + +## Send all logs to stdout/TTY. Necessary to see logs when running via +## a container +log.console = true \ No newline at end of file diff --git a/packaging/docker-image/20-management_agent.disable_metrics_collector.conf b/packaging/docker-image/20-management_agent.disable_metrics_collector.conf new file mode 100644 index 000000000000..6eb7a86cbf8b --- /dev/null +++ b/packaging/docker-image/20-management_agent.disable_metrics_collector.conf @@ -0,0 +1,2 @@ +# Enable Prometheus-style metrics by default (https://github.com/docker-library/rabbitmq/issues/419) +management_agent.disable_metrics_collector = true diff --git a/packaging/docker-image/BUILD.bazel b/packaging/docker-image/BUILD.bazel index d2ab14ea2b43..2828f8a8e2ea 100644 --- a/packaging/docker-image/BUILD.bazel +++ b/packaging/docker-image/BUILD.bazel @@ -1,303 +1,151 @@ +load("@bazel_skylib//rules:write_file.bzl", "write_file") +load("@container_structure_test//:defs.bzl", "container_structure_test") load( - "@io_bazel_rules_docker//container:container.bzl", - "container_image", - "container_layer", -) -load( - "@io_bazel_rules_docker//contrib:test.bzl", - "container_test", -) -load( - "@io_bazel_rules_docker//docker/util:run.bzl", - "container_run_and_commit_layer", -) -load( - "@io_bazel_rules_docker//docker/package_managers:download_pkgs.bzl", - "download_pkgs", -) -load( - "@io_bazel_rules_docker//docker/package_managers:install_pkgs.bzl", - "install_pkgs", + "@rules_oci//oci:defs.bzl", + "oci_image", + "oci_image_index", + "oci_push", + "oci_tarball", +) +load("//:rabbitmq.bzl", "APP_VERSION") + +filegroup( + name = "context-files", + srcs = [ + "10-defaults.conf", + "20-management_agent.disable_metrics_collector.conf", + "Dockerfile", + "docker-entrypoint.sh", + "//:package-generic-unix", + ], ) -BUILD_DEPS_PACKAGES = [ - "autoconf", - "ca-certificates", - "dpkg-dev", - "g++", - "gcc", - "libncurses5-dev", - "make", +_ARCHS = [ + "amd64", + "arm64", ] -REQUIRED_PACKAGES = [ - "gosu", - "ca-certificates", +_TAGS = [ + "docker", + "manual", + "no-sandbox", + "no-remote-exec", # buildbuddy runners do not have the emulator available ] -CONVENIENCE_PACKAGES = [ - "python3", - "dstat", - "sysstat", - "htop", - "nmon", - "tmux", - "neovim", +[ + genrule( + name = "docker-build-%s" % arch, + srcs = [ + ":context-files", + ], + outs = [ + "image-%s.tar" % arch, + ], + cmd = """set -euo pipefail + +CONTEXT="$$(mktemp -d)" + +cp $(locations :context-files) "$$CONTEXT" + +docker buildx \\ + build \\ + "$$CONTEXT" \\ + 
--platform linux/{arch} \\ + --build-arg RABBITMQ_VERSION="{rmq_version}" \\ + --output type=tar,dest=$(location image-{arch}.tar) $${{EXTRA_BUILDX_OPTS:-}} +""".format( + arch = arch, + rmq_version = APP_VERSION, + ), + tags = _TAGS, + ) + for arch in _ARCHS ] -FIRECRACKER_EXEC_PROPS = { - # https://www.buildbuddy.io/docs/rbe-microvms - "workload-isolation-type": "firecracker", - "init-dockerd": "true", - "recycle-runner": "true", - # Use the default buildbuddy RBE image - "container-image": "", -} - -download_pkgs( - name = "otp_pkgs", - exec_properties = FIRECRACKER_EXEC_PROPS, - image_tar = "@ubuntu2004//image", - packages = BUILD_DEPS_PACKAGES, - tags = ["manual"], -) - -download_pkgs( - name = "rabbitmq_pkgs", - exec_properties = FIRECRACKER_EXEC_PROPS, - image_tar = "@ubuntu2004//image", - packages = REQUIRED_PACKAGES + CONVENIENCE_PACKAGES, - tags = ["manual"], -) - -install_pkgs( - name = "otp_pkgs_image", - exec_properties = FIRECRACKER_EXEC_PROPS, - image_tar = "@ubuntu2004//image", - installables_tar = ":otp_pkgs.tar", - installation_cleanup_commands = "rm -rf /var/lib/apt/lists/*", - output_image_name = "otp_pkgs_image", - tags = ["manual"], -) - -install_pkgs( - name = "rabbitmq_pkgs_image", - exec_properties = FIRECRACKER_EXEC_PROPS, - image_tar = "@ubuntu2004//image", - installables_tar = ":rabbitmq_pkgs.tar", - installation_cleanup_commands = "rm -rf /var/lib/apt/lists/*", - output_image_name = "rabbitmq_pkgs_image", - tags = ["manual"], -) - -container_layer( - name = "openssl_source_layer", - directory = "/usr/local/src", - env = { - "OPENSSL_VERSION": "3.1.1", - }, - files = [ - "build_install_openssl.sh", - ], - tags = ["manual"], - tars = [ - "@openssl-3.1.1//file", - ], -) - -container_image( - name = "openssl_source", - base = ":otp_pkgs_image", - layers = [":openssl_source_layer"], - tags = ["manual"], -) - -container_run_and_commit_layer( - name = "openssl_layer", - commands = [ - "/usr/local/src/build_install_openssl.sh", - "rm /usr/local/src/build_install_openssl.sh", - ], - exec_properties = FIRECRACKER_EXEC_PROPS, - image = ":openssl_source.tar", - tags = ["manual"], -) - -container_image( - name = "otp_source", - base = ":otp_pkgs_image", - directory = "/usr/local/src", - files = [ - "build_install_otp.sh", - ], - layers = [ - ":openssl_layer", - ], - tags = ["manual"], - tars = select({ - "@erlang_config//:erlang_24_3": ["@otp_src_24//file"], - "@erlang_config//:erlang_25_0": ["@otp_src_25_0//file"], - "@erlang_config//:erlang_25_1": ["@otp_src_25_1//file"], - "@erlang_config//:erlang_25_2": ["@otp_src_25_2//file"], - "@erlang_config//:erlang_25_3": ["@otp_src_25_3//file"], - "@erlang_config//:erlang_26_0": ["@otp_src_26//file"], - }), -) - -container_run_and_commit_layer( - name = "otp_layer", - commands = [ - "/usr/local/src/build_install_otp.sh", - "rm /usr/local/src/build_install_otp.sh", - ], - exec_properties = FIRECRACKER_EXEC_PROPS, - image = ":otp_source.tar", - tags = ["manual"], -) - -container_layer( - name = "rabbitmq_tarball_layer", - directory = "/opt", - files = [ - "10-default-guest-user.conf", - "docker-entrypoint.sh", - "install_rabbitmq.sh", - ], - tags = ["manual"], - tars = [ - "//:package-generic-unix", - ], -) - -RABBITMQ_DATA_DIR = "/var/lib/rabbitmq" - -RABBITMQ_HOME = "/opt/rabbitmq" +write_file( + name = "cmd", + out = "cmd.txt", + # must match Dockerfile + content = ["rabbitmq-server"], +) + +write_file( + name = "entrypoint", + out = "entrypoint.txt", + # must match Dockerfile + content = ["docker-entrypoint.sh"], +) + +[ + 
oci_image( + name = "image-%s" % arch, + architecture = arch, + cmd = ":cmd", + entrypoint = ":entrypoint", + # must match Dockerfile + # docker inspect bazel/packaging/docker-image:rabbitmq-amd64 + # after + # bazel run //packaging/docker-image:rabbitmq-amd64 + # to check values + env = { + "PATH": "/opt/rabbitmq/sbin:/opt/erlang/bin:/opt/openssl/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "ERLANG_INSTALL_PATH_PREFIX": "/opt/erlang", + "OPENSSL_INSTALL_PATH_PREFIX": "/opt/openssl", + "RABBITMQ_DATA_DIR": "/var/lib/rabbitmq", + "RABBITMQ_VERSION": APP_VERSION, + "RABBITMQ_HOME": "/opt/rabbitmq", + "HOME": "/var/lib/rabbitmq", + "LANG": "C.UTF-8", + "LANGUAGE": "C.UTF-8", + "LC_ALL": "C.UTF-8", + }, + os = "linux", + tags = _TAGS, + tars = [":image-%s.tar" % arch], + ) + for arch in _ARCHS +] -container_image( - name = "rabbitmq_tarball", - base = ":rabbitmq_pkgs_image", - env = { - "RABBITMQ_DATA_DIR": RABBITMQ_DATA_DIR, - "RABBITMQ_HOME": RABBITMQ_HOME, - "RABBITMQ_LOGS": "-", - }, - layers = [ - ":openssl_layer", - ":otp_layer", - ":rabbitmq_tarball_layer", - ], - tags = ["manual"], -) +[ + oci_tarball( + name = "rabbitmq-%s" % arch, + image = ":image-%s" % arch, + repo_tags = ["bazel/%s:rabbitmq-%s" % (package_name(), arch)], + tags = _TAGS, + ) + for arch in _ARCHS +] -container_run_and_commit_layer( - name = "rabbitmq_layer", - commands = [ - "/opt/install_rabbitmq.sh", - "rm /opt/install_rabbitmq.sh", +oci_image_index( + name = "image", + images = [ + ":image-%s" % arch + for arch in _ARCHS ], - exec_properties = FIRECRACKER_EXEC_PROPS, - image = ":rabbitmq_tarball.tar", - tags = ["manual"], + tags = _TAGS, ) -C_UTF8 = "C.UTF-8" - -container_image( +oci_tarball( name = "rabbitmq", - base = ":rabbitmq_pkgs_image", - cmd = ["rabbitmq-server"], - entrypoint = ["docker-entrypoint.sh"], - env = { - "RABBITMQ_DATA_DIR": RABBITMQ_DATA_DIR, - "RABBITMQ_HOME": RABBITMQ_HOME, - "RABBITMQ_LOGS": "-", - "HOME": RABBITMQ_DATA_DIR, - "PATH": "%s/sbin:$$PATH" % RABBITMQ_HOME, - "LANG": C_UTF8, - "LANGUAGE": C_UTF8, - "LC_ALL": C_UTF8, - }, - layers = [ - ":openssl_layer", - ":otp_layer", - ":rabbitmq_layer", - ], - ports = [ - "4369/tcp", # epmd - "5671/tcp", # amqp-tls - "5672/tcp", # amqp - "25672/tcp", # erlang - "15671/tcp", # management-tls - "15672/tcp", # management - "15691/tcp", # prometheus-tls - "15692/tcp", # prometheus - "5551/tcp", # stream-tls - "5552/tcp", # stream - "8883/tcp", # mqtt-tls - "1883/tcp", # mqtt - "15676/tcp", # web-mqtt-tls - "15675/tcp", # web-mqtt - "61614/tcp", # stomp-tls - "61613/tcp", # stomp - "15673/tcp", # web-stomp-tls - "15674/tcp", # web-stomp - "15670/tcp", # examples - ], - tags = ["manual"], - volumes = [ - RABBITMQ_DATA_DIR, - ], -) - -# Wrapper targets for the tarred images are required in order to be able to run -# commandTests in container_test targets. 
- -container_image( - name = "openssl_install_wrapper", - base = ":otp_source", - tags = ["manual"], -) - -container_image( - name = "otp_install_wrapper", - base = ":rabbitmq_pkgs_image", - layers = [ - ":otp_layer", - ], - tags = ["manual"], -) - -# Tests - -container_test( - name = "openssl_test", - configs = ["//packaging/docker-image/test_configs:openssl_ubuntu.yaml"], - exec_properties = FIRECRACKER_EXEC_PROPS, - image = ":openssl_install_wrapper", - tags = [ - "docker", - "manual", - ], -) - -container_test( - name = "otp_test", - configs = ["//packaging/docker-image/test_configs:otp_ubuntu.yaml"], - exec_properties = FIRECRACKER_EXEC_PROPS, - image = ":otp_install_wrapper", - tags = [ - "docker", - "manual", - ], -) + format = "oci", + image = ":image", + repo_tags = ["bazel/%s:rabbitmq" % package_name()], + tags = _TAGS, +) + +[ + container_structure_test( + name = "rabbitmq_test_%s" % arch, + configs = ["//packaging/docker-image/test_configs:rabbitmq_ubuntu.yaml"], + image = ":image-%s" % arch, + tags = _TAGS, + ) + for arch in _ARCHS +] -container_test( - name = "rabbitmq_test", - configs = ["//packaging/docker-image/test_configs:rabbitmq_ubuntu.yaml"], - exec_properties = FIRECRACKER_EXEC_PROPS, - image = ":rabbitmq", - tags = [ - "docker", - "manual", - ], +oci_push( + name = "push", + image = ":image", + repository = "index.docker.io/pivotalrabbitmq/rabbitmq", + tags = _TAGS, ) diff --git a/packaging/docker-image/Dockerfile b/packaging/docker-image/Dockerfile index ced2a49e760f..b74b68d5b468 100644 --- a/packaging/docker-image/Dockerfile +++ b/packaging/docker-image/Dockerfile @@ -1,77 +1,66 @@ -# The official Canonical Ubuntu Bionic image is ideal from a security perspective, +# +# Based on the generated file from https://github.com/docker-library/rabbitmq +# + +# The official Canonical Ubuntu Focal image is ideal from a security perspective, # especially for the enterprises that we, the RabbitMQ team, have to deal with -ARG BASE=ubuntu -FROM ${BASE}:20.04 + +FROM ubuntu:22.04 as build-base + +ARG BUILDKIT_SBOM_SCAN_STAGE=true RUN set -eux; \ - apt-get update; \ - apt-get install -y --no-install-recommends \ -# grab gosu for easy step-down from root - gosu \ - ; \ - rm -rf /var/lib/apt/lists/*; \ -# verify that the "gosu" binary works - gosu nobody true + apt-get update; \ + apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + gnupg \ + libncurses5-dev \ + wget + +FROM build-base as openssl-builder + +ARG BUILDKIT_SBOM_SCAN_STAGE=true -# PGP key servers are too flaky for us to verify during every CI triggered build -# https://github.com/docker-library/official-images/issues/4252 -ARG SKIP_PGP_VERIFY=false # Default to a PGP keyserver that pgp-happy-eyeballs recognizes, but allow for substitutions locally -ARG PGP_KEYSERVER=ha.pool.sks-keyservers.net +ARG PGP_KEYSERVER=keyserver.ubuntu.com # If you are building this image locally and are getting `gpg: keyserver receive failed: No data` errors, -# run the build with a different PGP_KEYSERVER, e.g. docker build --tag rabbitmq:3.7 --build-arg PGP_KEYSERVER=pgpkeys.eu 3.7/ubuntu +# run the build with a different PGP_KEYSERVER, e.g. 
docker build --tag rabbitmq:4.0 --build-arg PGP_KEYSERVER=pgpkeys.eu 4.0/ubuntu # For context, see https://github.com/docker-library/official-images/issues/4252 -# Using the latest OpenSSL LTS release, with support until September 2023 - https://www.openssl.org/source/ -ENV OPENSSL_VERSION 1.1.1g -ENV OPENSSL_SOURCE_SHA256="ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46" -# https://www.openssl.org/community/omc.html -ENV OPENSSL_PGP_KEY_IDS="0x8657ABB260F056B1E5190839D9C4D26D0E604491 0x5B2545DAB21995F4088CEFAA36CEE4DEB00CFE33 0xED230BEC4D4F2518B9D7DF41F0DB4D21C1D35231 0xC1F33DD8CE1D4CC613AF14DA9195C48241FBF7DD 0x7953AC1FBC3DC8B3B292393ED5E9E43F7DF9EE8C 0xE5E52560DD91C556DDBDA5D02064C53641C25E5D" +ENV OPENSSL_VERSION 3.3.1 +ENV OPENSSL_SOURCE_SHA256="777cd596284c883375a2a7a11bf5d2786fc5413255efab20c50d6ffe6d020b7e" +# https://www.openssl.org/community/otc.html +# https://www.openssl.org/source/ +ENV OPENSSL_PGP_KEY_IDS="0x8657ABB260F056B1E5190839D9C4D26D0E604491 0xB7C1C14360F353A36862E4D5231C84CDDCC69C45 0xC1F33DD8CE1D4CC613AF14DA9195C48241FBF7DD 0x95A9908DDFA16830BE9FB9003D30A3A9FF1360DC 0x7953AC1FBC3DC8B3B292393ED5E9E43F7DF9EE8C 0xA21FAB74B0088AA361152586B8EF1A6BA9DA2D5C 0xE5E52560DD91C556DDBDA5D02064C53641C25E5D 0xEFC0A467D613CB83C7ED6D30D894E2CE8B3D79F5" -# Use the latest stable Erlang/OTP release - make find-latest-otp - https://github.com/erlang/otp/tags -ARG OTP_VERSION -ENV OTP_VERSION ${OTP_VERSION} +ENV OTP_VERSION 26.2.5 # TODO add PGP checking when the feature will be added to Erlang/OTP's build system -# http://erlang.org/pipermail/erlang-questions/2019-January/097067.html -ARG OTP_SHA256 -ENV OTP_SOURCE_SHA256=${OTP_SHA256} -ARG SKIP_OTP_VERIFY=false +# https://erlang.org/pipermail/erlang-questions/2019-January/097067.html +ENV OTP_SOURCE_SHA256="de155c4ad9baab2b9e6c96dbd03bf955575a04dd6feee9c08758beb28484c9f6" + +# install openssl & erlang to a path that isn't auto-checked for libs to prevent accidental use by system packages +ENV ERLANG_INSTALL_PATH_PREFIX /opt/erlang +ENV OPENSSL_INSTALL_PATH_PREFIX /opt/openssl # Install dependencies required to build Erlang/OTP from source # https://erlang.org/doc/installation_guide/INSTALL.html -# autoconf: Required to configure Erlang/OTP before compiling # dpkg-dev: Required to set up host & build type when compiling Erlang/OTP # gnupg: Required to verify OpenSSL artefacts # libncurses5-dev: Required for Erlang/OTP new shell & observer_cli - https://github.com/zhongwencool/observer_cli RUN set -eux; \ - \ - savedAptMark="$(apt-mark showmanual)"; \ - apt-get update; \ - apt-get install --yes --no-install-recommends \ - autoconf \ - ca-certificates \ - dpkg-dev \ - gcc \ - g++ \ - gnupg \ - libncurses5-dev \ - make \ - wget \ - ; \ - rm -rf /var/lib/apt/lists/*; \ - \ OPENSSL_SOURCE_URL="https://www.openssl.org/source/openssl-$OPENSSL_VERSION.tar.gz"; \ OPENSSL_PATH="/usr/local/src/openssl-$OPENSSL_VERSION"; \ - OPENSSL_CONFIG_DIR=/usr/local/etc/ssl; \ + OPENSSL_CONFIG_DIR="$OPENSSL_INSTALL_PATH_PREFIX/etc/ssl"; \ \ # Required by the crypto & ssl Erlang/OTP applications wget --progress dot:giga --output-document "$OPENSSL_PATH.tar.gz.asc" "$OPENSSL_SOURCE_URL.asc"; \ wget --progress dot:giga --output-document "$OPENSSL_PATH.tar.gz" "$OPENSSL_SOURCE_URL"; \ export GNUPGHOME="$(mktemp -d)"; \ for key in $OPENSSL_PGP_KEY_IDS; do \ - gpg --batch --keyserver "$PGP_KEYSERVER" --recv-keys "$key" || true; \ + gpg --batch --keyserver "$PGP_KEYSERVER" --recv-keys "$key"; \ done; \ - test "$SKIP_PGP_VERIFY" == "true" || gpg 
--batch --verify "$OPENSSL_PATH.tar.gz.asc" "$OPENSSL_PATH.tar.gz"; \ + gpg --batch --verify "$OPENSSL_PATH.tar.gz.asc" "$OPENSSL_PATH.tar.gz"; \ gpgconf --kill all; \ rm -rf "$GNUPGHOME"; \ echo "$OPENSSL_SOURCE_SHA256 *$OPENSSL_PATH.tar.gz" | sha256sum --check --strict -; \ @@ -80,35 +69,61 @@ RUN set -eux; \ \ # Configure OpenSSL for compilation cd "$OPENSSL_PATH"; \ +# without specifying "--libdir", Erlang will fail during "crypto:supports()" looking for a "pthread_atfork" function that doesn't exist (but only on arm32v7/armhf??) # OpenSSL's "config" script uses a lot of "uname"-based target detection... - MACHINE="$(dpkg-architecture --query DEB_BUILD_GNU_CPU)" \ + dpkgArch="$(dpkg --print-architecture)"; dpkgArch="${dpkgArch##*-}"; \ +# https://deb.debian.org/debian/dists/unstable/main/ + case "$dpkgArch" in \ +# https://github.com/openssl/openssl/blob/openssl-3.1.1/Configurations/10-main.conf#L860 (look for "linux-" and "linux64-" keys) + amd64) opensslMachine='linux-x86_64' ;; \ + arm64) opensslMachine='linux-aarch64' ;; \ +# https://github.com/openssl/openssl/blob/openssl-3.1.1/Configurations/10-main.conf#L736-L766 +# https://wiki.debian.org/ArchitectureSpecificsMemo#Architecture_baselines +# https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html + armhf) opensslMachine='linux-armv4'; opensslExtraConfig='-march=armv7-a+fp' ;; \ + i386) opensslMachine='linux-x86' ;; \ + ppc64el) opensslMachine='linux-ppc64le' ;; \ + riscv64) opensslMachine='linux64-riscv64' ;; \ + s390x) opensslMachine='linux64-s390x' ;; \ + *) echo >&2 "error: unsupported arch: '$apkArch'"; exit 1 ;; \ + esac; \ + MACHINE="$opensslMachine" \ RELEASE="4.x.y-z" \ SYSTEM='Linux' \ BUILD='???' \ - ./config \ + ./Configure \ + "$opensslMachine" \ + enable-fips \ + --prefix="$OPENSSL_INSTALL_PATH_PREFIX" \ --openssldir="$OPENSSL_CONFIG_DIR" \ -# add -rpath to avoid conflicts between our OpenSSL's "libssl.so" and the libssl package by making sure /usr/local/lib is searched first (but only for Erlang/OpenSSL to avoid issues with other tools using libssl; https://github.com/docker-library/rabbitmq/issues/364) - -Wl,-rpath=/usr/local/lib \ + --libdir="$OPENSSL_INSTALL_PATH_PREFIX/lib" \ +# add -rpath to avoid conflicts between our OpenSSL's "libssl.so" and the libssl package by making sure "$INSTALL_PATH_PREFIX/lib" is searched first (but only for Erlang/OpenSSL to avoid issues with other tools using libssl; https://github.com/docker-library/rabbitmq/issues/364) + -Wl,-rpath="$OPENSSL_INSTALL_PATH_PREFIX/lib" \ + ${opensslExtraConfig:-} \ ; \ # Compile, install OpenSSL, verify that the command-line works & development headers are present make -j "$(getconf _NPROCESSORS_ONLN)"; \ - make install_sw install_ssldirs; \ - cd ..; \ - rm -rf "$OPENSSL_PATH"*; \ + make install_sw install_ssldirs install_fips; \ ldconfig; \ # use Debian's CA certificates rmdir "$OPENSSL_CONFIG_DIR/certs" "$OPENSSL_CONFIG_DIR/private"; \ - ln -sf /etc/ssl/certs /etc/ssl/private "$OPENSSL_CONFIG_DIR"; \ + ln -sf /etc/ssl/certs /etc/ssl/private "$OPENSSL_CONFIG_DIR" + # smoke test - openssl version; \ - \ - OTP_SOURCE_URL="https://github.com/erlang/otp/archive/OTP-$OTP_VERSION.tar.gz"; \ +RUN $OPENSSL_INSTALL_PATH_PREFIX/bin/openssl version + +FROM openssl-builder as erlang-builder + +ARG BUILDKIT_SBOM_SCAN_STAGE=true + +RUN set -eux; \ + OTP_SOURCE_URL="https://github.com/erlang/otp/releases/download/OTP-$OTP_VERSION/otp_src_$OTP_VERSION.tar.gz"; \ OTP_PATH="/usr/local/src/otp-$OTP_VERSION"; \ \ # Download, verify & extract OTP_SOURCE mkdir -p 
"$OTP_PATH"; \ wget --progress dot:giga --output-document "$OTP_PATH.tar.gz" "$OTP_SOURCE_URL"; \ - test "$SKIP_OTP_VERIFY" = "true" || echo "$OTP_SOURCE_SHA256 *$OTP_PATH.tar.gz" | sha256sum --check --strict -; \ + echo "$OTP_SOURCE_SHA256 *$OTP_PATH.tar.gz" | sha256sum --check --strict -; \ tar --extract --file "$OTP_PATH.tar.gz" --directory "$OTP_PATH" --strip-components 1; \ \ # Configure Erlang/OTP for compilation, disable unused features & applications @@ -116,28 +131,32 @@ RUN set -eux; \ # ERL_TOP is required for Erlang/OTP makefiles to find the absolute path for the installation cd "$OTP_PATH"; \ export ERL_TOP="$OTP_PATH"; \ - ./otp_build autoconf; \ CFLAGS="$(dpkg-buildflags --get CFLAGS)"; export CFLAGS; \ -# add -rpath to avoid conflicts between our OpenSSL's "libssl.so" and the libssl package by making sure /usr/local/lib is searched first (but only for Erlang/OpenSSL to avoid issues with other tools using libssl; https://github.com/docker-library/rabbitmq/issues/364) - export CFLAGS="$CFLAGS -Wl,-rpath=/usr/local/lib"; \ +# add -rpath to avoid conflicts between our OpenSSL's "libssl.so" and the libssl package by making sure "$OPENSSL_INSTALL_PATH_PREFIX/lib" is searched first (but only for Erlang/OpenSSL to avoid issues with other tools using libssl; https://github.com/docker-library/rabbitmq/issues/364) + export CFLAGS="$CFLAGS -Wl,-rpath=$OPENSSL_INSTALL_PATH_PREFIX/lib"; \ hostArch="$(dpkg-architecture --query DEB_HOST_GNU_TYPE)"; \ buildArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; \ dpkgArch="$(dpkg --print-architecture)"; dpkgArch="${dpkgArch##*-}"; \ +# JIT is only supported on amd64 + arm64; https://github.com/erlang/otp/blob/OTP-25.3.2.2/erts/configure#L24306-L24347 + jitFlag=; \ + case "$dpkgArch" in \ + amd64 | arm64) jitFlag='--enable-jit' ;; \ + esac; \ ./configure \ + --prefix="$ERLANG_INSTALL_PATH_PREFIX" \ --host="$hostArch" \ --build="$buildArch" \ - --disable-dynamic-ssl-lib \ --disable-hipe \ --disable-sctp \ --disable-silent-rules \ - --enable-jit \ + --enable-builtin-zlib \ --enable-clock-gettime \ --enable-hybrid-heap \ --enable-kernel-poll \ - --enable-shared-zlib \ --enable-smp-support \ --enable-threads \ --with-microstate-accounting=extra \ + --with-ssl="$OPENSSL_INSTALL_PATH_PREFIX" \ --without-common_test \ --without-debugger \ --without-dialyzer \ @@ -156,80 +175,110 @@ RUN set -eux; \ --without-ssh \ --without-tftp \ --without-wx \ + $jitFlag \ ; \ + \ # Compile & install Erlang/OTP make -j "$(getconf _NPROCESSORS_ONLN)" GEN_OPT_FLGS="-O2 -fno-strict-aliasing"; \ make install; \ - cd ..; \ - rm -rf \ - "$OTP_PATH"* \ - /usr/local/lib/erlang/lib/*/examples \ - /usr/local/lib/erlang/lib/*/src \ - ; \ \ -# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies - apt-mark auto '.*' > /dev/null; \ - [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; \ - find /usr/local -type f -executable -exec ldd '{}' ';' \ - | awk '/=>/ { print $(NF-1) }' \ - | sort -u \ - | xargs -r dpkg-query --search \ - | cut -d: -f1 \ - | sort -u \ - | xargs -r apt-mark manual \ - ; \ - apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ +# Remove unnecessary files + find "$ERLANG_INSTALL_PATH_PREFIX/lib/erlang" -type d -name examples -exec rm -rf '{}' +; \ + find "$ERLANG_INSTALL_PATH_PREFIX/lib/erlang" -type d -name src -exec rm -rf '{}' +; \ + find "$ERLANG_INSTALL_PATH_PREFIX/lib/erlang" -type d -name include -exec rm -rf '{}' + + +# Check that Erlang/OTP crypto & ssl were compiled 
against OpenSSL correctly +ENV PATH $ERLANG_INSTALL_PATH_PREFIX/bin:$PATH +RUN find $ERLANG_INSTALL_PATH_PREFIX -type f -name 'crypto.so' -exec ldd {} \; | awk '/libcrypto\.so/ { if (!index($3,ENVIRON["OPENSSL_INSTALL_PATH_PREFIX"])) exit 1 }' +RUN erl -noshell -eval 'ok = crypto:start(), ok = io:format("~p~n~n~p~n~n", [crypto:supports(), ssl:versions()]), init:stop().' + +FROM ubuntu:22.04 + +# OPENSSL/ERLANG_INSTALL_PATH_PREFIX are defined in a different stage, so define them again +ENV ERLANG_INSTALL_PATH_PREFIX /opt/erlang +ENV OPENSSL_INSTALL_PATH_PREFIX /opt/openssl +COPY --from=erlang-builder $ERLANG_INSTALL_PATH_PREFIX $ERLANG_INSTALL_PATH_PREFIX +RUN echo '{"spdxVersion":"SPDX-2.3","SPDXID":"SPDXRef-DOCUMENT","name":"erlang-sbom","packages":[{"name":"erlang","versionInfo":"26.2.2","SPDXID":"SPDXRef-Package--erlang","externalRefs":[{"referenceCategory":"PACKAGE-MANAGER","referenceType":"purl","referenceLocator":"pkg:generic/erlang@26.2.2?os_name=ubuntu&os_version=22.04"}],"licenseDeclared":"Apache-2.0"}]}' > $ERLANG_INSTALL_PATH_PREFIX/erlang.spdx.json + +COPY --from=openssl-builder $OPENSSL_INSTALL_PATH_PREFIX $OPENSSL_INSTALL_PATH_PREFIX +RUN echo '{"spdxVersion":"SPDX-2.3","SPDXID":"SPDXRef-DOCUMENT","name":"openssl-sbom","packages":[{"name":"openssl","versionInfo":"3.1.5","SPDXID":"SPDXRef-Package--openssl","externalRefs":[{"referenceCategory":"PACKAGE-MANAGER","referenceType":"purl","referenceLocator":"pkg:generic/openssl@3.1.5?os_name=ubuntu&os_version=22.04"}],"licenseDeclared":"Apache-2.0"}]}' > $OPENSSL_INSTALL_PATH_PREFIX/openssl.spdx.json + +ENV PATH $ERLANG_INSTALL_PATH_PREFIX/bin:$OPENSSL_INSTALL_PATH_PREFIX/bin:$PATH + +ENV RABBITMQ_DATA_DIR /var/lib/rabbitmq + +RUN set -eux; \ +# Configure OpenSSL to use system certs + ln -vsf /etc/ssl/certs /etc/ssl/private "$OPENSSL_INSTALL_PATH_PREFIX/etc/ssl"; \ \ -# Check that OpenSSL still works after purging build dependencies +# Check that OpenSSL still works after copying from previous builder + ldconfig; \ + sed -i.ORIG -e "/\.include.*fips/ s!.*!.include $OPENSSL_INSTALL_PATH_PREFIX/etc/ssl/fipsmodule.cnf!" \ + -e '/# fips =/s/.*/fips = fips_sect/' "$OPENSSL_INSTALL_PATH_PREFIX/etc/ssl/openssl.cnf"; \ + sed -i.ORIG -e '/^activate/s/^/#/' "$OPENSSL_INSTALL_PATH_PREFIX/etc/ssl/fipsmodule.cnf"; \ + [ "$(command -v openssl)" = "$OPENSSL_INSTALL_PATH_PREFIX/bin/openssl" ]; \ openssl version; \ + openssl version -d; \ + \ # Check that Erlang/OTP crypto & ssl were compiled against OpenSSL correctly - erl -noshell -eval 'io:format("~p~n~n~p~n~n", [crypto:supports(), ssl:versions()]), init:stop().' 
- -ENV RABBITMQ_DATA_DIR=/var/lib/rabbitmq + erl -noshell -eval 'ok = crypto:start(), ok = io:format("~p~n~n~p~n~n", [crypto:supports(), ssl:versions()]), init:stop().'; \ + \ # Create rabbitmq system user & group, fix permissions & allow root user to connect to the RabbitMQ Erlang VM -RUN set -eux; \ groupadd --gid 999 --system rabbitmq; \ useradd --uid 999 --system --home-dir "$RABBITMQ_DATA_DIR" --gid rabbitmq rabbitmq; \ mkdir -p "$RABBITMQ_DATA_DIR" /etc/rabbitmq /etc/rabbitmq/conf.d /tmp/rabbitmq-ssl /var/log/rabbitmq; \ chown -fR rabbitmq:rabbitmq "$RABBITMQ_DATA_DIR" /etc/rabbitmq /etc/rabbitmq/conf.d /tmp/rabbitmq-ssl /var/log/rabbitmq; \ - chmod 777 "$RABBITMQ_DATA_DIR" /etc/rabbitmq /etc/rabbitmq/conf.d /tmp/rabbitmq-ssl /var/log/rabbitmq; \ + chmod 1777 "$RABBITMQ_DATA_DIR" /etc/rabbitmq /etc/rabbitmq/conf.d /tmp/rabbitmq-ssl /var/log/rabbitmq; \ ln -sf "$RABBITMQ_DATA_DIR/.erlang.cookie" /root/.erlang.cookie -# https://www.rabbitmq.com/signatures.html#importing-gpg -# ENV RABBITMQ_PGP_KEY_ID="0x0A9AF2115F4687BD29803A206B73A36E6026DFCA" -ENV RABBITMQ_HOME=/opt/rabbitmq +# Use the latest stable RabbitMQ release (https://www.rabbitmq.com/download.html) +ARG RABBITMQ_VERSION=4.0.0 +ENV RABBITMQ_VERSION=${RABBITMQ_VERSION} +ENV RABBITMQ_HOME /opt/rabbitmq + +# Add RabbitMQ to PATH +ENV PATH $RABBITMQ_HOME/sbin:$PATH -# Add RabbitMQ to PATH, send all logs to TTY -ENV PATH=$RABBITMQ_HOME/sbin:$PATH \ - RABBITMQ_LOGS=- +COPY package-generic-unix.tar.xz /usr/local/src/rabbitmq-$RABBITMQ_VERSION.tar.xz +# Install RabbitMQ RUN set -eux; \ - \ - savedAptMark="$(apt-mark showmanual)"; \ + export DEBIAN_FRONTEND=noninteractive; \ apt-get update; \ apt-get install --yes --no-install-recommends \ ca-certificates \ +# grab gosu for easy step-down from root + gosu \ +# Bring in tzdata so users could set the timezones through the environment + tzdata \ + ; \ +# verify that the "gosu" binary works + gosu nobody true; \ + \ + savedAptMark="$(apt-mark showmanual)"; \ + apt-get install --yes --no-install-recommends \ gnupg \ wget \ xz-utils \ ; \ rm -rf /var/lib/apt/lists/*; \ \ - apt-mark auto '.*' > /dev/null; \ - apt-mark manual $savedAptMark; \ - apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false - -# Install RabbitMQ -ARG RABBITMQ_BUILD -COPY ${RABBITMQ_BUILD} $RABBITMQ_HOME - -RUN set -eux; \ + RABBITMQ_SOURCE_URL="https://github.com/rabbitmq/rabbitmq-server/releases/download/v$RABBITMQ_VERSION/rabbitmq-server-generic-unix-latest-toolchain-$RABBITMQ_VERSION.tar.xz"; \ + RABBITMQ_PATH="/usr/local/src/rabbitmq-$RABBITMQ_VERSION"; \ + \ + mkdir -p "$RABBITMQ_HOME"; \ + tar --extract --file "$RABBITMQ_PATH.tar.xz" --directory "$RABBITMQ_HOME" --strip-components 1; \ + rm -rf "$RABBITMQ_PATH"*; \ # Do not default SYS_PREFIX to RABBITMQ_HOME, leave it empty grep -qE '^SYS_PREFIX=\$\{RABBITMQ_HOME\}$' "$RABBITMQ_HOME/sbin/rabbitmq-defaults"; \ sed -i 's/^SYS_PREFIX=.*$/SYS_PREFIX=/' "$RABBITMQ_HOME/sbin/rabbitmq-defaults"; \ grep -qE '^SYS_PREFIX=$' "$RABBITMQ_HOME/sbin/rabbitmq-defaults"; \ chown -R rabbitmq:rabbitmq "$RABBITMQ_HOME"; \ \ + apt-mark auto '.*' > /dev/null; \ + apt-mark manual $savedAptMark; \ + apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ + \ # verify assumption of no stale cookies [ ! 
-e "$RABBITMQ_DATA_DIR/.erlang.cookie" ]; \ # Ensure RabbitMQ was installed correctly by running a few commands that do not depend on a running server, as the rabbitmq user @@ -238,7 +287,12 @@ RUN set -eux; \ gosu rabbitmq rabbitmqctl list_ciphers; \ gosu rabbitmq rabbitmq-plugins list; \ # no stale cookies - rm "$RABBITMQ_DATA_DIR/.erlang.cookie" + rm "$RABBITMQ_DATA_DIR/.erlang.cookie"; \ + \ + echo '{"spdxVersion":"SPDX-2.3","SPDXID":"SPDXRef-DOCUMENT","name":"rabbitmq-sbom","packages":[{"name":"rabbitmq","versionInfo":"4.0.0","SPDXID":"SPDXRef-Package--rabbitmq","externalRefs":[{"referenceCategory":"PACKAGE-MANAGER","referenceType":"purl","referenceLocator":"pkg:generic/rabbitmq@4.0.0?os_name=ubuntu&os_version=22.04"}],"licenseDeclared":"MPL-2.0 AND Apache-2.0"}]}' > $RABBITMQ_HOME/rabbitmq.spdx.json + +# Enable Prometheus-style metrics by default (https://github.com/docker-library/rabbitmq/issues/419) +RUN gosu rabbitmq rabbitmq-plugins enable --offline rabbitmq_prometheus # Added for backwards compatibility - users can simply COPY custom plugins to /plugins RUN ln -sf /opt/rabbitmq/plugins /plugins @@ -253,62 +307,26 @@ VOLUME $RABBITMQ_DATA_DIR # https://docs.docker.com/samples/library/ubuntu/#locales ENV LANG=C.UTF-8 LANGUAGE=C.UTF-8 LC_ALL=C.UTF-8 -COPY --chown=rabbitmq:rabbitmq 10-default-guest-user.conf /etc/rabbitmq/conf.d/ +COPY --chown=rabbitmq:rabbitmq 10-defaults.conf 20-management_agent.disable_metrics_collector.conf /etc/rabbitmq/conf.d/ COPY docker-entrypoint.sh /usr/local/bin/ ENTRYPOINT ["docker-entrypoint.sh"] -# EPMD AMQP-TLS AMQP ERLANG -EXPOSE 4369 5671 5672 25672 +EXPOSE 4369 5671 5672 15691 15692 25672 CMD ["rabbitmq-server"] -# rabbitmq_management -RUN rabbitmq-plugins enable --offline rabbitmq_management && \ - rabbitmq-plugins is_enabled rabbitmq_management --offline -# extract "rabbitmqadmin" from inside the "rabbitmq_management-X.Y.Z.ez" plugin zipfile + +RUN set eux; \ + rabbitmq-plugins enable --offline rabbitmq_management; \ +# make sure the metrics collector is re-enabled (disabled in the base image for Prometheus-style metrics by default) + rm -f /etc/rabbitmq/conf.d/20-management_agent.disable_metrics_collector.conf; \ +# grab "rabbitmqadmin" from inside the "rabbitmq_management-X.Y.Z" plugin folder # see https://github.com/docker-library/rabbitmq/issues/207 -# RabbitMQ 3.9 onwards uses uncompressed plugins by default, in which case extraction is -# unnecesary -RUN set -eux; \ - if [ -s /plugins/rabbitmq_management-*.ez ]; then \ - erl -noinput -eval ' \ - { ok, AdminBin } = zip:foldl(fun(FileInArchive, GetInfo, GetBin, Acc) -> \ - case Acc of \ - "" -> \ - case lists:suffix("/rabbitmqadmin", FileInArchive) of \ - true -> GetBin(); \ - false -> Acc \ - end; \ - _ -> Acc \ - end \ - end, "", init:get_plain_arguments()), \ - io:format("~s", [ AdminBin ]), \ - init:stop(). 
\ - ' -- /plugins/rabbitmq_management-*.ez > /usr/local/bin/rabbitmqadmin; \ - else \ - cp /plugins/rabbitmq_management-*/priv/www/cli/rabbitmqadmin /usr/local/bin/rabbitmqadmin; \ - fi; \ + cp /plugins/rabbitmq_management-*/priv/www/cli/rabbitmqadmin /usr/local/bin/rabbitmqadmin; \ [ -s /usr/local/bin/rabbitmqadmin ]; \ chmod +x /usr/local/bin/rabbitmqadmin; \ - apt-get update; apt-get install -y --no-install-recommends python3 dstat sysstat htop nmon tmux neovim; rm -rf /var/lib/apt/lists/*; \ + apt-get update; \ + apt-get install -y --no-install-recommends python3; \ + rm -rf /var/lib/apt/lists/*; \ rabbitmqadmin --version -# MANAGEMENT-TLS MANAGEMENT -EXPOSE 15671 15672 - -RUN rabbitmq-plugins enable --offline rabbitmq_prometheus && \ - rabbitmq-plugins is_enabled rabbitmq_prometheus --offline -# PROMETHEUS-TLS PROMETHEUS -EXPOSE 15691 15692 - -RUN rabbitmq-plugins enable --all -# STREAM-TLS STREAM -EXPOSE 5551 5552 -# MQTT-TLS MQTT -EXPOSE 8883 1883 -# WEB-MQTT-TLS WEB-MQTT -EXPOSE 15676 15675 -# STOMP-TLS STOMP -EXPOSE 61614 61613 -# WEB-STOMP-TLS WEB-STOMP -EXPOSE 15673 15674 -# EXAMPLES -EXPOSE 15670 + +EXPOSE 15671 15672 \ No newline at end of file diff --git a/packaging/docker-image/Makefile b/packaging/docker-image/Makefile index 6e82080b6057..3b442b5cb180 100644 --- a/packaging/docker-image/Makefile +++ b/packaging/docker-image/Makefile @@ -34,24 +34,14 @@ endif IMAGE_TAG_1 ?= $(subst +,-,$(VERSION)) endif -OTP_VERSION ?= 25.0.4 -OTP_SHA256 ?= 05878cb51a64b33c86836b12a21903075c300409b609ad5e941ddb0feb8c2120 REPO ?= pivotalrabbitmq/rabbitmq -SKIP_PGP_VERIFY ?= false -PGP_KEYSERVER ?= pgpkeys.eu -ALT1_PGP_KEYSERVER ?= keyserver.ubuntu.com -ALT2_PGP_KEYSERVER ?= pgpkeys.uk all: dist dist: - xzcat $(GENERIC_UNIX_ARCHIVE) | tar xvf - + cp -f $(GENERIC_UNIX_ARCHIVE) package-generic-unix.tar.xz docker build --pull \ - --build-arg SKIP_PGP_VERIFY=$(SKIP_PGP_VERIFY) \ - --build-arg PGP_KEYSERVER=$(PGP_KEYSERVER) \ - --build-arg OTP_VERSION=$(OTP_VERSION) \ - --build-arg OTP_SHA256=$(OTP_SHA256) \ - --build-arg RABBITMQ_BUILD=rabbitmq_server-$(VERSION) \ + --build-arg RABBITMQ_VERSION=$(VERSION) \ --tag $(REPO):$(IMAGE_TAG_1) \ . @@ -63,19 +53,4 @@ ifdef IMAGE_TAG_2 endif clean: - rm -rf rabbitmq_server-* - -OTP_VERSION_MATCH ?= 25[0-9.]+ -define LATEST_STABLE_OTP_VERSION -curl --silent --fail https://api.github.com/repos/erlang/otp/git/refs/tags | \ - jq -r '.[].ref | sub("refs/tags/OTP.{1}";"") | match("^$(OTP_VERSION_MATCH)$$") | .string' | \ - tail -n 1 -endef -.PHONY: find-otp-sha256 -find-otp-sha256: - @printf "Version: " && \ - export VERSION="$$($(LATEST_STABLE_OTP_VERSION))" && \ - echo "$$VERSION" && \ - printf "Checksum: " && \ - wget --continue --quiet --output-document="/tmp/OTP-$$VERSION.tar.gz" "https://github.com/erlang/otp/archive/OTP-$$VERSION.tar.gz" && \ - shasum -a 256 "/tmp/OTP-$$VERSION.tar.gz" + rm -f rabbitmq_server-* diff --git a/packaging/docker-image/build_install_openssl.sh b/packaging/docker-image/build_install_openssl.sh deleted file mode 100755 index 9c44c510be33..000000000000 --- a/packaging/docker-image/build_install_openssl.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -set -euxo pipefail - -OPENSSL_PATH="/usr/local/src/openssl-$OPENSSL_VERSION" -OPENSSL_CONFIG_DIR=/usr/local/etc/ssl - -cd "$OPENSSL_PATH" -debMultiarch="$(dpkg-architecture --query DEB_HOST_MULTIARCH)" -# OpenSSL's "config" script uses a lot of "uname"-based target detection... 
-MACHINE="$(dpkg-architecture --query DEB_BUILD_GNU_CPU)" \ -RELEASE="4.x.y-z" \ -SYSTEM='Linux' \ -BUILD='???' \ -./config \ - --openssldir="$OPENSSL_CONFIG_DIR" \ - --libdir="lib/$debMultiarch" \ - -Wl,-rpath="/usr/local/lib/$debMultiarch" # add -rpath to avoid conflicts between our OpenSSL's "libssl.so" and the libssl package by making sure /usr/local/lib is searched first (but only for Erlang/OpenSSL to avoid issues with other tools using libssl; https://github.com/docker-library/rabbitmq/issues/364) - -# Compile, install OpenSSL, verify that the command-line works & development headers are present -make -j "$(getconf _NPROCESSORS_ONLN)" -make install_sw install_ssldirs -ldconfig -# use Debian's CA certificates -rmdir "$OPENSSL_CONFIG_DIR/certs" "$OPENSSL_CONFIG_DIR/private" -ln -sf /etc/ssl/certs /etc/ssl/private "$OPENSSL_CONFIG_DIR" -# cleanup sources -rm -rf "$OPENSSL_PATH"* -# smoke test -openssl version -a diff --git a/packaging/docker-image/build_install_otp.sh b/packaging/docker-image/build_install_otp.sh deleted file mode 100755 index 3bdddf2d1825..000000000000 --- a/packaging/docker-image/build_install_otp.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash - -set -euxo pipefail - -OTP_PATH="$(cd /usr/local/src/otp-OTP-* && pwd)" - -# Configure Erlang/OTP for compilation, disable unused features & applications -# https://erlang.org/doc/applications.html -# ERL_TOP is required for Erlang/OTP makefiles to find the absolute path for the installation -cd "$OTP_PATH" -export ERL_TOP="$OTP_PATH" -./otp_build autoconf -CFLAGS="$(dpkg-buildflags --get CFLAGS)"; export CFLAGS -# add -rpath to avoid conflicts between our OpenSSL's "libssl.so" and the libssl package by making sure /usr/local/lib is searched first (but only for Erlang/OpenSSL to avoid issues with other tools using libssl; https://github.com/docker-library/rabbitmq/issues/364) -export CFLAGS="$CFLAGS -Wl,-rpath=/usr/local/lib/$(dpkg-architecture --query DEB_HOST_MULTIARCH)" -hostArch="$(dpkg-architecture --query DEB_HOST_GNU_TYPE)" -buildArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)" -dpkgArch="$(dpkg --print-architecture)"; dpkgArch="${dpkgArch##*-}" -./configure \ - --host="$hostArch" \ - --build="$buildArch" \ - --disable-dynamic-ssl-lib \ - --disable-hipe \ - --disable-sctp \ - --disable-silent-rules \ - --enable-jit \ - --enable-clock-gettime \ - --enable-hybrid-heap \ - --enable-kernel-poll \ - --enable-shared-zlib \ - --enable-smp-support \ - --enable-threads \ - --with-microstate-accounting=extra \ - --without-common_test \ - --without-debugger \ - --without-dialyzer \ - --without-diameter \ - --without-edoc \ - --without-erl_docgen \ - --without-et \ - --without-eunit \ - --without-ftp \ - --without-hipe \ - --without-jinterface \ - --without-megaco \ - --without-observer \ - --without-odbc \ - --without-reltool \ - --without-ssh \ - --without-tftp \ - --without-wx - -# Compile & install Erlang/OTP -make -j "$(getconf _NPROCESSORS_ONLN)" GEN_OPT_FLGS="-O2 -fno-strict-aliasing" -make install -cd .. -rm -rf \ - "$OTP_PATH"* \ - /usr/local/lib/erlang/lib/*/examples \ - /usr/local/lib/erlang/lib/*/src - -# Check that Erlang/OTP crypto & ssl were compiled against OpenSSL correctly -erl -noshell -eval 'io:format("~p~n~n~p~n~n", [crypto:supports(), ssl:versions()]), init:stop().' 
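With `build_install_openssl.sh` and `build_install_otp.sh` removed above, the OpenSSL and Erlang/OTP builds now live entirely inside the multi-stage Dockerfile, and the Makefile simply stages `package-generic-unix.tar.xz` next to the Dockerfile before invoking `docker build` with a `RABBITMQ_VERSION` build argument. A rough local equivalent of the Makefile `dist` target and the per-architecture Bazel genrule might look like the sketch below; the tarball path, version, and image tag are illustrative values, not taken from this diff:

    # Hedged sketch of a local single-arch image build; values are examples only.
    cp -f /path/to/package-generic-unix.tar.xz packaging/docker-image/package-generic-unix.tar.xz
    cd packaging/docker-image
    docker buildx build . \
        --platform linux/amd64 \
        --build-arg RABBITMQ_VERSION=4.0.0 \
        --tag pivotalrabbitmq/rabbitmq:4.0.0 \
        --load   # load into the local image store instead of exporting a tar

The Bazel genrule uses `--output type=tar,dest=...` instead of `--load`, so each per-arch filesystem tar can be wrapped by `oci_image` and combined into a multi-arch index by `oci_image_index`.
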
diff --git a/packaging/docker-image/docker-entrypoint.sh b/packaging/docker-image/docker-entrypoint.sh index 722dc1e235a7..4f8eba34b1f1 100755 --- a/packaging/docker-image/docker-entrypoint.sh +++ b/packaging/docker-image/docker-entrypoint.sh @@ -11,11 +11,8 @@ if [[ "$1" == rabbitmq* ]] && [ "$(id -u)" = '0' ]; then fi deprecatedEnvVars=( - RABBITMQ_DEFAULT_PASS RABBITMQ_DEFAULT_PASS_FILE - RABBITMQ_DEFAULT_USER RABBITMQ_DEFAULT_USER_FILE - RABBITMQ_DEFAULT_VHOST RABBITMQ_MANAGEMENT_SSL_CACERTFILE RABBITMQ_MANAGEMENT_SSL_CERTFILE RABBITMQ_MANAGEMENT_SSL_DEPTH diff --git a/packaging/docker-image/install_rabbitmq.sh b/packaging/docker-image/install_rabbitmq.sh deleted file mode 100644 index 29ba8c59534f..000000000000 --- a/packaging/docker-image/install_rabbitmq.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -set -euxo pipefail - -mv /opt/rabbitmq_server-* $RABBITMQ_HOME - -groupadd --gid 999 --system rabbitmq -useradd --uid 999 --system --home-dir "$RABBITMQ_DATA_DIR" --gid rabbitmq rabbitmq -mkdir -p "$RABBITMQ_DATA_DIR" /etc/rabbitmq /etc/rabbitmq/conf.d /tmp/rabbitmq-ssl /var/log/rabbitmq -chown -fR rabbitmq:rabbitmq "$RABBITMQ_DATA_DIR" /etc/rabbitmq /etc/rabbitmq/conf.d /tmp/rabbitmq-ssl /var/log/rabbitmq -chmod 777 "$RABBITMQ_DATA_DIR" /etc/rabbitmq /etc/rabbitmq/conf.d /tmp/rabbitmq-ssl /var/log/rabbitmq -ln -sf "$RABBITMQ_DATA_DIR/.erlang.cookie" /root/.erlang.cookie - -export PATH="$RABBITMQ_HOME/sbin:$PATH" - -# Do not default SYS_PREFIX to RABBITMQ_HOME, leave it empty -grep -qE '^SYS_PREFIX=\$\{RABBITMQ_HOME\}$' "$RABBITMQ_HOME/sbin/rabbitmq-defaults" -sed -i 's/^SYS_PREFIX=.*$/SYS_PREFIX=/' "$RABBITMQ_HOME/sbin/rabbitmq-defaults" -grep -qE '^SYS_PREFIX=$' "$RABBITMQ_HOME/sbin/rabbitmq-defaults" -chown -R rabbitmq:rabbitmq "$RABBITMQ_HOME" - -# verify assumption of no stale cookies -[ ! 
-e "$RABBITMQ_DATA_DIR/.erlang.cookie" ] -# Ensure RabbitMQ was installed correctly by running a few commands that do not depend on a running server, as the rabbitmq user -# If they all succeed, it's safe to assume that things have been set up correctly -gosu rabbitmq rabbitmqctl help -gosu rabbitmq rabbitmqctl list_ciphers -gosu rabbitmq rabbitmq-plugins list -# no stale cookies -rm "$RABBITMQ_DATA_DIR/.erlang.cookie" - -# Added for backwards compatibility - users can simply COPY custom plugins to /plugins -ln -sf /opt/rabbitmq/plugins /plugins - -# move default config and docker entrypoint into place -mv /opt/10-default-guest-user.conf /etc/rabbitmq/conf.d/ -chown rabbitmq:rabbitmq /etc/rabbitmq/conf.d/10-default-guest-user.conf -mv /opt/docker-entrypoint.sh /usr/local/bin - -# rabbitmq_management -rabbitmq-plugins enable --offline rabbitmq_management && \ - rabbitmq-plugins is_enabled rabbitmq_management --offline -# extract "rabbitmqadmin" from inside the "rabbitmq_management-X.Y.Z.ez" plugin zipfile -# see https://github.com/docker-library/rabbitmq/issues/207 -# RabbitMQ 3.9 onwards uses uncompressed plugins by default, in which case extraction is -# unnecesary -cp /plugins/rabbitmq_management-*/priv/www/cli/rabbitmqadmin /usr/local/bin/rabbitmqadmin -[ -s /usr/local/bin/rabbitmqadmin ] -chmod +x /usr/local/bin/rabbitmqadmin -rabbitmqadmin --version - -# rabbitmq_prometheus -rabbitmq-plugins enable --offline rabbitmq_prometheus && \ - rabbitmq-plugins is_enabled rabbitmq_prometheus --offline diff --git a/packaging/docker-image/test_configs/openssl_ubuntu.yaml b/packaging/docker-image/test_configs/openssl_ubuntu.yaml deleted file mode 100644 index a2e402dee590..000000000000 --- a/packaging/docker-image/test_configs/openssl_ubuntu.yaml +++ /dev/null @@ -1,7 +0,0 @@ -schemaVersion: 2.0.0 - -commandTests: - - name: "openssl version" - command: "openssl" - args: ["version"] - expectedOutput: ["OpenSSL 3\\.1\\.1"] diff --git a/packaging/docker-image/test_configs/otp_ubuntu.yaml b/packaging/docker-image/test_configs/otp_ubuntu.yaml deleted file mode 100644 index a96dbe90f26b..000000000000 --- a/packaging/docker-image/test_configs/otp_ubuntu.yaml +++ /dev/null @@ -1,10 +0,0 @@ -schemaVersion: 2.0.0 - -commandTests: - - name: "otp version" - command: "erl" - args: - - -noshell - - -eval - - '{ok, Version} = file:read_file(filename:join([code:root_dir(), "releases", erlang:system_info(otp_release), "OTP_VERSION"])), io:fwrite(Version), halt().' 
- expectedOutput: ["2\\d\\.\\d+"] diff --git a/packaging/docker-image/test_configs/rabbitmq_ubuntu.yaml b/packaging/docker-image/test_configs/rabbitmq_ubuntu.yaml index 598283ba7eb1..6890754a3ae1 100644 --- a/packaging/docker-image/test_configs/rabbitmq_ubuntu.yaml +++ b/packaging/docker-image/test_configs/rabbitmq_ubuntu.yaml @@ -2,6 +2,7 @@ schemaVersion: 2.0.0 commandTests: - name: "rabbitmq-plugins" + setup: [["docker-entrypoint.sh"]] command: "rabbitmq-plugins" args: ["list"] expectedOutput: ["\\[E \\] rabbitmq_management"] diff --git a/plugins.mk b/plugins.mk index 7536c6705ae1..b822296da018 100644 --- a/plugins.mk +++ b/plugins.mk @@ -15,6 +15,7 @@ PLUGINS := rabbitmq_amqp1_0 \ rabbitmq_event_exchange \ rabbitmq_federation \ rabbitmq_federation_management \ + rabbitmq_federation_prometheus \ rabbitmq_jms_topic_exchange \ rabbitmq_management \ rabbitmq_management_agent \ @@ -30,6 +31,7 @@ PLUGINS := rabbitmq_amqp1_0 \ rabbitmq_sharding \ rabbitmq_shovel \ rabbitmq_shovel_management \ + rabbitmq_shovel_prometheus \ rabbitmq_stomp \ rabbitmq_stream \ rabbitmq_stream_management \ diff --git a/rabbitmq-components.mk b/rabbitmq-components.mk index 662681f0b5ea..161826f98acb 100644 --- a/rabbitmq-components.mk +++ b/rabbitmq-components.mk @@ -14,7 +14,7 @@ endif PROJECT_VERSION := $(RABBITMQ_VERSION) ifeq ($(PROJECT_VERSION),) -PROJECT_VERSION := $(shell \ +PROJECT_VERSION = $(shell \ if test -f git-revisions.txt; then \ head -n1 git-revisions.txt | \ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \ @@ -39,6 +39,7 @@ endif dep_amqp_client = git_rmq-subfolder rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) main dep_amqp10_client = git_rmq-subfolder rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) main +dep_oauth2_client = git_rmq-subfolder oauth2-client $(current_rmq_ref) $(base_rmq_ref) main dep_amqp10_common = git_rmq-subfolder rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) main dep_rabbit = git_rmq-subfolder rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) main dep_rabbit_common = git_rmq-subfolder rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) main @@ -61,6 +62,7 @@ dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client dep_rabbitmq_event_exchange = git_rmq-subfolder rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_federation = git_rmq-subfolder rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_federation_management = git_rmq-subfolder rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) main +dep_rabbitmq_federation_prometheus = git_rmq-subfolder rabbitmq-federation-prometheus $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) main @@ -79,6 +81,7 @@ dep_rabbitmq_peer_discovery_common = git_rmq-subfolder rabbitmq-peer-discover dep_rabbitmq_peer_discovery_consul = git_rmq-subfolder rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_peer_discovery_etcd = git_rmq-subfolder rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_peer_discovery_k8s = git_rmq-subfolder rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) main +dep_rabbitmq_prelaunch = git_rmq-subfolder rabbitmq-prelaunch $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_prometheus = 
git_rmq-subfolder rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_random_exchange = git_rmq-subfolder rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_recent_history_exchange = git_rmq-subfolder rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) main @@ -87,6 +90,7 @@ dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchan dep_rabbitmq_sharding = git_rmq-subfolder rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_shovel = git_rmq-subfolder rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_shovel_management = git_rmq-subfolder rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) main +dep_rabbitmq_shovel_prometheus = git_rmq-subfolder rabbitmq-shovel-prometheus $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_stomp = git_rmq-subfolder rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_stream = git_rmq-subfolder rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) main dep_rabbitmq_stream_common = git_rmq-subfolder rabbitmq-stream-common $(current_rmq_ref) $(base_rmq_ref) main @@ -106,28 +110,33 @@ dep_toke = git_rmq toke $(current_rmq_ref # Third-party dependencies version pinning. # -# We do that in this file, which is copied in all projects, to ensure -# all projects use the same versions. It avoids conflicts and makes it -# possible to work with rabbitmq-public-umbrella. +# We do that in this file, which is included by all projects, to ensure +# all projects use the same versions. It avoids conflicts. dep_accept = hex 0.3.5 -dep_cowboy = hex 2.10.0 -dep_cowlib = hex 2.12.1 +dep_cowboy = hex 2.12.0 +dep_cowlib = hex 2.13.0 dep_credentials_obfuscation = hex 3.4.0 -dep_looking_glass = git https://github.com/rabbitmq/looking_glass.git main -dep_prometheus = hex 4.10.0 -dep_ra = hex 2.6.3 +dep_cuttlefish = hex 3.4.0 +dep_gen_batch_server = hex 0.8.8 +dep_jose = hex 1.11.10 +dep_khepri = hex 0.14.0 +dep_khepri_mnesia_migration = hex 0.5.0 +dep_prometheus = hex 4.11.0 +dep_ra = hex 2.13.6 dep_ranch = hex 2.1.0 dep_recon = hex 2.5.3 dep_redbug = hex 2.0.7 dep_thoas = hex 1.0.0 -dep_observer_cli = hex 1.7.3 +dep_observer_cli = hex 1.7.5 +dep_seshat = git https://github.com/rabbitmq/seshat v0.6.1 dep_stdout_formatter = hex 0.2.4 dep_sysmon_handler = hex 1.3.0 RABBITMQ_COMPONENTS = amqp_client \ amqp10_common \ amqp10_client \ + oauth2_client \ rabbit \ rabbit_common \ rabbitmq_amqp1_0 \ @@ -149,6 +158,7 @@ RABBITMQ_COMPONENTS = amqp_client \ rabbitmq_event_exchange \ rabbitmq_federation \ rabbitmq_federation_management \ + rabbitmq_federation_prometheus \ rabbitmq_java_client \ rabbitmq_jms_client \ rabbitmq_jms_cts \ @@ -176,6 +186,7 @@ RABBITMQ_COMPONENTS = amqp_client \ rabbitmq_sharding \ rabbitmq_shovel \ rabbitmq_shovel_management \ + rabbitmq_shovel_prometheus \ rabbitmq_stomp \ rabbitmq_stream \ rabbitmq_stream_common \ @@ -343,38 +354,19 @@ prepare-dist:: @: # -------------------------------------------------------------------- -# Umbrella-specific settings. +# Monorepo-specific settings. # -------------------------------------------------------------------- # If the top-level project is a RabbitMQ component, we override # $(DEPS_DIR) for this project to point to the top-level's one. # -# We also verify that the guessed DEPS_DIR is actually named `deps`, -# to rule out any situation where it is a coincidence that we found a -# `rabbitmq-components.mk` up upper directories. 
+# We do the same for $(ERLANG_MK_TMP) as we want to keep the +# beam cache regardless of where we build. We also want to +# share Hex tarballs. -possible_deps_dir_1 = $(abspath ..) -possible_deps_dir_2 = $(abspath ../../..) - -ifeq ($(notdir $(possible_deps_dir_1)),deps) -ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),) -deps_dir_overriden = 1 -DEPS_DIR ?= $(possible_deps_dir_1) -DISABLE_DISTCLEAN = 1 -endif -endif - -ifeq ($(deps_dir_overriden),) -ifeq ($(notdir $(possible_deps_dir_2)),deps) -ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),) -deps_dir_overriden = 1 -DEPS_DIR ?= $(possible_deps_dir_2) -DISABLE_DISTCLEAN = 1 -endif -endif -endif - -ifneq ($(wildcard UMBRELLA.md),) +ifneq ($(PROJECT),rabbitmq_server_release) +DEPS_DIR ?= $(abspath ..) +ERLANG_MK_TMP ?= $(abspath ../../.erlang.mk) DISABLE_DISTCLEAN = 1 endif diff --git a/rabbitmq.bzl b/rabbitmq.bzl index 807ed33b7b1b..56d2bfa22484 100644 --- a/rabbitmq.bzl +++ b/rabbitmq.bzl @@ -22,8 +22,6 @@ STARTS_BACKGROUND_BROKER_TAG = "starts-background-broker" MIXED_VERSION_CLUSTER_TAG = "mixed-version-cluster" -ENABLE_FEATURE_MAYBE_EXPR = "-enable-feature maybe_expr" - RABBITMQ_ERLC_OPTS = DEFAULT_ERLC_OPTS + [ "-DINSTR_MOD=gm", ] @@ -39,7 +37,7 @@ RABBITMQ_DIALYZER_OPTS = [ "-Wunknown", ] -APP_VERSION = "3.13.0" +APP_VERSION = "4.0.0" BROKER_VERSION_REQUIREMENTS_ANY = """ {broker_version_requirements, []} @@ -57,6 +55,7 @@ ALL_PLUGINS = [ "//deps/rabbitmq_event_exchange:erlang_app", "//deps/rabbitmq_federation:erlang_app", "//deps/rabbitmq_federation_management:erlang_app", + "//deps/rabbitmq_federation_prometheus:erlang_app", "//deps/rabbitmq_jms_topic_exchange:erlang_app", "//deps/rabbitmq_management:erlang_app", "//deps/rabbitmq_mqtt:erlang_app", @@ -70,6 +69,7 @@ ALL_PLUGINS = [ "//deps/rabbitmq_sharding:erlang_app", "//deps/rabbitmq_shovel:erlang_app", "//deps/rabbitmq_shovel_management:erlang_app", + "//deps/rabbitmq_shovel_prometheus:erlang_app", "//deps/rabbitmq_stomp:erlang_app", "//deps/rabbitmq_stream:erlang_app", "//deps/rabbitmq_stream_management:erlang_app", @@ -86,8 +86,8 @@ ALL_PLUGINS = [ LABELS_WITH_TEST_VERSIONS = [ "//deps/amqp10_common:erlang_app", "//deps/rabbit_common:erlang_app", + "//deps/rabbitmq_prelaunch:erlang_app", "//deps/rabbit:erlang_app", - "//deps/rabbit/apps/rabbitmq_prelaunch:erlang_app", ] def all_plugins(rabbitmq_workspace = "@rabbitmq-server"): @@ -175,18 +175,20 @@ def rabbitmq_suite( deps = [], runtime_deps = [], **kwargs): + app_name = native.package_name().rpartition("/")[-1] # suite_name exists in the underying ct_test macro, but we don't # want to use the arg in rabbitmq-server, for the sake of clarity if suite_name != None: fail("rabbitmq_suite cannot be called with a suite_name attr") ct_test( name = name, + app_name = app_name, compiled_suites = [":{}_beam_files".format(name)] + additional_beam, - ct_run_extra_args = [ENABLE_FEATURE_MAYBE_EXPR], data = native.glob(["test/{}_data/**/*".format(name)]) + data, test_env = dict({ "RABBITMQ_CT_SKIP_AS_ERROR": "true", "LANG": "C.UTF-8", + "COVERDATA_TO_LCOV_APPS_DIRS": "deps:deps/rabbit/apps", }.items() + test_env.items()), deps = [":test_erlang_app"] + deps + runtime_deps, **kwargs @@ -221,6 +223,7 @@ def rabbitmq_integration_suite( deps = [], runtime_deps = [], **kwargs): + app_name = native.package_name().rpartition("/")[-1] # suite_name exists in the underying ct_test macro, but we don't # want to use the arg in rabbitmq-server, for the sake of clarity if suite_name != None: @@ -229,7 +232,7 @@ def 
rabbitmq_integration_suite( ":test_erlang_app", "//deps/rabbit_common:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app", - "//deps/rabbitmq_cli:elixir", + "@rules_elixir//elixir", "//deps/rabbitmq_cli:erlang_app", "//deps/rabbitmq_ct_client_helpers:erlang_app", ] @@ -239,10 +242,10 @@ def rabbitmq_integration_suite( ct_test( name = name, + app_name = app_name, suite_name = name, compiled_suites = [":{}_beam_files".format(name)] + additional_beam, tags = tags + [STARTS_BACKGROUND_BROKER_TAG], - ct_run_extra_args = [ENABLE_FEATURE_MAYBE_EXPR], data = native.glob(["test/{}_data/**/*".format(name)]) + data, test_env = dict({ "SKIP_MAKE_TEST_DIST": "true", @@ -252,6 +255,7 @@ def rabbitmq_integration_suite( "RABBITMQ_PLUGINS": "$TEST_SRCDIR/$TEST_WORKSPACE/{}/broker-for-tests-home/sbin/rabbitmq-plugins".format(package), "RABBITMQ_QUEUES": "$TEST_SRCDIR/$TEST_WORKSPACE/{}/broker-for-tests-home/sbin/rabbitmq-queues".format(package), "LANG": "C.UTF-8", + "COVERDATA_TO_LCOV_APPS_DIRS": "deps:deps/rabbit/apps", }.items() + test_env.items()), tools = [ ":rabbitmq-for-tests-run", @@ -265,7 +269,6 @@ def rabbitmq_integration_suite( suite_name = name, compiled_suites = [":{}_beam_files".format(name)] + additional_beam, tags = tags + [STARTS_BACKGROUND_BROKER_TAG, MIXED_VERSION_CLUSTER_TAG], - ct_run_extra_args = [ENABLE_FEATURE_MAYBE_EXPR], data = native.glob(["test/{}_data/**/*".format(name)]) + data, test_env = dict({ "SKIP_MAKE_TEST_DIST": "true", @@ -276,19 +279,21 @@ def rabbitmq_integration_suite( "quorum_queue,implicit_default_bindings,virtual_host_metadata,maintenance_mode_status,user_limits," + # required starting from 3.12.0 in rabbit: "feature_flags_v2,stream_queue,classic_queue_type_delivery_support,classic_mirrored_queue_version," + - "stream_single_active_consumer,direct_exchange_routing_v2,listener_records_in_ets,tracking_records_in_ets", + "stream_single_active_consumer,direct_exchange_routing_v2,listener_records_in_ets,tracking_records_in_ets," + # required starting from 3.12.0 in rabbitmq_management_agent: # empty_basic_get_metric, drop_unroutable_metric + # required starting from 4.0 in rabbit: + "message_containers,stream_update_config_command,stream_filtering,stream_sac_coordinator_unblock_group,restart_streams", "RABBITMQ_RUN": "$(location :rabbitmq-for-tests-run)", "RABBITMQCTL": "$TEST_SRCDIR/$TEST_WORKSPACE/{}/broker-for-tests-home/sbin/rabbitmqctl".format(package), "RABBITMQ_PLUGINS": "$TEST_SRCDIR/$TEST_WORKSPACE/{}/broker-for-tests-home/sbin/rabbitmq-plugins".format(package), "RABBITMQ_QUEUES": "$TEST_SRCDIR/$TEST_WORKSPACE/{}/broker-for-tests-home/sbin/rabbitmq-queues".format(package), - "RABBITMQ_RUN_SECONDARY": "$(location @rabbitmq-server-generic-unix-3.11//:rabbitmq-run)", + "RABBITMQ_RUN_SECONDARY": "$(location @rabbitmq-server-generic-unix-3.13//:rabbitmq-run)", "LANG": "C.UTF-8", }.items() + test_env.items()), tools = [ ":rabbitmq-for-tests-run", - "@rabbitmq-server-generic-unix-3.11//:rabbitmq-run", + "@rabbitmq-server-generic-unix-3.13//:rabbitmq-run", ] + tools, deps = assumed_deps + deps + runtime_deps, **kwargs diff --git a/rabbitmq_home.bzl b/rabbitmq_home.bzl index 0af142dfd630..03e6c1fa235c 100644 --- a/rabbitmq_home.bzl +++ b/rabbitmq_home.bzl @@ -1,6 +1,6 @@ +load("@rules_erlang//:ct.bzl", "additional_file_dest_relative_path") load("@rules_erlang//:erlang_app_info.bzl", "ErlangAppInfo", "flat_deps") load("@rules_erlang//:util.bzl", "path_join") -load("@rules_erlang//:ct.bzl", "additional_file_dest_relative_path") RabbitmqHomeInfo = provider( doc = "An 
assembled RABBITMQ_HOME dir", @@ -53,8 +53,8 @@ def _plugins_dir_links(ctx, plugin): for f in lib_info.beam: if f.is_directory: - if f.basename != "ebin": - fail("{} contains a directory in 'beam' that is not an ebin dir".format(lib_info.lib_name)) + if len(lib_info.beam) != 1: + fail("ErlangAppInfo.beam must be a collection of files, or a single ebin dir: {} {}".format(lib_info.app_name, lib_info.beam)) o = ctx.actions.declare_directory(path_join(plugin_path, "ebin")) else: o = ctx.actions.declare_file(path_join(plugin_path, "ebin", f.basename)) diff --git a/release-notes/3.11.0.md b/release-notes/3.11.0.md index 205f008fbb2d..88ca53e54f93 100644 --- a/release-notes/3.11.0.md +++ b/release-notes/3.11.0.md @@ -2,6 +2,8 @@ RabbitMQ 3.11 is a new feature release. +This release series [is no longer covered by community support](https://www.rabbitmq.com/release-information). + ## Highlights This release includes several new features and optimizations, graduates (makes mandatory) a number of feature flags, diff --git a/release-notes/3.11.10.md b/release-notes/3.11.10.md index 63aaeeed5eec..13af5388b0e9 100644 --- a/release-notes/3.11.10.md +++ b/release-notes/3.11.10.md @@ -30,7 +30,7 @@ Release notes can be found on GitHub at [rabbitmq-server/release-notes](https:// #### Enhancements - * Key classic mirrored queue (a deprecated feature) settings now can be overriden with + * Key classic mirrored queue (a deprecated feature) settings now can be overridden with operator policies. Contributed by @SimonUnge (AWS). diff --git a/release-notes/3.11.24.md b/release-notes/3.11.24.md new file mode 100644 index 000000000000..472a231d9648 --- /dev/null +++ b/release-notes/3.11.24.md @@ -0,0 +1,91 @@ +RabbitMQ `3.11.24` is a maintenance release in the `3.11.x` [release series](https://www.rabbitmq.com/versions.html). +This release series [goes out of community support on Dec 31, 2023](https://rabbitmq.com/versions.html). + +Please refer to the upgrade section from [v3.11.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.11.0) +if upgrading from a version prior to 3.11.0. + +This release requires Erlang 25 and supports Erlang versions up to `25.3.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.11.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Erlang 25 as our new baseline means much improved performance on ARM64 architectures, [profiling with flame graphs](https://blog.rabbitmq.com/posts/2022/05/flame-graphs/) +across all architectures, and the most recent TLS 1.3 implementation available to all RabbitMQ 3.11 users. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.11.x/release-notes). + + +### Core Server + +#### Bug Fixes + + * Stream replication connections configured to use exclusively TLSv1.3 failed. + + GitHub issue:[#9678](https://github.com/rabbitmq/rabbitmq-server/pull/9678) + + * On startup, stream replicas will handle one more potential case of segment file corruption + after an unclean shutdown. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#9678](https://github.com/rabbitmq/rabbitmq-server/pull/9678) + + * `default_policies.*.queue_pattern` definition in `rabbitmq.conf` was incorrectly parsed. + + Contributed by @SimonUnge (AWS). 
+ + GitHub issue: [#9546](https://github.com/rabbitmq/rabbitmq-server/pull/9546) + +#### Enhancements + + * Optimized stream index scans. Longer scans could result in some replicas stopping + with a timeout. + + GitHub issue:[#9678](https://github.com/rabbitmq/rabbitmq-server/pull/9678) + + * Classic queue storage version is now a supported key for [operator policies](https://rabbitmq.com/parameters.html#operator-policies). + + Contributed by @SignalWhisperer (AWS). + + GitHub issue: [#9549](https://github.com/rabbitmq/rabbitmq-server/pull/9549) + + * Nodes now log boot time at info level instead of debug. This piece of information + can be useful during root cause analysis. + + Contributed by @johanrhodin (CloudAMQP). + + GitHub issue: [#9466](https://github.com/rabbitmq/rabbitmq-server/pull/9466) + + +### Management Plugin + +#### Enhancements + + * HTTP API request body size is now limited to 10 MiB by default. + Two endpoints, one that accepts messages for publishing (note: publishing over the HTTP API is greatly discouraged) + and another for [definition import](https://rabbitmq.com/definitions.html#import), + will now reject larger transfers with a `400 Bad Request` response. + + GitHub issue: [#9708](https://github.com/rabbitmq/rabbitmq-server/pull/9708) + + * `DELETE /api/queues/{vhost}/{name}` now can delete exclusive queues. + + GitHub issue: [#8758](https://github.com/rabbitmq/rabbitmq-server/issues/8758) + + +## Dependency Upgrades + + * `osiris` has been upgraded to [`1.6.9`](https://github.com/rabbitmq/osiris/releases) + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.11.24.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.11.25.md b/release-notes/3.11.25.md new file mode 100644 index 000000000000..60517a533bc3 --- /dev/null +++ b/release-notes/3.11.25.md @@ -0,0 +1,70 @@ +RabbitMQ `3.11.25` is a maintenance release in the `3.11.x` [release series](https://www.rabbitmq.com/versions.html). +This release series [goes out of community support on Dec 31, 2023](https://rabbitmq.com/versions.html). + +Please refer to the upgrade section from [v3.11.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.11.0) +if upgrading from a version prior to 3.11.0. + +This release requires Erlang 25 and supports Erlang versions up to `25.3.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.11.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Erlang 25 as our new baseline means much improved performance on ARM64 architectures, [profiling with flame graphs](https://blog.rabbitmq.com/posts/2022/05/flame-graphs/) +across all architectures, and the most recent TLS 1.3 implementation available to all RabbitMQ 3.11 users. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.11.x/release-notes). + + +### Core Server + +#### Bug Fixes + + * Avoids a potential exception in the `autoheal` partition handler. + + Contributed by @Ayanda-D. + + GitHub issue: [#9819](https://github.com/rabbitmq/rabbitmq-server/pull/9819) + +#### Enhancements + + * `raft.segment_max_entries` is now validated to prevent the value from overflowing its 16-bit segment file field. 
+ Maximum supported value is now `65535`. + + GitHub issue: [#9748](https://github.com/rabbitmq/rabbitmq-server/pull/9748) + + +### Shovel Plugin + +#### Enhancements + + * Significantly faster Shovel startup in environments where there are many of them (one thousand or more). + + GitHub issue: [#9800](https://github.com/rabbitmq/rabbitmq-server/pull/9800) + + +### AMQP 1.0 Erlang Client + +#### Enhancements + + * User-provided credentials are now obfuscated using a one-off key pair generated on node boot. + This keeps sensitive client state information from being logged by the runtime exception logger. + + GitHub issue: [#9778](https://github.com/rabbitmq/rabbitmq-server/pull/9778) + + +## Dependency Upgrades + + None in this release. + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.11.25.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.11.26.md b/release-notes/3.11.26.md new file mode 100644 index 000000000000..0ae7ff39f037 --- /dev/null +++ b/release-notes/3.11.26.md @@ -0,0 +1,76 @@ +RabbitMQ `3.11.26` is a maintenance release in the `3.11.x` [release series](https://www.rabbitmq.com/versions.html). + +This release series [is no longer covered by community support](https://www.rabbitmq.com/release-information). + +Please refer to the upgrade section from [v3.11.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.11.0) +if upgrading from a version prior to 3.11.0. + +This release requires Erlang 25 and supports Erlang versions up to `25.3.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.11.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Erlang 25 as our new baseline means much improved performance on ARM64 architectures, [profiling with flame graphs](https://blog.rabbitmq.com/posts/2022/05/flame-graphs/) +across all architectures, and the most recent TLS 1.3 implementation available to all RabbitMQ 3.11 users. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.11.x/release-notes). + + +### Core Broker + +#### Bug Fixes + + * When a topic permission was deleted, an [internal event](https://rabbitmq.com/logging.html#internal-events) of type `permission.deleted` + was emitted in some cases, instead of `topic.permission.deleted`. + + Investigated by @bedia. + + GitHub issue: [#9937](https://github.com/rabbitmq/rabbitmq-server/issues/9937) + + +### AMQP 1.0 Plugin + +#### Bug Fixes + + * Correctly block publishing AMQP 1.0 connections when a [resource alarm](https://rabbitmq.com/alarms.html) is in effect. + + GitHub issue: [#9955](https://github.com/rabbitmq/rabbitmq-server/pull/9955) + + +### Grafana Dashboard + +#### Enhancements + + * [Global counters for producers](https://github.com/rabbitmq/rabbitmq-server/pull/3127) are now available in the dashboard. + + Contributed by @johanrhodin (CloudAMQP) + + GitHub issue: [#9846](https://github.com/rabbitmq/rabbitmq-server/pull/9846) + + +### CLI Tools + +#### Enhancements + + * `rabbitmq-diagnostics list_policies_that_match [queue name]` is a new command + that simplifies troubleshooting of policy conflicts. 
+ + GitHub issue: [#9916](https://github.com/rabbitmq/rabbitmq-server/pull/9916) + + +## Dependency Upgrades + + None in this release. + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.11.26.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.11.27.md b/release-notes/3.11.27.md new file mode 100644 index 000000000000..ecf962b73fbc --- /dev/null +++ b/release-notes/3.11.27.md @@ -0,0 +1,76 @@ +RabbitMQ `3.11.27` is a maintenance release in the `3.11.x` [release series](https://www.rabbitmq.com/versions.html). + +This release series [is no longer covered by community support](https://www.rabbitmq.com/release-information). + +Please refer to the upgrade section from [v3.11.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.11.0) +if upgrading from a version prior to 3.11.0. + +This release requires Erlang 25 and supports Erlang versions up to `25.3.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.11.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Erlang 25 as our new baseline means much improved performance on ARM64 architectures, [profiling with flame graphs](https://blog.rabbitmq.com/posts/2022/05/flame-graphs/) +across all architectures, and the most recent TLS 1.3 implementation available to all RabbitMQ 3.11 users. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.11.x/release-notes). + + +### Core Broker + +#### Bug Fixes + + * Avoids a rare exception that could stop TCP socket writes on a client connection. + + GitHub issues: [#9991](https://github.com/rabbitmq/rabbitmq-server/issues/9991), [#9803](https://github.com/rabbitmq/rabbitmq-server/discussions/9803) + +#### Enhancements + + * Definition files that are virtual host-specific cannot be imported on boot. Such files will now be + detected early and the import process will terminate after logging a more informative message. + + Previous the import process would run into an obscure exception. + + GitHub issues: [#10068](https://github.com/rabbitmq/rabbitmq-server/issues/10068), [#10086](https://github.com/rabbitmq/rabbitmq-server/pull/10086) + + +### Shovel Plugin + +#### Bug Fixes + + * Avoids two Shovels being started after an upgrade from `3.11.25` or older versions. + + Two Shovels running concurrently would still transfer messages but because they act as + competing consumers (and publishers), this affected message ordering in the target queue. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issues: [#9965](https://github.com/rabbitmq/rabbitmq-server/pull/9965), [#10080](https://github.com/rabbitmq/rabbitmq-server/pull/10080) + + +### Management Plugin + +#### Bug Fixes + + * `DELETE /api/policies/{vhost}/{policy}` returned a 500 response instead of a 404 one + when target virtual host did not exist. + + GitHub issue: [#9983](https://github.com/rabbitmq/rabbitmq-server/issues/9983) + + +## Dependency Upgrades + + None in this release. + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.11.27.tar.xz` +instead of the source tarball produced by GitHub. 
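As a side note on the boot-time definition import behavior described in the 3.11.27 enhancements above: a minimal `rabbitmq.conf` sketch of that import path is shown below. The file location is only an example; the key point is that the file must be a cluster-wide definitions export, since exports limited to a single virtual host are now rejected early at boot.

``` ini
# Minimal sketch (example path): import definitions when the node boots.
# The file must be a cluster-wide export; a single-virtual-host export
# will be detected early and boot will stop with an informative error.
load_definitions = /etc/rabbitmq/definitions.json
```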
diff --git a/release-notes/3.11.28.md b/release-notes/3.11.28.md new file mode 100644 index 000000000000..c5d1562211cb --- /dev/null +++ b/release-notes/3.11.28.md @@ -0,0 +1,73 @@ +RabbitMQ `3.11.28` is a maintenance release in the `3.11.x` [release series](https://www.rabbitmq.com/release-information). + +This release series [is no longer covered by community support](https://www.rabbitmq.com/release-information). + +Please refer to the upgrade section from [v3.11.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.11.0) +if upgrading from a version prior to 3.11.0. + +This release requires Erlang 25 and supports Erlang versions up to `25.3.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.11.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Erlang 25 as our new baseline means much improved performance on ARM64 architectures, [profiling with flame graphs](https://blog.rabbitmq.com/posts/2022/05/flame-graphs/) +across all architectures, and the most recent TLS 1.3 implementation available to all RabbitMQ 3.11 users. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/main/release-notes). + + +### Prometheus Plugin + +#### Enhancements + + * Metric label values now escape certain non-ASCII characters. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#10196](https://github.com/rabbitmq/rabbitmq-server/pull/10196) + + +### Management Plugin + +#### Bug Fixes + + * Reverted a change to `DELETE /api/queues/{vhost}/{name}` that allowed removal of + exclusive queues and introduced unexpected side effects. + + GitHub issue: [#10189](https://github.com/rabbitmq/rabbitmq-server/pull/10189) + + * Avoid log noise when an HTTP API request is issued against a booting + or very freshly booted node. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#10183](https://github.com/rabbitmq/rabbitmq-server/pull/10183) + + +### AWS Peer Discovery Plugin + +#### Enhancements + + * Type spec and test corrections. + + Contributed by @illotum (AWS). + + GitHub issue: [#10133](https://github.com/rabbitmq/rabbitmq-server/pull/10133) + + +## Dependency Upgrades + + None in this release. + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.11.28.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.12.0.md b/release-notes/3.12.0.md index 13b3a31056a0..34b4ccbb5eda 100644 --- a/release-notes/3.12.0.md +++ b/release-notes/3.12.0.md @@ -2,6 +2,8 @@ RabbitMQ `3.12.0` is a new feature release. +This release [goes out of community support](https://www.rabbitmq.com/release-information) on June 1st, 2024. + ## Highlights This release includes several new features, optimizations, and graduates (makes mandatory) a number of feature flags. @@ -340,6 +342,15 @@ This release includes all bug fixes shipped in the `3.11.x` series. * The `x-mqtt-dup` header will no longer be present for consumer deliveries as it wasn't used correctly. +### Federation Plugin + +#### Bug Fixes + + * Fixed a quorum queue-specific issue that could prevent queue federation links from making any progress. 
+ + GitHub issue: [#8328](https://github.com/rabbitmq/rabbitmq-server/pull/8328) + + ### Management Plugin #### Enhancements diff --git a/release-notes/3.12.10.md b/release-notes/3.12.10.md new file mode 100644 index 000000000000..01cdcd443a1a --- /dev/null +++ b/release-notes/3.12.10.md @@ -0,0 +1,47 @@ +RabbitMQ `3.12.10` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). + +Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) +if upgrading from a version prior to 3.12.0. + +This release requires Erlang 25 and supports Erlang versions up to `26.1.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.12.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.11.x (or older releases) on Erlang 25 to 3.12.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.12.x/release-notes). + + +### Shovel Plugin + +#### Bug Fixes + + * Avoids two Shovels being started after an upgrade from `3.12.6` or older versions. + + Two Shovels running concurrently would still transfer messages but because they act as + competing consumers (and publishers), this affected message ordering in the target queue. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#9965](https://github.com/rabbitmq/rabbitmq-server/pull/9965) + + +## Dependency Upgrades + +None in this release. + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.12.10.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.12.11.md b/release-notes/3.12.11.md new file mode 100644 index 000000000000..01f1fc8dbb8a --- /dev/null +++ b/release-notes/3.12.11.md @@ -0,0 +1,165 @@ +RabbitMQ `3.12.11` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). + +Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) +if upgrading from a version prior to 3.12.0. + +This release requires Erlang 25 and supports Erlang versions up to `26.1.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.12.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.11.x (or older releases) on Erlang 25 to 3.12.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.12.x/release-notes). 
+ + +### Core Broker + +#### Bug Fixes + + * A quorum queue declared while one of the cluster nodes was down could trigger + connection exceptions. + + GitHub issue: [#10007](https://github.com/rabbitmq/rabbitmq-server/issues/10007) + + * Avoids a rare exception that could stop TCP socket writes on a client connection. + + GitHub issues: [#9991](https://github.com/rabbitmq/rabbitmq-server/issues/9991), [#9803](https://github.com/rabbitmq/rabbitmq-server/discussions/9803) + + * `queue_deleted` and `queue_created` [internal events](https://rabbitmq.com/logging.html#internal-events) now include queue type as a module name, + and not an inconsistent (with the other queue and stream types) value `classic`. + + GitHub issue: [#10142](https://github.com/rabbitmq/rabbitmq-server/pull/10142) + +#### Enhancements + + * Definition files that are virtual host-specific cannot be imported on boot. Such files will now be + detected early and the import process will terminate after logging a more informative message. + + Previously the import process would run into an obscure exception. + + GitHub issues: [#10068](https://github.com/rabbitmq/rabbitmq-server/issues/10068), [#10085](https://github.com/rabbitmq/rabbitmq-server/pull/10085) + + +### AMQP 1.0 Plugin + +#### Bug Fixes + + * Several AMQP 1.0 application properties are now more correctly converted + to AMQP 0-9-1 headers by cross-protocol Shovels. + + The priority property now populates an AMQP 1.0 header with the same name, + per AMQP 1.0 spec. + + This is a **potentially breaking change**. + + Contributed by @luos (Erlang Solutions). + + GitHub issues: [#10037](https://github.com/rabbitmq/rabbitmq-server/pull/10037), [#7508](https://github.com/rabbitmq/rabbitmq-server/issues/7508) + + +### Prometheus Plugin + +#### Enhancements + + * Metric label values now escape certain non-ASCII characters. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#10196](https://github.com/rabbitmq/rabbitmq-server/pull/10196) + + +### MQTT Plugin + +#### Bug Fixes + + * Avoids an exception when an MQTT client that used a QoS 0 subscription reconnects + and its original connection node is down. + + GitHub issue: [#10205](https://github.com/rabbitmq/rabbitmq-server/pull/10205) + + * Avoids an exception when an MQTT client connection was force-closed via the HTTP API. + + GitHub issue: [#10140](https://github.com/rabbitmq/rabbitmq-server/pull/10140) + + +### CLI Tools + +#### Bug Fixes + + * Certain CLI commands could not be run in a shell script loop, unless the script explicitly + redirected standard input. + + GitHub issue: [#10131](https://github.com/rabbitmq/rabbitmq-server/pull/10131) + +#### Enhancements + + * `rabbitmq-diagnostics cluster_status` now responds much quicker when a cluster node + has gone down, was shut down, or has otherwise become unreachable by the rest of the cluster. + + GitHub issue: [#10126](https://github.com/rabbitmq/rabbitmq-server/pull/10126) + + +### Management Plugin + +#### Bug Fixes + + * Reverted a change to `DELETE /api/queues/{vhost}/{name}` that allowed removal of + exclusive queues and introduced unexpected side effects. + + GitHub issue: [#10178](https://github.com/rabbitmq/rabbitmq-server/pull/10178) + + * `DELETE /api/policies/{vhost}/{policy}` returned a 500 response instead of a 404 one + when target virtual host did not exist. + + GitHub issue: [#9983](https://github.com/rabbitmq/rabbitmq-server/issues/9983) + + * Avoid log noise when an HTTP API request is issued against a booting + or very freshly booted node.
+ + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#10187](https://github.com/rabbitmq/rabbitmq-server/pull/10187) + +#### Enhancements + + * HTTP API endpoints that involve contacting multiple nodes now respond much quicker when a cluster node + has gone down, was shut down, or has otherwise become unreachable by the rest of the cluster. + + GitHub issue: [#10123](https://github.com/rabbitmq/rabbitmq-server/pull/10123) + + * Definitions exported for just one virtual host cannot be imported at node boot time. + Now such files are detected early with a clear log message and immediate node boot process termination. + + GitHub issues: [#10068](https://github.com/rabbitmq/rabbitmq-server/issues/10068), [#10072](https://github.com/rabbitmq/rabbitmq-server/pull/10072) + + +### AWS Peer Discovery Plugin + +#### Enhancements + + * Type spec and test corrections. + + Contributed by @illotum (AWS). + + GitHub issue: [#10134](https://github.com/rabbitmq/rabbitmq-server/pull/10134) + + +## Dependency Upgrades + + * `osiris` was updated to [`1.7.2`](https://github.com/rabbitmq/osiris/releases) + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.12.11.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.12.12.md b/release-notes/3.12.12.md new file mode 100644 index 000000000000..ed86fc0db88b --- /dev/null +++ b/release-notes/3.12.12.md @@ -0,0 +1,101 @@ +RabbitMQ `3.12.12` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). + +This release [goes out of community support](https://www.rabbitmq.com/release-information) on June 1st, 2024. + +Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) +if upgrading from a version prior to 3.12.0. + +This release requires Erlang 25 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.12.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.11.x (or older releases) on Erlang 25 to 3.12.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.12.x/release-notes). + + +### Core Broker + +#### Bug Fixes + + * Environments with a lot of quorum queues could experience a large Erlang process + build-up. The build-up was temporary but with a sufficiently large number of + quorum queues it could last until the next round of periodic operations, + making it permanent and depriving the node of CPU resources. + + GitHub issue: [#10242](https://github.com/rabbitmq/rabbitmq-server/pull/10242) + + * RabbitMQ core failed to propagate some additional authentication and authorization context, for example, + the MQTT client ID in case of MQTT connections, to authN and authZ backends. This was not intentional.
+ + GitHub issue: [#10230](https://github.com/rabbitmq/rabbitmq-server/pull/10230) + + * Nodes now take more precautions about persisting feature flag state + (specifically the effects of in-flight changes) during node shutdown. + + GitHub issue: [#10279](https://github.com/rabbitmq/rabbitmq-server/pull/10279) + +#### Enhancements + + * Simplified some type specs. + + Contributed by @ariel-anieli. + + GitHub issue: [#10228](https://github.com/rabbitmq/rabbitmq-server/pull/10228) + + +### Stream Plugin + + * One returned error value did not match the [RabbitMQ Stream Protocol](https://github.com/rabbitmq/rabbitmq-server/blob/v3.9.x/deps/rabbitmq_stream/docs/PROTOCOL.adoc) specification. + + GitHub issue: [#10277](https://github.com/rabbitmq/rabbitmq-server/pull/10277) + + +### MQTT Plugin + +#### Bug Fixes + + * Recovering connections from QoS 0 consumers (subscribers) could fail if they were previously connected to a failed node. + + GitHub issue: [#10252](https://github.com/rabbitmq/rabbitmq-server/pull/10252) + + +### CLI Tools + +#### Bug Fixes + + * Since [#10131](https://github.com/rabbitmq/rabbitmq-server/pull/10131) (shipped in `3.12.11`), some CLI commands in certain scenarios could fail to accept input via standard input. + + GitHub issues: [#10270](https://github.com/rabbitmq/rabbitmq-server/pull/10270), [#10258](https://github.com/rabbitmq/rabbitmq-server/pull/10258) + + +### AWS Peer Discovery Plugin + +#### Enhancements + + * Updated some type specs. + + Contributed by @ariel-anieli. + + GitHub issue: [#10226](https://github.com/rabbitmq/rabbitmq-server/pull/10226) + + +## Dependency Upgrades + +None in this release. + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.12.12.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.12.13.md b/release-notes/3.12.13.md new file mode 100644 index 000000000000..bf5686268382 --- /dev/null +++ b/release-notes/3.12.13.md @@ -0,0 +1,115 @@ +RabbitMQ `3.12.13` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). + +This release [goes out of community support](https://www.rabbitmq.com/release-information) on June 1st, 2024. + +Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) +if upgrading from a version prior to 3.12.0. + +This release requires Erlang 25 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.12.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.11.x (or older releases) on Erlang 25 to 3.12.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.12.x/release-notes). + + +### Core Broker + +#### Bug Fixes + + * When a channel is closed, its consumer metric samples will now be cleared differently + depending on how many there are.
In [#9356](https://github.com/rabbitmq/rabbitmq-server/pull/9356), it was over-optimized for the uncommon case with + a very large number of consumers per channel, hurting the baseline case with one or a few consumers + per channel. + + In part contributed by @SimonUnge (AWS). + + GitHub issue: [#10478](https://github.com/rabbitmq/rabbitmq-server/pull/10478) + + +### CLI Tools + +#### Enhancements + + * CLI tool startup time was reduced. + + GitHub issue: [#10461](https://github.com/rabbitmq/rabbitmq-server/pull/10461) + +#### Bug Fixes + + * JSON output formatter now avoids ANSI escape sequences. + + Contributed by @ariel-anieli. + + GitHub issue: [#8557](https://github.com/rabbitmq/rabbitmq-server/issues/8557) + + * ANSI escape sequences are no longer used on Windows. + + Contributed by @ariel-anieli. + + GitHub issue: [#2634](https://github.com/rabbitmq/rabbitmq-server/issues/2634) + + +### Stream Plugin + +#### Bug Fixes + + * If a stream publisher cannot be set up, a clearer message will be logged. + + GitHub issue: [#10524](https://github.com/rabbitmq/rabbitmq-server/pull/10524) + + +### Management Plugin + +#### Bug Fixes + + * `GET /api/nodes/{name}` failed with a 500 when called with a non-existent node name. + + GitHub issue: [#10330](https://github.com/rabbitmq/rabbitmq-server/issues/10330) + + +### Shovel Plugin + +#### Bug Fixes + + * AMQP 1.0 Shovels will no longer set a delivery mode header that is not meaningful in AMQP 1.0. + + Contributed by @luos (Erlang Solutions). + + GitHub issue: [#10503](https://github.com/rabbitmq/rabbitmq-server/pull/10503) + + +### Federation Plugin + +#### Bug Fixes + + * Upstream node shutdown could produce a scary-looking exception in the log. + + GitHub issue: [#10473](https://github.com/rabbitmq/rabbitmq-server/pull/10473) + + * Exchange federation links could run into an exception. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#10305](https://github.com/rabbitmq/rabbitmq-server/pull/10305) + + +### Dependency Changes + + * `cowboy` was updated to [`2.11.0`](https://ninenines.eu/docs/en/cowboy/2.11/guide/migrating_from_2.10/) + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.12.13.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.12.14.md b/release-notes/3.12.14.md new file mode 100644 index 000000000000..9109ae1393b8 --- /dev/null +++ b/release-notes/3.12.14.md @@ -0,0 +1,106 @@ +RabbitMQ `3.12.14` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). + +This release [goes out of community support](https://www.rabbitmq.com/release-information) on June 1st, 2024. + +Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) +if upgrading from a version prior to 3.12.0. + +This release requires Erlang 25 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.12.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases.
+ +Users upgrading from 3.11.x (or older releases) on Erlang 25 to 3.12.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.12.x/release-notes). + +### Core Broker + +#### Bug Fixes + + * Quorum queues are now more defensive when acquiring file handles. + + GitHub issue: [#10587](https://github.com/rabbitmq/rabbitmq-server/pull/10587) + +#### Enhancements + + * There is now a way to configure default queue type globally (that is, not per virtual host) + in `rabbitmq.conf`: + + ``` ini + # Built-in type aliases are "quorum", "classic", "stream" + default_queue_type = quorum + ``` + + Contributed by @SimonUnge. + + GitHub issue: [#11165](https://github.com/rabbitmq/rabbitmq-server/pull/11165) + + + * `channel_max_per_node` is a new per-node limit that allows putting a cap on the number + of AMQP 0-9-1 channels that can be concurrently open by all clients connected to a node: + + ``` ini + # rabbitmq.conf + channel_max_per_node = 5000 + ``` + + This is a guardrail meant to protect nodes from [application-level channel leaks](https://www.rabbitmq.com/docs/channels#channel-leaks). + + Contributed by @illotum. + + GitHub issue: [#10754](https://github.com/rabbitmq/rabbitmq-server/pull/10754) + + * [Definition import](https://www.rabbitmq.com/docs/definitions) did not handle a scenario where some virtual hosts did not have + the default queue type metadata key set. + + GitHub issue: [#10897](https://github.com/rabbitmq/rabbitmq-server/pull/10897) + + +### AMQP 1.0 Plugin + +#### Bug Fixes + + * Safer AMQP 1.0 => AMQP 0-9-1 message durability property conversion. + + GitHub issue: [#10568](https://github.com/rabbitmq/rabbitmq-server/pull/10568) + + +### Management Plugin + +#### Bug Fixes + + * When a tab (Connections, Queues and Streams, etc.) is switched, a table configuration pane + from the previously selected tab is now hidden. + + Contributed by @ackepenek. + + GitHub issue: [#10799](https://github.com/rabbitmq/rabbitmq-server/pull/10799) + + +### JMS Topic Exchange Plugin + +#### Enhancements + + * The plugin now stores its state on multiple nodes. + + GitHub issue: [#11098](https://github.com/rabbitmq/rabbitmq-server/pull/11098) + + +### Dependency Changes + + * `cowboy` was updated to [`2.12.0`](https://ninenines.eu/docs/en/cowboy/2.12/guide/migrating_from_2.11/) + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.12.14.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.12.5.md b/release-notes/3.12.5.md index fe000be36376..be4b5bce2ccf 100644 --- a/release-notes/3.12.5.md +++ b/release-notes/3.12.5.md @@ -1,10 +1,10 @@ - RabbitMQ `3.12.5` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). +**All users are encouraged to skip this version in favor of [`3.12.6`](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.6)**. Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) if upgrading from a version prior to 3.12.0. -This release requires Erlang 25 and supports Erlang versions up to `26.0.x`.
+This release requires Erlang 25 and supports Erlang versions up to `26.1.x`. [RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on Erlang version requirements for RabbitMQ. @@ -27,7 +27,7 @@ Release notes can be found on GitHub at [rabbitmq-server/release-notes](https:// #### Bug Fixes - * Quorum queue leader is now correctly observed in certain network failure scenarios. + * Quorum queue leader changes are now correctly observed in certain network failure scenarios. GitHub issues: [#9241](https://github.com/rabbitmq/rabbitmq-server/pull/9241), [#9227](https://github.com/rabbitmq/rabbitmq-server/pull/9227) @@ -47,6 +47,67 @@ Release notes can be found on GitHub at [rabbitmq-server/release-notes](https:// GitHub issue: [#9222](https://github.com/rabbitmq/rabbitmq-server/pull/9222) + * Avoids a potential exception in the credit flow subsystem. + + Contributed by @Ayanda-D. + + GitHub issue: [#9433](https://github.com/rabbitmq/rabbitmq-server/pull/9433) + + * Classic queues v2 (CQv2) that had priorities enabled were reporting their storage version + incorrectly. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#9370](https://github.com/rabbitmq/rabbitmq-server/issues/9370) + +#### Enhancements + + * Channels that had many thousands of consumers (usually due to a consumer leak in the application) + now consume a lot less CPU when the channel is closed. + + As part of this optimization, individual `consumer.deleted` internal events are no + longer emitted when a channel is closed, only a `channel.closed` event is. This also + helps reduce audit event log size, since those `consumer.deleted` events provided + no useful information in this context. + + Contributed by @SimonUnge (AWS). + + GitHub issue: [#9356](https://github.com/rabbitmq/rabbitmq-server/pull/9356) + + * Initial forward compatibility with later Erlang 26.x and 27.0 releases. + + GitHub issue: [#9485](https://github.com/rabbitmq/rabbitmq-server/pull/9485) + + * Nodes now log boot time at info level instead of debug. This piece of information + can be useful during root cause analysis. + + Contributed by @johanrhodin (CloudAMQP). + + GitHub issue: [#9466](https://github.com/rabbitmq/rabbitmq-server/pull/9466) + + * Channel interceptors (defined by plugins) now can return one more type of error. + + Contributed by @Ayanda-D. + + GitHub issue: [#9459](https://github.com/rabbitmq/rabbitmq-server/pull/9459) + + +### CLI Tools + +#### Bug Fixes + + * `rabbitmqctl delete_queue` now handles more queue replica failure scenarios + (that were previously handled by client operations and the HTTP API but not CLI tools). + + Contributed by @Ayanda-D. + + GitHub issue: [#9324](https://github.com/rabbitmq/rabbitmq-server/pull/9324) + + * `rabbitmq-streams delete_replica` now handles scenarios where the replica is hosted + on a node that is not accessible (reachable). + + GitHub issue: [#9282](https://github.com/rabbitmq/rabbitmq-server/issues/9282) + ### MQTT Plugin @@ -58,9 +119,31 @@ Release notes can be found on GitHub at [rabbitmq-server/release-notes](https:// GitHub issue: [#9281](https://github.com/rabbitmq/rabbitmq-server/pull/9281) +### AMQP 1.0 Plugin + +#### Bug Fixes + + * Listing AMQP 1.0 connections in a system that had both AMQP 1.0 and AMQP 0-9-1 clients connected + resulted in exceptions logged and partial results returned to CLI tools.
+ + GitHub issue: [#9371](https://github.com/rabbitmq/rabbitmq-server/issues/9371) + + +### Prometheus Plugin + +#### Enhancements + + * The plugin now exposes a new gauge, `rabbitmq_unreachable_cluster_peers_count`, that indicates how many cluster + peers **cannot** be reached by this node. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issues: [#2508](https://github.com/rabbitmq/rabbitmq-server/issues/2508), [#9465](https://github.com/rabbitmq/rabbitmq-server/pull/9465) + + ## Dependency Upgrades - * `osiris` was upgraded to [`1.6.4`](https://github.com/rabbitmq/osiris/tags) + * `osiris` was upgraded to [`1.6.7`](https://github.com/rabbitmq/osiris/tags) ## Dependency Upgrades diff --git a/release-notes/3.12.6.md b/release-notes/3.12.6.md new file mode 100644 index 000000000000..52f7d72f6abe --- /dev/null +++ b/release-notes/3.12.6.md @@ -0,0 +1,53 @@ +RabbitMQ `3.12.6` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). + +Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) +if upgrading from a version prior to 3.12.0. + +This release requires Erlang 25 and supports Erlang versions up to `26.1.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.12.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.11.x (or older releases) on Erlang 25 to 3.12.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.12.x/release-notes). + + +### Core Server + +#### Bug Fixes + + * `3.12.5` unintentionally shipped with a `seshat` version older than `0.6.1`. This can potentially + result in an incompatibility with the stream subsystem. + + GitHub issue: [#9499](https://github.com/rabbitmq/rabbitmq-server/pull/9499) + +#### Enhancements + + * Improved forward compatibility of classic queues with 3.13. + + GitHub issue: [#9508](https://github.com/rabbitmq/rabbitmq-server/pull/9508) + + +## Dependency Upgrades + + * `seshat` was upgraded to [`0.6.1`](https://github.com/rabbitmq/seshat/tags) + + +## Dependency Upgrades + +None in this release. + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.12.6.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.12.7.md b/release-notes/3.12.7.md new file mode 100644 index 000000000000..bf9e61258003 --- /dev/null +++ b/release-notes/3.12.7.md @@ -0,0 +1,182 @@ +RabbitMQ `3.12.7` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). + +Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) +if upgrading from a version prior to 3.12.0. + +This release requires Erlang 25 and supports Erlang versions up to `26.1.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. 
+ + +### Minimum Supported Erlang Version + +As of 3.12.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.11.x (or older releases) on Erlang 25 to 3.12.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.12.x/release-notes). + + +### Core Server + +#### Bug Fixes + + * Stream replication connections configured to use exclusively TLSv1.3 failed. + + GitHub issue: [#9678](https://github.com/rabbitmq/rabbitmq-server/pull/9678) + + * On startup, stream replicas will handle one more potential case of segment file corruption + after an unclean shutdown. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#9678](https://github.com/rabbitmq/rabbitmq-server/pull/9678) + + * `default_policies.*.queue_pattern` definition in `rabbitmq.conf` was incorrectly parsed. + + Contributed by @SimonUnge (AWS). + + GitHub issue: [#9545](https://github.com/rabbitmq/rabbitmq-server/pull/9545) + + * Avoid log noise when inter-node connections frequently fail and recover. + + Contributed by @Ayanda-D. + + GitHub issue: [#9667](https://github.com/rabbitmq/rabbitmq-server/pull/9667) + +#### Enhancements + + * Optimized stream index scans. Longer scans could result in some replicas stopping + with a timeout. + + GitHub issue: [#9678](https://github.com/rabbitmq/rabbitmq-server/pull/9678) + + * Classic queue storage version is now a supported key for [operator policies](https://rabbitmq.com/parameters.html#operator-policies). + + Contributed by @SignalWhisperer (AWS). + + GitHub issue: [#9548](https://github.com/rabbitmq/rabbitmq-server/pull/9548) + + * [Queue length limit](https://rabbitmq.com/maxlength.html) overflow behavior now can be configured via [operator policies](https://rabbitmq.com/parameters.html#operator-policies). + + Contributed by @SimonUnge (AWS). + + GitHub issue: [#9636](https://github.com/rabbitmq/rabbitmq-server/issues/9636) + + +### CLI Tools + +#### Bug Fixes + + * `rabbitmq-streams list_stream_consumer_groups` incorrectly validated the set of columns it accepts. + + GitHub issue: [#9671](https://github.com/rabbitmq/rabbitmq-server/pull/9671) + +#### Enhancements + + * Several `list_stream_*` commands (available via both `rabbitmq-diagnostics` and `rabbitmq-streams`) now can + display the replica node in addition to other fields. + + GitHub issue: [#9582](https://github.com/rabbitmq/rabbitmq-server/issues/9582) + + * `rabbitmqctl add_user` now can accept a [pre-generated salted password](https://rabbitmq.com/passwords.html) instead + of a plain text password, both as a positional argument and via standard input: + + ``` shell + # This is just an example, DO NOT use this value in production! + # The 2nd argument is a Base64-encoded pre-hashed and salted value of "guest4" + rabbitmqctl -- add_user "guest4" "BMT6cj/MsI+4UOBtsPPQWpQfk7ViRLj4VqpMTxu54FU3qa1G" --pre-hashed-password + # try authenticating with a pair of credentials + rabbitmqctl authenticate_user "guest4" "guest4" + ``` + + GitHub issue: [#9669](https://github.com/rabbitmq/rabbitmq-server/issues/9669) + + +### Management Plugin + +#### Bug Fixes + + * Message consumption with the "Nack message, requeue: true" option did not actually requeue deliveries.
+ + GitHub issue: [#9715](https://github.com/rabbitmq/rabbitmq-server/pull/9715) + +#### Enhancements + + * HTTP API request body size is now limited to 10 MiB by default. + Two endpoints, one that accepts messages for publishing (note: publishing over the HTTP API is greatly discouraged) + and another for [definition import](https://rabbitmq.com/definitions.html#import), + will now reject larger transfers with a `400 Bad Request` response. + + GitHub issue: [#9708](https://github.com/rabbitmq/rabbitmq-server/pull/9708) + + * `DELETE /api/queues/{vhost}/{name}` now can delete exclusive queues. + + GitHub issue: [#8758](https://github.com/rabbitmq/rabbitmq-server/issues/8758) + + * Key supported by operator policies are now grouped by queue type in the UI. + + GitHub issue: [#9544](https://github.com/rabbitmq/rabbitmq-server/pull/9544) + + +### MQTT Plugin + +#### Enhancements + + * Improved data safety for confirms in environments where the plugin uses classic queues. + + GitHub issue: [#9530](https://github.com/rabbitmq/rabbitmq-server/pull/9530) + + +### Web MQTT Plugin + +#### Bug Fixes + + * Avoid an exception when a not fully established MQTT-over-WebSockets connection terminated. + + Contributed by @gomoripeti (CloudAMQP). + + GitHub issue: [#9654](https://github.com/rabbitmq/rabbitmq-server/pull/9654) + + +### JMS Topic Exchange Plugin + +#### Bug Fixes + + * Recovery of bindings of durable queues bound to a transient JMS topic exchange failed. + + GitHub issue: [#9533](https://github.com/rabbitmq/rabbitmq-server/issues/9533) + + +### Sharding Plugin + +#### Bug Fixes + + * Recovery of bindings of durable queues bound to a transient `x-modulo-hash` exchange failed. + + GitHub issue: [#9533](https://github.com/rabbitmq/rabbitmq-server/issues/9533) + + +### Recent History Exchange Plugin + +#### Bug Fixes + + * Recovery of bindings of durable queues bound to a transient recent history exchange failed. + + GitHub issue: [#9533](https://github.com/rabbitmq/rabbitmq-server/issues/9533) + + +## Dependency Upgrades + + * `osiris` has been upgraded to [`1.6.9`](https://github.com/rabbitmq/osiris/releases) + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.12.7.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.12.8.md b/release-notes/3.12.8.md new file mode 100644 index 000000000000..d6d9275d3efe --- /dev/null +++ b/release-notes/3.12.8.md @@ -0,0 +1,70 @@ +RabbitMQ `3.12.8` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). + +Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) +if upgrading from a version prior to 3.12.0. + +This release requires Erlang 25 and supports Erlang versions up to `26.1.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.12.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.11.x (or older releases) on Erlang 25 to 3.12.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) first. 
+ + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.12.x/release-notes). + + +### Core Server + +#### Bug Fixes + + * Avoids a potential exception in the `autoheal` partition handler. + + Contributed by @Ayanda-D. + + GitHub issue: [#9818](https://github.com/rabbitmq/rabbitmq-server/pull/9818) + +#### Enhancements + + * `raft.segment_max_entries` is now validated to prevent the value from overflowing its 16-bit segment file field. + Maximum supported value is now `65535`. + + GitHub issue: [#9733](https://github.com/rabbitmq/rabbitmq-server/issues/9733) + + +### Shovel Plugin + +#### Enhancements + + * Significantly faster Shovel startup in environments where there are many of them (one thousand or more). + + GitHub issue: [#9796](https://github.com/rabbitmq/rabbitmq-server/pull/9796) + + +### AMQP 1.0 Erlang Client + +#### Enhancements + + * User-provided credentials are now obfuscated using an one-off key pair generated on node boot. + This keeps sensitive client state information from being logged by the runtime exception logger. + + GitHub issue: [#9777](https://github.com/rabbitmq/rabbitmq-server/pull/9777) + + +## Dependency Upgrades + +None in this release. + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.12.8.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.12.9.md b/release-notes/3.12.9.md new file mode 100644 index 000000000000..be9d69fb6f0c --- /dev/null +++ b/release-notes/3.12.9.md @@ -0,0 +1,98 @@ +RabbitMQ `3.12.9` is a maintenance release in the `3.12.x` [release series](https://www.rabbitmq.com/versions.html). + +Please refer to the upgrade section from the [3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) +if upgrading from a version prior to 3.12.0. + +This release requires Erlang 25 and supports Erlang versions up to `26.1.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/which-erlang.html) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.12.0, RabbitMQ requires Erlang 25. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.11.x (or older releases) on Erlang 25 to 3.12.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.12.x/release-notes). + +### Core Broker + +#### Bug Fixes + + * When a topic permission was deleted, an [internal event](https://rabbitmq.com/logging.html#internal-events) of type `permission.deleted` + was emitted in some cases, instead of `topic.permission.deleted`. + + Investigated by @bedia. + + GitHub issue: [#9937](https://github.com/rabbitmq/rabbitmq-server/issues/9937) + + +### Shovel Plugin + +#### Bug Fixes + + * Shovels on `3.12.8` nodes failed during a rolling cluster upgrade due to internal + identifier format changes. + + Starting with this release, both old and new formats are supported for upgrade safety. 
+ + GitHub issue: [#9894](https://github.com/rabbitmq/rabbitmq-server/pull/9894) + + +### Grafana Dashboard + +#### Enhancements + + * [Global counters for producers](https://github.com/rabbitmq/rabbitmq-server/pull/3127) are now available in the dashboard. + + Contributed by @johanrhodin (CloudAMQP) + + GitHub issue: [#9846](https://github.com/rabbitmq/rabbitmq-server/pull/9846) + + +### MQTT Plugin + +#### Bug Fixes + + * Avoids an unnecessary warning in the logs. + + GitHub issue: [#9885](https://github.com/rabbitmq/rabbitmq-server/pull/9885) + + +### CLI Tools + +#### Enhancements + + * `rabbitmq-diagnostics list_policies_that_match [queue name]` is a new command + that simplifies troubleshooting of policy conflicts. + + GitHub issue: [#9916](https://github.com/rabbitmq/rabbitmq-server/pull/9916) + + +### Management Plugin + +#### Enhancements + + * Nodes that have OAuth 2 enabled now redirect the user to the original landing page (if any) + after successful login with the IDP. + + Contributed by @dukex. + + GitHub issue: [#9851](https://github.com/rabbitmq/rabbitmq-server/pull/9851) + + +## Dependency Upgrades + +None in this release. + + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.12.9.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.13.0.md b/release-notes/3.13.0.md index 6daba8782813..784549200a41 100644 --- a/release-notes/3.13.0.md +++ b/release-notes/3.13.0.md @@ -1,56 +1,66 @@ -## RabbitMQ 3.13.0-beta.6 +## RabbitMQ 3.13.0 -RabbitMQ `3.13.0-beta.6` is a preview of a new feature release. +RabbitMQ `3.13.0` is a new feature release. + +Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those +who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). ## Highlights -This release includes several new features, optimizations, and graduates (makes mandatory) a number of feature flags. +This release includes several new features, optimizations, internal changes in preparation for RabbitMQ 4.x, +and a [major update](https://www.rabbitmq.com/blog/2024/01/04/new-website) to the [RabbitMQ website](https://www.rabbitmq.com/). The user-facing areas that have seen the biggest improvements in this release are - * Support for [consumer-side stream filtering](https://github.com/rabbitmq/rabbitmq-server/pull/8207) + * [Khepri](https://www.youtube.com/watch?v=whVqpgvep90) now can be used as an [alternative schema data store](https://github.com/rabbitmq/rabbitmq-server/pull/7206) in RabbitMQ, replacing Mnesia * [MQTTv5 support](https://blog.rabbitmq.com/posts/2023/07/mqtt5) - * A new [common message container format](https://github.com/rabbitmq/rabbitmq-server/pull/5077) used internally, based on the AMQP 1.0 message container format + * Support for [consumer-side stream filtering](https://github.com/rabbitmq/rabbitmq-server/pull/8207) + * A new [common message container format](https://github.com/rabbitmq/rabbitmq-server/pull/5077) used internally, based on the AMQP 1.0 message format * Improved classic non-mirrored queue performance with message sizes larger than 4 KiB (or a different customized CQ index embedding threshold) - * Classic queues use version 2 of the storage implementation (CQv2). 
- This should significantly improve performance of non-mirrored classic queues + * Classic queues storage implementation version 2 (CQv2) is now highly recommended for all new deployments. + CQv2 meaningfully improves performance of non-mirrored classic queues for most workloads -This release also features many internal API improvements in preparation to 4.0 -with [Khepri](https://www.youtube.com/watch?v=huT-zmXvfuM). - -See Compatibility Notes below to learn about breaking or potentially breaking changes in this release. +See Compatibility Notes below to learn about **breaking or potentially breaking changes** in this release. ## Release Artifacts -RabbitMQ preview releases are distributed via [GitHub](https://github.com/rabbitmq/rabbitmq-server/releases). +RabbitMQ releases are distributed via [GitHub](https://github.com/rabbitmq/rabbitmq-server/releases). +[Debian](https://rabbitmq.com/install-debian.html) and [RPM packages](https://rabbitmq.com/install-rpm.html) are available via Cloudsmith mirrors. -[Community Docker image](https://hub.docker.com/_/rabbitmq/) is another installation option -for previews. It is updated with a delay (usually a few days). +[Community Docker image](https://hub.docker.com/_/rabbitmq/), [Chocolatey package](https://community.chocolatey.org/packages/rabbitmq), and the [Homebrew formula](https://www.rabbitmq.com/docs/install-homebrew) +are other installation options. They are updated with a delay. ## Erlang/OTP Compatibility Notes -This release [requires Erlang 26.0](https://www.rabbitmq.com/which-erlang.html) or later. +This release [requires Erlang 26.x](https://www.rabbitmq.com/docs/which-erlang). -[Provisioning Latest Erlang Releases](https://www.rabbitmq.com/which-erlang.html#erlang-repositories) explains +[Provisioning Latest Erlang Releases](https://www.rabbitmq.com/docs/which-erlang#erlang-repositories) explains what package repositories and tools can be used to provision latest patch versions of Erlang 26.x. + ## Upgrading to 3.13 ### Documentation guides on upgrades -See the [Upgrading guide](https://www.rabbitmq.com/upgrade.html) for documentation on upgrades and [RabbitMQ change log](https://www.rabbitmq.com/changelog.html) -for release notes of other releases. +See the [Upgrading guide](https://www.rabbitmq.com/docs/upgrade) for documentation on upgrades and [GitHub releases](https://github.com/rabbitmq/rabbitmq-server/releases) +for release notes of individual releases. + +Note that since 3.12.0 requires **all feature flags** to be enabled before upgrading, +there is no upgrade path from 3.11.24 (or a later patch release) straight to 3.13.0. ### Required Feature Flags -TBD +This release does not [graduate](https://www.rabbitmq.com/docs/feature-flags#graduation) any feature flags. + +However, all users are highly encouraged to enable all feature flags before upgrading to this release from +3.12.x. ### Mixed version cluster compatibility -RabbitMQ 3.13.0 nodes can run alongside `3.12.x` nodes. `3.12.x`-specific features can only be made available when all nodes in the cluster -upgrade to 3.13.0 or any other patch release in the new series. +RabbitMQ 3.13.0 nodes can run alongside `3.12.x` nodes. `3.13.x`-specific features can only be made available when all nodes in the cluster +upgrade to 3.13.0 or a later patch release in the new series. While operating in mixed version mode, some aspects of the system may not behave as expected. The list of known behavior changes is covered below. 
Once all nodes are upgraded to 3.13.0, these irregularities will go away. @@ -58,36 +68,195 @@ Once all nodes are upgraded to 3.13.0, these irregularities will go away. Mixed version clusters are a mechanism that allows rolling upgrade and are not meant to be run for extended periods of time (no more than a few hours). +### Recommended Post-upgrade Procedures +#### Switch Classic Queues to CQv2 -## Compatibility Notes +We recommend switching classic queues to CQv2 after **all cluster nodes** have been upgraded, +at first using policies, and then eventually using a setting in `rabbitmq.conf`. Upgrading +classic queues to CQv2 at boot time using the configuration file setting can be +potentially unsafe in environments where **deprecated classic mirrored queues still exist**. + +For new clusters, adopting CQv2 from the start is highly recommended: + +``` ini +# CQv2 should be used by default for all new clusters +classic_queue.default_version = 2 +``` -TBD +## Compatibility Notes + +This release includes a few potentially breaking changes. ### Minimum Supported Erlang Version -Starting with this release, RabbitMQ requires Erlang 26.0 or later versions. Nodes **will fail to start** +Starting with this release, RabbitMQ requires Erlang 26.x. Nodes **will fail to start** on older Erlang releases. ### Client Library Compatibility -Client libraries that were compatible with RabbitMQ `3.12.x` will be compatible with `3.13.0`. +Client libraries that were compatible with RabbitMQ `3.11.x` and `3.12.x` will be compatible with `3.13.0`. +RabbitMQ Stream Protocol clients must be upgraded to their latest versions in order to support +the stream filtering feature introduced in this release. + +### Consistency Model and Schema Modification Visibility Guarantees of Khepri and Mnesia + +Khepri has an important difference from Mnesia when it comes to schema modifications such as queue +or stream declarations, or binding declarations. These changes won't be noticeable with many workloads +but can affect some, in particular, certain integration tests. + +Consider two scenarios, A and B. + +#### Scenario A + +There is only one client. The client performs the following steps: + +1. It declares a queue Q +2. It binds Q to an exchange X +3. It publishes a message M to the exchange X +4. It expects the message to be routed to queue Q +5. It consumes the message + +In this scenario, there should be no observable difference in behavior. The client's expectations +will be met. + +#### Scenario B + +There are two clients, One and Two, connected to nodes R1 and R3, and using the same virtual host. +Node R2 has no client connections. + +Client One performs the following steps: + +1. It declares a queue Q +2. It binds Q to an exchange X +3. It gets a queue declaration confirmation back +4. It notifies Client Two, or Client Two implicitly finds out, that it has finished the steps above (for example, in an integration test) +5. Client Two publishes a message M to X +6. Clients One and Two expect the message to be routed to Q + +In this scenario, on step three Mnesia would return when **all** cluster nodes have committed an update. +Khepri, however, will return when **a majority** of nodes, including the node handling Client One's operations, +have committed the update. + +This may include nodes R1 and R2 but not node R3, meaning that message M published by Client Two connected to node R3 +in the above example **is not guaranteed to be routed**.
+ +Once all schema changes propagate to node R3, Client Two's subsequent +publishes on node R3 **will be guaranteed** to be routed. + +This is the trade-off of a Raft-based system, which assumes that a write accepted by a majority of nodes +can be considered a success. + +#### Workaround Strategies + +To satisfy Client Two's expectations in scenario B, Khepri could perform **consistent** (involving a majority of replicas) +queries of bindings when routing messages, but that would have a **significant** impact on throughput +of certain protocols (such as MQTT) and exchange/destination types (anything that resembles a topic exchange in AMQP 0-9-1). + +Applications that rely on multiple connections that depend on a shared topology have +several coping strategies. + +If an application uses two or more connections to different nodes, it can +declare its topology on boot and then inject a short pause (1-2 seconds) before proceeding with +other operations. + +Applications that rely on dynamic topologies can switch to using a "static" set of +exchanges and bindings. + +Application components that do not need to use a shared topology can each configure +their own queues/streams/bindings. + +Test suites that use multiple connections to different nodes can choose to use just one connection or +connect to the same node, or inject a pause, or await a certain condition that indicates that the topology +is in place. + + +### TLS Defaults + +Starting with Erlang 26, client-side [TLS peer certificate chain verification](https://www.rabbitmq.com/docs/ssl#peer-verification) settings are enabled by default in most contexts: +from federation links to shovels to TLS-enabled LDAP client connections. + +If using TLS peer certificate chain verification is not practical or necessary, it can be disabled. +Please refer to the docs of the feature in question, for example, +this one [on TLS-enabled LDAP client](http://rabbitmq.com/docs/ldap/#tls) connections. + + +### Management Plugin and HTTP API + +The `GET /api/queues` HTTP API endpoint has dropped several rarely used metrics, resulting in up to 25% traffic savings. + +### MQTT Plugin + +`mqtt.subscription_ttl` (in milliseconds) configuration setting was replaced with `mqtt.max_session_expiry_interval_seconds` (in seconds). +A 3.13 RabbitMQ node will fail to boot if the old configuration setting is set. +For example, if you set `mqtt.subscription_ttl = 3600000` (1 hour) prior to 3.13, replace that setting with `mqtt.max_session_expiry_interval_seconds = 3600` (1 hour) in 3.13. +A configuration sketch illustrating this change can be found at the end of these compatibility notes. + +### rabbitmqctl node_health_check is Now a No-Op + +`rabbitmqctl node_health_check` has been deprecated for over three years +and is now a no-op (does nothing). + +See the [Health Checks section](https://www.rabbitmq.com/docs/monitoring#health-checks) in the monitoring guide +to find out what modern alternatives are available. + +### openSUSE Leap Package is not Provided + +An openSUSE Leap package will not be provided with this release of RabbitMQ. + +This release requires Erlang 26 and there is an [Erlang 26 package available](https://download.opensuse.org/repositories/devel:/languages:/erlang:/Factory/openSUSE_Factory/x86_64/) from Erlang Factory +but the package depends on `glibc` 2.34, and all currently available openSUSE Leap releases +(up to 15.5) ship with 2.31 at most. + +Team RabbitMQ would like to continue building an openSUSE Leap package when a Leap 15.x-compatible Erlang 26 +package becomes publicly available.
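To make the MQTT setting migration described above more concrete, here is a minimal before/after `rabbitmq.conf` sketch. It assumes the one-hour value from the example above; for other values, divide the old millisecond value by 1000.

``` ini
# Before 3.13 (a 3.13 node will refuse to boot if this key is still present):
# mqtt.subscription_ttl = 3600000

# 3.13 equivalent of the setting above, expressed in seconds:
mqtt.max_session_expiry_interval_seconds = 3600
```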
### Getting Help -Any questions about this release, upgrades or RabbitMQ in general are welcome on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users). +Any questions about this release, upgrades or RabbitMQ in general are welcome in [GitHub Discussions](https://github.com/rabbitmq/rabbitmq-server/discussions) or +on [our community Discord](https://rabbitmq.com/discord/). ## Changes Worth Mentioning Release notes are kept under [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/main/release-notes). +### A New RabbitMQ Website + +This 3.13.0 release is accompanied by [a major update of the RabbitMQ website](https://www.rabbitmq.com/blog/2024/01/04/new-website). + +Some of its great features include: + +* Access to doc guides for multiple release series: 3.13.x and 3.12.x, with more versions coming as new RabbitMQ release series come out +* A reworked table of contents and navigation +* Search over both doc guides and blog content + +**Note**: We hope you enjoy the new website. More improvements are coming soon: we are revising the documentation table of contents and adding navigational topics to help you move around and find the documentation you are looking for faster. We will keep you posted! + ### Core Server #### Enhancements + * [Khepri](https://www.youtube.com/watch?v=whVqpgvep90) now can be used as an [alternative schema data store](https://github.com/rabbitmq/rabbitmq-server/pull/7206) + in RabbitMQ, by enabling a feature flag: + + ``` shell + rabbitmqctl enable_feature_flag khepri_db + ``` + + In practical terms this means that it will be possible to swap Mnesia for a Raft-based data store + that will **predictably recover from network partitions and node failures**, the same way [quorum queues](https://www.rabbitmq.com/docs/quorum-queues#leader-election) + and [streams](https://www.rabbitmq.com/docs/streams#leader-election) already do. At the same time, this means + that RabbitMQ clusters now **must have a majority of nodes online at all times**, or all client operations will be refused. + + Like quorum queues and streams, Khepri uses [RabbitMQ's Raft implementation](https://github.com/rabbitmq/ra) under the hood. With Khepri enabled, all key modern features + of RabbitMQ will use the same fundamental approach to recovery from failures, relying on a library that passes a [Jepsen test suite](https://github.com/rabbitmq/ra/#safety-verification). + + Team RabbitMQ intends to make Khepri the default schema database starting with RabbitMQ 4.0. + + GitHub issue: [#7206](https://github.com/rabbitmq/rabbitmq-server/pull/7206) + * Messages are now internally stored using a new common, heavily AMQP 1.0-influenced container format. This is a major step towards a protocol-agnostic core: a common format that encapsulates a sum of data types used by the protocols RabbitMQ supports, plus annotations for routing, dead-lettering state, and other purposes. @@ -104,7 +273,7 @@ Release notes are kept under [rabbitmq-server/release-notes](https://github.com/ When the number of online replicas of a quorum queue goes below (or above) its target, new replicas will be automatically placed if enough cluster nodes are available. - This is a more automatic version of how [quorum queue replicas have originally been grown](https://rabbitmq.com/quorum-queues.html#replica-management).
+ This is a more automatic version of how [quorum queue replicas have originally been grown](https://www.rabbitmq.com/docs/quorum-queues#replica-management). For automatic shrinking of queue replicas, the user must opt in. @@ -112,25 +281,35 @@ Release notes are kept under [rabbitmq-server/release-notes](https://github.com/ GitHub issue: [#8218](https://github.com/rabbitmq/rabbitmq-server/pull/8218) - * Reduced memory footprint, improved memory use predictability and throughput of classic queues (version 2, or CQv2). - This particularly benefits classic queues with longer backlogs. + * Revisited peer discovery implementation that further reduces the probability of two or more + sets of nodes [forming separate clusters](https://www.rabbitmq.com/docs/cluster-formation#initial-formation-race-condition) when **all** cluster nodes are created at the same time and boot in parallel. - Classic queue v2 (CQv2) storage implementation **is now the default**. It is possible to switch - the default back to CQv1 using `rabbitmq.conf`: + GitHub issue: [#9797](https://github.com/rabbitmq/rabbitmq-server/pull/9797) - ``` ini - # uses CQv1 by default - classic_queue.default_version = 1 - ``` + * Classic queue storage v2 (CQv2) has matured and is now recommended for all users. + + We recommend switching classic queues to CQv2 after **all cluster nodes** have been upgraded to 3.13.0, + at first using policies, and then eventually using a setting in `rabbitmq.conf`. Upgrading + classic queues to CQv2 at boot time using the configuration file setting can be + potentially unsafe in environments where deprecated classic mirrored queues still exist. - Individual queues can be declared by passing `x-queue-version` argument and/or through a `queue-version` policy. + For new clusters, adopt CQv2 from the start by setting `classic_queue.default_version` in `rabbitmq.conf`: - GitHub issue: [#8308](https://github.com/rabbitmq/rabbitmq-server/pull/8308) + ``` ini + # only set this value for new clusters or after all nodes have been upgraded to 3.13 + classic_queue.default_version = 2 + ``` * Non-mirrored classic queues: optimizations of storage for larger (greater than 4 kiB) messages. GitHub issue: [#6090](https://github.com/rabbitmq/rabbitmq-server/pull/6090), [#8507](https://github.com/rabbitmq/rabbitmq-server/pull/8507) + * When a non-mirrored classic queue is declared, its placement node + is now selected with less interaction with cluster peers, speeding up the process + when some nodes have recently gone down. + + GitHub issue: [#10102](https://github.com/rabbitmq/rabbitmq-server/pull/10102) + * A subsystem for marking features as deprecated. GitHub issue: [#7390](https://github.com/rabbitmq/rabbitmq-server/pull/7390) @@ -142,6 +321,45 @@ Release notes are kept under [rabbitmq-server/release-notes](https://github.com/ GitHub issues: [#8834](https://github.com/rabbitmq/rabbitmq-server/pull/8834), [#8927](https://github.com/rabbitmq/rabbitmq-server/pull/8927) + * Classic queue storage version now can be set via [operator policies](https://www.rabbitmq.com/docs/parameters#operator-policies). + + Contributed by @SimonUnge (AWS). + + GitHub issue: [#9541](https://github.com/rabbitmq/rabbitmq-server/pull/9541) + + * `channel_max_per_node` allows limiting how many channels a node would allow clients to open, in total. + + This limit is easier to reason about than the per-node connection limit multiplied by `channel_max` (a per-connection limit for channels). + + Contributed by @SimonUnge (AWS). 
+ + GitHub issue: [#10351](https://github.com/rabbitmq/rabbitmq-server/pull/10351) + + * `disk_free_limit.absolute` and `vm_memory_high_watermark.absolute` now support more information units: `Mi`, `Gi`, `TB`, `Ti`, `PB`, `Pi`. + + In addition, some of the existing keys were renamed: + + * `K` now means "kilobyte" and not "kibibyte" + * `M` now means "megabyte" and not "mebibyte" + * `G` now means "gigabyte" and not "gibibyte" + + There is no consensus on how these single letter suffixes should be interpreted (as their power of 2 or power of 10 variants), + so RabbitMQ has adopted a widely used [convention followed by Kubernetes](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory). + + GitHub issues: [#10310](https://github.com/rabbitmq/rabbitmq-server/issues/10310), [#10348](https://github.com/rabbitmq/rabbitmq-server/pull/10348) + + * Improved efficiency of definition imports in environments with a lot of virtual hosts. + + Contributed by @AyandaD. + + GitHub issue: [#10320](https://github.com/rabbitmq/rabbitmq-server/pull/10320) + + * When a [consumer delivery timeout](https://www.rabbitmq.com/docs/consumers#acknowledgement-timeout) hits, a more informative message is now logged. + + Contributed by @rluvaton. + + GitHub issue: [#10446](https://github.com/rabbitmq/rabbitmq-server/pull/10446) + #### Bug Fixes This release includes all bug fixes shipped in the `3.12.x` series. @@ -154,12 +372,73 @@ This release includes all bug fixes shipped in the `3.12.x` series. GitHub issue: [#8491](https://github.com/rabbitmq/rabbitmq-server/pull/8491) + * Feature flag state is now persisted in a safer way during node shutdown. + + GitHub issue: [#10279](https://github.com/rabbitmq/rabbitmq-server/pull/10279) + + * Feature flag synchronization between nodes now avoids a potential race condition. + + GitHub issue: [#10027](https://github.com/rabbitmq/rabbitmq-server/pull/10027) + * The state of node maintenance status across the cluster is now replicated. It previously was accessible to all nodes but not replicated. GitHub issue: [#9005](https://github.com/rabbitmq/rabbitmq-server/pull/9005) +### CLI Tools + +#### Deprecations + + * `rabbitmqctl rename_cluster_node` and `rabbitmqctl update_cluster_nodes` are now no-ops. + + They were not safe to use with quorum queues and streams, and are completely incompatible with Khepri. + + GitHub issue: [#10369](https://github.com/rabbitmq/rabbitmq-server/pull/10369) + +#### Enhancements + + * `rabbitmq-diagnostics cluster_status` now responds significantly faster when some cluster + nodes are not reachable. + + GitHub issue: [#10101](https://github.com/rabbitmq/rabbitmq-server/pull/10101) + + * `rabbitmqctl list_deprecated_features` is a new command that lists deprecated features + that are used on the target node. + + GitHub issues: [#9901](https://github.com/rabbitmq/rabbitmq-server/pull/9901), [#7390](https://github.com/rabbitmq/rabbitmq-server/pull/7390) + + +### Management Plugin + +#### Enhancements + + * New API endpoint, `GET /api/stream/{vhost}/{name}/tracking`, can be used to track + publisher and consumer offsets in a stream. + + GitHub issue: [#9642](https://github.com/rabbitmq/rabbitmq-server/pull/9642) + + * Several rarely used queue metrics were removed to reduce inter-node data transfers + and CPU burn during API response generation.
The effects will be particularly pronounced + for the `GET /api/queues` endpoint used without filtering or pagination, which can produce + enormously large responses. + + A couple of relevant queue metrics or state fields were lifted to the top level. + + **This is a potentially breaking change**. + + Note that [Prometheus](https://www.rabbitmq.com/docs/prometheus) is the recommended option for monitoring, + not the management plugin's HTTP API. + + GitHub issues: [#9437](https://github.com/rabbitmq/rabbitmq-server/issues/9437), [#9578](https://github.com/rabbitmq/rabbitmq-server/pull/9578), [#9633](https://github.com/rabbitmq/rabbitmq-server/pull/9633) + +#### Bug Fixes + + * `GET /api/nodes/{name}` failed with a 500 when called with a non-existent node name. + + GitHub issue: [#10330](https://github.com/rabbitmq/rabbitmq-server/issues/10330) + + ### Stream Plugin #### Enhancements @@ -172,6 +451,25 @@ This release includes all bug fixes shipped in the `3.12.x` series. GitHub issue: [#8207](https://github.com/rabbitmq/rabbitmq-server/pull/8207) + * Stream connections now support JWT (OAuth 2) token renewal. The renewal is client-initiated + shortly before token expiry. Therefore, this feature requires stream protocol clients to be updated. + + GitHub issue: [#9187](https://github.com/rabbitmq/rabbitmq-server/pull/9187) + + * Stream connections are now aware of JWT (OAuth 2) token expiry. + + GitHub issue: [#10292](https://github.com/rabbitmq/rabbitmq-server/issues/10292) + +#### Bug Fixes + + * Safety improvements for stream (replica) membership changes. + + GitHub issue: [#10331](https://github.com/rabbitmq/rabbitmq-server/pull/10331) + + * Stream protocol connections now comply with the connection limit in the target virtual host. + + GitHub issue: [#9946](https://github.com/rabbitmq/rabbitmq-server/pull/9946) + ### MQTT Plugin @@ -181,11 +479,15 @@ This release includes all bug fixes shipped in the `3.12.x` series. GitHub issues: [#7263](https://github.com/rabbitmq/rabbitmq-server/pull/7263), [#8681](https://github.com/rabbitmq/rabbitmq-server/pull/8681) + * MQTT clients that use QoS 0 now can reconnect more reliably when the node they were connected to fails. + + GitHub issue: [#10203](https://github.com/rabbitmq/rabbitmq-server/pull/10203) + * Negative message acknowledgements are now propagated to MQTTv5 clients. GitHub issue: [#9034](https://github.com/rabbitmq/rabbitmq-server/pull/9034) - * **Potential incompatibility**: `mqtt.subscription_ttl` configuration setting is now deprecated in favor of + * **Potential incompatibility**: `mqtt.subscription_ttl` configuration setting was replaced with `mqtt.max_session_expiry_interval_seconds` that targets MQTTv5. GitHub issue: [#8846](https://github.com/rabbitmq/rabbitmq-server/pull/8846) @@ -195,17 +497,55 @@ This release includes all bug fixes shipped in the `3.12.x` series. #### Bug Fixes - * During AMQP 1.0 to AMQP 0-9-1 conversion, the Correlation ID message property is now stored as `x-correlation-id` (instead of `x-correlation`) for values longer than 255 bytes. + * During AMQP 1.0 to AMQP 0-9-1 conversion, the Correlation ID message property is now stored as `x-correlation-id` (instead of `x-correlation`) for values longer than 255 bytes. **This is a potentially breaking change**. GitHub issue: [#8680](https://github.com/rabbitmq/rabbitmq-server/pull/8680) + * AMQP 1.0 connections are now throttled when the node hits a [resource alarm](https://www.rabbitmq.com/docs/alarms).
+ + GitHub issue: [#9953](https://github.com/rabbitmq/rabbitmq-server/pull/9953) + + +### OAuth 2 AuthN and AuthZ Backend Plugin + +#### Enhancements + + * RabbitMQ nodes now allow for multiple OAuth 2 resources to be configured. Each resource can use + a different identity provider (for example, one can be powered by Keycloak and another by Azure Active Directory). + + This allows for identity provider infrastructure changes (say, provider A is replaced with provider B over time) + that do not affect RabbitMQ's ability to authenticate clients and authorize the operations they attempt to perform. + + GitHub issue: [#10012](https://github.com/rabbitmq/rabbitmq-server/pull/10012) + + * The plugin now performs discovery of certain properties for OpenID-compliant identity providers. + + GitHub issue: [#10012](https://github.com/rabbitmq/rabbitmq-server/pull/10012) + + +### Peer Discovery AWS Plugin + +#### Enhancements + + * It is now possible to override how node's hostname is extracted from AWS API responses during peer discovery. + + This is done using `cluster_formation.aws.hostname_path`, a collection of keys that will be used to + traverse the response and extract nested values from it. The list is comma-separated. + + The default value is a single value list, `privateDnsName`. + + Contributed by @illotum (AWS). + + GitHub issue: [#10097](https://github.com/rabbitmq/rabbitmq-server/pull/10097) + ### Dependency Changes - * `ra` was upgraded to [`2.6.3`](https://github.com/rabbitmq/ra/releases) - * `osiris` was upgraded to [`1.6.2`](https://github.com/rabbitmq/osiris/tags) + * `ra` was upgraded to [`2.9.1`](https://github.com/rabbitmq/ra/releases) + * `osiris` was updated to [`1.7.2`](https://github.com/rabbitmq/osiris/releases) + * `cowboy` was updated to [`2.11.0`](https://ninenines.eu/docs/en/cowboy/2.11/guide/migrating_from_2.10/) ## Source Code Archives diff --git a/release-notes/3.13.1.md b/release-notes/3.13.1.md new file mode 100644 index 000000000000..3611a35a68ec --- /dev/null +++ b/release-notes/3.13.1.md @@ -0,0 +1,163 @@ +## RabbitMQ 3.13.1 + +RabbitMQ `3.13.1` is a maintenance release in the `3.13.x` [release series](https://www.rabbitmq.com/release-information). + +Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those +who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). + +Please refer to the upgrade section from the [3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) +if upgrading from a version prior to 3.13.0. + +This release requires Erlang 26 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/docs/which-erlang) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.13.0, RabbitMQ requires Erlang 26. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.12.x (or older releases) on Erlang 25 to 3.13.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) and [v3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) first. 
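As a concrete illustration of the `cluster_formation.aws.hostname_path` setting described in the 3.13.0 notes above, here is a hedged `rabbitmq.conf` sketch. The first line simply spells out the default; the nested key names in the commented-out example are hypothetical and only demonstrate the comma-separated traversal format.

``` ini
# default: use the top-level privateDnsName field of the AWS API response
cluster_formation.aws.hostname_path = privateDnsName

# hypothetical nested lookup: keys are comma-separated and traverse the
# response document one level at a time
# cluster_formation.aws.hostname_path = instancesSet,instance,privateDnsName
```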
+ + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.13.x/release-notes). + + +### Core Broker + +#### Bug Fixes + + * Classic queue v2 message store compaction could fall behind under high enough load, + significantly increasing the node's disk space footprint. + + GitHub issues: [#10696](https://github.com/rabbitmq/rabbitmq-server/pull/10696), [#10681](https://github.com/rabbitmq/rabbitmq-server/discussions/10681) + + * Improved quorum queue safety in mixed version clusters. + + GitHub issue: [#10664](https://github.com/rabbitmq/rabbitmq-server/pull/10664) + + * When Khepri was enabled and virtual host recovery failed, subsequent recovery + attempts also failed. + + GitHub issue: [#10742](https://github.com/rabbitmq/rabbitmq-server/pull/10742) + + * Messages published without any headers set on them did not have a header property + set on them. This was an unintentional change in behavior compared to 3.12.x. + + GitHub issues: [#10623](https://github.com/rabbitmq/rabbitmq-server/pull/10623), [#10620](https://github.com/rabbitmq/rabbitmq-server/discussions/10620) + + * Free disk space monitor on Windows ran into an exception if an external call + to `win32sysinfo.exe` timed out. + + GitHub issue: [#10597](https://github.com/rabbitmq/rabbitmq-server/issues/10597) + +#### Enhancements + + * `channel_max_per_node` is a new per-node limit that caps the number + of AMQP 0-9-1 channels that can be concurrently open by all clients connected to a node: + + ``` ini + # rabbitmq.conf + channel_max_per_node = 5000 + ``` + + This is a guardrail meant to protect nodes from [application-level channel leaks](https://www.rabbitmq.com/docs/channels#channel-leaks). + + Contributed by @illotum (AWS). + + GitHub issue: [#10754](https://github.com/rabbitmq/rabbitmq-server/pull/10754) + + +### Stream Plugin + +#### Bug Fixes + + * Avoids a Windows-specific stream log corruption that affected some deployments. + + GitHub issue: [#10822](https://github.com/rabbitmq/rabbitmq-server/pull/10822) + + * When a [super stream](https://www.rabbitmq.com/blog/2022/07/13/rabbitmq-3-11-feature-preview-super-streams) cannot be created because of a duplicate partition name, + a more informative error message is now used. + + GitHub issue: [#10535](https://github.com/rabbitmq/rabbitmq-server/issues/10535) + + +### CLI Tools + +#### Bug Fixes + + * `rabbitmq-plugins list --formatter=json --silent` will no longer emit any warnings + when some of the plugins in the [enabled plugins file](https://www.rabbitmq.com/docs/plugins#enabled-plugins-file) are missing. + + Contributed by @Ayanda-D. + + GitHub issue: [#10870](https://github.com/rabbitmq/rabbitmq-server/pull/10870) + + +### OAuth 2 Plugin + +#### Bug Fixes + + * Configuring a JWKS URL without specifying a CA certificate resulted + in an exception with Erlang 26's TLS implementation. + + GitHub issue: [#8547](https://github.com/rabbitmq/rabbitmq-server/issues/8547) + + +### Management Plugin + +#### Bug Fixes + + * Set default `sort` query parameter value for better compatibility with an external + Prometheus scraper. Note that the [built-in Prometheus plugin](https://www.rabbitmq.com/docs/prometheus) + is the recommended way of [monitoring](https://www.rabbitmq.com/docs/monitoring) RabbitMQ using Prometheus-compatible tools.
+ + GitHub issue: [#10610](https://github.com/rabbitmq/rabbitmq-server/pull/10610) + + * When a tab (Connections, Queues and Streams, etc) is switched, a table configuration pane + from the previously selected tab is now hidden. + + Contributed by @ackepenek. + + GitHub issue: [#10799](https://github.com/rabbitmq/rabbitmq-server/pull/10799) + +#### Enhancements + + * `GET /api/queues/{vhost}/{name}` now supports `enable_queue_totals` as well as `disable_stats`. + This combination of query parameters can be used to retrieve message counters while + greatly reducing the number of metrics returned by the endpoints. + + Contributed by @aaron-seo (AWS). + + GitHub issue: [#10839](https://github.com/rabbitmq/rabbitmq-server/pull/10839) + + +### Federation Plugin + +#### Enhancements + + * Exchange federation now can be configured to use a custom queue type for their internal buffers. + + To use a quorum queue, set the `queue-type` federation policy key to `quorum`. + + GitHub issues: [#4683](https://github.com/rabbitmq/rabbitmq-server/issues/4683), [#10663](https://github.com/rabbitmq/rabbitmq-server/pull/10663) + + * `rabbitmq_federation_running_link_count` is a new metric provided via Prometheus. + + GitHub issue: [#10345](https://github.com/rabbitmq/rabbitmq-server/issues/10345) + + +### Dependency Changes + + * `osiris` was updated to [`1.8.1`](https://github.com/rabbitmq/osiris/releases) + * `khepri` was upgraded to [`0.13.0`](https://github.com/rabbitmq/khepri/releases) + * `cowboy` was updated to [`2.12.0`](https://ninenines.eu/docs/en/cowboy/2.12/guide/migrating_from_2.11/) + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.13.1.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.13.2.md b/release-notes/3.13.2.md new file mode 100644 index 000000000000..4afe71b30bce --- /dev/null +++ b/release-notes/3.13.2.md @@ -0,0 +1,186 @@ +## RabbitMQ 3.13.2 + +RabbitMQ `3.13.2` is a maintenance release in the `3.13.x` [release series](https://www.rabbitmq.com/release-information). + +Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those +who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). + +Please refer to the upgrade section from the [3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) +if upgrading from a version prior to 3.13.0. + +This release requires Erlang 26 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/docs/which-erlang) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.13.0, RabbitMQ requires Erlang 26. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.12.x (or older releases) on Erlang 25 to 3.13.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) and [v3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.13.x/release-notes). 
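As a quick illustration of the `enable_queue_totals` and `disable_stats` combination described in the 3.13.1 notes above, the request below retrieves message counters for a single queue while omitting most other metrics. The credentials, host, and queue name are placeholders; the default virtual host (`/`) is URL-encoded as `%2F`.

``` shell
# placeholder credentials and queue name; returns message totals while
# skipping most per-queue statistics
curl -u guest:guest \
  'http://localhost:15672/api/queues/%2F/orders?enable_queue_totals=true&disable_stats=true'
```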
+ + +### Core Broker + +#### Bug Fixes + + * Several quorum queue WAL and segment file operations are now more resilient to certain filesystem operation failures. + + GitHub issue: [#11113](https://github.com/rabbitmq/rabbitmq-server/pull/11113) + + * Classic queues using v2 storage could run into an exception after a node restart. + + GitHub issue: [#11111](https://github.com/rabbitmq/rabbitmq-server/pull/11111) + + * Peer discovery failed in some IPv6-only environments. This behavior was new in 3.13.x. + + GitHub issue: [#10728](https://github.com/rabbitmq/rabbitmq-server/issues/10728) + + * `rabbitmqctl stop_app` is now faster, in particular for nodes that are not under significant load. + + GitHub issue: [#11075](https://github.com/rabbitmq/rabbitmq-server/pull/11075) + + * `x-death` counter was not incremented for messages that expired due to [message TTL](https://www.rabbitmq.com/docs/ttl). + This behavior was new in 3.13.x. + + GitHub issue: [#10709](https://github.com/rabbitmq/rabbitmq-server/issues/10709) + + * Quorum queue replica removal is now more resilient in clusters under close-to-peak load, + a condition that can trigger timeouts for certain operations involving multiple nodes. + + GitHub issue: [#11065](https://github.com/rabbitmq/rabbitmq-server/pull/11065) + + * `rabbitmq-server` (the shell script) now propagates the exit code from the runtime process. + + Contributed by @giner. + + GitHub issue: [#10819](https://github.com/rabbitmq/rabbitmq-server/pull/10819) + +#### Enhancements + + * [Definition import](https://www.rabbitmq.com/docs/definitions) did not handle a scenario where some virtual hosts did not have + the default queue type metadata key set. + + GitHub issue: [#10897](https://github.com/rabbitmq/rabbitmq-server/pull/10897) + + * When a virtual host is deleted, several more [internal events](https://www.rabbitmq.com/docs/logging#internal-events) are emitted: for example, + the events related to removal of user permissions and runtime parameters associated + with the virtual host. + + GitHub issue: [#11077](https://github.com/rabbitmq/rabbitmq-server/pull/11077) + + +### CLI Tools + +#### Bug Fixes + + * `rabbitmqctl list_unresponsive_queues` now supports the (queue) `type` column. + + Contributed by @aaron-seo. + + GitHub issue: [#11081](https://github.com/rabbitmq/rabbitmq-server/pull/11081) + + +### MQTT Plugin + +#### Bug Fixes + + * MQTT clients that did not configure a will (message) delay interval could run into + an exception due to an unnecessary permission check on the will target. + + GitHub issue: [#11024](https://github.com/rabbitmq/rabbitmq-server/pull/11024) + + * Messages published by MQTT clients were missing the `timestamp_in_ms` (more precise) header. + This behavior was new in 3.13.x. + + GitHub issue: [#10925](https://github.com/rabbitmq/rabbitmq-server/pull/10925) + + * Messages published using QoS 0 were unintentionally marked as durable internally. + + GitHub issue: [#11012](https://github.com/rabbitmq/rabbitmq-server/pull/11012) + + +### Management Plugin + +#### Bug Fixes + + * `GET /api/queues/{vhost}/{name}` could return duplicate keys for quorum queues. + + GitHub issue: [#10929](https://github.com/rabbitmq/rabbitmq-server/issues/10929) + + * Several endpoints responded with a 500 instead of a 404 when target virtual host + was non-existent. + + Partially contributed by @LoisSotoLopez.
+ + GitHub issue: [#10901](https://github.com/rabbitmq/rabbitmq-server/issues/10901) + + +### OAuth 2 AuthN/AuthZ Plugin + +#### Enhancements + + * The [OpenID Connect RP-Initiated Logout](https://openid.net/specs/openid-connect-rpinitiated-1_0.html) feature is now only used if the identity provider service + lists it as supported. + + GitHub issue: [#11067](https://github.com/rabbitmq/rabbitmq-server/issues/11067) + + +### Kubernetes Peer Discovery Plugin + +#### Enhancements + + * More [TLS client settings](https://www.rabbitmq.com/docs/ssl) now can be configured: + + ``` ini + cluster_formation.k8s.tls.cacertfile = /path/to/kubernetes/api/ca/certificate.pem + cluster_formation.k8s.tls.certfile = /path/to/client/tls/certificate.pem + cluster_formation.k8s.tls.keyfile = /path/to/client/tls/private_key.pem + + cluster_formation.k8s.tls.verify = verify_peer + cluster_formation.k8s.tls.fail_if_no_peer_cert = true + + ``` + + GitHub issue: [#10916](https://github.com/rabbitmq/rabbitmq-server/pull/10916) + + +### JMS Topic Exchange Plugin + +#### Enhancements + + * The plugin now stores its state on multiple nodes. + + GitHub issue: [#11091](https://github.com/rabbitmq/rabbitmq-server/pull/11091) + + +### Shovel Plugin + +#### Bug Fixes + + * Shovel metrics and internal state are now deleted when their shovel is, regardless of what node + it was hosted on and what node was targeted by the deleting (CLI or HTTP API) operation. + + GitHub issue: [#11101](https://github.com/rabbitmq/rabbitmq-server/pull/11101) + + * `rabbitmqctl list_shovels` CLI command now will list shovels running on all cluster nodes + and not just the target node. + + GitHub issue: [#11119](https://github.com/rabbitmq/rabbitmq-server/pull/11119) + + +### Dependency Changes + + * `ra` was updated to [`2.10.0`](https://github.com/rabbitmq/ra/releases) + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.13.2.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.13.3.md b/release-notes/3.13.3.md new file mode 100644 index 000000000000..d3ef85d7fd8d --- /dev/null +++ b/release-notes/3.13.3.md @@ -0,0 +1,167 @@ +## RabbitMQ 3.13.3 + +RabbitMQ `3.13.3` is a maintenance release in the `3.13.x` [release series](https://www.rabbitmq.com/release-information). + +**Please skip this release and upgrade straight to `3.13.6`** or a later version (if available). + +Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those +who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). + +Please refer to the upgrade section from the [3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) +if upgrading from a version prior to 3.13.0. + +This release requires Erlang 26 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/docs/which-erlang) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.13.0, RabbitMQ requires Erlang 26. Nodes **will fail to start** on older Erlang releases. 
+ +Users upgrading from 3.12.x (or older releases) on Erlang 25 to 3.13.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) and [v3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.13.x/release-notes). + + +### Core Broker + +#### Bug Fixes + + * Fixes an exception in classic queue message store that produced large scary looking log entries. + No data was lost as a result of the exception but clients could run into a channel error. + + GitHub issue: [#11292](https://github.com/rabbitmq/rabbitmq-server/pull/11292) + + * Corrected several 3.13-specific issues related to how the `x-death` headers are populated during [dead lettering](https://www.rabbitmq.com/docs/dlx). + + GitHub issues: [#11160](https://github.com/rabbitmq/rabbitmq-server/issues/11160), [#11159](https://github.com/rabbitmq/rabbitmq-server/issues/11159), [#11174](https://github.com/rabbitmq/rabbitmq-server/pull/11174), [#11339](https://github.com/rabbitmq/rabbitmq-server/pull/11339), [#10709](https://github.com/rabbitmq/rabbitmq-server/issues/10709), [#11331](https://github.com/rabbitmq/rabbitmq-server/issues/11331) + + * Per-virtual host queue (and stream) limit is now enforced for AMQP 1.0, MQTT, RabbitMQ Stream Protocol and STOMP as well as AMQP 0-9-1. + + Contributed by @SimonUnge. + + GitHub issue: [#11293](https://github.com/rabbitmq/rabbitmq-server/pull/11293) + + * Periodic replica reconciliation of quorum queues now reacts to node shutdown in cluster where Khepri is enabled. + + Contributed by @SimonUnge. + + GitHub issue: [#11134](https://github.com/rabbitmq/rabbitmq-server/pull/11134) + + * Declaration of an exchange of a non-existent type will now report a more suitable "precondition failed" + error to the client. + + Contributed by @carlhoerberg. + + GitHub issue: [#11215](https://github.com/rabbitmq/rabbitmq-server/pull/11215) + + * Avoids a scary looking log message during node shutdown in certain plugin configurations. + + GitHub issue: [#11323](https://github.com/rabbitmq/rabbitmq-server/pull/11323) + +#### Enhancements + + * `x-death` headers used to provide metadata about [dead-lettering](https://www.rabbitmq.com/docs/dlx) are now included + for messages consumed from a stream. + + GitHub issue: [#11173](https://github.com/rabbitmq/rabbitmq-server/issues/11173) + + * Classic queue message store recovery was optimized (peak memory footprint-wise) for cases where large (multiple MiB in size) messages + were routed to multiple queues. + + Contributed by @gomoripeti. + + GitHub issue: [#11072](https://github.com/rabbitmq/rabbitmq-server/issues/11072) + + * Besides the previously existing option of configuring default queue type per virtual host, + there is now a "global" per node default that can be set via `rabbitmq.conf`: + + ``` ini + # Changes default queue type for all clients connected to the configured node + # to quorum queues. + # This is just an example, not all queues should be quorum queues. + # See https://www.rabbitmq.com/docs/quorum-queues to learn more. + default_queue_type = quorum + ``` + + Contributed by @SimonUnge. 
+ + GitHub issue: [#11163](https://github.com/rabbitmq/rabbitmq-server/pull/11163) + + * When a virtual host process stops, fails or is restarted, a clear message will now be logged. + + GitHub issue: [#11276](https://github.com/rabbitmq/rabbitmq-server/pull/11276) + + + +### CLI Tools + +#### Bug Fixes + + * `rabbitmq-plugins list` incorrectly marked disabled plugins as "pending an upgrade". + + Partially contributed by @gomoripeti. + + GitHub issue: [#11198](https://github.com/rabbitmq/rabbitmq-server/pull/11198) + + * `rabbitmqctl check_if_any_deprecated_features_are_used` could run into an exception. + + Partially contributed by @metron2. + + GitHub issue: [#11194](https://github.com/rabbitmq/rabbitmq-server/pull/11194) + + +### Prometheus Plugin + +#### Enhancements + + * A new Prometheus-exposed metric, `rabbit_stream_segments`, indicates how many stream segment files + there are on the target node. + + Contributed by @markus812498. + + GitHub issue: [#11325](https://github.com/rabbitmq/rabbitmq-server/pull/11325) + + +### Management Plugin + +#### Bug Fixes + + * After signing out of management UI, the page was not refreshed to reflect updated login (session) status. + + GitHub issue: [#11224](https://github.com/rabbitmq/rabbitmq-server/issues/11224) + + +### Shovel Management Plugin + +#### Bug Fixes + + * `rabbitmqctl delete_shovel` is now more effective at deleting Shovels that + cannot start (for example, because they cannot connect using the configured URIs) and + repeatedly fail, get restarted, fail again, get restarted, and so on. + + GitHub issue: [#11324](https://github.com/rabbitmq/rabbitmq-server/pull/11324) + + * `fail_if_no_peer_cert`, a server-side TLS setting, was removed from Shovel URI examples. + Erlang 26's TLS implementation will refuse to accept it as a client-side setting whereas + previously it was quietly ignored. + + Contributed by @womblep. + + GitHub issue: [#11318](https://github.com/rabbitmq/rabbitmq-server/pull/11318) + + +### Dependency Changes + +None in this release. + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.13.3.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.13.4.md b/release-notes/3.13.4.md new file mode 100644 index 000000000000..cfce6641377a --- /dev/null +++ b/release-notes/3.13.4.md @@ -0,0 +1,214 @@ +## RabbitMQ 3.13.4 + +RabbitMQ `3.13.4` is a maintenance release in the `3.13.x` [release series](https://www.rabbitmq.com/release-information). + +**Please skip this release and upgrade straight to `3.13.6`** or a later version (if available). + +Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those +who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). + +Please refer to the upgrade section from the [3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) +if upgrading from a version prior to 3.13.0. + +This release requires Erlang 26 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/docs/which-erlang) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.13.0, RabbitMQ requires Erlang 26. Nodes **will fail to start** on older Erlang releases. 
+ +Users upgrading from 3.12.x (or older releases) on Erlang 25 to 3.13.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) and [v3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.13.x/release-notes). + + +### Core Broker + +#### Bug Fixes + + * A rolling upgrade from `3.12.14` to `3.13.x` could run into an exception. + + GitHub issue: [#11380](https://github.com/rabbitmq/rabbitmq-server/issues/11380) + + * When an existing virtual host was re-imported from a definitions file, + its default queue type (DQT) was cleared (reset) if that field was missing in the imported + definitions. + + Now the existing DQT is preserved. + + GitHub issue: [#11457](https://github.com/rabbitmq/rabbitmq-server/pull/11457) + + * When a queue was declared without an explicitly provided `x-queue-type` but a default + queue type (DQT) set (for its virtual host), its redeclaration did not consider + the DQT during the [property equivalence check](https://www.rabbitmq.com/docs/queues#property-equivalence) stage. + + GitHub issue: [#11541](https://github.com/rabbitmq/rabbitmq-server/pull/11541) + + * Feature flag controller could run into a deadlock in some upgrade scenarios. + + GitHub issue: [#11414](https://github.com/rabbitmq/rabbitmq-server/pull/11414) + + * In mixed `3.13.x` and `3.12.x` clusters, when a [Direct Reply-to](https://www.rabbitmq.com/docs/direct-reply-to) client (the app that initiates requests) + was connected to the `3.13` node and the server (the app that responds) was connected to the `3.12` node, + the response was lost due to a message format conversion exception. + + GitHub issue: [#11401](https://github.com/rabbitmq/rabbitmq-server/pull/11401) + +#### Enhancements + + * In some parallel cluster formation scenarios where definitions were [imported on node boot](https://www.rabbitmq.com/docs/definitions#import-on-boot), + the virtual hosts created by the import can only be started on a subset of nodes. This is so + because not all cluster peers are known at virtual host creation time. + + To reconcile (repair) this state, nodes will periodically check that all virtual hosts are initialized + on all cluster nodes. This happens every thirty seconds for the first five minutes + since node boot. As long as the cluster is fully formed within that amount of time, + all nodes will have performed initialization for all virtual hosts that exist. + + GitHub issue: [#11408](https://github.com/rabbitmq/rabbitmq-server/pull/11408) + + * Quorum queue leader replicas now initiate reconciliation (repair) of their + replicas, if there are any missing, more frequently, making quorum queues + more defensive in the case of (**highly discouraged**) [grow-then-shrink upgrades](https://www.rabbitmq.com/docs/upgrade#grow-then-shrink). + + As part of this change, the CPU cost of reconciliation was reduced, now accounting + for less than 1% of the CPU with 10K quorum queues in some test environments. + + Contributed by @SimonUnge. + + GitHub issue: [#11029](https://github.com/rabbitmq/rabbitmq-server/discussions/11029) + + * In the case where the `vhost_max` node limit is reached, the node will log specific errors + when a new virtual host is (unsuccessfully) added. 
+ + Contributed by @SimonUnge. + + GitHub issue: [#11589](https://github.com/rabbitmq/rabbitmq-server/pull/11589) + + * Elapsed time in the logs is now measured using [monotonic time](https://www.erlang.org/doc/apps/erts/time_correction.html). + + GitHub issue: [#11396](https://github.com/rabbitmq/rabbitmq-server/pull/11396) + + +### CLI Tools + +#### Bug Fixes + + * `rabbitmq-diagnostics check_if_node_is_quorum_critical` could report a false positive + when some quorum queue replicas were very recently added or very recently restarted. + + GitHub issue: [#11524](https://github.com/rabbitmq/rabbitmq-server/pull/11524) + + * `rabbitmqctl list_unresponsive_queues` ran into an exception if there were connected MQTT clients + with QoS 0 subscriptions. + + Partially contributed by @gomoripeti. + + GitHub issue: [#11434](https://github.com/rabbitmq/rabbitmq-server/issues/11434) + +#### Enhancements + + * CLI tools now can be built with Elixir 1.17.x. + + Contributed by @VlkrS. + + GitHub issue: [#11529](https://github.com/rabbitmq/rabbitmq-server/pull/11529) + + +### OAuth 2 Plugin + +#### Enhancements + + * OpenID Connect discovery endpoint now can be overridden for identity providers with + non-standard configurations. + + GitHub issue: [#11103](https://github.com/rabbitmq/rabbitmq-server/issues/11103) + + +### Management Plugin + +#### Bug Fixes + + * Virtual host metadata was not included in definition files exported via the HTTP API. + + GitHub issue: [#10515](https://github.com/rabbitmq/rabbitmq-server/issues/10515) + + * When Khepri was enabled and a majority of cluster members were down, adding a virtual host + failed with an unhelpful exception. + + GitHub issue: [#11590](https://github.com/rabbitmq/rabbitmq-server/pull/11590) + +#### Enhancements + + * When a default queue type is set on a virtual host but not for individual queues, + the exported queues will have `x-queue-type` set to the default type in the + exported definitions document. + + GitHub issue: [#10515](https://github.com/rabbitmq/rabbitmq-server/issues/10515) + + * Management UI will now display the number of cores available to the node. + + GitHub issue: [#11382](https://github.com/rabbitmq/rabbitmq-server/pull/11382) + + * OAuth 2-specific JavaScript files are now only loaded if the OAuth 2 plugin is enabled + on the node. + + GitHub issue: [#11421](https://github.com/rabbitmq/rabbitmq-server/issues/11421) + + +### HTTP AuthN/AuthZ Backend Plugin + +#### Enhancements + + * TLS-related settings, in particular related to [peer certificate chain verification](https://www.rabbitmq.com/docs/ssl#peer-verification), now can be + configured for this plugin: + + ``` ini + auth_http.ssl_options.verify = verify_none + auth_http.ssl_options.fail_if_no_peer_cert = false + ``` + + Please remember that disabling peer certificate chain verification makes the system + less secure and susceptible to [Man-in-the-Middle attacks](https://en.wikipedia.org/wiki/Man-in-the-middle_attack). + Consider enabling the verification in production systems when possible. + + GitHub issue: [#10281](https://github.com/rabbitmq/rabbitmq-server/issues/10281) + + +### etcd Peer Discovery Plugin + +#### Bug Fixes + + * The plugin failed to extract discovered node names correctly in earlier `3.13.x` + releases.
+ + GitHub issue: [#11445](https://github.com/rabbitmq/rabbitmq-server/pull/11445) + + +### Tracing Plugin + +#### Enhancements + + * `tracing.dir`, `tracing.username` and `tracing.password` are the three Tracing plugin + settings that can be set via `rabbitmq.conf`. + + GitHub issue: [#11554](https://github.com/rabbitmq/rabbitmq-server/issues/11554) + + +### Dependency Changes + + * Ra was [upgraded to `2.11.0`](https://github.com/rabbitmq/ra/releases) + * Osiris was [upgraded to `1.8.2`](https://github.com/rabbitmq/osiris/releases) + * Jose was [upgraded to `1.11.10`](https://github.com/potatosalad/erlang-jose/releases) + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.13.4.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/3.13.5.md b/release-notes/3.13.5.md new file mode 100644 index 000000000000..8c2687cc2880 --- /dev/null +++ b/release-notes/3.13.5.md @@ -0,0 +1,122 @@ +## RabbitMQ 3.13.5 + +RabbitMQ `3.13.5` is a maintenance release in the `3.13.x` [release series](https://www.rabbitmq.com/release-information). + +**Please skip this release and upgrade straight to `3.13.6`** or a later version (if available). + +Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those +who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). + +Please refer to the upgrade section from the [3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) +if upgrading from a version prior to 3.13.0. + +This release requires Erlang 26 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/docs/which-erlang) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.13.0, RabbitMQ requires Erlang 26. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.12.x (or older releases) on Erlang 25 to 3.13.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) and [v3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.13.x/release-notes). + + +### Core Broker + +#### Bug Fixes + + * Quorum queue replicas could fail to recover in certain scenarios. + + GitHub issue: [#11769](https://github.com/rabbitmq/rabbitmq-server/pull/11769) + + * Safer AMQP 0-9-1 to AMQP 1.0 (the internal message format) conversion for longer string values. + + GitHub issue: [#11737](https://github.com/rabbitmq/rabbitmq-server/pull/11737) + + * When a message that contained an `x-deaths` [dead-lettering](https://www.rabbitmq.com/docs/dlx) header was republished "as is" by a client, + the `time` field in the dead lettering events was not correctly converted for AMQP 0-9-1 clients. + + GitHub issue: [#11608](https://github.com/rabbitmq/rabbitmq-server/pull/11608) + + * [Direct Reply-to](https://www.rabbitmq.com/docs/direct-reply-to) failed with an exception when firehose tracing was enabled. 
+
+ GitHub issue: [#11666](https://github.com/rabbitmq/rabbitmq-server/pull/11666)
+
+
+### CLI Tools
+
+#### Bug Fixes
+
+ * `rabbitmqctl export_definitions` failed if the cluster contained custom federation upstream set definitions.
+
+ GitHub issue: [#11612](https://github.com/rabbitmq/rabbitmq-server/issues/11612)
+
+
+### MQTT Plugin
+
+#### Bug Fixes
+
+ * An abrupt client TCP connection closure could result in a spike in that connection's memory footprint.
+
+ GitHub issue: [#11683](https://github.com/rabbitmq/rabbitmq-server/pull/11683)
+
+
+### Shovel Plugin
+
+#### Enhancements
+
+ * Improved AMQP 1.0 to AMQP 0-9-1 conversion for shovels.
+
+ Contributed by @luos.
+
+ GitHub issue: [#10037](https://github.com/rabbitmq/rabbitmq-server/pull/10037)
+
+
+### etcd Peer Discovery Plugin
+
+#### Bug Fixes
+
+ * Nodes now register themselves before running peer discovery, reducing the probability of
+ the first (usually) two nodes to boot forming two separate initial clusters.
+
+ GitHub issues: [#11647](https://github.com/rabbitmq/rabbitmq-server/pull/11647), [#11646](https://github.com/rabbitmq/rabbitmq-server/pull/11646)
+
+
+### Consul Peer Discovery Plugin
+
+#### Bug Fixes
+
+ * Nodes now register themselves before running peer discovery, reducing the probability of
+ the first (usually) two nodes to boot forming two separate initial clusters.
+
+ GitHub issues: [#11647](https://github.com/rabbitmq/rabbitmq-server/pull/11647), [#11646](https://github.com/rabbitmq/rabbitmq-server/pull/11646)
+
+
+### AWS Peer Discovery Plugin
+
+#### Enhancements
+
+ * Forward compatibility: handle AWS API responses that use empty HTTP response bodies.
+
+ Contributed by @SimonUnge.
+
+ GitHub issue: [#11722](https://github.com/rabbitmq/rabbitmq-server/pull/11722)
+
+
+
+### Dependency Changes
+
+ * Ra was [upgraded to `2.13.3`](https://github.com/rabbitmq/ra/releases)
+
+## Source Code Archives
+
+To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.13.5.tar.xz`
+instead of the source tarball produced by GitHub.
diff --git a/release-notes/3.13.6.md b/release-notes/3.13.6.md
new file mode 100644
index 000000000000..4f47a935bc7b
--- /dev/null
+++ b/release-notes/3.13.6.md
@@ -0,0 +1,61 @@
+## RabbitMQ 3.13.6
+
+RabbitMQ `3.13.6` is a maintenance release in the `3.13.x` [release series](https://www.rabbitmq.com/release-information).
+
+This upgrade is **highly recommended** for all users currently on earlier `3.13.x` releases and
+in particular between `3.13.3` and `3.13.5`, inclusive.
+
+Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those
+who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss).
+
+Please refer to the upgrade section from the [3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0)
+if upgrading from a version prior to 3.13.0.
+
+This release requires Erlang 26 and supports Erlang versions up to `26.2.x`.
+[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/docs/which-erlang) has more details on
+Erlang version requirements for RabbitMQ.
+
+
+### Minimum Supported Erlang Version
+
+As of 3.13.0, RabbitMQ requires Erlang 26. Nodes **will fail to start** on older Erlang releases.
+
+Users upgrading from 3.12.x (or older releases) on Erlang 25 to 3.13.x on Erlang 26
+(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult
+the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) and [v3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) first.
+
+
+## Changes Worth Mentioning
+
+Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.13.x/release-notes).
+
+
+### Core Broker
+
+#### Bug Fixes
+
+ * Quorum queue validation on startup was too strict and prevented upgrades from certain older versions from succeeding.
+ This validation has been reduced from an error to a warning.
+
+ GitHub issues: [#11789](https://github.com/rabbitmq/rabbitmq-server/issues/11789), [#11794](https://github.com/rabbitmq/rabbitmq-server/pull/11794)
+
+#### Enhancements
+
+ * Stream replication port range can now be configured via `rabbitmq.conf`:
+
+ ```
+ stream.replication.port_range.min = 4000
+ stream.replication.port_range.max = 4600
+ ```
+
+ GitHub issue: [#11774](https://github.com/rabbitmq/rabbitmq-server/pull/11774)
+
+
+### Dependency Changes
+
+ * Ra was [upgraded to `2.13.5`](https://github.com/rabbitmq/ra/releases)
+
+## Source Code Archives
+
+To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.13.6.tar.xz`
+instead of the source tarball produced by GitHub.
diff --git a/release-notes/3.6.4.md b/release-notes/3.6.4.md
index f2b3c86f78bd..772cfdbfa1e1 100644
--- a/release-notes/3.6.4.md
+++ b/release-notes/3.6.4.md
@@ -1,6 +1,6 @@ ## RabbitMQ 3.6.4 -`3.6.4` is a maintanence release. +`3.6.4` is a maintenance release. ### Server
diff --git a/release-notes/3.8.10.md b/release-notes/3.8.10.md
index 547eede86ced..29b1426b80f5 100644
--- a/release-notes/3.8.10.md
+++ b/release-notes/3.8.10.md
@@ -102,7 +102,7 @@ and [RabbitMQ community Slack](https://rabbitmq-slack.herokuapp.com/). * `rabbitmq-diagnostics check_if_node_is_quorum_critical` returned a false positive for a node [marked for maintenance](https://www.rabbitmq.com/upgrade.html#maintenance-mode). Given the refinement to the `rabbitmq-upgrade drain` command in [rabbitmq/rabbitmq-server#2474](https://github.com/rabbitmq/rabbitmq-server/issues/2474), `rabbitmq-diagnostics check_if_node_is_quorum_critical` now will unconditionally return a success - if target node is under maintanence. + if target node is under maintenance. GitHub issue: [rabbitmq/rabbitmq-server#2469](https://github.com/rabbitmq/rabbitmq-server/issues/2469)
diff --git a/release-notes/3.8.3.md b/release-notes/3.8.3.md
index b45cfc652201..32f24a49a49f 100644
--- a/release-notes/3.8.3.md
+++ b/release-notes/3.8.3.md
@@ -120,7 +120,7 @@ Any questions about this release, upgrades or RabbitMQ in general are welcome on GitHub issue: [rabbitmq/rabbitmq-server#2222](https://github.com/rabbitmq/rabbitmq-server/issues/2222) * Every cluster now features a persistent internal cluster ID that can be used by core features or plugins. - Unlike the human-readable cluster name, the value cannot be overriden by the user. + Unlike the human-readable cluster name, the value cannot be overridden by the user.
GitHub issue: [rabbitmq/rabbitmq-server#2226](https://github.com/rabbitmq/rabbitmq-server/pull/2226)
diff --git a/release-notes/4.0.0.md b/release-notes/4.0.0.md
new file mode 100644
index 000000000000..ccb086831cc3
--- /dev/null
+++ b/release-notes/4.0.0.md
@@ -0,0 +1,332 @@
+## RabbitMQ 4.0.0-beta.5
+
+RabbitMQ `4.0.0-beta.5` is a preview of a new major release.
+
+Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss).
+
+## Highlights
+
+Some key improvements in this release are listed below.
+
+ * [Khepri](https://www.youtube.com/watch?v=whVqpgvep90), an [alternative schema data store](https://github.com/rabbitmq/rabbitmq-server/pull/7206) developed to replace Mnesia, has matured
+ * [AMQP 1.0 is now a core protocol](https://www.rabbitmq.com/blog/2024/08/05/native-amqp) that is always enabled. Its plugin is now a no-op that only exists to simplify upgrades.
+ * The AMQP 1.0 implementation is now significantly more efficient: its peak throughput is [more than double that of 3.13.x](https://www.rabbitmq.com/blog/2024/08/21/amqp-benchmarks)
+ on some workloads
+ * Efficient sub-linear [quorum queue recovery on node startup using checkpoints](https://github.com/rabbitmq/rabbitmq-server/pull/10637)
+ * Quorum queues now [support priorities](https://github.com/rabbitmq/rabbitmq-server/pull/10637) (but not exactly the same way as classic queues)
+ * [AMQP 1.0 clients can now manage topologies](https://github.com/rabbitmq/rabbitmq-server/pull/10559) similarly to how AMQP 0-9-1 clients do it
+ * The AMQP 1.0 convention (address format) used for interacting with AMQP 0-9-1 entities [is now easier to reason about](https://www.rabbitmq.com/docs/next/amqp#address)
+ * Mirroring (replication) of classic queues [was removed](https://github.com/rabbitmq/rabbitmq-server/pull/9815) after several years of deprecation. For replicated messaging data types,
+ use quorum queues and/or streams. Non-replicated classic queues remain and their development continues
+ * Classic queue [storage efficiency improvements](https://github.com/rabbitmq/rabbitmq-server/pull/11112), in particular recovery time and storage of multi-MiB messages
+ * Nodes with multiple enabled plugins and little on disk data to recover now [start up to 20-30% faster](https://github.com/rabbitmq/rabbitmq-server/pull/10989)
+ * New exchange type: [Local Random Exchange](https://rabbitmq.com/docs/next/local-random-exchange)
+
+See Compatibility Notes below to learn about **breaking or potentially breaking changes** in this release.
+
+## Breaking Changes and Compatibility Notes
+
+### Classic Queues Are Now a Non-Replicated Queue Type
+
+After three years of deprecation, classic queue mirroring was completely removed in this version.
+[Quorum queues](https://www.rabbitmq.com/docs/quorum-queues) and [streams](https://www.rabbitmq.com/docs/streams) are two mature
+replicated data types offered by RabbitMQ 4.x. Classic queues continue being supported without any breaking changes
+for client libraries and applications but they are now a non-replicated queue type.
+
+After an upgrade to 4.0, all classic queue mirroring-related parts of policies will have no effect.
+Classic queues will continue to work like before but with only one replica.
+
+Clients will be able to connect to any node to publish to and consume from any non-replicated classic queues.
+Therefore applications will be able to use the same classic queues as before.
+
+See [Mirrored Classic Queues Migration to Quorum Queues](https://www.rabbitmq.com/docs/migrate-mcq-to-qq) for guidance
+on how to migrate to quorum queues for the parts of the system that really need to use replication.
+
+### Quorum Queues Now Have a Default Redelivery Limit
+
+Quorum queues now have a default [redelivery limit](https://www.rabbitmq.com/docs/next/quorum-queues#poison-message-handling) set to `20`.
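+
+A minimal, hedged sketch of overriding this new default with a policy (the policy name and pattern below are
+illustrative; `delivery-limit` is the policy key referenced by the poison message handling documentation, and
+`--apply-to quorum_queues` assumes a release where per-queue-type policy targets are available):
+
+``` bash
+# hypothetical policy: raise the delivery limit to 50 for all quorum queues in the default virtual host
+rabbitmqctl set_policy --apply-to quorum_queues "delivery-limit-50" ".*" '{"delivery-limit": 50}'
+```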
+
+### CQv1 Storage Implementation was Removed
+
+CQv1, [the original classic queue storage layer, was removed](https://github.com/rabbitmq/rabbitmq-server/pull/10656)
+except for the part that's necessary for upgrades to CQv2 (the 2nd generation).
+
+In case `rabbitmq.conf` explicitly sets `classic_queue.default_version` to `1` like so
+
+``` ini
+# this configuration value is no longer supported,
+# remove this line or set the version to 2
+classic_queue.default_version = 1
+```
+
+nodes will now fail to start. Removing the line will make the node start and perform
+the migration from CQv1 to CQv2.
+
+### Settings `cluster_formation.randomized_startup_delay_range.*` were Removed
+
+The following two deprecated `rabbitmq.conf` settings were [removed](https://github.com/rabbitmq/rabbitmq-server/pull/12050):
+```
+cluster_formation.randomized_startup_delay_range.min
+cluster_formation.randomized_startup_delay_range.max
+```
+RabbitMQ 4.0 will fail to boot if these settings are configured in `rabbitmq.conf`.
+
+### Several Disk I/O-Related Metrics were Removed
+
+Several I/O-related metrics were dropped; they should be [monitored at the infrastructure and kernel layers](https://www.rabbitmq.com/docs/monitoring#system-metrics) instead.
+
+### Default Maximum Message Size Reduced to 16 MiB
+
+Default maximum message size is reduced to 16 MiB (from 128 MiB).
+
+The limit can be increased via a `rabbitmq.conf` setting:
+
+```ini
+# 32 MiB
+max_message_size = 33554432
+```
+
+However, it is recommended that such large multi-MiB messages are put into a blob store, and their
+IDs are passed around in messages instead of the entire payload.
+
+### AMQP 1.0
+
+RabbitMQ 3.13 `rabbitmq.conf` setting `rabbitmq_amqp1_0.default_vhost` is unsupported in RabbitMQ 4.0.
+
+Instead, `default_vhost` will be used to determine the default vhost an AMQP 1.0 client connects to (i.e. when the AMQP 1.0 client does not define the vhost in the `hostname` field of the `open` frame).
+
+### MQTT
+
+RabbitMQ 3.13 [rabbitmq.conf](https://www.rabbitmq.com/docs/configure#config-file) settings `mqtt.default_user`, `mqtt.default_password`,
+and `amqp1_0.default_user` are unsupported in RabbitMQ 4.0.
+
+Instead, set the new RabbitMQ 4.0 settings `anonymous_login_user` and `anonymous_login_pass` (both values default to `guest`).
+For production scenarios, [disallow anonymous logins](https://www.rabbitmq.com/docs/next/production-checklist#anonymous-login).
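+
+A minimal sketch of the replacement settings in `rabbitmq.conf` (the setting names come from the paragraph above;
+the values are hypothetical placeholders and should be replaced with real credentials, or anonymous logins should
+be disabled entirely):
+
+``` ini
+# assumption: anonymous MQTT / AMQP 1.0 logins are still desired in this environment
+anonymous_login_user = limited-anonymous-user
+anonymous_login_pass = s3kRe7-placeholder
+```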
+
+### Shovels
+
+RabbitMQ Shovels will be able to connect to a RabbitMQ 4.0 node via AMQP 1.0 only when the Shovel runs on a RabbitMQ node >= `3.13.7`.
+
+
+## Erlang/OTP Compatibility Notes
+
+This release [requires Erlang 26.2](https://www.rabbitmq.com/docs/which-erlang).
+
+[Provisioning Latest Erlang Releases](https://www.rabbitmq.com/docs/which-erlang#erlang-repositories) explains
+what package repositories and tools can be used to provision latest patch versions of Erlang 26.x.
+
+
+## Release Artifacts
+
+RabbitMQ releases are distributed via [GitHub](https://github.com/rabbitmq/rabbitmq-server/releases).
+[Debian](https://rabbitmq.com/docs/install-debian/) and [RPM packages](https://rabbitmq.com/docs/install-rpm/) are available via
+repositories maintained by the RabbitMQ Core Team.
+
+[Community Docker image](https://hub.docker.com/_/rabbitmq/), [Chocolatey package](https://community.chocolatey.org/packages/rabbitmq), and the [Homebrew formula](https://www.rabbitmq.com/docs/install-homebrew)
+are other installation options. They are updated with a delay.
+
+
+## Upgrading to 4.0
+
+### Documentation guides on upgrades
+
+See the [Upgrading guide](https://www.rabbitmq.com/docs/upgrade) for documentation on upgrades and [GitHub releases](https://github.com/rabbitmq/rabbitmq-server/releases)
+for release notes of individual releases.
+
+This release series only supports upgrades from `3.13.x`.
+
+This release requires **all feature flags** in the 3.x series (specifically `3.13.x`) to be enabled before upgrading;
+there is no upgrade path from 3.12.14 (or a later patch release) straight to `4.0.0`.
+
+### Required Feature Flags
+
+This release [graduates](https://www.rabbitmq.com/docs/feature-flags#graduation) all feature flags introduced up to `3.13.0`.
+
+All users must enable all stable [feature flags](https://www.rabbitmq.com/docs/feature-flags) before upgrading to 4.0 from
+the latest available 3.13.x patch release.
+
+### Mixed version cluster compatibility
+
+RabbitMQ 4.0.0 nodes can run alongside `3.13.x` nodes. `4.0.x`-specific features can only be made available when all nodes in the cluster
+upgrade to 4.0.0 or a later patch release in the new series.
+
+While operating in mixed version mode, some aspects of the system may not behave as expected. The list of known behavior changes will be covered in future updates.
+Once all nodes are upgraded to 4.0.0, these irregularities will go away.
+
+Mixed version clusters are a mechanism that allows rolling upgrades and are not meant to be run for extended
+periods of time (no more than a few hours).
+
+### Recommended Post-upgrade Procedures
+
+Set a low priority dead lettering policy for all quorum queues that dead letters to a stream or similar,
+so that messages that reach the new default delivery limit of 20 aren't lost completely
+when no dead lettering policy is in place.
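+
+A minimal sketch of such a fallback policy (the policy name, pattern, and exchange name below are illustrative, and
+a stream is assumed to already be bound behind the `dlx.fallback` exchange; adjust to the actual topology):
+
+``` bash
+# lowest priority (0) so that any more specific dead lettering policy takes precedence
+rabbitmqctl set_policy --apply-to quorum_queues --priority 0 "dlx-fallback" ".*" '{"dead-letter-exchange": "dlx.fallback"}'
+```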
+
+TBD
+
+## Changes Worth Mentioning
+
+This section is incomplete and will be expanded as 4.0 approaches its release candidate stage.
+
+### Core Server
+
+#### Enhancements
+
+ * Efficient sub-linear quorum queue recovery on node startup using checkpoints.
+
+ GitHub issue: [#10637](https://github.com/rabbitmq/rabbitmq-server/pull/10637)
+
+ * Classic queue storage v2 (CQv2) optimizations. For example, CQv2 recovery time on node boot
+ is now twice as fast for some data sets.
+
+ GitHub issue: [#11112](https://github.com/rabbitmq/rabbitmq-server/pull/11112)
+
+ * Node startup time improvements. For some environments, nodes with very small on disk data sets
+ now start about 25% quicker.
+
+ GitHub issue: [#10989](https://github.com/rabbitmq/rabbitmq-server/pull/10989)
+
+ * Quorum queues now support [priorities](https://www.rabbitmq.com/docs/next/quorum-queues#priorities). However,
+ there are differences in how priorities work compared to classic queues.
+
+ GitHub issue: [#10637](https://github.com/rabbitmq/rabbitmq-server/pull/10637)
+
+ * Per-message metadata stored in the quorum queue Raft log now uses less disk space.
+
+ GitHub issue: [#8261](https://github.com/rabbitmq/rabbitmq-server/issues/8261)
+
+ * Single Active Consumer (SAC) implementation of quorum queues now respects consumer priorities.
+
+ GitHub issue: [#8261](https://github.com/rabbitmq/rabbitmq-server/issues/8261)
+
+ * `rabbitmq.conf` now supports [encrypted values](https://www.rabbitmq.com/docs/next/configure#configuration-encryption)
+ with a prefix:
+
+ ``` ini
+ default_user = bunnies-444
+ default_pass = encrypted:F/bjQkteQENB4rMUXFKdgsJEpYMXYLzBY/AmcYG83Tg8AOUwYP7Oa0Q33ooNEpK9
+ ```
+
+ GitHub issue: [#11989](https://github.com/rabbitmq/rabbitmq-server/pull/11989)
+
+ * All feature flags up to `3.13.0` have [graduated](https://www.rabbitmq.com/docs/feature-flags#graduation) and are now mandatory.
+
+ GitHub issue: [#11659](https://github.com/rabbitmq/rabbitmq-server/pull/11659)
+
+ * Quorum queues now use a default [redelivery limit](https://www.rabbitmq.com/docs/next/quorum-queues#poison-message-handling) of 20.
+
+ GitHub issue: [#11937](https://github.com/rabbitmq/rabbitmq-server/pull/11937)
+
+ * `queue_master_locator` queue setting has been deprecated in favor of `queue_leader_locator` used by quorum queues
+ and streams.
+
+ GitHub issue: [#10702](https://github.com/rabbitmq/rabbitmq-server/issues/10702)
+
+
+### AMQP 1.0
+
+#### Bug Fixes
+
+ * AMQP 0-9-1 to AMQP 1.0 string data type conversion improvements.
+
+ GitHub issue: [#11715](https://github.com/rabbitmq/rabbitmq-server/pull/11715)
+
+#### Enhancements
+
+ * [AMQP 1.0 is now a core protocol](https://www.rabbitmq.com/blog/2024/08/05/native-amqp) that is always enabled.
+ Its plugin is now a no-op that only exists to simplify upgrades.
+
+ GitHub issues: [#9022](https://github.com/rabbitmq/rabbitmq-server/pull/9022), [#10662](https://github.com/rabbitmq/rabbitmq-server/pull/10662)
+
+ * The AMQP 1.0 implementation is now significantly more efficient: its peak throughput is [more than double that of 3.13.x](https://www.rabbitmq.com/blog/2024/08/21/amqp-benchmarks)
+ on some workloads.
+
+ GitHub issue: [#9022](https://github.com/rabbitmq/rabbitmq-server/pull/9022)
+
+ * For AMQP 1.0, resource alarms only block inbound `TRANSFER` frames instead of blocking all traffic.
+
+ GitHub issue: [#9022](https://github.com/rabbitmq/rabbitmq-server/pull/9022)
+
+ * AMQP 1.0 clients can now manage topologies (queues, exchanges, bindings).
+
+ GitHub issue: [#10559](https://github.com/rabbitmq/rabbitmq-server/pull/10559)
+
+ * AMQP 1.0 implementation now supports a new (v2) address format for referencing queues, exchanges, and so on.
+
+ GitHub issues: [#11604](https://github.com/rabbitmq/rabbitmq-server/pull/11604), [#11618](https://github.com/rabbitmq/rabbitmq-server/pull/11618)
+
+ * AMQP 1.0 implementation now supports consumer priorities.
+
+ GitHub issue: [#11705](https://github.com/rabbitmq/rabbitmq-server/pull/11705)
+
+ * Client-provided connection name will now be logged for AMQP 1.0 connections.
+
+ GitHub issue: [#11958](https://github.com/rabbitmq/rabbitmq-server/issues/11958)
+
+
+### Streams
+
+#### Enhancements
+
+ * Stream filtering is now supported for AMQP 1.0 clients.
+
+ GitHub issue: [#10098](https://github.com/rabbitmq/rabbitmq-server/pull/10098)
+
+
+### Prometheus Plugin
+
+#### Enhancements
+
+ * [Detailed memory breakdown](https://www.rabbitmq.com/docs/memory-use) metrics are now exposed via the Prometheus scraping endpoint.
+
+ GitHub issue: [#11743](https://github.com/rabbitmq/rabbitmq-server/issues/11743)
+
+ * New per-exchange and per-queue metrics.
+
+ Contributed by @LoisSotoLopez.
+
+ GitHub issue: [#11559](https://github.com/rabbitmq/rabbitmq-server/pull/11559)
+
+ * Shovel and Federation metrics are now available via two new plugins: `rabbitmq_shovel_prometheus` and `rabbitmq_federation_prometheus`.
+
+ Contributed by @SimonUnge.
+
+ GitHub issue: [#11942](https://github.com/rabbitmq/rabbitmq-server/pull/11942)
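+
+Assuming the standard plugin management workflow applies to these two plugins, enabling them would look like this
+(the plugin names are taken from the item above):
+
+``` bash
+rabbitmq-plugins enable rabbitmq_shovel_prometheus rabbitmq_federation_prometheus
+```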
+
+
+### Shovel Plugin
+
+#### Enhancements
+
+ * Shovels can now be configured to use pre-declared topologies. This is primarily useful in environments where
+ schema definition comes from [definitions](https://www.rabbitmq.com/docs/definitions).
+
+ GitHub issue: [#10501](https://github.com/rabbitmq/rabbitmq-server/issues/10501)
+
+
+### Local Random Exchange Plugin
+
+This is an initial release that includes [Local Random Exchange](https://www.rabbitmq.com/docs/next/local-random-exchange).
+
+GitHub issues: [#8334](https://github.com/rabbitmq/rabbitmq-server/pull/8334), [#10091](https://github.com/rabbitmq/rabbitmq-server/pull/10091).
+
+
+### STOMP Plugin
+
+#### Enhancements
+
+ * STOMP now supports consumer priorities.
+
+ GitHub issue: [#11947](https://github.com/rabbitmq/rabbitmq-server/pull/11947)
+
+
+### Dependency Changes
+
+ * Ra was [upgraded to `2.13.6`](https://github.com/rabbitmq/ra/releases)
+ * Khepri was [upgraded to `0.14.0`](https://github.com/rabbitmq/khepri/releases)
+ * Cuttlefish was [upgraded to `3.4.0`](https://github.com/Kyorai/cuttlefish/releases)
+
+## Source Code Archives
+
+To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-4.0.0-beta.5.tar.xz`
+instead of the source tarball produced by GitHub.
diff --git a/release-notes/README-1.8.1.txt b/release-notes/README-1.8.1.txt
index 382d758a362e..41b511099970 100644
--- a/release-notes/README-1.8.1.txt
+++ b/release-notes/README-1.8.1.txt
@@ -26,7 +26,7 @@ enhancements now enforces the negotiated maximum frame size - AMQP 0.9.1 guidance on error constants is now followed more closely and 0.9.1 error codes are produced in more situations - - SSL compatiblity under R14A has been improved + - SSL compatibility under R14A has been improved java client -----------
diff --git a/release-notes/README-3.4.0.txt b/release-notes/README-3.4.0.txt
index df04486cd903..7ec55913c014 100644
--- a/release-notes/README-3.4.0.txt
+++ b/release-notes/README-3.4.0.txt
@@ -218,7 +218,7 @@ dependency change 26095 drop support for Java 1.5 licencing change -24543 make the Java client additionally avaliable under the ASL2 +24543 make the Java client additionally available under the ASL2 .net client
diff --git a/scripts/bazel/rabbitmq-run.sh b/scripts/bazel/rabbitmq-run.sh
index 051bd5987606..af45cf8a239a 100755
--- a/scripts/bazel/rabbitmq-run.sh
+++ b/scripts/bazel/rabbitmq-run.sh
@@ -4,6 +4,8 @@ set -euo pipefail GREEN='\033[0;32m' NO_COLOR='\033[0m' +export PATH="{ERLANG_HOME}/bin:$PATH" + rmq_realpath() { local path=$1
diff --git a/scripts/rabbitmq-script-wrapper b/scripts/rabbitmq-script-wrapper
index da8fe252e7b0..44689ada9001 100644
--- a/scripts/rabbitmq-script-wrapper
+++ b/scripts/rabbitmq-script-wrapper
@@ -4,7 +4,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2023 Broadcom.
All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. ## SCRIPT="$(basename "$0")" diff --git a/scripts/rabbitmq-server.ocf b/scripts/rabbitmq-server.ocf index 7bf3fff36880..ffcd455e7e7f 100755 --- a/scripts/rabbitmq-server.ocf +++ b/scripts/rabbitmq-server.ocf @@ -4,7 +4,7 @@ ## License, v. 2.0. If a copy of the MPL was not distributed with this ## file, You can obtain one at https://mozilla.org/MPL/2.0/. ## -## Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved. +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. ## ## diff --git a/tools/erlang_ls.bzl b/tools/erlang_ls.bzl index c699e70fb2ec..c95dcddf1c9d 100644 --- a/tools/erlang_ls.bzl +++ b/tools/erlang_ls.bzl @@ -47,18 +47,6 @@ def _deps_symlinks(ctx): # special case symlinks for generated sources commands.append("") - commands.append(_ln_command( - target = path_join("..", "..", "..", "bazel-bin", "deps", "rabbit_common", "include", "rabbit_framing.hrl"), - source = path_join("deps", "rabbit_common", "include", "rabbit_framing.hrl"), - )) - commands.append(_ln_command( - target = path_join("..", "..", "..", "bazel-bin", "deps", "rabbit_common", "src", "rabbit_framing_amqp_0_8.erl"), - source = path_join("deps", "rabbit_common", "src", "rabbit_framing_amqp_0_8.erl"), - )) - commands.append(_ln_command( - target = path_join("..", "..", "..", "bazel-bin", "deps", "rabbit_common", "src", "rabbit_framing_amqp_0_9_1.erl"), - source = path_join("deps", "rabbit_common", "src", "rabbit_framing_amqp_0_9_1.erl"), - )) commands.append(_ln_command( target = path_join("..", "..", "..", "bazel-bin", "deps", "amqp10_common", "include", "amqp10_framing.hrl"), source = path_join("deps", "amqp10_common", "include", "amqp10_framing.hrl"), diff --git a/user-template.bazelrc b/user-template.bazelrc index 2941389554e2..3bffd5018365 100644 --- a/user-template.bazelrc +++ b/user-template.bazelrc @@ -1,17 +1,14 @@ # rabbitmqctl wait shells out to 'ps', which is broken in the bazel macOS # sandbox (https://github.com/bazelbuild/bazel/issues/7448) -# adding "--spawn_strategy=local" to the invocation is a workaround -build --spawn_strategy=local +# adding "--strategy=TestRunner=local" to the invocation is a workaround +build --strategy=TestRunner=local # --experimental_strict_action_env breaks memory size detection on macOS, # so turn it off for local runs build --noexperimental_strict_action_env -build:buildbuddy --experimental_strict_action_env # don't re-run flakes automatically on the local machine build --flaky_test_attempts=1 # write common test logs to logs/ dir build --@rules_erlang//:ct_logdir=/absolute/expanded/path/to/this/repo/logs - -build:buildbuddy --remote_header=x-buildbuddy-api-key=YOUR_API_KEY